Commit

Merge branch 'cdiddy77:main' into main
michellrodriguez08 authored May 30, 2024
2 parents da03b45 + 46d4220 commit 6454897
Showing 106 changed files with 7,385 additions and 160 deletions.
1 change: 1 addition & 0 deletions .eslintrc.js
@@ -7,6 +7,7 @@ module.exports = {
"./tsconfig.json",
"docsite/tsconfig.json",
"examples/objectdetection/tsconfig.json",
"examples/facelandmarkdetection/tsconfig.json",
],
ecmaFeatures: {
jsx: true,
41 changes: 21 additions & 20 deletions .github/workflows/ci.yml
@@ -130,35 +130,36 @@ jobs:
echo "turbo_cache_hit=1" >> $GITHUB_ENV
fi
- name: Cache cocoapods
if: env.turbo_cache_hit != 1
id: cocoapods-cache
uses: actions/cache@v3
with:
path: |
**/ios/Pods
key: ${{ runner.os }}-cocoapods-${{ hashFiles('examples/objectdetection/ios/Podfile.lock') }}
restore-keys: |
${{ runner.os }}-cocoapods-
- name: setup-cocoapods
# - name: Cache cocoapods for objectdetection
# if: env.turbo_cache_hit != 1
# id: cocoapods-cache-objectdetection
# uses: actions/cache@v3
# with:
# path: |
# **/ios/Pods
# key: ${{ runner.os }}-cocoapods-${{ hashFiles('examples/objectdetection/ios/Podfile.lock') }}
# restore-keys: |
# ${{ runner.os }}-cocoapods-

- name: setup-cocoapods-objectdetection
uses: maxim-lobanov/setup-cocoapods@v1
with:
podfile-path: examples/objectdetection/ios/Podfile.lock

- name: check-podfile-and-manifest
run: |
echo "Podfile.lock"
tail -n 3 examples/objectdetection/ios/Podfile.lock
echo "Manifest.lock"
tail -n 3 examples/objectdetection/ios/Pods/Manifest.lock

- name: setup-cocoapods-facelandmarkdetection
uses: maxim-lobanov/setup-cocoapods@v1
with:
podfile-path: examples/facelandmarkdetection/ios/Podfile.lock

- name: Install cocoapods
# disabling cache to see if that helps
# if: steps.cocoapods-cache.outputs.cache-hit != 'true'
run: |
cd examples/objectdetection/ios
pod install
cd ../../..
cd examples/facelandmarkdetection/ios
pod install
env:
NO_FLIPPER: 1

@@ -0,0 +1,79 @@
diff --git a/android/build.gradle b/android/build.gradle
index d48bc32641758c29011dc8f1992015665390e02c..87caa57ac73c47b235bd046d75c1c880be9be489 100644
--- a/android/build.gradle
+++ b/android/build.gradle
@@ -1,5 +1,6 @@
import java.nio.file.Paths
import com.android.Version
+import org.gradle.nativeplatform.platform.internal.DefaultNativePlatform

def agpVersion = Version.ANDROID_GRADLE_PLUGIN_VERSION.tokenize('.')[0].toInteger()
def androidManifestPath = agpVersion >= 7 ? 'src/main/AndroidManifest.xml' : 'src/hasNamespace/AndroidManifest.xml'
@@ -90,11 +91,27 @@ if (hasWorklets) {
def enableCodeScanner = safeExtGetBool('VisionCamera_enableCodeScanner', false)
logger.warn("[VisionCamera] VisionCamera_enableCodeScanner is set to $enableCodeScanner!")

+static def findTools(baseDir) {
+ def basePath = baseDir.toPath().normalize()
+ while (basePath) {
+ def toolsPath = Paths.get(basePath.toString(), "tools")
+ if (toolsPath.toFile().exists()) {
+ return toolsPath.toString()
+ }
+ basePath = basePath.getParent()
+ }
+ throw new GradleException("react-native-vision-camera: Failed to find tools/ path!")
+}
+
+def toolsDir = findTools(projectDir)
+
repositories {
google()
mavenCentral()
}

+def os = DefaultNativePlatform.currentOperatingSystem;
+
android {
if (agpVersion >= 7) {
namespace "com.mrousavy.camera"
@@ -131,9 +148,17 @@ android {
externalNativeBuild {
cmake {
cppFlags "-O2 -frtti -fexceptions -Wall -Wno-unused-variable -fstack-protector-all"
- arguments "-DANDROID_STL=c++_shared",
- "-DNODE_MODULES_DIR=${nodeModules}",
- "-DENABLE_FRAME_PROCESSORS=${enableFrameProcessors ? "ON" : "OFF"}"
+ if (os.isWindows()) {
+ arguments "-DANDROID_STL=c++_shared",
+ "-DNODE_MODULES_DIR=${nodeModules}",
+ "-DENABLE_FRAME_PROCESSORS=${hasWorklets ? "ON" : "OFF"}",
+ "-DCMAKE_MAKE_PROGRAM=${toolsDir}\\windows\\ninja.exe",
+ "-DCMAKE_OBJECT_PATH_MAX=1024"
+ } else {
+ arguments "-DANDROID_STL=c++_shared",
+ "-DNODE_MODULES_DIR=${nodeModules}",
+ "-DENABLE_FRAME_PROCESSORS=${hasWorklets ? "ON" : "OFF"}"
+ }
abiFilters (*reactNativeArchitectures())
}
}
diff --git a/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt b/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt
index d697befefc1a1d5b8f21ae9b2925161d03b79306..8de418b0b3196000fa09990b1658bb4196a5c4f9 100644
--- a/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt
+++ b/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt
@@ -7,12 +7,14 @@ import com.facebook.jni.HybridData
import com.facebook.proguard.annotations.DoNotStrip
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.UiThreadUtil
+import com.facebook.react.common.annotations.FrameworkAPI
import com.facebook.react.turbomodule.core.CallInvokerHolderImpl
import com.facebook.react.uimanager.UIManagerHelper
import com.mrousavy.camera.core.ViewNotFoundError
import com.mrousavy.camera.react.CameraView
import java.lang.ref.WeakReference

+@OptIn(FrameworkAPI::class)
@Suppress("KotlinJniMissingFunction") // we use fbjni.
class VisionCameraProxy(private val reactContext: ReactApplicationContext) {
companion object {
19 changes: 19 additions & 0 deletions .yarn/patches/react-native-vision-camera-patch-cc9d789186.patch
@@ -0,0 +1,19 @@
diff --git a/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt b/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt
index d697befefc1a1d5b8f21ae9b2925161d03b79306..8de418b0b3196000fa09990b1658bb4196a5c4f9 100644
--- a/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt
+++ b/android/src/main/java/com/mrousavy/camera/frameprocessors/VisionCameraProxy.kt
@@ -7,12 +7,14 @@ import com.facebook.jni.HybridData
import com.facebook.proguard.annotations.DoNotStrip
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.UiThreadUtil
+import com.facebook.react.common.annotations.FrameworkAPI
import com.facebook.react.turbomodule.core.CallInvokerHolderImpl
import com.facebook.react.uimanager.UIManagerHelper
import com.mrousavy.camera.core.ViewNotFoundError
import com.mrousavy.camera.react.CameraView
import java.lang.ref.WeakReference

+@OptIn(FrameworkAPI::class)
@Suppress("KotlinJniMissingFunction") // we use fbjni.
class VisionCameraProxy(private val reactContext: ReactApplicationContext) {
companion object {
2 changes: 1 addition & 1 deletion ReactNativeMediaPipe.podspec
@@ -16,7 +16,7 @@ Pod::Spec.new do |s|

s.source_files = "ios/**/*.{h,m,mm,swift}"

s.dependency "MediaPipeTasksVision", "0.10.5"
s.dependency "MediaPipeTasksVision", "0.10.12"
s.dependency "VisionCamera"

# Use install_modules_dependencies helper to install the dependencies if React Native version >=0.71.0.
12 changes: 10 additions & 2 deletions android/src/main/java/com/reactnativemediapipe/MediapipePackage.kt
@@ -5,18 +5,26 @@ import com.facebook.react.bridge.NativeModule
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.uimanager.ViewManager
import com.mrousavy.camera.frameprocessors.FrameProcessorPluginRegistry
import com.reactnativemediapipe.facelandmarkdetection.FaceLandmarkDetectionFrameProcessorPlugin
import com.reactnativemediapipe.facelandmarkdetection.FaceLandmarkDetectionModule
import com.reactnativemediapipe.objectdetection.ObjectDetectionFrameProcessorPlugin
import com.reactnativemediapipe.objectdetection.ObjectDetectionModule


class MediapipePackage : ReactPackage {
companion object {
init {
FrameProcessorPluginRegistry.addFrameProcessorPlugin("objectDetection") { _, _ ->
ObjectDetectionFrameProcessorPlugin()
}
FrameProcessorPluginRegistry.addFrameProcessorPlugin("faceLandmarkDetection") { _, _ ->
FaceLandmarkDetectionFrameProcessorPlugin()
}
}
} override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
return listOf(ObjectDetectionModule(reactContext))
}

override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
return listOf(ObjectDetectionModule(reactContext), FaceLandmarkDetectionModule(reactContext))
}

override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
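For context, the faceLandmarkDetection plugin registered above follows VisionCamera's frame-processor plugin contract: a class extending FrameProcessorPlugin whose callback receives each camera frame. The sketch below is only a minimal illustration of that shape, not the actual FaceLandmarkDetectionFrameProcessorPlugin added in this commit; the returned map is a placeholder.

package com.reactnativemediapipe.facelandmarkdetection

import com.mrousavy.camera.frameprocessors.Frame
import com.mrousavy.camera.frameprocessors.FrameProcessorPlugin

// Minimal sketch of a VisionCamera frame-processor plugin. The real
// FaceLandmarkDetectionFrameProcessorPlugin in this commit may differ;
// this body just echoes frame dimensions back to the JS caller.
class ExampleFaceLandmarkPlugin : FrameProcessorPlugin() {
  override fun callback(frame: Frame, arguments: Map<String, Any>?): Any? {
    return mapOf("width" to frame.width, "height" to frame.height)
  }
}
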
@@ -0,0 +1,142 @@
package com.reactnativemediapipe.facelandmarkdetection

import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.WritableArray
import com.facebook.react.bridge.WritableMap
import com.facebook.react.bridge.WritableNativeArray
import com.facebook.react.bridge.WritableNativeMap
import com.google.mediapipe.tasks.components.containers.Category
import com.google.mediapipe.tasks.components.containers.Classifications
import com.google.mediapipe.tasks.components.containers.Connection
import com.google.mediapipe.tasks.components.containers.NormalizedLandmark
import com.google.mediapipe.tasks.vision.facelandmarker.FaceLandmarkerResult
import com.mrousavy.camera.core.types.Orientation

// Converts NormalizedLandmark to WritableMap
fun normalizedLandmarkToWritableMap(landmark: NormalizedLandmark): WritableMap {
val map = WritableNativeMap()
map.putDouble("x", landmark.x().toDouble())
map.putDouble("y", landmark.y().toDouble())
map.putDouble("z", landmark.z().toDouble())
return map
}

// Converts TransformMatrix to WritableMap
fun transformMatrixToWritableMap(matrix: FloatArray): WritableMap {
val map = WritableNativeMap()
val dataArray = WritableNativeArray()

for (value in matrix) {
dataArray.pushDouble(value.toDouble())
}

map.putInt("rows", 4)
map.putInt("columns", 4)
map.putArray("data", dataArray)
return map
}

// Converts Classifications to WritableMap
fun classificationsToWritableMap(classification: Classifications): WritableMap {
val map = WritableNativeMap()
val categoriesArray = WritableNativeArray()

classification.categories().forEach { category ->
val categoryMap = WritableNativeMap()
categoryMap.putString("label", category.categoryName())
categoryMap.putDouble("score", category.score().toDouble())
categoriesArray.pushMap(categoryMap)
}

map.putInt("headIndex", classification.headIndex())
classification.headName()?.let {
map.putString("headName", it.toString())
}
map.putArray("categories", categoriesArray)
return map
}

fun categoryListToWritableMap(classification: List<Category>): WritableMap {
val map = WritableNativeMap()
val categoriesArray = WritableNativeArray()

classification.forEach { category ->
val categoryMap = WritableNativeMap()
categoryMap.putString("label", category.categoryName())
categoryMap.putDouble("score", category.score().toDouble())
categoriesArray.pushMap(categoryMap)
}

map.putArray("categories", categoriesArray)
return map
}

fun convertResultBundleToWritableMap(resultBundle: FaceLandmarkDetectorHelper.ResultBundle): WritableMap {
val map = Arguments.createMap()
val resultsArray = Arguments.createArray()
resultBundle.results.forEach { result ->
resultsArray.pushMap(faceLandmarkerResultToWritableMap(result))
}
map.putArray("results", resultsArray)
map.putInt("inputImageHeight", resultBundle.inputImageHeight)
map.putInt("inputImageWidth", resultBundle.inputImageWidth)
map.putInt("inputImageRotation", resultBundle.inputImageRotation)
map.putDouble("inferenceTime", resultBundle.inferenceTime.toDouble())
return map
}

fun faceLandmarkerResultToWritableMap(result: FaceLandmarkerResult): WritableMap {
val resultMap = WritableNativeMap()
val landmarksArray = WritableNativeArray()
val blendshapesArray = WritableNativeArray()
val matricesArray = WritableNativeArray()

result.faceLandmarks().forEach { face ->
val faceArray = WritableNativeArray()
face.forEach { landmark -> faceArray.pushMap(normalizedLandmarkToWritableMap(landmark)) }
landmarksArray.pushArray(faceArray)
}

result.faceBlendshapes().ifPresent { listOfListOfCategories ->
// Iterate over the list of list of categories
listOfListOfCategories.forEach { list ->
// Convert each list of categories to a WritableMap
val map: WritableMap = categoryListToWritableMap(list)
// Push the WritableMap to the blendshapesArray
blendshapesArray.pushMap(map)
}
}

result.facialTransformationMatrixes().ifPresent { listOfMatrices ->
listOfMatrices.forEach { matrix ->
matricesArray.pushMap(transformMatrixToWritableMap(matrix))

}
}

resultMap.putArray("faceLandmarks", landmarksArray)
resultMap.putArray("faceBlendshapes", blendshapesArray)
resultMap.putArray("facialTransformationMatrixes", matricesArray)

return resultMap
}

fun orientationToDegrees(orientation: Orientation): Int =
when (orientation) {
Orientation.PORTRAIT -> 0
Orientation.LANDSCAPE_LEFT -> 90
Orientation.PORTRAIT_UPSIDE_DOWN -> 180
Orientation.LANDSCAPE_RIGHT -> -90
}


fun connectionSetToWritableArray(connections: Set<Connection>): WritableArray {
val result = WritableNativeArray()
connections.forEach {
val map = Arguments.createMap()
map.putInt("start", it.start())
map.putInt("end", it.end())
result.pushMap(map)
}
return result
}
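
The converters above produce plain React Native WritableMaps, so results can cross to JS through any standard bridge mechanism. A hedged sketch of one option, emitting a converted result bundle as a device event; the event name and helper function below are illustrative assumptions, not part of this commit.

import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.WritableMap
import com.facebook.react.modules.core.DeviceEventManagerModule

// Illustrative only: sends a converted result map to JS as a device event.
// "onFaceLandmarkResults" is a hypothetical event name, not defined in this repo.
fun emitResults(reactContext: ReactApplicationContext, resultMap: WritableMap) {
  reactContext
    .getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
    .emit("onFaceLandmarkResults", resultMap)
}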