chore: Move everything into package/ (#1745)

* Move everything into package
* Remove .DS_Store
* Move scripts and eslintrc to package
* Create CODE_OF_CONDUCT.md
* fix some links
* Update all links (I think)
* Update generated docs
* Update notice-yarn-changes.yml
* Update validate-android.yml
* Update validate-cpp.yml
* Delete notice-yarn-changes.yml
* Update validate-cpp.yml
* Update validate-cpp.yml
* Update validate-js.yml
* Update validate-cpp.yml
* Update validate-cpp.yml
* wrong c++ style
* Revert "wrong c++ style"

This reverts commit 55a3575589c6f13f8b05134d83384f55e0601ab2.
package/android/.editorconfig (new file, 5 lines)
@@ -0,0 +1,5 @@
[*.{kt,kts}]
indent_size=2
insert_final_newline=true
max_line_length=off
disabled_rules=no-wildcard-imports
package/android/.project (new file, 17 lines)
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
	<name>android_</name>
	<comment>Project android_ created by Buildship.</comment>
	<projects>
	</projects>
	<buildSpec>
		<buildCommand>
			<name>org.eclipse.buildship.core.gradleprojectbuilder</name>
			<arguments>
			</arguments>
		</buildCommand>
	</buildSpec>
	<natures>
		<nature>org.eclipse.buildship.core.gradleprojectnature</nature>
	</natures>
</projectDescription>
package/android/.settings/org.eclipse.buildship.core.prefs (new file, 13 lines)
@@ -0,0 +1,13 @@
arguments=
auto.sync=false
build.scans.enabled=false
connection.gradle.distribution=GRADLE_DISTRIBUTION(VERSION(6.0))
connection.project.dir=
eclipse.preferences.version=1
gradle.user.home=
java.home=/Library/Java/JavaVirtualMachines/jdk1.8.0_144.jdk/Contents/Home
jvm.arguments=
offline.mode=false
override.workspace.settings=true
show.console.view=true
show.executions.view=true
package/android/CMakeLists.txt (new file, 73 lines)
@@ -0,0 +1,73 @@
project(VisionCamera)
cmake_minimum_required(VERSION 3.9.0)

set(CMAKE_VERBOSE_MAKEFILE ON)
set(PACKAGE_NAME "VisionCamera")
set(BUILD_DIR ${CMAKE_SOURCE_DIR}/build)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_CXX_STANDARD 17)

# Third party libraries (Prefabs)
find_package(ReactAndroid REQUIRED CONFIG)
find_package(fbjni REQUIRED CONFIG)
find_library(LOG_LIB log)

add_definitions(-DVISION_CAMERA_ENABLE_FRAME_PROCESSORS=${ENABLE_FRAME_PROCESSORS})


# Add react-native-vision-camera sources
add_library(
  ${PACKAGE_NAME}
  SHARED
  ../cpp/JSITypedArray.cpp
  src/main/cpp/VisionCamera.cpp
  src/main/cpp/VideoPipeline.cpp
  src/main/cpp/PassThroughShader.cpp
  src/main/cpp/OpenGLContext.cpp
  src/main/cpp/OpenGLRenderer.cpp
  # Frame Processor
  src/main/cpp/frameprocessor/FrameHostObject.cpp
  src/main/cpp/frameprocessor/FrameProcessorPluginHostObject.cpp
  src/main/cpp/frameprocessor/JSIJNIConversion.cpp
  src/main/cpp/frameprocessor/VisionCameraProxy.cpp
  src/main/cpp/frameprocessor/java-bindings/JFrame.cpp
  src/main/cpp/frameprocessor/java-bindings/JFrameProcessor.cpp
  src/main/cpp/frameprocessor/java-bindings/JFrameProcessorPlugin.cpp
  src/main/cpp/frameprocessor/java-bindings/JVisionCameraProxy.cpp
  src/main/cpp/frameprocessor/java-bindings/JVisionCameraScheduler.cpp
)

# Header Search Paths (includes)
target_include_directories(
  ${PACKAGE_NAME}
  PRIVATE
  "../cpp"
  "src/main/cpp"
  "src/main/cpp/frameprocessor"
  "src/main/cpp/frameprocessor/java-bindings"
  "${NODE_MODULES_DIR}/react-native/ReactCommon"
  "${NODE_MODULES_DIR}/react-native/ReactCommon/callinvoker"
  "${NODE_MODULES_DIR}/react-native/ReactAndroid/src/main/jni/react/turbomodule" # <-- CallInvokerHolder JNI wrapper
)

# Link everything together
target_link_libraries(
  ${PACKAGE_NAME}
  ${LOG_LIB}                    # <-- Logcat logger
  android                       # <-- Android JNI core
  ReactAndroid::jsi             # <-- RN: JSI
  ReactAndroid::reactnativejni  # <-- RN: React Native JNI bindings
  fbjni::fbjni                  # <-- fbjni
  GLESv2                        # <-- OpenGL (for VideoPipeline)
  EGL                           # <-- OpenGL (EGL) (for VideoPipeline)
)

# Optionally also add Frame Processors here
if(ENABLE_FRAME_PROCESSORS)
  find_package(react-native-worklets-core REQUIRED CONFIG)
  target_link_libraries(
    ${PACKAGE_NAME}
    react-native-worklets-core::rnworklets
  )
  message("VisionCamera: Frame Processors enabled!")
endif()
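The `add_definitions(-DVISION_CAMERA_ENABLE_FRAME_PROCESSORS=...)` call above is what lets the C++ sources compile with or without worklets support; Gradle passes `-DENABLE_FRAME_PROCESSORS=true/false`, so the macro expands to a boolean literal. A minimal sketch of how such a flag is consumed on the C++ side (the helper name is hypothetical; the real gate appears in VisionCamera.cpp further down in this diff):

```cpp
// Sketch only: frameProcessorsEnabled() is a hypothetical helper, not part of the diff.
// The macro expands to `true` or `false`, so it works both with #if and as an expression.
bool frameProcessorsEnabled() {
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
  return true; // rnworklets is linked; Frame Processor sources are compiled in
#else
  return false; // Frame Processors were disabled at build time
#endif
}
```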
package/android/README.md (new file, 26 lines)
@@ -0,0 +1,26 @@
# android

This folder contains the Android-platform-specific code for react-native-vision-camera.

## Prerequisites

1. Install ktlint
```sh
brew install ktlint
```

## Getting Started

It is recommended that you work on the code using the Example project (`example/android/`), since that always includes the React Native header files, plus you can easily test changes that way.

You can however still edit the library project here by opening this folder with Android Studio.

## Committing

Before committing, make sure that you're not violating the Kotlin codestyles. To do that, run the following command:

```bash
yarn check-android
```

This will also try to automatically fix any errors by re-formatting the Kotlin code.
package/android/build.gradle (new file, 167 lines)
@@ -0,0 +1,167 @@
import java.nio.file.Paths

buildscript {
  def kotlin_version = rootProject.ext.has('kotlinVersion') ? rootProject.ext.get('kotlinVersion') : project.properties['VisionCamera_kotlinVersion']

  repositories {
    maven {
      url "https://plugins.gradle.org/m2/"
    }
    mavenCentral()
    google()
  }

  dependencies {
    classpath "com.android.tools.build:gradle:7.4.2"
    classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
    classpath "org.jetbrains.kotlin:kotlin-android-extensions:$kotlin_version"
  }
}

def resolveBuildType() {
  Gradle gradle = getGradle()
  String tskReqStr = gradle.getStartParameter().getTaskRequests()['args'].toString()

  return tskReqStr.contains('Release') ? 'release' : 'debug'
}

def isNewArchitectureEnabled() {
  // To opt-in for the New Architecture, you can either:
  // - Set `newArchEnabled` to true inside the `gradle.properties` file
  // - Invoke gradle with `-newArchEnabled=true`
  // - Set an environment variable `ORG_GRADLE_PROJECT_newArchEnabled=true`
  return project.hasProperty("newArchEnabled") && project.newArchEnabled == "true"
}

if (isNewArchitectureEnabled()) {
  apply plugin: 'com.facebook.react'
}
apply plugin: 'com.android.library'
apply plugin: 'kotlin-android'

def safeExtGet(prop, fallback) {
  rootProject.ext.has(prop) ? rootProject.ext.get(prop) : fallback
}

def reactNativeArchitectures() {
  def value = project.getProperties().get("reactNativeArchitectures")
  return value ? value.split(",") : ["armeabi-v7a", "x86", "x86_64", "arm64-v8a"]
}

static def findNodeModules(baseDir) {
  def basePath = baseDir.toPath().normalize()
  // Node's module resolution algorithm searches up to the root directory,
  // after which the base path will be null
  while (basePath) {
    def nodeModulesPath = Paths.get(basePath.toString(), "node_modules")
    def reactNativePath = Paths.get(nodeModulesPath.toString(), "react-native")
    if (nodeModulesPath.toFile().exists() && reactNativePath.toFile().exists()) {
      return nodeModulesPath.toString()
    }
    basePath = basePath.getParent()
  }
  throw new GradleException("react-native-vision-camera: Failed to find node_modules/ path!")
}

def nodeModules = findNodeModules(projectDir)

def hasWorklets = !safeExtGet("VisionCamera_disableFrameProcessors", false) && findProject(":react-native-worklets-core") != null
logger.warn("[VisionCamera] react-native-worklets-core ${hasWorklets ? "found" : "not found"}, Frame Processors ${hasWorklets ? "enabled" : "disabled"}!")

repositories {
  google()
  mavenCentral()
}

android {
  namespace "com.mrousavy.camera.example"

  // Used to override the NDK path/version on internal CI or by allowing
  // users to customize the NDK path/version from their root project (e.g. for M1 support)
  if (rootProject.hasProperty("ndkPath")) {
    ndkPath rootProject.ext.ndkPath
  }
  if (rootProject.hasProperty("ndkVersion")) {
    ndkVersion rootProject.ext.ndkVersion
  }

  buildFeatures {
    prefab true
  }

  defaultConfig {
    minSdkVersion safeExtGet('minSdkVersion', 26)
    compileSdkVersion safeExtGet('compileSdkVersion', 33)
    targetSdkVersion safeExtGet('targetSdkVersion', 33)
    versionCode 1
    versionName "1.0"
    buildConfigField "boolean", "IS_NEW_ARCHITECTURE_ENABLED", isNewArchitectureEnabled().toString()

    externalNativeBuild {
      cmake {
        cppFlags "-O2 -frtti -fexceptions -Wall -Wno-unused-variable -fstack-protector-all"
        arguments "-DANDROID_STL=c++_shared",
          "-DNODE_MODULES_DIR=${nodeModules}",
          "-DENABLE_FRAME_PROCESSORS=${hasWorklets}"
        abiFilters (*reactNativeArchitectures())
      }
    }
  }

  compileOptions {
    sourceCompatibility JavaVersion.VERSION_1_8
    targetCompatibility JavaVersion.VERSION_1_8
  }

  externalNativeBuild {
    cmake {
      path "CMakeLists.txt"
    }
  }
  packagingOptions {
    doNotStrip resolveBuildType() == 'debug' ? "**/**/*.so" : ''
    excludes = [
      "META-INF",
      "META-INF/**",
      "**/libc++_shared.so",
      "**/libfbjni.so",
      "**/libjsi.so",
      "**/libfolly_json.so",
      "**/libfolly_runtime.so",
      "**/libglog.so",
      "**/libhermes.so",
      "**/libhermes-executor-debug.so",
      "**/libhermes_executor.so",
      "**/libreactnativejni.so",
      "**/libturbomodulejsijni.so",
      "**/libreact_nativemodule_core.so",
      "**/libjscexecutor.so"
    ]
  }
}

dependencies {
  //noinspection GradleDynamicVersion
  implementation 'com.facebook.react:react-android:+'
  implementation "org.jetbrains.kotlinx:kotlinx-coroutines-android:1.5.2"

  if (hasWorklets) {
    // Frame Processor integration (optional)
    implementation project(":react-native-worklets-core")
  }
}

// Resolves "LOCAL_SRC_FILES points to a missing file, Check that libfb.so exists or that its path is correct".
tasks.configureEach { task ->
  if (task.name.contains("configureCMakeDebug")) {
    rootProject.getTasksByName("packageReactNdkDebugLibs", true).forEach {
      task.dependsOn(it)
    }
  }
  // We want to add a dependency for both configureCMakeRelease and configureCMakeRelWithDebInfo
  if (task.name.contains("configureCMakeRel")) {
    rootProject.getTasksByName("packageReactNdkReleaseLibs", true).forEach {
      task.dependsOn(it)
    }
  }
}
package/android/gradle.properties (new file, 19 lines)
@@ -0,0 +1,19 @@
## For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
#
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
# Default value: -Xmx1024m -XX:MaxPermSize=256m
org.gradle.jvmargs=-Xms512M -Xmx4g -XX:MaxPermSize=1024m -XX:MaxMetaspaceSize=1g -Dkotlin.daemon.jvm.options="-Xmx1g"
org.gradle.parallel=true
org.gradle.daemon=true
org.gradle.configureondemand=true
#
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
#Fri Feb 19 20:46:14 CET 2021
VisionCamera_kotlinVersion=1.7.20
android.enableJetifier=true
android.useAndroidX=true
package/android/gradle/wrapper/gradle-wrapper.jar (vendored binary file, not shown)
package/android/gradle/wrapper/gradle-wrapper.properties (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
package/android/gradlew (vendored, executable file, 234 lines)
@@ -0,0 +1,234 @@
#!/bin/sh

#
# Copyright © 2015-2021 the original authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

##############################################################################
#
#   Gradle start up script for POSIX generated by Gradle.
#
#   Important for running:
#
#   (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
#       noncompliant, but you have some other compliant shell such as ksh or
#       bash, then to run this script, type that shell name before the whole
#       command line, like:
#
#           ksh Gradle
#
#       Busybox and similar reduced shells will NOT work, because this script
#       requires all of these POSIX shell features:
#         * functions;
#         * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
#           «${var#prefix}», «${var%suffix}», and «$( cmd )»;
#         * compound commands having a testable exit status, especially «case»;
#         * various built-in commands including «command», «set», and «ulimit».
#
#   Important for patching:
#
#   (2) This script targets any POSIX shell, so it avoids extensions provided
#       by Bash, Ksh, etc; in particular arrays are avoided.
#
#       The "traditional" practice of packing multiple parameters into a
#       space-separated string is a well documented source of bugs and security
#       problems, so this is (mostly) avoided, by progressively accumulating
#       options in "$@", and eventually passing that to Java.
#
#       Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
#       and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
#       see the in-line comments for details.
#
#       There are tweaks for specific operating systems such as AIX, CygWin,
#       Darwin, MinGW, and NonStop.
#
#   (3) This script is generated from the Groovy template
#       https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
#       within the Gradle project.
#
#       You can find Gradle at https://github.com/gradle/gradle/.
#
##############################################################################

# Attempt to set APP_HOME

# Resolve links: $0 may be a link
app_path=$0

# Need this for daisy-chained symlinks.
while
    APP_HOME=${app_path%"${app_path##*/}"}  # leaves a trailing /; empty if no leading path
    [ -h "$app_path" ]
do
    ls=$( ls -ld "$app_path" )
    link=${ls#*' -> '}
    case $link in             #(
      /*)   app_path=$link ;; #(
      *)    app_path=$APP_HOME$link ;;
    esac
done

APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit

APP_NAME="Gradle"
APP_BASE_NAME=${0##*/}

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum

warn () {
    echo "$*"
} >&2

die () {
    echo
    echo "$*"
    echo
    exit 1
} >&2

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "$( uname )" in                #(
  CYGWIN* )         cygwin=true  ;; #(
  Darwin* )         darwin=true  ;; #(
  MSYS* | MINGW* )  msys=true    ;; #(
  NONSTOP* )        nonstop=true ;;
esac

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar


# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD=$JAVA_HOME/jre/sh/java
    else
        JAVACMD=$JAVA_HOME/bin/java
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD=java
    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi

# Increase the maximum file descriptors if we can.
if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
    case $MAX_FD in #(
      max*)
        MAX_FD=$( ulimit -H -n ) ||
            warn "Could not query maximum file descriptor limit"
    esac
    case $MAX_FD in  #(
      '' | soft) :;; #(
      *)
        ulimit -n "$MAX_FD" ||
            warn "Could not set maximum file descriptor limit to $MAX_FD"
    esac
fi

# Collect all arguments for the java command, stacking in reverse order:
#   * args from the command line
#   * the main class name
#   * -classpath
#   * -D...appname settings
#   * --module-path (only if needed)
#   * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.

# For Cygwin or MSYS, switch paths to Windows format before running java
if "$cygwin" || "$msys" ; then
    APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
    CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )

    JAVACMD=$( cygpath --unix "$JAVACMD" )

    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    for arg do
        if
            case $arg in                                #(
              -*)   false ;;                            # don't mess with options #(
              /?*)  t=${arg#/} t=/${t%%/*}              # looks like a POSIX filepath
                    [ -e "$t" ] ;;                      #(
              *)    false ;;
            esac
        then
            arg=$( cygpath --path --ignore --mixed "$arg" )
        fi
        # Roll the args list around exactly as many times as the number of
        # args, so each arg winds up back in the position where it started, but
        # possibly modified.
        #
        # NB: a `for` loop captures its iteration list before it begins, so
        # changing the positional parameters here affects neither the number of
        # iterations, nor the values presented in `arg`.
        shift                   # remove old arg
        set -- "$@" "$arg"      # push replacement arg
    done
fi

# Collect all arguments for the java command;
#   * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of
#     shell script including quotes and variable substitutions, so put them in
#     double quotes to make sure that they get re-expanded; and
#   * put everything else in single quotes, so that it's not re-expanded.

set -- \
        "-Dorg.gradle.appname=$APP_BASE_NAME" \
        -classpath "$CLASSPATH" \
        org.gradle.wrapper.GradleWrapperMain \
        "$@"

# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
#
# In Bash we could simply go:
#
#   readarray ARGS < <( xargs -n1 <<<"$var" ) &&
#   set -- "${ARGS[@]}" "$@"
#
# but POSIX shell has neither arrays nor command substitution, so instead we
# post-process each arg (as a line of input to sed) to backslash-escape any
# character that might be a shell metacharacter, then use eval to reverse
# that process (while maintaining the separation between arguments), and wrap
# the whole thing up as a single "set" statement.
#
# This will of course break if any of these variables contains a newline or
# an unmatched quote.
#

eval "set -- $(
        printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
        xargs -n1 |
        sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
        tr '\n' ' '
    )" '"$@"'

exec "$JAVACMD" "$@"
package/android/gradlew.bat (vendored, new file, 89 lines)
@@ -0,0 +1,89 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem      https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem

@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem  Gradle startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%

@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi

@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto execute

echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto execute

echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar


@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega
package/android/settings.gradle (new file, 3 lines)
@@ -0,0 +1,3 @@
rootProject.name = 'VisionCamera'

include ':VisionCamera'
package/android/src/main/AndroidManifest.xml (new file, 4 lines)
@@ -0,0 +1,4 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
          package="com.mrousavy.camera">

</manifest>
package/android/src/main/cpp/OpenGLContext.cpp (new file, 163 lines)
@@ -0,0 +1,163 @@
//
// Created by Marc Rousavy on 29.08.23.
//

#include "OpenGLContext.h"

#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

#include <android/log.h>
#include <android/native_window.h>

#include "OpenGLError.h"

namespace vision {

std::shared_ptr<OpenGLContext> OpenGLContext::CreateWithOffscreenSurface() {
  return std::unique_ptr<OpenGLContext>(new OpenGLContext());
}

OpenGLContext::~OpenGLContext() {
  destroy();
}

void OpenGLContext::destroy() {
  if (display != EGL_NO_DISPLAY) {
    eglMakeCurrent(display, offscreenSurface, offscreenSurface, context);
    if (offscreenSurface != EGL_NO_SURFACE) {
      __android_log_print(ANDROID_LOG_INFO, TAG, "Destroying OpenGL Surface...");
      eglDestroySurface(display, offscreenSurface);
      offscreenSurface = EGL_NO_SURFACE;
    }
    if (context != EGL_NO_CONTEXT) {
      __android_log_print(ANDROID_LOG_INFO, TAG, "Destroying OpenGL Context...");
      eglDestroyContext(display, context);
      context = EGL_NO_CONTEXT;
    }
    __android_log_print(ANDROID_LOG_INFO, TAG, "Destroying OpenGL Display...");
    eglTerminate(display);
    display = EGL_NO_DISPLAY;
    config = nullptr;
  }
}

void OpenGLContext::ensureOpenGL() {
  bool successful;
  // EGLDisplay
  if (display == EGL_NO_DISPLAY) {
    __android_log_print(ANDROID_LOG_INFO, TAG, "Initializing EGLDisplay..");
    display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (display == EGL_NO_DISPLAY)
      throw OpenGLError("Failed to get default OpenGL Display!");

    EGLint major;
    EGLint minor;
    successful = eglInitialize(display, &major, &minor);
    if (!successful)
      throw OpenGLError("Failed to initialize OpenGL!");
  }

  // EGLConfig
  if (config == nullptr) {
    __android_log_print(ANDROID_LOG_INFO, TAG, "Initializing EGLConfig..");
    EGLint attributes[] = {EGL_RENDERABLE_TYPE,
                           EGL_OPENGL_ES2_BIT,
                           EGL_SURFACE_TYPE,
                           EGL_WINDOW_BIT,
                           EGL_RED_SIZE,
                           8,
                           EGL_GREEN_SIZE,
                           8,
                           EGL_BLUE_SIZE,
                           8,
                           EGL_ALPHA_SIZE,
                           8,
                           EGL_DEPTH_SIZE,
                           0,
                           EGL_STENCIL_SIZE,
                           0,
                           EGL_NONE};
    EGLint numConfigs;
    successful = eglChooseConfig(display, attributes, &config, 1, &numConfigs);
    if (!successful || numConfigs == 0)
      throw OpenGLError("Failed to choose OpenGL config!");
  }

  // EGLContext
  if (context == EGL_NO_CONTEXT) {
    __android_log_print(ANDROID_LOG_INFO, TAG, "Initializing EGLContext..");
    EGLint contextAttributes[] = {EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE};
    context = eglCreateContext(display, config, nullptr, contextAttributes);
    if (context == EGL_NO_CONTEXT)
      throw OpenGLError("Failed to create OpenGL context!");
  }

  // EGLSurface
  if (offscreenSurface == EGL_NO_SURFACE) {
    // If we don't have a surface at all
    __android_log_print(ANDROID_LOG_INFO, TAG, "Initializing 1x1 offscreen pbuffer EGLSurface..");
    EGLint attributes[] = {EGL_WIDTH, 1, EGL_HEIGHT, 1, EGL_NONE};
    offscreenSurface = eglCreatePbufferSurface(display, config, attributes);
    if (offscreenSurface == EGL_NO_SURFACE)
      throw OpenGLError("Failed to create OpenGL Surface!");
  }
}

void OpenGLContext::use() {
  this->use(offscreenSurface);
}

void OpenGLContext::use(EGLSurface surface) {
  if (surface == EGL_NO_SURFACE)
    throw OpenGLError("Cannot render to a null Surface!");

  // 1. Make sure the OpenGL context is initialized
  this->ensureOpenGL();

  // 2. Make the OpenGL context current
  bool successful = eglMakeCurrent(display, surface, surface, context);
  if (!successful || eglGetError() != EGL_SUCCESS)
    throw OpenGLError("Failed to use current OpenGL context!");

  // 3. Caller can now render to this surface
}

void OpenGLContext::flush() const {
  bool successful = eglSwapBuffers(display, eglGetCurrentSurface(EGL_DRAW));
  if (!successful || eglGetError() != EGL_SUCCESS)
    throw OpenGLError("Failed to swap OpenGL buffers!");
}

OpenGLTexture OpenGLContext::createTexture(OpenGLTexture::Type type, int width, int height) {
  // 1. Make sure the OpenGL context is initialized
  this->ensureOpenGL();

  // 2. Make the OpenGL context current
  bool successful = eglMakeCurrent(display, offscreenSurface, offscreenSurface, context);
  if (!successful || eglGetError() != EGL_SUCCESS)
    throw OpenGLError("Failed to use current OpenGL context!");

  GLuint textureId;
  glGenTextures(1, &textureId);

  GLenum target;
  switch (type) {
    case OpenGLTexture::Type::ExternalOES:
      target = GL_TEXTURE_EXTERNAL_OES;
      break;
    case OpenGLTexture::Type::Texture2D:
      target = GL_TEXTURE_2D;
      break;
    default:
      throw std::runtime_error("Invalid OpenGL Texture Type!");
  }
  glBindTexture(target, textureId);
  glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

  return {.id = textureId, .target = target, .width = width, .height = height};
}

} // namespace vision
package/android/src/main/cpp/OpenGLContext.h (new file, 73 lines)
@@ -0,0 +1,73 @@
//
// Created by Marc Rousavy on 29.08.23.
//

#pragma once

#include <EGL/egl.h>
#include <GLES2/gl2.h>

#include <functional>
#include <memory>

#include "OpenGLTexture.h"
#include "PassThroughShader.h"

namespace vision {

/**
 * An OpenGL Context that can be used to render to different surfaces.
 * By default, it creates an off-screen PixelBuffer surface.
 */
class OpenGLContext {
public:
  /**
   * Create a new instance of the OpenGLContext that draws to an off-screen PixelBuffer surface.
   * This will not perform any OpenGL operations yet, and is therefore safe to call from any Thread.
   */
  static std::shared_ptr<OpenGLContext> CreateWithOffscreenSurface();
  /**
   * Destroy the OpenGL Context. This needs to be called on the same thread that `use()` was called.
   */
  ~OpenGLContext();

  /**
   * Use this OpenGL Context to render to the given EGLSurface.
   * After the `renderFunc` returns, the default offscreen PixelBuffer surface becomes active again.
   */
  void use(EGLSurface surface);

  /**
   * Use this OpenGL Context to render to the offscreen PixelBuffer surface.
   */
  void use();

  /**
   * Flushes all drawing operations by swapping the buffers and submitting the Frame to the GPU
   */
  void flush() const;

  /**
   * Create a new texture on this context
   */
  OpenGLTexture createTexture(OpenGLTexture::Type type, int width, int height);

public:
  EGLDisplay display = EGL_NO_DISPLAY;
  EGLContext context = EGL_NO_CONTEXT;
  EGLSurface offscreenSurface = EGL_NO_SURFACE;
  EGLConfig config = nullptr;

private:
  OpenGLContext() = default;
  void destroy();
  void ensureOpenGL();

private:
  PassThroughShader _passThroughShader;

private:
  static constexpr auto TAG = "OpenGLContext";
};

} // namespace vision
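To make the lazy-initialization contract above concrete, here is a minimal usage sketch. It is not part of the diff and assumes an Android NDK target that links EGL and GLESv2, as in the CMake setup earlier; the function name is hypothetical:

```cpp
#include "OpenGLContext.h"
#include <GLES2/gl2.h>

// Hypothetical caller: render one cleared frame into the offscreen surface.
void drawOffscreenFrame() {
  // Cheap to create on any thread; EGL is only initialized on the first use().
  std::shared_ptr<vision::OpenGLContext> context =
      vision::OpenGLContext::CreateWithOffscreenSurface();

  // Allocates a GL_TEXTURE_EXTERNAL_OES texture (e.g. for SurfaceTexture input).
  OpenGLTexture input = context->createTexture(OpenGLTexture::Type::ExternalOES, 1920, 1080);

  context->use();                       // makes the 1x1 pbuffer surface current
  glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // plain GL calls are now valid
  glClear(GL_COLOR_BUFFER_BIT);
  context->flush();                     // eglSwapBuffers on the current surface
}
```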
package/android/src/main/cpp/OpenGLError.h (new file, 35 lines)
@@ -0,0 +1,35 @@
//
// Created by Marc Rousavy on 09.08.23.
//

#pragma once

#include <EGL/egl.h> // EGLint, eglGetError(), EGL_SUCCESS
#include <GLES2/gl2.h>
#include <stdexcept>
#include <string>

namespace vision {

inline std::string getEglErrorIfAny() {
  EGLint error = glGetError();
  if (error != GL_NO_ERROR)
    return " Error: " + std::to_string(error);
  error = eglGetError();
  if (error != EGL_SUCCESS)
    return " Error: " + std::to_string(error);
  return "";
}

class OpenGLError : public std::runtime_error {
public:
  explicit OpenGLError(const std::string&& message)
      : std::runtime_error(message + getEglErrorIfAny()) {}

  static inline void checkIfError(const std::string&& message) {
    auto error = getEglErrorIfAny();
    if (error.length() > 0)
      throw std::runtime_error(message + error);
  }
};

} // namespace vision
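A short sketch of the two ways this header gets used around raw GL calls; the same pattern appears in PassThroughShader.cpp below. The helper function is hypothetical:

```cpp
#include "OpenGLError.h"
#include <GLES2/gl2.h>

// Hypothetical helper showing both error-reporting styles.
GLuint createResourcesOrThrow() {
  GLuint id = 0;
  glGenTextures(1, &id);
  // Style 1: throws std::runtime_error only if glGetError()/eglGetError() flagged a failure.
  vision::OpenGLError::checkIfError("Failed to create texture!");

  GLuint shader = glCreateShader(GL_VERTEX_SHADER);
  if (shader == 0)
    // Style 2: the constructor appends " Error: <code>" to the message when available.
    throw vision::OpenGLError("Failed to create shader!");
  return id;
}
```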
package/android/src/main/cpp/OpenGLRenderer.cpp (new file, 76 lines)
@@ -0,0 +1,76 @@
//
// Created by Marc Rousavy on 29.08.23.
//

#include "OpenGLRenderer.h"

#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

#include <android/log.h>
#include <android/native_window.h>

#include <utility>

#include "OpenGLError.h"

namespace vision {

std::unique_ptr<OpenGLRenderer>
OpenGLRenderer::CreateWithWindowSurface(std::shared_ptr<OpenGLContext> context,
                                        ANativeWindow* surface) {
  return std::unique_ptr<OpenGLRenderer>(new OpenGLRenderer(std::move(context), surface));
}

OpenGLRenderer::OpenGLRenderer(std::shared_ptr<OpenGLContext> context, ANativeWindow* surface) {
  _context = std::move(context);
  _outputSurface = surface;
  _width = ANativeWindow_getWidth(surface);
  _height = ANativeWindow_getHeight(surface);
}

OpenGLRenderer::~OpenGLRenderer() {
  if (_outputSurface != nullptr) {
    ANativeWindow_release(_outputSurface);
  }
  destroy();
}

void OpenGLRenderer::destroy() {
  if (_context != nullptr && _surface != EGL_NO_SURFACE) {
    __android_log_print(ANDROID_LOG_INFO, TAG, "Destroying OpenGL Surface...");
    eglDestroySurface(_context->display, _surface);
    _surface = EGL_NO_SURFACE;
  }
}

void OpenGLRenderer::renderTextureToSurface(const OpenGLTexture& texture, float* transformMatrix) {
  if (_surface == EGL_NO_SURFACE) {
    __android_log_print(ANDROID_LOG_INFO, TAG, "Creating Window Surface...");
    _context->use();
    _surface = eglCreateWindowSurface(_context->display, _context->config, _outputSurface, nullptr);
  }

  // 1. Activate the OpenGL context for this surface
  _context->use(_surface);

  // 2. Set the viewport for rendering
  glViewport(0, 0, _width, _height);
  glDisable(GL_BLEND);

  // 3. Bind the input texture
  glBindTexture(texture.target, texture.id);
  glTexParameteri(texture.target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glTexParameteri(texture.target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  glTexParameteri(texture.target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  glTexParameteri(texture.target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

  // 4. Draw it using the pass-through shader which also applies transforms
  _passThroughShader.draw(texture, transformMatrix);

  // 5. Swap buffers to pass it to the window surface
  eglSwapBuffers(_context->display, _surface);
}

} // namespace vision
package/android/src/main/cpp/OpenGLRenderer.h (new file, 62 lines)
@@ -0,0 +1,62 @@
//
// Created by Marc Rousavy on 29.08.23.
//

#pragma once

#include "PassThroughShader.h"
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <android/native_window.h>
#include <memory>

#include "OpenGLContext.h"
#include "OpenGLTexture.h"

namespace vision {

class OpenGLRenderer {
public:
  /**
   * Create a new instance of the OpenGLRenderer that draws to an on-screen window surface.
   * This will not perform any OpenGL operations yet, and is therefore safe to call from any Thread.
   *
   * Note: The `surface` is considered moved, and the OpenGL context will release it when it is
   * being deleted.
   */
  static std::unique_ptr<OpenGLRenderer>
  CreateWithWindowSurface(std::shared_ptr<OpenGLContext> context, ANativeWindow* surface);
  /**
   * Destroy the OpenGL Context. This needs to be called on the same thread that `use()` was called.
   */
  ~OpenGLRenderer();

  /**
   * Renders the given Texture to the Surface
   */
  void renderTextureToSurface(const OpenGLTexture& texture, float* transformMatrix);

  /**
   * Destroys the OpenGL context. This needs to be called on the same thread that `use()` was
   * called. After calling `destroy()`, it is legal to call `use()` again, which will re-construct
   * everything.
   */
  void destroy();

private:
  explicit OpenGLRenderer(std::shared_ptr<OpenGLContext> context, ANativeWindow* surface);

private:
  int _width = 0, _height = 0;
  std::shared_ptr<OpenGLContext> _context;
  ANativeWindow* _outputSurface;
  EGLSurface _surface = EGL_NO_SURFACE;

private:
  PassThroughShader _passThroughShader;

private:
  static constexpr auto TAG = "OpenGLRenderer";
};

} // namespace vision
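A sketch of how OpenGLContext and OpenGLRenderer fit together; it mirrors what VideoPipeline does below. Not part of the diff: the function name is hypothetical, and `window` would come from ANativeWindow_fromSurface() on a real Java Surface:

```cpp
#include "OpenGLContext.h"
#include "OpenGLRenderer.h"
#include <android/native_window.h>

// Hypothetical per-frame entry point. In real code (see VideoPipeline.cpp below)
// the context and renderer are created once and cached, not rebuilt per frame.
void renderFrameToWindow(ANativeWindow* window, const OpenGLTexture& cameraTexture,
                         float* transformMatrix16) {
  // One shared context; one renderer per output surface.
  auto context = vision::OpenGLContext::CreateWithOffscreenSurface();
  auto renderer = vision::OpenGLRenderer::CreateWithWindowSurface(context, window);

  // Lazily creates the window EGLSurface, makes it current, draws the texture
  // with the pass-through shader, then swaps buffers.
  renderer->renderTextureToSurface(cameraTexture, transformMatrix16);
}
```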
package/android/src/main/cpp/OpenGLTexture.h (new file, 22 lines)
@@ -0,0 +1,22 @@
//
// Created by Marc Rousavy on 30.08.23.
//

#pragma once

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <stdexcept>

struct OpenGLTexture {
  enum Type { Texture2D, ExternalOES };

  // The ID of the texture as returned in glGenTextures(..)
  GLuint id;
  // GL_TEXTURE_2D or GL_TEXTURE_EXTERNAL_OES
  GLenum target;

  // Width and height of the texture
  int width = 0;
  int height = 0;
};
package/android/src/main/cpp/PassThroughShader.cpp (new file, 111 lines)
@@ -0,0 +1,111 @@
//
// Created by Marc Rousavy on 28.08.23.
//

#include "PassThroughShader.h"
#include "OpenGLError.h"
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <memory>
#include <string>

namespace vision {

PassThroughShader::~PassThroughShader() {
  if (_programId != NO_SHADER) {
    glDeleteProgram(_programId);
    _programId = NO_SHADER;
  }

  if (_vertexBuffer != NO_BUFFER) {
    glDeleteBuffers(1, &_vertexBuffer);
    _vertexBuffer = NO_BUFFER;
  }
}

void PassThroughShader::draw(const OpenGLTexture& texture, float* transformMatrix) {
  // 1. Set up Shader Program
  if (_programId == NO_SHADER) {
    _programId = createProgram();
    glUseProgram(_programId);
    _vertexParameters = {
        .aPosition = glGetAttribLocation(_programId, "aPosition"),
        .aTexCoord = glGetAttribLocation(_programId, "aTexCoord"),
        .uTransformMatrix = glGetUniformLocation(_programId, "uTransformMatrix"),
    };
    _fragmentParameters = {
        .uTexture = glGetUniformLocation(_programId, "uTexture"),
    };
  }

  glUseProgram(_programId);

  // 2. Set up Vertices Buffer
  if (_vertexBuffer == NO_BUFFER) {
    glGenBuffers(1, &_vertexBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(VERTICES), VERTICES, GL_STATIC_DRAW);
  }

  // 3. Pass all uniforms/attributes for vertex shader
  glEnableVertexAttribArray(_vertexParameters.aPosition);
  glVertexAttribPointer(_vertexParameters.aPosition, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                        reinterpret_cast<void*>(offsetof(Vertex, position)));

  glEnableVertexAttribArray(_vertexParameters.aTexCoord);
  glVertexAttribPointer(_vertexParameters.aTexCoord, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                        reinterpret_cast<void*>(offsetof(Vertex, texCoord)));

  glUniformMatrix4fv(_vertexParameters.uTransformMatrix, 1, GL_FALSE, transformMatrix);

  // 4. Pass texture to fragment shader
  glActiveTexture(GL_TEXTURE0);
  glBindTexture(texture.target, texture.id);
  glUniform1i(_fragmentParameters.uTexture, 0);

  // 5. Draw!
  glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}

GLuint PassThroughShader::loadShader(GLenum shaderType, const char* shaderCode) {
  GLuint shader = glCreateShader(shaderType);
  if (shader == 0)
    throw OpenGLError("Failed to load shader!");

  glShaderSource(shader, 1, &shaderCode, nullptr);
  glCompileShader(shader);
  GLint compileStatus = GL_FALSE;
  glGetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
  if (compileStatus == GL_FALSE) {
    glDeleteShader(shader);
    throw OpenGLError("Failed to compile shader!");
  }
  return shader;
}

GLuint PassThroughShader::createProgram() {
  GLuint vertexShader = loadShader(GL_VERTEX_SHADER, VERTEX_SHADER);
  GLuint fragmentShader = loadShader(GL_FRAGMENT_SHADER, FRAGMENT_SHADER);

  GLuint program = glCreateProgram();
  if (program == 0)
    throw OpenGLError("Failed to create pass-through program!");

  glAttachShader(program, vertexShader);
  OpenGLError::checkIfError("Failed to attach Vertex Shader!");

  glAttachShader(program, fragmentShader);
  OpenGLError::checkIfError("Failed to attach Fragment Shader!");

  glLinkProgram(program);
  GLint linkStatus = GL_FALSE;
  glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
  if (!linkStatus) {
    glDeleteProgram(program);
    throw OpenGLError("Failed to load pass-through program!");
  }
  return program;
}

} // namespace vision
package/android/src/main/cpp/PassThroughShader.h (new file, 84 lines)
@@ -0,0 +1,84 @@
//
// Created by Marc Rousavy on 28.08.23.
//

#pragma once

#include <EGL/egl.h>
#include <GLES2/gl2.h>

#include "OpenGLTexture.h"

namespace vision {

#define NO_SHADER 0
#define NO_POSITION 0
#define NO_BUFFER 0

struct Vertex {
  GLfloat position[2];
  GLfloat texCoord[2];
};

class PassThroughShader {
public:
  PassThroughShader() = default;
  ~PassThroughShader();

  /**
   * Draw the texture using this shader.
   * Note: At the moment, only EXTERNAL textures are supported by the Shader.
   */
  void draw(const OpenGLTexture& texture, float* transformMatrix);

private:
  // Loading
  static GLuint loadShader(GLenum shaderType, const char* shaderCode);
  static GLuint createProgram();

private:
  // Parameters
  GLuint _programId = NO_SHADER;
  GLuint _vertexBuffer = NO_BUFFER;
  struct VertexParameters {
    GLint aPosition = NO_POSITION;
    GLint aTexCoord = NO_POSITION;
    GLint uTransformMatrix = NO_POSITION;
  } _vertexParameters;
  struct FragmentParameters {
    GLint uTexture = NO_POSITION;
  } _fragmentParameters;

private:
  // Statics
  static constexpr Vertex VERTICES[] = {
      {{-1.0f, -1.0f}, {0.0f, 0.0f}}, // bottom-left
      {{1.0f, -1.0f}, {1.0f, 0.0f}},  // bottom-right
      {{-1.0f, 1.0f}, {0.0f, 1.0f}},  // top-left
      {{1.0f, 1.0f}, {1.0f, 1.0f}}    // top-right
  };

  static constexpr char VERTEX_SHADER[] = R"(
    attribute vec4 aPosition;
    attribute vec2 aTexCoord;
    uniform mat4 uTransformMatrix;
    varying vec2 vTexCoord;

    void main() {
      gl_Position = aPosition;
      vTexCoord = (uTransformMatrix * vec4(aTexCoord, 0.0, 1.0)).xy;
    }
  )";
  static constexpr char FRAGMENT_SHADER[] = R"(
    #extension GL_OES_EGL_image_external : require
    precision mediump float;
    varying vec2 vTexCoord;
    uniform samplerExternalOES uTexture;

    void main() {
      gl_FragColor = texture2D(uTexture, vTexCoord);
    }
  )";
};

} // namespace vision
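One detail worth calling out: the four VERTICES above are ordered for GL_TRIANGLE_STRIP, so the single draw call in PassThroughShader::draw() covers the whole clip-space quad:

```cpp
// With GL_TRIANGLE_STRIP, vertices {0,1,2} and {1,2,3} form the two triangles
//   (bottom-left, bottom-right, top-left) and (bottom-right, top-left, top-right),
// which together tile the full [-1, 1] x [-1, 1] clip-space quad.
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); // the draw call used in PassThroughShader::draw()
```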
package/android/src/main/cpp/VideoPipeline.cpp (new file, 125 lines)
@@ -0,0 +1,125 @@
//
// Created by Marc Rousavy on 25.08.23.
//

#include "VideoPipeline.h"
#include "OpenGLError.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <android/native_window_jni.h>

#include <chrono>

#include "JFrameProcessor.h"
#include "OpenGLTexture.h"

namespace vision {

jni::local_ref<VideoPipeline::jhybriddata>
VideoPipeline::initHybrid(jni::alias_ref<jhybridobject> jThis, int width, int height) {
  return makeCxxInstance(jThis, width, height);
}

VideoPipeline::VideoPipeline(jni::alias_ref<jhybridobject> jThis, int width, int height)
    : _javaPart(jni::make_global(jThis)) {
  _width = width;
  _height = height;
  _context = OpenGLContext::CreateWithOffscreenSurface();
}

VideoPipeline::~VideoPipeline() {
  // 1. Remove output surfaces
  removeFrameProcessorOutputSurface();
  removeRecordingSessionOutputSurface();
  // 2. Delete the input textures
  if (_inputTexture != std::nullopt) {
    glDeleteTextures(1, &_inputTexture->id);
    _inputTexture = std::nullopt;
  }
  // 3. Destroy the OpenGL context
  _context = nullptr;
}

void VideoPipeline::removeFrameProcessorOutputSurface() {
  if (_frameProcessorOutput)
    _frameProcessorOutput->destroy();
  _frameProcessorOutput = nullptr;
}

void VideoPipeline::setFrameProcessorOutputSurface(jobject surface) {
  // 1. Delete existing output surface
  removeFrameProcessorOutputSurface();

  // 2. Set new output surface if it is not null
  ANativeWindow* window = ANativeWindow_fromSurface(jni::Environment::current(), surface);
  _frameProcessorOutput = OpenGLRenderer::CreateWithWindowSurface(_context, window);
}

void VideoPipeline::removeRecordingSessionOutputSurface() {
  if (_recordingSessionOutput)
    _recordingSessionOutput->destroy();
  _recordingSessionOutput = nullptr;
}

void VideoPipeline::setRecordingSessionOutputSurface(jobject surface) {
  // 1. Delete existing output surface
  removeRecordingSessionOutputSurface();

  // 2. Set new output surface if it is not null
  ANativeWindow* window = ANativeWindow_fromSurface(jni::Environment::current(), surface);
  _recordingSessionOutput = OpenGLRenderer::CreateWithWindowSurface(_context, window);
}

int VideoPipeline::getInputTextureId() {
  if (_inputTexture == std::nullopt) {
    _inputTexture = _context->createTexture(OpenGLTexture::Type::ExternalOES, _width, _height);
  }

  return static_cast<int>(_inputTexture->id);
}

void VideoPipeline::onBeforeFrame() {
  _context->use();

  glBindTexture(_inputTexture->target, _inputTexture->id);
}

void VideoPipeline::onFrame(jni::alias_ref<jni::JArrayFloat> transformMatrixParam) {
  // Get the OpenGL transform Matrix (transforms, scales, rotations)
  float transformMatrix[16];
  transformMatrixParam->getRegion(0, 16, transformMatrix);

  OpenGLTexture& texture = _inputTexture.value();

  if (_frameProcessorOutput) {
    __android_log_print(ANDROID_LOG_INFO, TAG, "Rendering to FrameProcessor..");
    _frameProcessorOutput->renderTextureToSurface(texture, transformMatrix);
  }
  if (_recordingSessionOutput) {
    __android_log_print(ANDROID_LOG_INFO, TAG, "Rendering to RecordingSession..");
    _recordingSessionOutput->renderTextureToSurface(texture, transformMatrix);
  }
}

void VideoPipeline::registerNatives() {
  registerHybrid({
      makeNativeMethod("initHybrid", VideoPipeline::initHybrid),
      makeNativeMethod("setFrameProcessorOutputSurface",
                       VideoPipeline::setFrameProcessorOutputSurface),
      makeNativeMethod("removeFrameProcessorOutputSurface",
                       VideoPipeline::removeFrameProcessorOutputSurface),
      makeNativeMethod("setRecordingSessionOutputSurface",
                       VideoPipeline::setRecordingSessionOutputSurface),
      makeNativeMethod("removeRecordingSessionOutputSurface",
                       VideoPipeline::removeRecordingSessionOutputSurface),
      makeNativeMethod("getInputTextureId", VideoPipeline::getInputTextureId),
      makeNativeMethod("onBeforeFrame", VideoPipeline::onBeforeFrame),
      makeNativeMethod("onFrame", VideoPipeline::onFrame),
  });
}

} // namespace vision
package/android/src/main/cpp/VideoPipeline.h (new file, 67 lines)
@@ -0,0 +1,67 @@
//
// Created by Marc Rousavy on 25.08.23.
//

#pragma once

#include "OpenGLContext.h"
#include "OpenGLRenderer.h"
#include "PassThroughShader.h"
#include <EGL/egl.h>
#include <android/native_window.h>
#include <fbjni/fbjni.h>
#include <jni.h>
#include <memory>
#include <optional>

namespace vision {

using namespace facebook;

class VideoPipeline : public jni::HybridClass<VideoPipeline> {
public:
  static auto constexpr kJavaDescriptor = "Lcom/mrousavy/camera/core/VideoPipeline;";
  static jni::local_ref<jhybriddata> initHybrid(jni::alias_ref<jhybridobject> jThis, int width,
                                                int height);
  static void registerNatives();

public:
  ~VideoPipeline();

  // -> SurfaceTexture input
  int getInputTextureId();

  // <- Frame Processor output
  void setFrameProcessorOutputSurface(jobject surface);
  void removeFrameProcessorOutputSurface();

  // <- MediaRecorder output
  void setRecordingSessionOutputSurface(jobject surface);
  void removeRecordingSessionOutputSurface();

  // Frame callbacks
  void onBeforeFrame();
  void onFrame(jni::alias_ref<jni::JArrayFloat> transformMatrix);

private:
  // Private constructor. Use `create(..)` to create new instances.
  explicit VideoPipeline(jni::alias_ref<jhybridobject> jThis, int width, int height);

private:
  // Input Surface Texture
  std::optional<OpenGLTexture> _inputTexture = std::nullopt;
  int _width = 0;
  int _height = 0;

  // Output Contexts
  std::shared_ptr<OpenGLContext> _context = nullptr;
  std::unique_ptr<OpenGLRenderer> _frameProcessorOutput = nullptr;
  std::unique_ptr<OpenGLRenderer> _recordingSessionOutput = nullptr;

private:
  friend HybridBase;
  jni::global_ref<javaobject> _javaPart;
  static constexpr auto TAG = "VideoPipeline";
};

} // namespace vision
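Reading the two VideoPipeline files together, the per-frame call order (driven from the Java side via the native methods registered in registerNatives()) is roughly the following sketch. The Java caller details are assumptions, since the Kotlin side is not part of this diff excerpt:

```cpp
// Hypothetical frame loop, shown from the C++ side only:
//   int texId = pipeline->getInputTextureId(); // 1. lazily creates the ExternalOES input texture
//   // Java side (assumed): SurfaceTexture.attachToGLContext(texId), then
//   // surfaceTexture.updateTexImage() once per camera frame.
//   pipeline->onBeforeFrame();                 // 2. makes the context current, binds the texture
//   pipeline->onFrame(transformMatrix);        // 3. draws into the FrameProcessor and/or
//                                              //    RecordingSession window surfaces
```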
package/android/src/main/cpp/VisionCamera.cpp (new file, 19 lines)
@@ -0,0 +1,19 @@
#include "JFrameProcessor.h"
#include "JVisionCameraProxy.h"
#include "JVisionCameraScheduler.h"
#include "VideoPipeline.h"
#include "VisionCameraProxy.h"
#include <fbjni/fbjni.h>
#include <jni.h>

JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void*) {
  return facebook::jni::initialize(vm, [] {
    vision::VisionCameraInstaller::registerNatives();
    vision::JVisionCameraProxy::registerNatives();
    vision::JVisionCameraScheduler::registerNatives();
    vision::VideoPipeline::registerNatives();
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
    vision::JFrameProcessor::registerNatives();
#endif
  });
}
158
package/android/src/main/cpp/frameprocessor/FrameHostObject.cpp
Normal file
158
package/android/src/main/cpp/frameprocessor/FrameHostObject.cpp
Normal file
@@ -0,0 +1,158 @@
//
// Created by Marc on 19/06/2021.
//

#include "FrameHostObject.h"

#include <fbjni/fbjni.h>
#include <jni.h>

#include "JSITypedArray.h"

#include <string>
#include <vector>

namespace vision {

using namespace facebook;

FrameHostObject::FrameHostObject(const jni::alias_ref<JFrame::javaobject>& frame)
    : frame(make_global(frame)) {}

FrameHostObject::~FrameHostObject() {
  // Hermes' Garbage Collector (Hades GC) calls destructors on a separate Thread
  // which might not be attached to JNI. Ensure that we use the JNI class loader when
  // deallocating the `frame` HybridClass, because otherwise JNI cannot call the Java
  // destroy() function.
  jni::ThreadScope::WithClassLoader([&] { frame.reset(); });
}

std::vector<jsi::PropNameID> FrameHostObject::getPropertyNames(jsi::Runtime& rt) {
  std::vector<jsi::PropNameID> result;
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("width")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("height")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("bytesPerRow")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("planesCount")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("orientation")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("isMirrored")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("timestamp")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("pixelFormat")));
  // Conversion
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("toString")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("toArrayBuffer")));
  // Ref Management
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("isValid")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("incrementRefCount")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("decrementRefCount")));
  return result;
}

jsi::Value FrameHostObject::get(jsi::Runtime& runtime, const jsi::PropNameID& propName) {
  auto name = propName.utf8(runtime);

  if (name == "incrementRefCount") {
    jsi::HostFunctionType incrementRefCount = [=](jsi::Runtime& runtime, const jsi::Value& thisArg,
                                                  const jsi::Value* args,
                                                  size_t count) -> jsi::Value {
      // Increment retain count by one.
      this->frame->incrementRefCount();
      return jsi::Value::undefined();
    };
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "incrementRefCount"), 0, incrementRefCount);
  }
  if (name == "decrementRefCount") {
    auto decrementRefCount = [=](jsi::Runtime& runtime, const jsi::Value& thisArg,
                                 const jsi::Value* args, size_t count) -> jsi::Value {
      // Decrement retain count by one. If the retain count is zero, the Frame gets closed.
      this->frame->decrementRefCount();
      return jsi::Value::undefined();
    };
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "decrementRefCount"), 0, decrementRefCount);
  }
  if (name == "toString") {
    jsi::HostFunctionType toString = [=](jsi::Runtime& runtime, const jsi::Value& thisArg,
                                         const jsi::Value* args, size_t count) -> jsi::Value {
      if (!this->frame) {
        return jsi::String::createFromUtf8(runtime, "[closed frame]");
      }
      auto width = this->frame->getWidth();
      auto height = this->frame->getHeight();
      auto str = std::to_string(width) + " x " + std::to_string(height) + " Frame";
      return jsi::String::createFromUtf8(runtime, str);
    };
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "toString"), 0, toString);
  }
  if (name == "toArrayBuffer") {
    jsi::HostFunctionType toArrayBuffer = [=](jsi::Runtime& runtime, const jsi::Value& thisArg,
                                              const jsi::Value* args, size_t count) -> jsi::Value {
      auto buffer = this->frame->toByteBuffer();
      if (!buffer->isDirect()) {
        throw std::runtime_error(
            "Failed to get byte content of Frame - array is not direct ByteBuffer!");
      }
      auto size = buffer->getDirectSize();

      static constexpr auto ARRAYBUFFER_CACHE_PROP_NAME = "__frameArrayBufferCache";
      if (!runtime.global().hasProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME)) {
        vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray> arrayBuffer(runtime, size);
        runtime.global().setProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME, arrayBuffer);
      }

      // Get from global JS cache
      auto arrayBufferCache =
          runtime.global().getPropertyAsObject(runtime, ARRAYBUFFER_CACHE_PROP_NAME);
      auto arrayBuffer = vision::getTypedArray(runtime, arrayBufferCache)
                             .get<vision::TypedArrayKind::Uint8ClampedArray>(runtime);
      if (arrayBuffer.size(runtime) != size) {
        arrayBuffer = vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray>(runtime, size);
        runtime.global().setProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME, arrayBuffer);
      }

      // directly write to C++ JSI ArrayBuffer
      auto destinationBuffer = arrayBuffer.data(runtime);
      memcpy(destinationBuffer, buffer->getDirectAddress(), sizeof(uint8_t) * size);

      return arrayBuffer;
    };
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "toArrayBuffer"), 0, toArrayBuffer);
  }

  if (name == "isValid") {
    return jsi::Value(this->frame && this->frame->getIsValid());
  }
  if (name == "width") {
    return jsi::Value(this->frame->getWidth());
  }
  if (name == "height") {
    return jsi::Value(this->frame->getHeight());
  }
  if (name == "isMirrored") {
    return jsi::Value(this->frame->getIsMirrored());
  }
  if (name == "orientation") {
    auto string = this->frame->getOrientation();
    return jsi::String::createFromUtf8(runtime, string->toStdString());
  }
  if (name == "pixelFormat") {
    auto string = this->frame->getPixelFormat();
    return jsi::String::createFromUtf8(runtime, string->toStdString());
  }
  if (name == "timestamp") {
    return jsi::Value(static_cast<double>(this->frame->getTimestamp()));
  }
  if (name == "bytesPerRow") {
    return jsi::Value(this->frame->getBytesPerRow());
  }
  if (name == "planesCount") {
    return jsi::Value(this->frame->getPlanesCount());
  }

  // fallback to base implementation
  return HostObject::get(runtime, propName);
}

} // namespace vision
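The toArrayBuffer implementation above reuses a single Uint8ClampedArray stored on the JS global object, so per-frame copies don't allocate a fresh buffer. A condensed sketch of that caching pattern, using the same vendored JSITypedArray helpers as above (the property name and the helper function getReusableBuffer are illustrative, not part of this commit):

// Returns a cached Uint8ClampedArray stored on the JS global, re-allocating
// only when the requested size changes - a sketch of the pattern used above.
static vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray>
getReusableBuffer(jsi::Runtime& runtime, size_t size) {
  static constexpr auto CACHE_PROP = "__exampleArrayBufferCache"; // illustrative name
  if (!runtime.global().hasProperty(runtime, CACHE_PROP)) {
    vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray> buffer(runtime, size);
    runtime.global().setProperty(runtime, CACHE_PROP, buffer);
  }
  auto cached = runtime.global().getPropertyAsObject(runtime, CACHE_PROP);
  auto buffer = vision::getTypedArray(runtime, cached)
                    .get<vision::TypedArrayKind::Uint8ClampedArray>(runtime);
  if (buffer.size(runtime) != size) {
    // Size changed (e.g. a camera format switch) - allocate a fresh buffer.
    buffer = vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray>(runtime, size);
    runtime.global().setProperty(runtime, CACHE_PROP, buffer);
  }
  return buffer;
}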
@@ -0,0 +1,32 @@
//
// Created by Marc on 19/06/2021.
//

#pragma once

#include <fbjni/fbjni.h>
#include <jni.h>
#include <jsi/jsi.h>
#include <string>
#include <vector>

#include "JFrame.h"

namespace vision {

using namespace facebook;

class JSI_EXPORT FrameHostObject : public jsi::HostObject {
 public:
  explicit FrameHostObject(const jni::alias_ref<JFrame::javaobject>& frame);
  ~FrameHostObject();

 public:
  jsi::Value get(jsi::Runtime&, const jsi::PropNameID& name) override;
  std::vector<jsi::PropNameID> getPropertyNames(jsi::Runtime& rt) override;

 public:
  jni::global_ref<JFrame> frame;
};

} // namespace vision
@@ -0,0 +1,54 @@
//
// Created by Marc Rousavy on 21.07.23.
//

#include "FrameProcessorPluginHostObject.h"
#include "FrameHostObject.h"
#include "JSIJNIConversion.h"
#include <string>
#include <vector>

namespace vision {

using namespace facebook;

std::vector<jsi::PropNameID>
FrameProcessorPluginHostObject::getPropertyNames(jsi::Runtime& runtime) {
  std::vector<jsi::PropNameID> result;
  result.push_back(jsi::PropNameID::forUtf8(runtime, std::string("call")));
  return result;
}

jsi::Value FrameProcessorPluginHostObject::get(jsi::Runtime& runtime,
                                               const jsi::PropNameID& propName) {
  auto name = propName.utf8(runtime);

  if (name == "call") {
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "call"), 2,
        [=](jsi::Runtime& runtime, const jsi::Value& thisValue, const jsi::Value* arguments,
            size_t count) -> jsi::Value {
          // Frame is first argument
          auto frameHostObject =
              arguments[0].asObject(runtime).asHostObject<FrameHostObject>(runtime);
          auto frame = frameHostObject->frame;

          // Options are second argument (possibly undefined)
          local_ref<JMap<jstring, jobject>> options = nullptr;
          if (count > 1) {
            options =
                JSIJNIConversion::convertJSIObjectToJNIMap(runtime, arguments[1].asObject(runtime));
          }

          // Call actual plugin
          auto result = _plugin->callback(frame, options);

          // Convert result value to jsi::Value (possibly undefined)
          return JSIJNIConversion::convertJNIObjectToJSIValue(runtime, result);
        });
  }

  return jsi::Value::undefined();
}

} // namespace vision
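Once a FrameProcessorPluginHostObject is exposed to JS, invoking the plugin goes through its "call" property. A hedged C++ sketch of what that invocation looks like when driven from the native side (pluginObject and frameObject are assumed to already be valid jsi::Objects, and the "minConfidence" option is purely illustrative):

// Look up the "call" host function on the plugin object and invoke it with a
// Frame and an options object - the same thing `plugin.call(frame, options)`
// does in JS.
jsi::Value invokePlugin(jsi::Runtime& runtime, const jsi::Object& pluginObject,
                        const jsi::Object& frameObject) {
  auto call = pluginObject.getPropertyAsFunction(runtime, "call");
  jsi::Object options(runtime);
  options.setProperty(runtime, "minConfidence", 0.5); // illustrative option
  return call.callWithThis(runtime, pluginObject, frameObject, options);
}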
@@ -0,0 +1,31 @@
//
// Created by Marc Rousavy on 21.07.23.
//

#pragma once

#include "JFrameProcessorPlugin.h"
#include <fbjni/fbjni.h>
#include <jsi/jsi.h>
#include <memory>
#include <vector>

namespace vision {

using namespace facebook;

class FrameProcessorPluginHostObject : public jsi::HostObject {
 public:
  explicit FrameProcessorPluginHostObject(jni::alias_ref<JFrameProcessorPlugin::javaobject> plugin)
      : _plugin(make_global(plugin)) {}
  ~FrameProcessorPluginHostObject() {}

 public:
  std::vector<jsi::PropNameID> getPropertyNames(jsi::Runtime& runtime) override;
  jsi::Value get(jsi::Runtime& runtime, const jsi::PropNameID& name) override;

 private:
  jni::global_ref<JFrameProcessorPlugin::javaobject> _plugin;
};

} // namespace vision
150
package/android/src/main/cpp/frameprocessor/JSIJNIConversion.cpp
Normal file
@@ -0,0 +1,150 @@
//
// Created by Marc Rousavy on 22.06.21.
//

#include "JSIJNIConversion.h"

#include <android/log.h>
#include <fbjni/fbjni.h>
#include <jni.h>
#include <jsi/jsi.h>

#include <memory>
#include <string>
#include <utility>

#include "FrameHostObject.h"
#include "JFrame.h"

namespace vision {

using namespace facebook;

jni::local_ref<jni::JMap<jstring, jobject>>
JSIJNIConversion::convertJSIObjectToJNIMap(jsi::Runtime& runtime, const jsi::Object& object) {
  auto propertyNames = object.getPropertyNames(runtime);
  auto size = propertyNames.size(runtime);
  auto hashMap = jni::JHashMap<jstring, jobject>::create();

  for (size_t i = 0; i < size; i++) {
    auto propName = propertyNames.getValueAtIndex(runtime, i).asString(runtime);
    auto key = jni::make_jstring(propName.utf8(runtime));
    auto value = object.getProperty(runtime, propName);

    if (value.isNull() || value.isUndefined()) {
      // null
      hashMap->put(key, nullptr);
    } else if (value.isBool()) {
      // Boolean
      auto boolean = value.getBool();
      hashMap->put(key, jni::JBoolean::valueOf(boolean));
    } else if (value.isNumber()) {
      // Double
      auto number = value.getNumber();
      hashMap->put(key, jni::JDouble::valueOf(number));
    } else if (value.isString()) {
      // String
      auto str = value.getString(runtime).utf8(runtime);
      hashMap->put(key, jni::make_jstring(str));
    } else if (value.isObject()) {
      // Object
      auto valueAsObject = value.getObject(runtime);

      if (valueAsObject.isArray(runtime)) {
        // List<Object> - note: arrays are currently not converted and are silently skipped.
      } else if (valueAsObject.isHostObject(runtime)) {
        throw std::runtime_error("You can't pass HostObjects here.");
      } else {
        // Map<String, Object>
        auto map = convertJSIObjectToJNIMap(runtime, valueAsObject);
        hashMap->put(key, map);
      }
    } else {
      auto stringRepresentation = value.toString(runtime).utf8(runtime);
      throw std::runtime_error("Failed to convert jsi::Value to JNI value - unsupported type: " +
                               stringRepresentation);
    }
  }

  return hashMap;
}

jsi::Value JSIJNIConversion::convertJNIObjectToJSIValue(jsi::Runtime& runtime,
                                                        const jni::local_ref<jobject>& object) {
  if (object == nullptr) {
    // null
    return jsi::Value::undefined();
  } else if (object->isInstanceOf(jni::JBoolean::javaClassStatic())) {
    // Boolean
    static const auto getBooleanFunc =
        jni::findClassLocal("java/lang/Boolean")->getMethod<jboolean()>("booleanValue");
    auto boolean = getBooleanFunc(object.get());
    return jsi::Value(boolean == true);
  } else if (object->isInstanceOf(jni::JDouble::javaClassStatic())) {
    // Double
    static const auto getDoubleFunc =
        jni::findClassLocal("java/lang/Double")->getMethod<jdouble()>("doubleValue");
    auto d = getDoubleFunc(object.get());
    return jsi::Value(d);
  } else if (object->isInstanceOf(jni::JInteger::javaClassStatic())) {
    // Integer
    static const auto getIntegerFunc =
        jni::findClassLocal("java/lang/Integer")->getMethod<jint()>("intValue");
    auto i = getIntegerFunc(object.get());
    return jsi::Value(i);
  } else if (object->isInstanceOf(jni::JString::javaClassStatic())) {
    // String
    return jsi::String::createFromUtf8(runtime, object->toString());
  } else if (object->isInstanceOf(JList<jobject>::javaClassStatic())) {
    // List<E>
    auto arrayList = static_ref_cast<JList<jobject>>(object);
    auto size = arrayList->size();

    auto result = jsi::Array(runtime, size);
    size_t i = 0;
    for (const auto& item : *arrayList) {
      result.setValueAtIndex(runtime, i, convertJNIObjectToJSIValue(runtime, item));
      i++;
    }
    return result;
  } else if (object->isInstanceOf(JMap<jstring, jobject>::javaClassStatic())) {
    // Map<K, V>
    auto map = static_ref_cast<JMap<jstring, jobject>>(object);

    auto result = jsi::Object(runtime);
    for (const auto& entry : *map) {
      auto key = entry.first->toString();
      auto value = entry.second;
      auto jsiValue = convertJNIObjectToJSIValue(runtime, value);
      result.setProperty(runtime, key.c_str(), jsiValue);
    }
    return result;
  } else if (object->isInstanceOf(JFrame::javaClassStatic())) {
    // Frame
    auto frame = static_ref_cast<JFrame>(object);

    // box into HostObject
    auto hostObject = std::make_shared<FrameHostObject>(frame);
    return jsi::Object::createFromHostObject(runtime, hostObject);
  }

  auto type = object->getClass()->toString();
  auto message = "Received unknown JNI type \"" + type + "\"! Cannot convert to jsi::Value.";
  __android_log_write(ANDROID_LOG_ERROR, "VisionCamera", message.c_str());
  throw std::runtime_error(message);
}

} // namespace vision
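A hedged sketch of how the two converters above round-trip a plain options object between the JS and Java worlds (the function roundTrip is illustrative and not part of this commit; the fbjni ref-cast at the end is an assumption about how a concrete jobject result would be fed back in):

// Convert a JS options object into a java.util.Map for a plugin call, then
// convert a returned jobject back into a jsi::Value for JS.
jsi::Value roundTrip(jsi::Runtime& runtime, const jsi::Object& jsOptions) {
  // JS -> JNI: a plain object like {"mode": "fast"} becomes a java.util.HashMap.
  auto jniMap = vision::JSIJNIConversion::convertJSIObjectToJNIMap(runtime, jsOptions);

  // ...a Java plugin would consume jniMap here and hand back some jobject...
  jni::local_ref<jobject> result = jni::static_ref_cast<jobject>(jniMap); // illustrative: echo the map

  // JNI -> JS: Maps become objects, Lists become arrays, null becomes undefined.
  return vision::JSIJNIConversion::convertJNIObjectToJSIValue(runtime, result);
}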
@@ -0,0 +1,25 @@
//
// Created by Marc Rousavy on 22.06.21.
//

#pragma once

#include <fbjni/fbjni.h>
#include <jni.h>
#include <jsi/jsi.h>

namespace vision {

namespace JSIJNIConversion {

using namespace facebook;

jni::local_ref<jni::JMap<jstring, jobject>> convertJSIObjectToJNIMap(jsi::Runtime& runtime,
                                                                     const jsi::Object& object);

jsi::Value convertJNIObjectToJSIValue(jsi::Runtime& runtime,
                                      const jni::local_ref<jobject>& object);

} // namespace JSIJNIConversion

} // namespace vision
@@ -0,0 +1,116 @@
//
// Created by Marc Rousavy on 21.07.23.
//

#include "VisionCameraProxy.h"
#include <jsi/jsi.h>

#include "JFrameProcessor.h"
#include "JFrameProcessorPlugin.h"
#include "JSIJNIConversion.h"

#include <android/log.h>
#include <fbjni/fbjni.h>

#include "FrameProcessorPluginHostObject.h"
#include "JSITypedArray.h"

#include <memory>
#include <string>
#include <vector>

#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
#include <react-native-worklets-core/WKTJsiWorkletContext.h>
#endif

namespace vision {

using namespace facebook;

VisionCameraProxy::VisionCameraProxy(
    const jni::alias_ref<JVisionCameraProxy::javaobject>& javaProxy) {
  _javaProxy = make_global(javaProxy);
}

VisionCameraProxy::~VisionCameraProxy() {}

std::vector<jsi::PropNameID> VisionCameraProxy::getPropertyNames(jsi::Runtime& runtime) {
  std::vector<jsi::PropNameID> result;
  result.push_back(jsi::PropNameID::forUtf8(runtime, std::string("setFrameProcessor")));
  result.push_back(jsi::PropNameID::forUtf8(runtime, std::string("removeFrameProcessor")));
  result.push_back(jsi::PropNameID::forUtf8(runtime, std::string("getFrameProcessorPlugin")));
  return result;
}

void VisionCameraProxy::setFrameProcessor(int viewTag, jsi::Runtime& runtime,
                                          const jsi::Object& object) {
  _javaProxy->cthis()->setFrameProcessor(viewTag, runtime, object);
}

void VisionCameraProxy::removeFrameProcessor(int viewTag) {
  _javaProxy->cthis()->removeFrameProcessor(viewTag);
}

jsi::Value VisionCameraProxy::getFrameProcessorPlugin(jsi::Runtime& runtime,
                                                      const std::string& name,
                                                      const jsi::Object& jsOptions) {
  auto options = JSIJNIConversion::convertJSIObjectToJNIMap(runtime, jsOptions);

  auto plugin = _javaProxy->cthis()->getFrameProcessorPlugin(name, options);

  auto pluginHostObject = std::make_shared<FrameProcessorPluginHostObject>(plugin);
  return jsi::Object::createFromHostObject(runtime, pluginHostObject);
}

jsi::Value VisionCameraProxy::get(jsi::Runtime& runtime, const jsi::PropNameID& propName) {
  auto name = propName.utf8(runtime);

  if (name == "setFrameProcessor") {
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "setFrameProcessor"), 1,
        [this](jsi::Runtime& runtime, const jsi::Value& thisValue, const jsi::Value* arguments,
               size_t count) -> jsi::Value {
          auto viewTag = arguments[0].asNumber();
          auto object = arguments[1].asObject(runtime);
          this->setFrameProcessor(static_cast<int>(viewTag), runtime, object);
          return jsi::Value::undefined();
        });
  }
  if (name == "removeFrameProcessor") {
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "removeFrameProcessor"), 1,
        [this](jsi::Runtime& runtime, const jsi::Value& thisValue, const jsi::Value* arguments,
               size_t count) -> jsi::Value {
          auto viewTag = arguments[0].asNumber();
          this->removeFrameProcessor(static_cast<int>(viewTag));
          return jsi::Value::undefined();
        });
  }
  if (name == "getFrameProcessorPlugin") {
    return jsi::Function::createFromHostFunction(
        runtime, jsi::PropNameID::forUtf8(runtime, "getFrameProcessorPlugin"), 1,
        [this](jsi::Runtime& runtime, const jsi::Value& thisValue, const jsi::Value* arguments,
               size_t count) -> jsi::Value {
          if (count < 1 || !arguments[0].isString()) {
            throw jsi::JSError(runtime, "First argument needs to be a string (pluginName)!");
          }
          auto pluginName = arguments[0].asString(runtime).utf8(runtime);
          auto options = count > 1 ? arguments[1].asObject(runtime) : jsi::Object(runtime);

          return this->getFrameProcessorPlugin(runtime, pluginName, options);
        });
  }

  return jsi::Value::undefined();
}

void VisionCameraInstaller::install(jni::alias_ref<jni::JClass>,
                                    jni::alias_ref<JVisionCameraProxy::javaobject> proxy) {
  // global.VisionCameraProxy
  auto visionCameraProxy = std::make_shared<VisionCameraProxy>(proxy);
  jsi::Runtime& runtime = *proxy->cthis()->getJSRuntime();
  runtime.global().setProperty(runtime, "VisionCameraProxy",
                               jsi::Object::createFromHostObject(runtime, visionCameraProxy));
}

} // namespace vision
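After VisionCameraInstaller::install runs, the proxy is reachable from JS as global.VisionCameraProxy. A hedged sketch of exercising it straight from C++ by evaluating the same JS an app would run (purely illustrative; the plugin name "example_plugin" and viewTag 1 are hypothetical):

#include <jsi/jsi.h>
#include <memory>

// Drive the installed proxy by evaluating JS source against the runtime.
void exerciseProxy(facebook::jsi::Runtime& runtime) {
  using facebook::jsi::StringBuffer;
  auto source = std::make_shared<StringBuffer>(
      "const plugin = VisionCameraProxy.getFrameProcessorPlugin('example_plugin');"
      "VisionCameraProxy.removeFrameProcessor(1);"); // viewTag 1 is illustrative
  runtime.evaluateJavaScript(source, "exerciseProxy.js");
}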
@@ -0,0 +1,51 @@
//
// Created by Marc Rousavy on 21.07.23.
//

#pragma once

#include <jsi/jsi.h>

#include "JVisionCameraProxy.h"
#include "JVisionCameraScheduler.h"

#include <string>
#include <vector>

namespace vision {

using namespace facebook;

class VisionCameraProxy : public jsi::HostObject {
 public:
  explicit VisionCameraProxy(const jni::alias_ref<JVisionCameraProxy::javaobject>& javaProxy);
  ~VisionCameraProxy();

 public:
  std::vector<jsi::PropNameID> getPropertyNames(jsi::Runtime& runtime) override;
  jsi::Value get(jsi::Runtime& runtime, const jsi::PropNameID& name) override;

 private:
  void setFrameProcessor(int viewTag, jsi::Runtime& runtime, const jsi::Object& frameProcessor);
  void removeFrameProcessor(int viewTag);
  jsi::Value getFrameProcessorPlugin(jsi::Runtime& runtime, const std::string& name,
                                     const jsi::Object& options);

 private:
  jni::global_ref<JVisionCameraProxy::javaobject> _javaProxy;
  static constexpr const char* TAG = "VisionCameraProxy";
};

class VisionCameraInstaller : public jni::JavaClass<VisionCameraInstaller> {
 public:
  static auto constexpr kJavaDescriptor =
      "Lcom/mrousavy/camera/frameprocessor/VisionCameraInstaller;";
  static void registerNatives() {
    javaClassStatic()->registerNatives(
        {makeNativeMethod("install", VisionCameraInstaller::install)});
  }
  static void install(jni::alias_ref<jni::JClass> clazz,
                      jni::alias_ref<JVisionCameraProxy::javaobject> proxy);
};

} // namespace vision
@@ -0,0 +1,81 @@
//
// Created by Marc on 21.07.2023.
//

#include "JFrame.h"

#include <fbjni/ByteBuffer.h>
#include <fbjni/fbjni.h>
#include <jni.h>

namespace vision {

using namespace facebook;
using namespace jni;

int JFrame::getWidth() const {
  static const auto getWidthMethod = getClass()->getMethod<jint()>("getWidth");
  return getWidthMethod(self());
}

int JFrame::getHeight() const {
  static const auto getHeightMethod = getClass()->getMethod<jint()>("getHeight");
  return getHeightMethod(self());
}

bool JFrame::getIsValid() const {
  static const auto getIsValidMethod = getClass()->getMethod<jboolean()>("getIsValid");
  return getIsValidMethod(self());
}

bool JFrame::getIsMirrored() const {
  static const auto getIsMirroredMethod = getClass()->getMethod<jboolean()>("getIsMirrored");
  return getIsMirroredMethod(self());
}

jlong JFrame::getTimestamp() const {
  static const auto getTimestampMethod = getClass()->getMethod<jlong()>("getTimestamp");
  return getTimestampMethod(self());
}

local_ref<JString> JFrame::getOrientation() const {
  static const auto getOrientationMethod = getClass()->getMethod<JString()>("getOrientation");
  return getOrientationMethod(self());
}

local_ref<JString> JFrame::getPixelFormat() const {
  static const auto getPixelFormatMethod = getClass()->getMethod<JString()>("getPixelFormat");
  return getPixelFormatMethod(self());
}

int JFrame::getPlanesCount() const {
  static const auto getPlanesCountMethod = getClass()->getMethod<jint()>("getPlanesCount");
  return getPlanesCountMethod(self());
}

int JFrame::getBytesPerRow() const {
  static const auto getBytesPerRowMethod = getClass()->getMethod<jint()>("getBytesPerRow");
  return getBytesPerRowMethod(self());
}

local_ref<JByteBuffer> JFrame::toByteBuffer() const {
  static const auto toByteBufferMethod = getClass()->getMethod<JByteBuffer()>("toByteBuffer");
  return toByteBufferMethod(self());
}

void JFrame::incrementRefCount() {
  static const auto incrementRefCountMethod = getClass()->getMethod<void()>("incrementRefCount");
  incrementRefCountMethod(self());
}

void JFrame::decrementRefCount() {
  static const auto decrementRefCountMethod = getClass()->getMethod<void()>("decrementRefCount");
  decrementRefCountMethod(self());
}

void JFrame::close() {
  static const auto closeMethod = getClass()->getMethod<void()>("close");
  closeMethod(self());
}

} // namespace vision
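Every accessor above uses the same fbjni idiom: resolve the Java method once into a function-local static const and reuse the cached handle on every call, so the JNI lookup runs once per process rather than once per frame. A minimal sketch of that idiom on a hypothetical wrapper (JExampleFrame and its descriptor are not part of this commit):

// Hypothetical JavaClass wrapper demonstrating the cached-method idiom used
// throughout JFrame above.
struct JExampleFrame : public facebook::jni::JavaClass<JExampleFrame> {
  static constexpr auto kJavaDescriptor = "Lcom/example/ExampleFrame;"; // hypothetical

  int getWidth() const {
    // The getMethod<> lookup happens only on the first call.
    static const auto method = getClass()->getMethod<jint()>("getWidth");
    return method(self());
  }
};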
@@ -0,0 +1,35 @@
//
// Created by Marc on 21.07.2023.
//

#pragma once

#include <fbjni/ByteBuffer.h>
#include <fbjni/fbjni.h>
#include <jni.h>

namespace vision {

using namespace facebook;
using namespace jni;

struct JFrame : public JavaClass<JFrame> {
  static constexpr auto kJavaDescriptor = "Lcom/mrousavy/camera/frameprocessor/Frame;";

 public:
  int getWidth() const;
  int getHeight() const;
  bool getIsValid() const;
  bool getIsMirrored() const;
  int getPlanesCount() const;
  int getBytesPerRow() const;
  jlong getTimestamp() const;
  local_ref<JString> getOrientation() const;
  local_ref<JString> getPixelFormat() const;
  local_ref<JByteBuffer> toByteBuffer() const;
  void incrementRefCount();
  void decrementRefCount();
  void close();
};

} // namespace vision
@@ -0,0 +1,70 @@
//
// Created by Marc Rousavy on 29.09.21.
//

#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS

#include "JFrameProcessor.h"
#include <fbjni/fbjni.h>
#include <jni.h>

#include "JFrame.h"
#include <utility>

namespace vision {

using namespace facebook;
using namespace jni;

void JFrameProcessor::registerNatives() {
  registerHybrid({makeNativeMethod("call", JFrameProcessor::call)});
}

using TSelf = jni::local_ref<JFrameProcessor::javaobject>;

JFrameProcessor::JFrameProcessor(std::shared_ptr<RNWorklet::JsiWorklet> worklet,
                                 std::shared_ptr<RNWorklet::JsiWorkletContext> context) {
  _workletContext = std::move(context);
  _workletInvoker = std::make_shared<RNWorklet::WorkletInvoker>(worklet);
}

TSelf JFrameProcessor::create(const std::shared_ptr<RNWorklet::JsiWorklet>& worklet,
                              const std::shared_ptr<RNWorklet::JsiWorkletContext>& context) {
  return JFrameProcessor::newObjectCxxArgs(worklet, context);
}

void JFrameProcessor::callWithFrameHostObject(
    const std::shared_ptr<FrameHostObject>& frameHostObject) const {
  // Call the Frame Processor on the Worklet Runtime
  jsi::Runtime& runtime = _workletContext->getWorkletRuntime();

  try {
    // Wrap HostObject as JSI Value
    auto argument = jsi::Object::createFromHostObject(runtime, frameHostObject);
    jsi::Value jsValue(std::move(argument));

    // Call the Worklet with the Frame JS Host Object as an argument
    _workletInvoker->call(runtime, jsi::Value::undefined(), &jsValue, 1);
  } catch (jsi::JSError& jsError) {
    // A JS Error occurred, print it to the console.
    const std::string& message = jsError.getMessage();

    _workletContext->invokeOnJsThread([message](jsi::Runtime& jsRuntime) {
      auto logFn = jsRuntime.global()
                       .getPropertyAsObject(jsRuntime, "console")
                       .getPropertyAsFunction(jsRuntime, "error");
      logFn.call(jsRuntime, jsi::String::createFromUtf8(
                                jsRuntime, "Frame Processor threw an error: " + message));
    });
  }
}

void JFrameProcessor::call(jni::alias_ref<JFrame::javaobject> frame) {
  // Create the Frame Host Object wrapping the internal Frame
  auto frameHostObject = std::make_shared<FrameHostObject>(frame);
  callWithFrameHostObject(frameHostObject);
}

} // namespace vision

#endif
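callWithFrameHostObject above deliberately catches jsi::JSError and forwards the message to console.error on the React JS thread instead of letting the exception kill the native Frame Processor thread. A condensed sketch of that error-forwarding pattern (runGuarded is illustrative; the invokeOnJsThread callback shape follows react-native-worklets-core as used above):

// Run a worklet body and forward any jsi::JSError to console.error on the JS
// thread, rather than crashing the caller's native thread.
void runGuarded(jsi::Runtime& workletRuntime,
                const std::function<void(jsi::Runtime&)>& worklet,
                const std::function<void(std::function<void(jsi::Runtime&)>)>& invokeOnJsThread) {
  try {
    worklet(workletRuntime);
  } catch (jsi::JSError& error) {
    std::string message = error.getMessage();
    invokeOnJsThread([message](jsi::Runtime& jsRuntime) {
      auto console = jsRuntime.global().getPropertyAsObject(jsRuntime, "console");
      auto logError = console.getPropertyAsFunction(jsRuntime, "error");
      logError.call(jsRuntime, jsi::String::createFromUtf8(jsRuntime, message));
    });
  }
}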
@@ -0,0 +1,54 @@
//
// Created by Marc Rousavy on 29.09.21
//

#pragma once

#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS

#include <fbjni/fbjni.h>
#include <jni.h>
#include <memory>
#include <string>

#include <react-native-worklets-core/WKTJsiHostObject.h>
#include <react-native-worklets-core/WKTJsiWorklet.h>

#include "FrameHostObject.h"
#include "JFrame.h"

namespace vision {

using namespace facebook;

struct JFrameProcessor : public jni::HybridClass<JFrameProcessor> {
 public:
  static auto constexpr kJavaDescriptor = "Lcom/mrousavy/camera/frameprocessor/FrameProcessor;";
  static void registerNatives();
  static jni::local_ref<JFrameProcessor::javaobject>
  create(const std::shared_ptr<RNWorklet::JsiWorklet>& worklet,
         const std::shared_ptr<RNWorklet::JsiWorkletContext>& context);

 public:
  /**
   * Call the JS Frame Processor.
   */
  void call(alias_ref<JFrame::javaobject> frame);

 private:
  // Private constructor. Use `create(..)` to create new instances.
  explicit JFrameProcessor(std::shared_ptr<RNWorklet::JsiWorklet> worklet,
                           std::shared_ptr<RNWorklet::JsiWorkletContext> context);

 private:
  void callWithFrameHostObject(const std::shared_ptr<FrameHostObject>& frameHostObject) const;

 private:
  friend HybridBase;
  std::shared_ptr<RNWorklet::WorkletInvoker> _workletInvoker;
  std::shared_ptr<RNWorklet::JsiWorkletContext> _workletContext;
};

} // namespace vision

#endif
@@ -0,0 +1,26 @@
//
// Created by Marc Rousavy on 29.09.21.
//

#include "JFrameProcessorPlugin.h"

#include <fbjni/fbjni.h>
#include <jni.h>

namespace vision {

using namespace facebook;
using namespace jni;

using TCallback = jobject(alias_ref<JFrame::javaobject>, alias_ref<JMap<jstring, jobject>> params);

local_ref<jobject>
JFrameProcessorPlugin::callback(const alias_ref<JFrame::javaobject>& frame,
                                const alias_ref<JMap<jstring, jobject>>& params) const {
  auto callbackMethod = getClass()->getMethod<TCallback>("callback");

  auto result = callbackMethod(self(), frame, params);
  return make_local(result);
}

} // namespace vision
@@ -0,0 +1,30 @@
//
// Created by Marc Rousavy on 29.09.21
//

#pragma once

#include <fbjni/fbjni.h>
#include <jni.h>
#include <string>

#include "JFrame.h"

namespace vision {

using namespace facebook;
using namespace jni;

struct JFrameProcessorPlugin : public JavaClass<JFrameProcessorPlugin> {
  static constexpr auto kJavaDescriptor =
      "Lcom/mrousavy/camera/frameprocessor/FrameProcessorPlugin;";

 public:
  /**
   * Call the plugin.
   */
  local_ref<jobject> callback(const alias_ref<JFrame::javaobject>& frame,
                              const alias_ref<JMap<jstring, jobject>>& params) const;
};

} // namespace vision
@@ -0,0 +1,117 @@
//
// Created by Marc Rousavy on 21.07.23.
//

#include "JVisionCameraProxy.h"

#include <memory>
#include <string>
#include <utility>

#include <jsi/jsi.h>

#include "FrameProcessorPluginHostObject.h"
#include "JSITypedArray.h"

#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
#include <react-native-worklets-core/WKTJsiWorklet.h>
#include <react-native-worklets-core/WKTJsiWorkletContext.h>
#endif

namespace vision {

using TSelf = local_ref<HybridClass<JVisionCameraProxy>::jhybriddata>;
using TJSCallInvokerHolder = jni::alias_ref<facebook::react::CallInvokerHolder::javaobject>;
using TScheduler = jni::alias_ref<JVisionCameraScheduler::javaobject>;
using TOptions = jni::local_ref<JMap<jstring, jobject>>;

JVisionCameraProxy::JVisionCameraProxy(
    const jni::alias_ref<JVisionCameraProxy::jhybridobject>& javaThis, jsi::Runtime* runtime,
    const std::shared_ptr<facebook::react::CallInvoker>& callInvoker,
    const jni::global_ref<JVisionCameraScheduler::javaobject>& scheduler) {
  _javaPart = make_global(javaThis);
  _runtime = runtime;

#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
  __android_log_write(ANDROID_LOG_INFO, TAG, "Creating Worklet Context...");

  auto runOnJS = [callInvoker](std::function<void()>&& f) {
    // Run on React JS Runtime
    callInvoker->invokeAsync(std::move(f));
  };
  auto runOnWorklet = [scheduler](std::function<void()>&& f) {
    // Run on Frame Processor Worklet Runtime
    scheduler->cthis()->dispatchAsync([f = std::move(f)]() { f(); });
  };
  _workletContext = std::make_shared<RNWorklet::JsiWorkletContext>("VisionCamera", runtime, runOnJS,
                                                                   runOnWorklet);
  __android_log_write(ANDROID_LOG_INFO, TAG, "Worklet Context created!");
#else
  __android_log_write(ANDROID_LOG_INFO, TAG, "Frame Processors are disabled!");
#endif
}

JVisionCameraProxy::~JVisionCameraProxy() {
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
  __android_log_write(ANDROID_LOG_INFO, TAG, "Destroying Context...");
  // Destroy ArrayBuffer cache for both the JS and the Worklet Runtime.
  invalidateArrayBufferCache(*_workletContext->getJsRuntime());
  invalidateArrayBufferCache(_workletContext->getWorkletRuntime());
#endif
}

void JVisionCameraProxy::setFrameProcessor(int viewTag, jsi::Runtime& runtime,
                                           const jsi::Object& frameProcessorObject) {
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
  auto frameProcessorType =
      frameProcessorObject.getProperty(runtime, "type").asString(runtime).utf8(runtime);
  auto worklet = std::make_shared<RNWorklet::JsiWorklet>(
      runtime, frameProcessorObject.getProperty(runtime, "frameProcessor"));

  jni::local_ref<JFrameProcessor::javaobject> frameProcessor;
  if (frameProcessorType == "frame-processor") {
    frameProcessor = JFrameProcessor::create(worklet, _workletContext);
  } else {
    throw std::runtime_error("Unknown FrameProcessor.type passed! Received: " + frameProcessorType);
  }

  auto setFrameProcessorMethod =
      javaClassLocal()->getMethod<void(int, alias_ref<JFrameProcessor::javaobject>)>(
          "setFrameProcessor");
  setFrameProcessorMethod(_javaPart, viewTag, frameProcessor);
#else
  throw std::runtime_error("system/frame-processors-unavailable: Frame Processors are disabled!");
#endif
}

void JVisionCameraProxy::removeFrameProcessor(int viewTag) {
  auto removeFrameProcessorMethod = javaClassLocal()->getMethod<void(int)>("removeFrameProcessor");
  removeFrameProcessorMethod(_javaPart, viewTag);
}

local_ref<JFrameProcessorPlugin::javaobject>
JVisionCameraProxy::getFrameProcessorPlugin(const std::string& name, TOptions options) {
  auto getFrameProcessorPluginMethod =
      javaClassLocal()->getMethod<JFrameProcessorPlugin(local_ref<jstring>, TOptions)>(
          "getFrameProcessorPlugin");
  return getFrameProcessorPluginMethod(_javaPart, make_jstring(name), std::move(options));
}

void JVisionCameraProxy::registerNatives() {
  registerHybrid({makeNativeMethod("initHybrid", JVisionCameraProxy::initHybrid)});
}

TSelf JVisionCameraProxy::initHybrid(alias_ref<jhybridobject> jThis, jlong jsRuntimePointer,
                                     TJSCallInvokerHolder jsCallInvokerHolder,
                                     const TScheduler& scheduler) {
  __android_log_write(ANDROID_LOG_INFO, TAG, "Initializing VisionCameraProxy...");

  // cast from JNI hybrid objects to C++ instances
  auto jsRuntime = reinterpret_cast<jsi::Runtime*>(jsRuntimePointer);
  auto jsCallInvoker = jsCallInvokerHolder->cthis()->getCallInvoker();
  auto sharedScheduler = make_global(scheduler);

  return makeCxxInstance(jThis, jsRuntime, jsCallInvoker, sharedScheduler);
}

} // namespace vision
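The constructor above wires the worklet context up with two executors: runOnJS hops onto the React JS thread via the CallInvoker, and runOnWorklet hops onto the Frame Processor thread via the scheduler. A condensed sketch of that wiring under the same react-native-worklets-core API used here (makeWorkletContext is an illustrative free function, not part of this commit):

// Two executors feed the Worklet Context: runOnJS posts via React's
// CallInvoker, runOnWorklet posts via the VisionCamera scheduler.
auto makeWorkletContext(jsi::Runtime* runtime,
                        std::shared_ptr<facebook::react::CallInvoker> callInvoker,
                        jni::global_ref<vision::JVisionCameraScheduler::javaobject> scheduler) {
  auto runOnJS = [callInvoker](std::function<void()>&& f) {
    callInvoker->invokeAsync(std::move(f));
  };
  auto runOnWorklet = [scheduler](std::function<void()>&& f) {
    scheduler->cthis()->dispatchAsync([f = std::move(f)]() { f(); });
  };
  return std::make_shared<RNWorklet::JsiWorkletContext>("VisionCamera", runtime,
                                                        runOnJS, runOnWorklet);
}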
@@ -0,0 +1,61 @@
//
// Created by Marc Rousavy on 21.07.23.
//

#pragma once

#include <ReactCommon/CallInvokerHolder.h>
#include <fbjni/fbjni.h>
#include <jsi/jsi.h>

#include "JFrameProcessor.h"
#include "JFrameProcessorPlugin.h"
#include "JVisionCameraScheduler.h"

#include <memory>
#include <string>

#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
#include <react-native-worklets-core/WKTJsiWorkletContext.h>
#endif

namespace vision {

using namespace facebook;

class JVisionCameraProxy : public jni::HybridClass<JVisionCameraProxy> {
 public:
  ~JVisionCameraProxy();
  static void registerNatives();

  void setFrameProcessor(int viewTag, jsi::Runtime& runtime, const jsi::Object& frameProcessor);
  void removeFrameProcessor(int viewTag);
  jni::local_ref<JFrameProcessorPlugin::javaobject>
  getFrameProcessorPlugin(const std::string& name, jni::local_ref<JMap<jstring, jobject>> options);

  jsi::Runtime* getJSRuntime() {
    return _runtime;
  }

 private:
  friend HybridBase;
  jni::global_ref<JVisionCameraProxy::javaobject> _javaPart;
  jsi::Runtime* _runtime;
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
  std::shared_ptr<RNWorklet::JsiWorkletContext> _workletContext;
#endif

  static auto constexpr TAG = "VisionCameraProxy";
  static auto constexpr kJavaDescriptor = "Lcom/mrousavy/camera/frameprocessor/VisionCameraProxy;";

  explicit JVisionCameraProxy(const jni::alias_ref<JVisionCameraProxy::jhybridobject>& javaThis,
                              jsi::Runtime* jsRuntime,
                              const std::shared_ptr<facebook::react::CallInvoker>& jsCallInvoker,
                              const jni::global_ref<JVisionCameraScheduler::javaobject>& scheduler);
  static jni::local_ref<jhybriddata>
  initHybrid(jni::alias_ref<jhybridobject> javaThis, jlong jsRuntimePointer,
             jni::alias_ref<facebook::react::CallInvokerHolder::javaobject> jsCallInvokerHolder,
             const jni::alias_ref<JVisionCameraScheduler::javaobject>& scheduler);
};

} // namespace vision
@@ -0,0 +1,44 @@
//
// Created by Marc Rousavy on 25.07.21.
//

#include "JVisionCameraScheduler.h"
#include <fbjni/fbjni.h>

namespace vision {

using namespace facebook;
using TSelf = jni::local_ref<JVisionCameraScheduler::jhybriddata>;

TSelf JVisionCameraScheduler::initHybrid(jni::alias_ref<jhybridobject> jThis) {
  return makeCxxInstance(jThis);
}

void JVisionCameraScheduler::dispatchAsync(const std::function<void()>& job) {
  // 1. add job to queue (guard the queue - trigger() runs on a different Thread)
  {
    std::unique_lock<std::mutex> lock(_mutex);
    _jobs.push(job);
  }
  scheduleTrigger();
}

void JVisionCameraScheduler::scheduleTrigger() {
  // 2. schedule `trigger()` to be called on the java thread
  static auto method = _javaPart->getClass()->getMethod<void()>("scheduleTrigger");
  method(_javaPart.get());
}

void JVisionCameraScheduler::trigger() {
  std::unique_lock<std::mutex> lock(_mutex);
  // 3. call the job we enqueued in step 1.
  auto job = _jobs.front();
  job();
  _jobs.pop();
}

void JVisionCameraScheduler::registerNatives() {
  registerHybrid({
      makeNativeMethod("initHybrid", JVisionCameraScheduler::initHybrid),
      makeNativeMethod("trigger", JVisionCameraScheduler::trigger),
  });
}

} // namespace vision
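A hedged sketch of how a caller hops onto the Frame Processor Java thread through this scheduler (runOnFrameProcessorThread is illustrative; the scheduler instance is assumed to be the global_ref that JVisionCameraProxy holds):

// Hop from an arbitrary C++ thread onto the VisionCamera Frame Processor
// Thread: the lambda is queued here, and the Java side calls trigger() later.
void runOnFrameProcessorThread(
    const jni::global_ref<vision::JVisionCameraScheduler::javaobject>& scheduler,
    std::function<void()>&& work) {
  scheduler->cthis()->dispatchAsync([work = std::move(work)]() {
    work(); // now running on the Java Frame Processor Thread
  });
}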
@@ -0,0 +1,51 @@
//
// Created by Marc Rousavy on 25.07.21.
//

#pragma once

#include <fbjni/fbjni.h>
#include <jni.h>
#include <functional>
#include <mutex>
#include <queue>

namespace vision {

using namespace facebook;

/**
 * A Scheduler that runs methods on the Frame Processor Thread (which is a Java Thread).
 * In order to call something on the Java Frame Processor Thread, you have to:
 *
 * 1. Call `dispatchAsync(..)` with the given C++ Method.
 * 2. Internally, `scheduleTrigger()` will get called, which is a Java Method.
 * 3. The `scheduleTrigger()` Java Method will switch to the Frame Processor Java Thread and call
 *    `trigger()` on there.
 * 4. `trigger()` is a C++ function here that just calls the passed C++ Method from step 1.
 */
class JVisionCameraScheduler : public jni::HybridClass<JVisionCameraScheduler> {
 public:
  static auto constexpr kJavaDescriptor =
      "Lcom/mrousavy/camera/frameprocessor/VisionCameraScheduler;";
  static jni::local_ref<jhybriddata> initHybrid(jni::alias_ref<jhybridobject> jThis);
  static void registerNatives();

  // schedules the given job to be run on the VisionCamera FP Thread at some future point in time
  void dispatchAsync(const std::function<void()>& job);

 private:
  friend HybridBase;
  jni::global_ref<JVisionCameraScheduler::javaobject> _javaPart;
  std::queue<std::function<void()>> _jobs;
  std::mutex _mutex;

  explicit JVisionCameraScheduler(jni::alias_ref<JVisionCameraScheduler::jhybridobject> jThis)
      : _javaPart(jni::make_global(jThis)) {}

  // Schedules a call to `trigger` on the VisionCamera FP Thread
  void scheduleTrigger();
  // Calls the next job in the job queue
  void trigger();
};

} // namespace vision
@@ -0,0 +1,16 @@
package com.mrousavy.camera

import com.facebook.react.ReactPackage
import com.facebook.react.bridge.NativeModule
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.uimanager.ViewManager

class CameraPackage : ReactPackage {
  override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
    return listOf(CameraViewModule(reactContext))
  }

  override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
    return listOf(CameraViewManager())
  }
}
@@ -0,0 +1,35 @@
package com.mrousavy.camera

import android.os.Handler
import android.os.HandlerThread
import kotlinx.coroutines.CoroutineDispatcher
import kotlinx.coroutines.android.asCoroutineDispatcher
import kotlinx.coroutines.asExecutor
import java.util.concurrent.Executor

class CameraQueues {
  companion object {
    val cameraQueue = CameraQueue("mrousavy/VisionCamera.main")
    val videoQueue = CameraQueue("mrousavy/VisionCamera.video")
  }

  class CameraQueue(name: String) {
    val handler: Handler
    private val thread: HandlerThread
    val executor: Executor
    val coroutineDispatcher: CoroutineDispatcher

    init {
      thread = HandlerThread(name)
      thread.start()
      handler = Handler(thread.looper)
      coroutineDispatcher = handler.asCoroutineDispatcher(name)
      executor = coroutineDispatcher.asExecutor()
    }

    protected fun finalize() {
      thread.quitSafely()
    }
  }
}
@@ -0,0 +1,48 @@
package com.mrousavy.camera

import android.util.Log
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.ReactContext
import com.facebook.react.bridge.WritableMap
import com.facebook.react.uimanager.events.RCTEventEmitter

fun CameraView.invokeOnInitialized() {
  Log.i(CameraView.TAG, "invokeOnInitialized()")

  val reactContext = context as ReactContext
  reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "cameraInitialized", null)
}

fun CameraView.invokeOnError(error: Throwable) {
  Log.e(CameraView.TAG, "invokeOnError(...):")
  error.printStackTrace()

  val cameraError = when (error) {
    is CameraError -> error
    else -> UnknownCameraError(error)
  }
  val event = Arguments.createMap()
  event.putString("code", cameraError.code)
  event.putString("message", cameraError.message)
  cameraError.cause?.let { cause ->
    event.putMap("cause", errorToMap(cause))
  }
  val reactContext = context as ReactContext
  reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "cameraError", event)
}

fun CameraView.invokeOnViewReady() {
  val event = Arguments.createMap()
  val reactContext = context as ReactContext
  reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "cameraViewReady", event)
}

private fun errorToMap(error: Throwable): WritableMap {
  val map = Arguments.createMap()
  map.putString("message", error.message)
  map.putString("stacktrace", error.stackTraceToString())
  error.cause?.let { cause ->
    map.putMap("cause", errorToMap(cause))
  }
  return map
}
@@ -0,0 +1,9 @@
package com.mrousavy.camera

import com.facebook.react.bridge.ReadableMap

suspend fun CameraView.focus(pointMap: ReadableMap) {
  val x = pointMap.getInt("x")
  val y = pointMap.getInt("y")
  cameraSession.focus(x, y)
}
@@ -0,0 +1,64 @@
package com.mrousavy.camera

import android.Manifest
import android.annotation.SuppressLint
import android.content.pm.PackageManager
import androidx.core.content.ContextCompat
import com.facebook.react.bridge.*
import com.mrousavy.camera.parsers.Torch
import com.mrousavy.camera.parsers.VideoCodec
import com.mrousavy.camera.parsers.VideoFileType
import com.mrousavy.camera.core.RecordingSession
import com.mrousavy.camera.utils.makeErrorMap
import java.util.*

suspend fun CameraView.startRecording(options: ReadableMap, onRecordCallback: Callback) {
  // check audio permission
  if (audio == true) {
    if (ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) {
      throw MicrophonePermissionError()
    }
  }

  if (options.hasKey("flash")) {
    val enableFlash = options.getString("flash") == "on"
    // overrides current torch mode value to enable flash while recording
    cameraSession.setTorchMode(enableFlash)
  }
  var codec = VideoCodec.H264
  if (options.hasKey("videoCodec")) {
    codec = VideoCodec.fromUnionValue(options.getString("videoCodec"))
  }
  var fileType = VideoFileType.MP4
  if (options.hasKey("fileType")) {
    fileType = VideoFileType.fromUnionValue(options.getString("fileType"))
  }

  val callback = { video: RecordingSession.Video ->
    val map = Arguments.createMap()
    map.putString("path", video.path)
    map.putDouble("duration", video.durationMs.toDouble() / 1000.0)
    onRecordCallback(map, null)
  }
  val onError = { error: RecorderError ->
    val errorMap = makeErrorMap(error.code, error.message)
    onRecordCallback(null, errorMap)
  }
  cameraSession.startRecording(audio == true, codec, fileType, callback, onError)
}

@SuppressLint("RestrictedApi")
suspend fun CameraView.pauseRecording() {
  cameraSession.pauseRecording()
}

@SuppressLint("RestrictedApi")
suspend fun CameraView.resumeRecording() {
  cameraSession.resumeRecording()
}

@SuppressLint("RestrictedApi")
suspend fun CameraView.stopRecording() {
  cameraSession.stopRecording()
  cameraSession.setTorchMode(torch == Torch.ON)
}
@@ -0,0 +1,115 @@
package com.mrousavy.camera

import android.annotation.SuppressLint
import android.content.Context
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.ImageFormat
import android.graphics.Matrix
import android.hardware.camera2.*
import android.util.Log
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.ReadableMap
import com.facebook.react.bridge.WritableMap
import com.mrousavy.camera.core.CameraSession
import com.mrousavy.camera.parsers.Flash
import com.mrousavy.camera.parsers.QualityPrioritization
import com.mrousavy.camera.utils.*
import kotlinx.coroutines.*
import java.io.File
import java.io.FileOutputStream
import java.io.OutputStream

private const val TAG = "CameraView.takePhoto"

@SuppressLint("UnsafeOptInUsageError")
suspend fun CameraView.takePhoto(optionsMap: ReadableMap): WritableMap {
  val options = optionsMap.toHashMap()
  Log.i(TAG, "Taking photo... Options: $options")

  val qualityPrioritization = options["qualityPrioritization"] as? String ?: "balanced"
  val flash = options["flash"] as? String ?: "off"
  val enableAutoRedEyeReduction = options["enableAutoRedEyeReduction"] == true
  val enableAutoStabilization = options["enableAutoStabilization"] == true
  val enableShutterSound = options["enableShutterSound"] as? Boolean ?: true

  val flashMode = Flash.fromUnionValue(flash)
  val qualityPrioritizationMode = QualityPrioritization.fromUnionValue(qualityPrioritization)

  val photo = cameraSession.takePhoto(qualityPrioritizationMode,
    flashMode,
    enableShutterSound,
    enableAutoRedEyeReduction,
    enableAutoStabilization,
    outputOrientation)

  photo.use {
    Log.i(TAG, "Successfully captured ${photo.image.width} x ${photo.image.height} photo!")

    val cameraCharacteristics = cameraManager.getCameraCharacteristics(cameraId!!)

    val path = savePhotoToFile(context, cameraCharacteristics, photo)

    Log.i(TAG, "Successfully saved photo to file! $path")

    val map = Arguments.createMap()
    map.putString("path", path)
    map.putInt("width", photo.image.width)
    map.putInt("height", photo.image.height)
    map.putString("orientation", photo.orientation.unionValue)
    map.putBoolean("isRawPhoto", photo.format == ImageFormat.RAW_SENSOR)
    map.putBoolean("isMirrored", photo.isMirrored)

    return map
  }
}

private fun writeImageToStream(imageBytes: ByteArray, stream: OutputStream, isMirrored: Boolean) {
  if (isMirrored) {
    val bitmap = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size)
    val matrix = Matrix()
    matrix.preScale(-1f, 1f)
    val processedBitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, matrix, false)
    processedBitmap.compress(Bitmap.CompressFormat.JPEG, 100, stream)
  } else {
    stream.write(imageBytes)
  }
}

private suspend fun savePhotoToFile(context: Context,
                                    cameraCharacteristics: CameraCharacteristics,
                                    photo: CameraSession.CapturedPhoto): String {
  return withContext(Dispatchers.IO) {
    when (photo.format) {
      // When the format is JPEG or DEPTH JPEG we can simply save the bytes as-is
      ImageFormat.JPEG, ImageFormat.DEPTH_JPEG -> {
        val buffer = photo.image.planes[0].buffer
        val bytes = ByteArray(buffer.remaining()).apply { buffer.get(this) }
        val file = createFile(context, ".jpg")
        FileOutputStream(file).use { stream ->
          writeImageToStream(bytes, stream, photo.isMirrored)
        }
        return@withContext file.absolutePath
      }

      // When the format is RAW we use the DngCreator utility library
      ImageFormat.RAW_SENSOR -> {
        val dngCreator = DngCreator(cameraCharacteristics, photo.metadata)
        val file = createFile(context, ".dng")
        FileOutputStream(file).use { stream ->
          // TODO: Make sure orientation is loaded properly here?
          dngCreator.writeImage(stream, photo.image)
        }
        return@withContext file.absolutePath
      }

      else -> {
        throw Error("Failed to save Photo to file, image format is not supported! ${photo.format}")
      }
    }
  }
}

private fun createFile(context: Context, extension: String): File {
  return File.createTempFile("mrousavy", extension, context.cacheDir).apply { deleteOnExit() }
}
247
package/android/src/main/java/com/mrousavy/camera/CameraView.kt
Normal file
@@ -0,0 +1,247 @@
package com.mrousavy.camera

import android.Manifest
import android.annotation.SuppressLint
import android.content.Context
import android.content.pm.PackageManager
import android.content.res.Configuration
import android.hardware.camera2.CameraManager
import android.util.Log
import android.util.Size
import android.view.ScaleGestureDetector
import android.view.Surface
import android.view.View
import android.widget.FrameLayout
import androidx.core.content.ContextCompat
import com.facebook.react.bridge.ReadableMap
import com.mrousavy.camera.core.CameraSession
import com.mrousavy.camera.core.PreviewView
import com.mrousavy.camera.extensions.containsAny
import com.mrousavy.camera.extensions.installHierarchyFitter
import com.mrousavy.camera.frameprocessor.FrameProcessor
import com.mrousavy.camera.parsers.Orientation
import com.mrousavy.camera.parsers.PixelFormat
import com.mrousavy.camera.parsers.Torch
import com.mrousavy.camera.parsers.VideoStabilizationMode
import com.mrousavy.camera.core.outputs.CameraOutputs
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch

//
// TODOs for the CameraView which are currently too hard to implement either because of CameraX' limitations, or my brain capacity.
//
// CameraView
// TODO: High-speed video recordings (export in CameraViewModule::getAvailableVideoDevices(), and set in CameraView::configurePreview()) (120FPS+)

// CameraView+RecordVideo
// TODO: Better startRecording()/stopRecording() (promise + callback, wait for TurboModules/JSI)

// CameraView+TakePhoto
// TODO: takePhoto() depth data
// TODO: takePhoto() raw capture
// TODO: takePhoto() return with jsi::Value Image reference for faster capture

@SuppressLint("ClickableViewAccessibility", "ViewConstructor", "MissingPermission")
class CameraView(context: Context) : FrameLayout(context) {
  companion object {
    const val TAG = "CameraView"

    private val propsThatRequirePreviewReconfiguration = arrayListOf("cameraId")
    private val propsThatRequireSessionReconfiguration = arrayListOf("cameraId", "format", "photo", "video", "enableFrameProcessor", "pixelFormat")
    private val propsThatRequireFormatReconfiguration = arrayListOf("fps", "hdr", "videoStabilizationMode", "lowLightBoost")
  }

  // react properties
  // props that require reconfiguring
  var cameraId: String? = null
  var enableDepthData = false
  var enableHighQualityPhotos: Boolean? = null
  var enablePortraitEffectsMatteDelivery = false
  // use-cases
  var photo: Boolean? = null
  var video: Boolean? = null
  var audio: Boolean? = null
  var enableFrameProcessor = false
  var pixelFormat: PixelFormat = PixelFormat.NATIVE
  // props that require format reconfiguring
  var format: ReadableMap? = null
  var fps: Int? = null
  var videoStabilizationMode: VideoStabilizationMode? = null
  var hdr: Boolean? = null // nullable bool
  var lowLightBoost: Boolean? = null // nullable bool
  // other props
  var isActive = false
  var torch: Torch = Torch.OFF
  var zoom: Float = 1f // in "factor"
  var orientation: Orientation? = null
  var enableZoomGesture: Boolean = false

  // private properties
  private var isMounted = false
  internal val cameraManager = context.getSystemService(Context.CAMERA_SERVICE) as CameraManager

  // session
  internal val cameraSession: CameraSession
  private var previewView: View? = null
  private var previewSurface: Surface? = null

  internal var frameProcessor: FrameProcessor? = null
    set(value) {
      field = value
      cameraSession.frameProcessor = frameProcessor
    }

  private val inputOrientation: Orientation
    get() = cameraSession.orientation
  internal val outputOrientation: Orientation
    get() = orientation ?: inputOrientation

  init {
    this.installHierarchyFitter()
    setupPreviewView()
    cameraSession = CameraSession(context, cameraManager, { invokeOnInitialized() }, { error -> invokeOnError(error) })
  }

  override fun onConfigurationChanged(newConfig: Configuration?) {
    super.onConfigurationChanged(newConfig)
    // TODO: updateOrientation()
  }

  override fun onAttachedToWindow() {
    super.onAttachedToWindow()
    if (!isMounted) {
      isMounted = true
      invokeOnViewReady()
    }
    updateLifecycle()
  }

  override fun onDetachedFromWindow() {
    super.onDetachedFromWindow()
    updateLifecycle()
  }

  private fun setupPreviewView() {
    removeView(previewView)
    this.previewSurface = null

    val cameraId = cameraId ?: return
    val previewView = PreviewView(context, cameraManager, cameraId) { surface ->
      previewSurface = surface
      configureSession()
    }
    previewView.layoutParams = LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT)
    addView(previewView)
    this.previewView = previewView
  }

  fun update(changedProps: ArrayList<String>) {
    Log.i(TAG, "Props changed: $changedProps")
    try {
      val shouldReconfigurePreview = changedProps.containsAny(propsThatRequirePreviewReconfiguration)
      val shouldReconfigureSession = shouldReconfigurePreview || changedProps.containsAny(propsThatRequireSessionReconfiguration)
      val shouldReconfigureFormat = shouldReconfigureSession || changedProps.containsAny(propsThatRequireFormatReconfiguration)
      val shouldReconfigureZoom = shouldReconfigureSession || changedProps.contains("zoom")
      val shouldReconfigureTorch = shouldReconfigureSession || changedProps.contains("torch")
      val shouldUpdateOrientation = /* TODO: When should we reconfigure this? */ shouldReconfigureSession || changedProps.contains("orientation")
      val shouldCheckActive = shouldReconfigureFormat || changedProps.contains("isActive")
      val shouldReconfigureZoomGesture = changedProps.contains("enableZoomGesture")

      if (shouldReconfigurePreview) {
        setupPreviewView()
      }
      if (shouldReconfigureSession) {
        configureSession()
      }
      if (shouldReconfigureFormat) {
        configureFormat()
      }
      if (shouldCheckActive) {
        updateLifecycle()
      }

      if (shouldReconfigureZoom) {
        updateZoom()
      }
      if (shouldReconfigureTorch) {
        updateTorch()
      }
      if (shouldUpdateOrientation) {
        // TODO: updateOrientation()
      }
      if (shouldReconfigureZoomGesture) {
        updateZoomGesture()
      }
    } catch (e: Throwable) {
      Log.e(TAG, "update() threw: ${e.message}")
      invokeOnError(e)
    }
  }

  private fun configureSession() {
    try {
      Log.i(TAG, "Configuring Camera Device...")

      if (ContextCompat.checkSelfPermission(context, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
        throw CameraPermissionError()
      }
      val cameraId = cameraId ?: throw NoCameraDeviceError()

      val format = format
      val targetVideoSize = if (format != null) Size(format.getInt("videoWidth"), format.getInt("videoHeight")) else null
      val targetPhotoSize = if (format != null) Size(format.getInt("photoWidth"), format.getInt("photoHeight")) else null
      // TODO: Allow previewSurface to be null/none
      val previewSurface = previewSurface ?: return

      val previewOutput = CameraOutputs.PreviewOutput(previewSurface)
      val photoOutput = if (photo == true) {
        CameraOutputs.PhotoOutput(targetPhotoSize)
      } else null
      val videoOutput = if (video == true || enableFrameProcessor) {
        CameraOutputs.VideoOutput(targetVideoSize, video == true, enableFrameProcessor, pixelFormat.toImageFormat())
      } else null

      cameraSession.configureSession(cameraId, previewOutput, photoOutput, videoOutput)
    } catch (e: Throwable) {
      Log.e(TAG, "Failed to configure session: ${e.message}", e)
      invokeOnError(e)
    }
  }

  private fun configureFormat() {
    cameraSession.configureFormat(fps, videoStabilizationMode, hdr, lowLightBoost)
  }

  private fun updateLifecycle() {
    cameraSession.setIsActive(isActive && isAttachedToWindow)
  }

  private fun updateZoom() {
    cameraSession.setZoom(zoom)
  }

  private fun updateTorch() {
    CoroutineScope(Dispatchers.Default).launch {
      cameraSession.setTorchMode(torch == Torch.ON)
    }
  }

  @SuppressLint("ClickableViewAccessibility")
  private fun updateZoomGesture() {
    if (enableZoomGesture) {
      val scaleGestureDetector = ScaleGestureDetector(context, object: ScaleGestureDetector.SimpleOnScaleGestureListener() {
        override fun onScale(detector: ScaleGestureDetector): Boolean {
          zoom *= detector.scaleFactor
          cameraSession.setZoom(zoom)
          return true
        }
      })
      setOnTouchListener { _, event ->
        scaleGestureDetector.onTouchEvent(event)
      }
    } else {
      setOnTouchListener(null)
    }
  }
}
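Note how update() above folds the changed props into a cascade: a preview change implies a session change, which implies a format change, which implies an active-state check. A self-contained sketch of the same pattern (prop names trimmed to a few examples; this helper does not exist in the codebase):

// Hypothetical illustration of the cascading-reconfiguration pattern in update()
fun reconfigurationLevels(changedProps: List<String>): List<String> {
  val preview = "cameraId" in changedProps
  val session = preview || changedProps.any { it in listOf("format", "photo", "video") }
  val format = session || changedProps.any { it in listOf("fps", "hdr") }
  val active = format || "isActive" in changedProps
  return listOfNotNull(
    "preview".takeIf { preview },
    "session".takeIf { session },
    "format".takeIf { format },
    "lifecycle".takeIf { active }
  )
}

With this shape, reconfigurationLevels(listOf("cameraId")) returns all four levels, while listOf("zoom") returns none of them — which mirrors why a cameraId change is the most expensive prop update.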
@@ -0,0 +1,192 @@
package com.mrousavy.camera

import com.facebook.react.bridge.ReadableMap
import com.facebook.react.common.MapBuilder
import com.facebook.react.uimanager.ThemedReactContext
import com.facebook.react.uimanager.ViewGroupManager
import com.facebook.react.uimanager.annotations.ReactProp
import com.mrousavy.camera.parsers.Orientation
import com.mrousavy.camera.parsers.PixelFormat
import com.mrousavy.camera.parsers.Torch
import com.mrousavy.camera.parsers.VideoStabilizationMode

@Suppress("unused")
class CameraViewManager : ViewGroupManager<CameraView>() {

  public override fun createViewInstance(context: ThemedReactContext): CameraView {
    return CameraView(context)
  }

  override fun onAfterUpdateTransaction(view: CameraView) {
    super.onAfterUpdateTransaction(view)
    val changedProps = cameraViewTransactions[view] ?: ArrayList()
    view.update(changedProps)
    cameraViewTransactions.remove(view)
  }

  override fun getExportedCustomDirectEventTypeConstants(): MutableMap<String, Any>? {
    return MapBuilder.builder<String, Any>()
      .put("cameraViewReady", MapBuilder.of("registrationName", "onViewReady"))
      .put("cameraInitialized", MapBuilder.of("registrationName", "onInitialized"))
      .put("cameraError", MapBuilder.of("registrationName", "onError"))
      .build()
  }

  override fun getName(): String {
    return TAG
  }

  @ReactProp(name = "cameraId")
  fun setCameraId(view: CameraView, cameraId: String) {
    if (view.cameraId != cameraId)
      addChangedPropToTransaction(view, "cameraId")
    view.cameraId = cameraId
  }

  @ReactProp(name = "photo")
  fun setPhoto(view: CameraView, photo: Boolean?) {
    if (view.photo != photo)
      addChangedPropToTransaction(view, "photo")
    view.photo = photo
  }

  @ReactProp(name = "video")
  fun setVideo(view: CameraView, video: Boolean?) {
    if (view.video != video)
      addChangedPropToTransaction(view, "video")
    view.video = video
  }

  @ReactProp(name = "audio")
  fun setAudio(view: CameraView, audio: Boolean?) {
    if (view.audio != audio)
      addChangedPropToTransaction(view, "audio")
    view.audio = audio
  }

  @ReactProp(name = "enableFrameProcessor")
  fun setEnableFrameProcessor(view: CameraView, enableFrameProcessor: Boolean) {
    if (view.enableFrameProcessor != enableFrameProcessor)
      addChangedPropToTransaction(view, "enableFrameProcessor")
    view.enableFrameProcessor = enableFrameProcessor
  }

  @ReactProp(name = "pixelFormat")
  fun setPixelFormat(view: CameraView, pixelFormat: String?) {
    val newPixelFormat = PixelFormat.fromUnionValue(pixelFormat)
    if (view.pixelFormat != newPixelFormat)
      addChangedPropToTransaction(view, "pixelFormat")
    view.pixelFormat = newPixelFormat ?: PixelFormat.NATIVE
  }

  @ReactProp(name = "enableDepthData")
  fun setEnableDepthData(view: CameraView, enableDepthData: Boolean) {
    if (view.enableDepthData != enableDepthData)
      addChangedPropToTransaction(view, "enableDepthData")
    view.enableDepthData = enableDepthData
  }

  @ReactProp(name = "enableZoomGesture")
  fun setEnableZoomGesture(view: CameraView, enableZoomGesture: Boolean) {
    if (view.enableZoomGesture != enableZoomGesture)
      addChangedPropToTransaction(view, "enableZoomGesture")
    view.enableZoomGesture = enableZoomGesture
  }

  @ReactProp(name = "videoStabilizationMode")
  fun setVideoStabilizationMode(view: CameraView, videoStabilizationMode: String?) {
    val newMode = VideoStabilizationMode.fromUnionValue(videoStabilizationMode)
    if (view.videoStabilizationMode != newMode)
      addChangedPropToTransaction(view, "videoStabilizationMode")
    view.videoStabilizationMode = newMode
  }

  @ReactProp(name = "enableHighQualityPhotos")
  fun setEnableHighQualityPhotos(view: CameraView, enableHighQualityPhotos: Boolean?) {
    if (view.enableHighQualityPhotos != enableHighQualityPhotos)
      addChangedPropToTransaction(view, "enableHighQualityPhotos")
    view.enableHighQualityPhotos = enableHighQualityPhotos
  }

  @ReactProp(name = "enablePortraitEffectsMatteDelivery")
  fun setEnablePortraitEffectsMatteDelivery(view: CameraView, enablePortraitEffectsMatteDelivery: Boolean) {
    if (view.enablePortraitEffectsMatteDelivery != enablePortraitEffectsMatteDelivery)
      addChangedPropToTransaction(view, "enablePortraitEffectsMatteDelivery")
    view.enablePortraitEffectsMatteDelivery = enablePortraitEffectsMatteDelivery
  }

  @ReactProp(name = "format")
  fun setFormat(view: CameraView, format: ReadableMap?) {
    if (view.format != format)
      addChangedPropToTransaction(view, "format")
    view.format = format
  }

  // TODO: Change when TurboModules release.
  // We're treating -1 as "null" here, because when I make the fps parameter
  // of type "Int?" the react bridge throws an error.
  @ReactProp(name = "fps", defaultInt = -1)
  fun setFps(view: CameraView, fps: Int) {
    if (view.fps != fps)
      addChangedPropToTransaction(view, "fps")
    view.fps = if (fps > 0) fps else null
  }

  @ReactProp(name = "hdr")
  fun setHdr(view: CameraView, hdr: Boolean?) {
    if (view.hdr != hdr)
      addChangedPropToTransaction(view, "hdr")
    view.hdr = hdr
  }

  @ReactProp(name = "lowLightBoost")
  fun setLowLightBoost(view: CameraView, lowLightBoost: Boolean?) {
    if (view.lowLightBoost != lowLightBoost)
      addChangedPropToTransaction(view, "lowLightBoost")
    view.lowLightBoost = lowLightBoost
  }

  @ReactProp(name = "isActive")
  fun setIsActive(view: CameraView, isActive: Boolean) {
    if (view.isActive != isActive)
      addChangedPropToTransaction(view, "isActive")
    view.isActive = isActive
  }

  @ReactProp(name = "torch")
  fun setTorch(view: CameraView, torch: String) {
    val newMode = Torch.fromUnionValue(torch)
    if (view.torch != newMode)
      addChangedPropToTransaction(view, "torch")
    view.torch = newMode
  }

  @ReactProp(name = "zoom")
  fun setZoom(view: CameraView, zoom: Double) {
    val zoomFloat = zoom.toFloat()
    if (view.zoom != zoomFloat)
      addChangedPropToTransaction(view, "zoom")
    view.zoom = zoomFloat
  }

  @ReactProp(name = "orientation")
  fun setOrientation(view: CameraView, orientation: String?) {
    val newMode = Orientation.fromUnionValue(orientation)
    if (view.orientation != newMode)
      addChangedPropToTransaction(view, "orientation")
    view.orientation = newMode
  }

  companion object {
    const val TAG = "CameraView"

    val cameraViewTransactions: HashMap<CameraView, ArrayList<String>> = HashMap()

    private fun addChangedPropToTransaction(view: CameraView, changedProp: String) {
      if (cameraViewTransactions[view] == null) {
        cameraViewTransactions[view] = ArrayList()
      }
      cameraViewTransactions[view]!!.add(changedProp)
    }
  }
}
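The manager batches prop changes per React transaction: every @ReactProp setter that detects a real change records the prop name in cameraViewTransactions, and onAfterUpdateTransaction() flushes the whole batch into a single view.update() call. A hedged sketch of that sequence (in the app, React Native's UIManager performs these calls, not user code; `themedContext` is an assumed ThemedReactContext instance):

// Illustrative driver only — normally invoked by React Native's UIManager
val manager = CameraViewManager()
val view = manager.createViewInstance(themedContext)
manager.setCameraId(view, "0")
manager.setZoom(view, 2.0)
manager.onAfterUpdateTransaction(view) // -> view.update(arrayListOf("cameraId", "zoom"))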
@@ -0,0 +1,220 @@
package com.mrousavy.camera

import android.Manifest
import android.content.Context
import android.content.pm.PackageManager
import android.hardware.camera2.CameraManager
import android.util.Log
import androidx.core.content.ContextCompat
import com.facebook.react.bridge.*
import com.facebook.react.module.annotations.ReactModule
import com.facebook.react.modules.core.PermissionAwareActivity
import com.facebook.react.modules.core.PermissionListener
import com.facebook.react.uimanager.UIManagerHelper
import com.mrousavy.camera.core.CameraDeviceDetails
import com.mrousavy.camera.frameprocessor.VisionCameraInstaller
import com.mrousavy.camera.frameprocessor.VisionCameraProxy
import com.mrousavy.camera.parsers.*
import com.mrousavy.camera.utils.*
import kotlinx.coroutines.*
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException
import kotlin.coroutines.suspendCoroutine

@ReactModule(name = CameraViewModule.TAG)
@Suppress("unused")
class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJavaModule(reactContext) {
  companion object {
    const val TAG = "CameraView"
    var RequestCode = 10
  }

  private val coroutineScope = CoroutineScope(Dispatchers.Default) // TODO: or Dispatchers.Main?

  override fun invalidate() {
    super.invalidate()
    if (coroutineScope.isActive) {
      coroutineScope.cancel("CameraViewModule has been destroyed.")
    }
  }

  override fun getName(): String {
    return TAG
  }

  private suspend fun findCameraView(viewId: Int): CameraView {
    return suspendCoroutine { continuation ->
      UiThreadUtil.runOnUiThread {
        Log.d(TAG, "Finding view $viewId...")
        val view = if (reactApplicationContext != null) UIManagerHelper.getUIManager(reactApplicationContext, viewId)?.resolveView(viewId) as CameraView? else null
        Log.d(TAG, if (reactApplicationContext != null) "Found view $viewId!" else "Couldn't find view $viewId!")
        if (view != null) continuation.resume(view)
        else continuation.resumeWithException(ViewNotFoundError(viewId))
      }
    }
  }

  @ReactMethod(isBlockingSynchronousMethod = true)
  fun installFrameProcessorBindings(): Boolean {
    return try {
      val proxy = VisionCameraProxy(reactApplicationContext)
      VisionCameraInstaller.install(proxy)
      true
    } catch (e: Error) {
      Log.e(TAG, "Failed to install Frame Processor JSI Bindings!", e)
      false
    }
  }

  @ReactMethod
  fun takePhoto(viewTag: Int, options: ReadableMap, promise: Promise) {
    coroutineScope.launch {
      val view = findCameraView(viewTag)
      withPromise(promise) {
        view.takePhoto(options)
      }
    }
  }

  // TODO: startRecording() cannot be awaited, because I can't have a Promise and an onRecordedCallback in the same function. Hopefully TurboModules allows that
  @ReactMethod
  fun startRecording(viewTag: Int, options: ReadableMap, onRecordCallback: Callback) {
    coroutineScope.launch {
      val view = findCameraView(viewTag)
      try {
        view.startRecording(options, onRecordCallback)
      } catch (error: CameraError) {
        val map = makeErrorMap("${error.domain}/${error.id}", error.message, error)
        onRecordCallback(null, map)
      } catch (error: Throwable) {
        val map = makeErrorMap("capture/unknown", "An unknown error occurred while trying to start a video recording! ${error.message}", error)
        onRecordCallback(null, map)
      }
    }
  }

  @ReactMethod
  fun pauseRecording(viewTag: Int, promise: Promise) {
    coroutineScope.launch {
      withPromise(promise) {
        val view = findCameraView(viewTag)
        view.pauseRecording()
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun resumeRecording(viewTag: Int, promise: Promise) {
    coroutineScope.launch {
      val view = findCameraView(viewTag)
      withPromise(promise) {
        view.resumeRecording()
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun stopRecording(viewTag: Int, promise: Promise) {
    coroutineScope.launch {
      val view = findCameraView(viewTag)
      withPromise(promise) {
        view.stopRecording()
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun focus(viewTag: Int, point: ReadableMap, promise: Promise) {
    coroutineScope.launch {
      val view = findCameraView(viewTag)
      withPromise(promise) {
        view.focus(point)
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun getAvailableCameraDevices(promise: Promise) {
    coroutineScope.launch {
      withPromise(promise) {
        val manager = reactApplicationContext.getSystemService(Context.CAMERA_SERVICE) as CameraManager

        val devices = Arguments.createArray()
        manager.cameraIdList.forEach { cameraId ->
          val device = CameraDeviceDetails(manager, cameraId)
          devices.pushMap(device.toMap())
        }
        promise.resolve(devices)
      }
    }
  }

  private fun canRequestPermission(permission: String): Boolean {
    val activity = currentActivity as? PermissionAwareActivity
    return activity?.shouldShowRequestPermissionRationale(permission) ?: false
  }

  @ReactMethod
  fun getCameraPermissionStatus(promise: Promise) {
    val status = ContextCompat.checkSelfPermission(reactApplicationContext, Manifest.permission.CAMERA)
    var parsed = PermissionStatus.fromPermissionStatus(status)
    if (parsed == PermissionStatus.DENIED && canRequestPermission(Manifest.permission.CAMERA)) {
      parsed = PermissionStatus.NOT_DETERMINED
    }
    promise.resolve(parsed.unionValue)
  }

  @ReactMethod
  fun getMicrophonePermissionStatus(promise: Promise) {
    val status = ContextCompat.checkSelfPermission(reactApplicationContext, Manifest.permission.RECORD_AUDIO)
    var parsed = PermissionStatus.fromPermissionStatus(status)
    if (parsed == PermissionStatus.DENIED && canRequestPermission(Manifest.permission.RECORD_AUDIO)) {
      parsed = PermissionStatus.NOT_DETERMINED
    }
    promise.resolve(parsed.unionValue)
  }

  @ReactMethod
  fun requestCameraPermission(promise: Promise) {
    val activity = reactApplicationContext.currentActivity
    if (activity is PermissionAwareActivity) {
      val currentRequestCode = RequestCode++
      val listener = PermissionListener { requestCode: Int, _: Array<String>, grantResults: IntArray ->
        if (requestCode == currentRequestCode) {
          val permissionStatus = if (grantResults.isNotEmpty()) grantResults[0] else PackageManager.PERMISSION_DENIED
          val parsed = PermissionStatus.fromPermissionStatus(permissionStatus)
          promise.resolve(parsed.unionValue)
          return@PermissionListener true
        }
        return@PermissionListener false
      }
      activity.requestPermissions(arrayOf(Manifest.permission.CAMERA), currentRequestCode, listener)
    } else {
      promise.reject("NO_ACTIVITY", "No PermissionAwareActivity was found! Make sure the app has launched before calling this function.")
    }
  }

  @ReactMethod
  fun requestMicrophonePermission(promise: Promise) {
    val activity = reactApplicationContext.currentActivity
    if (activity is PermissionAwareActivity) {
      val currentRequestCode = RequestCode++
      val listener = PermissionListener { requestCode: Int, _: Array<String>, grantResults: IntArray ->
        if (requestCode == currentRequestCode) {
          val permissionStatus = if (grantResults.isNotEmpty()) grantResults[0] else PackageManager.PERMISSION_DENIED
          val parsed = PermissionStatus.fromPermissionStatus(permissionStatus)
          promise.resolve(parsed.unionValue)
          return@PermissionListener true
        }
        return@PermissionListener false
      }
      activity.requestPermissions(arrayOf(Manifest.permission.RECORD_AUDIO), currentRequestCode, listener)
    } else {
      promise.reject("NO_ACTIVITY", "No PermissionAwareActivity was found! Make sure the app has launched before calling this function.")
    }
  }
}
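findCameraView() above wraps a callback-style UI-thread lookup into a suspend function via suspendCoroutine. The same pattern extracted into a generic form (this helper is an illustration, not part of the module):

// Generic sketch of the suspendCoroutine + runOnUiThread pattern used in findCameraView()
suspend fun <T : Any> resolveOnUiThread(lookup: () -> T?): T =
  suspendCoroutine { continuation ->
    UiThreadUtil.runOnUiThread {
      val result = lookup()
      if (result != null) continuation.resume(result)
      else continuation.resumeWithException(IllegalStateException("Lookup returned null"))
    }
  }

The key detail is that resume()/resumeWithException() is called exactly once, on the UI thread, which hands the result back to whatever dispatcher the caller suspended on.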
67
package/android/src/main/java/com/mrousavy/camera/Errors.kt
Normal file
@@ -0,0 +1,67 @@
package com.mrousavy.camera

import com.mrousavy.camera.parsers.CameraDeviceError
import com.mrousavy.camera.core.outputs.CameraOutputs

abstract class CameraError(
  /**
   * The domain of the error. Error domains are used to group errors.
   *
   * Example: "permission"
   */
  val domain: String,
  /**
   * The id of the error. Errors are uniquely identified under a given domain.
   *
   * Example: "microphone-permission-denied"
   */
  val id: String,
  /**
   * A detailed error description of "what went wrong".
   *
   * Example: "The microphone permission was denied!"
   */
  message: String,
  /**
   * A throwable that caused this error.
   */
  cause: Throwable? = null
) : Throwable("[$domain/$id] $message", cause)

val CameraError.code: String
  get() = "$domain/$id"

class MicrophonePermissionError : CameraError("permission", "microphone-permission-denied", "The Microphone permission was denied! If you want to record Video without sound, pass `audio={false}`.")
class CameraPermissionError : CameraError("permission", "camera-permission-denied", "The Camera permission was denied!")

class InvalidTypeScriptUnionError(unionName: String, unionValue: String) : CameraError("parameter", "invalid-parameter", "The given value for $unionName could not be parsed! (Received: $unionValue)")

class NoCameraDeviceError : CameraError("device", "no-device", "No device was set! Use `getAvailableCameraDevices()` to select a suitable Camera device.")
class NoFlashAvailableError : CameraError("device", "flash-unavailable", "The Camera Device does not have a flash unit! Make sure you select a device where `hasFlash`/`hasTorch` is true!")
class PixelFormatNotSupportedError(format: String) : CameraError("device", "pixel-format-not-supported", "The pixelFormat $format is not supported on the given Camera Device!")

class HdrNotContainedInFormatError : CameraError(
  "format", "invalid-hdr",
  "The currently selected format does not support HDR capture! " +
    "Make sure you select a format which includes `supportsPhotoHDR`!"
)

class CameraNotReadyError : CameraError("session", "camera-not-ready", "The Camera is not ready yet! Wait for the onInitialized() callback!")
class CameraCannotBeOpenedError(cameraId: String, error: CameraDeviceError) : CameraError("session", "camera-cannot-be-opened", "The given Camera device (id: $cameraId) could not be opened! Error: $error")
class CameraSessionCannotBeConfiguredError(cameraId: String, outputs: CameraOutputs) : CameraError("session", "cannot-create-session", "Failed to create a Camera Session for Camera $cameraId! Outputs: $outputs")
class CameraDisconnectedError(cameraId: String, error: CameraDeviceError) : CameraError("session", "camera-has-been-disconnected", "The given Camera device (id: $cameraId) has been disconnected! Error: $error")

class VideoNotEnabledError : CameraError("capture", "video-not-enabled", "Video capture is disabled! Pass `video={true}` to enable video recordings.")
class PhotoNotEnabledError : CameraError("capture", "photo-not-enabled", "Photo capture is disabled! Pass `photo={true}` to enable photo capture.")
class CaptureAbortedError(wasImageCaptured: Boolean) : CameraError("capture", "aborted", "The image capture was aborted! Was Image captured: $wasImageCaptured")
class UnknownCaptureError(wasImageCaptured: Boolean) : CameraError("capture", "unknown", "An unknown error occurred while trying to capture an Image! Was Image captured: $wasImageCaptured")

class RecorderError(name: String, extra: Int) : CameraError("capture", "recorder-error", "An error occurred while recording a video! $name $extra")

class NoRecordingInProgressError : CameraError("capture", "no-recording-in-progress", "There was no active video recording in progress! Did you call stopRecording() twice?")
class RecordingInProgressError : CameraError("capture", "recording-in-progress", "There is already an active video recording in progress! Did you call startRecording() twice?")

class ViewNotFoundError(viewId: Int) : CameraError("system", "view-not-found", "The given view (ID $viewId) was not found in the view manager.")

class UnknownCameraError(cause: Throwable?) : CameraError("unknown", "unknown", cause?.message ?: "An unknown camera error occurred.", cause)
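Because CameraError prefixes its message with "[$domain/$id]" and the code extension joins the same two fields, consumers can match on a stable string. A quick illustration using the classes above:

// Both the code and the message prefix derive from domain/id
val error = CameraPermissionError()
check(error.code == "permission/camera-permission-denied")
check(error.message!!.startsWith("[permission/camera-permission-denied]"))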
@@ -0,0 +1,243 @@
package com.mrousavy.camera.core

import android.graphics.ImageFormat
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraManager
import android.hardware.camera2.CameraMetadata
import android.hardware.camera2.params.DynamicRangeProfiles
import android.os.Build
import android.util.Range
import android.util.Size
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.ReadableArray
import com.facebook.react.bridge.ReadableMap
import com.mrousavy.camera.extensions.bigger
import com.mrousavy.camera.extensions.getPhotoSizes
import com.mrousavy.camera.extensions.getVideoSizes
import com.mrousavy.camera.parsers.PixelFormat
import com.mrousavy.camera.parsers.HardwareLevel
import com.mrousavy.camera.parsers.LensFacing
import com.mrousavy.camera.parsers.Orientation
import com.mrousavy.camera.parsers.VideoStabilizationMode
import kotlin.math.PI
import kotlin.math.atan

class CameraDeviceDetails(private val cameraManager: CameraManager, private val cameraId: String) {
  private val characteristics = cameraManager.getCameraCharacteristics(cameraId)
  private val hardwareLevel = HardwareLevel.fromCameraCharacteristics(characteristics)
  private val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES) ?: IntArray(0)
  private val extensions = getSupportedExtensions()

  // device characteristics
  private val isMultiCam = capabilities.contains(11 /* TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA */)
  private val supportsDepthCapture = capabilities.contains(8 /* TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT */)
  private val supportsRawCapture = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_RAW)
  private val supportsLowLightBoost = extensions.contains(4 /* TODO: CameraExtensionCharacteristics.EXTENSION_NIGHT */)
  private val lensFacing = LensFacing.fromCameraCharacteristics(characteristics)
  private val hasFlash = characteristics.get(CameraCharacteristics.FLASH_INFO_AVAILABLE) ?: false
  private val focalLengths = characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_FOCAL_LENGTHS) ?: floatArrayOf(35f /* 35mm default */)
  private val sensorSize = characteristics.get(CameraCharacteristics.SENSOR_INFO_PHYSICAL_SIZE)!!
  private val sensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION)!!
  private val name = (if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) characteristics.get(CameraCharacteristics.INFO_VERSION)
    else null) ?: "$lensFacing (${cameraId})"

  // "formats" (all possible configurations for this device)
  private val zoomRange = (if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) characteristics.get(CameraCharacteristics.CONTROL_ZOOM_RATIO_RANGE)
    else null) ?: Range(1f, characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_MAX_DIGITAL_ZOOM) ?: 1f)
  private val minZoom = zoomRange.lower.toDouble()
  private val maxZoom = zoomRange.upper.toDouble()

  private val cameraConfig = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  private val isoRange = characteristics.get(CameraCharacteristics.SENSOR_INFO_SENSITIVITY_RANGE) ?: Range(0, 0)
  private val digitalStabilizationModes = characteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES) ?: IntArray(0)
  private val opticalStabilizationModes = characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION) ?: IntArray(0)
  private val supportsPhotoHdr = extensions.contains(3 /* TODO: CameraExtensionCharacteristics.EXTENSION_HDR */)
  private val supportsVideoHdr = getHasVideoHdr()

  private val videoFormat = ImageFormat.YUV_420_888

  // get extensions (HDR, Night Mode, ..)
  private fun getSupportedExtensions(): List<Int> {
    return if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
      val extensions = cameraManager.getCameraExtensionCharacteristics(cameraId)
      extensions.supportedExtensions
    } else {
      emptyList()
    }
  }

  private fun getHasVideoHdr(): Boolean {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      if (capabilities.contains(CameraMetadata.REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT)) {
        val availableProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
          ?: DynamicRangeProfiles(LongArray(0))
        return availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HLG10)
          || availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HDR10)
      }
    }
    return false
  }

  private fun createStabilizationModes(): ReadableArray {
    val array = Arguments.createArray()
    digitalStabilizationModes.forEach { videoStabilizationMode ->
      val mode = VideoStabilizationMode.fromDigitalVideoStabilizationMode(videoStabilizationMode)
      array.pushString(mode.unionValue)
    }
    opticalStabilizationModes.forEach { videoStabilizationMode ->
      val mode = VideoStabilizationMode.fromOpticalVideoStabilizationMode(videoStabilizationMode)
      array.pushString(mode.unionValue)
    }
    return array
  }

  // 35mm is 135 film format, a standard in which focal lengths are usually measured
  private val size35mm = Size(36, 24)

  private fun getDeviceTypes(): ReadableArray {
    // TODO: Check if getDeviceType() works correctly, even for logical multi-cameras

    // To get valid focal length standards we have to upscale to the 35mm measurement (film standard)
    val cropFactor = size35mm.bigger / sensorSize.bigger

    val deviceTypes = Arguments.createArray()

    // https://en.wikipedia.org/wiki/Telephoto_lens
    val containsTelephoto = focalLengths.any { l -> (l * cropFactor) > 35 } // TODO: Telephoto lenses are > 85mm, but we don't have anything between that range..
    // val containsNormalLens = focalLengths.any { l -> (l * cropFactor) > 35 && (l * cropFactor) <= 55 }
    // https://en.wikipedia.org/wiki/Wide-angle_lens
    val containsWideAngle = focalLengths.any { l -> (l * cropFactor) >= 24 && (l * cropFactor) <= 35 }
    // https://en.wikipedia.org/wiki/Ultra_wide_angle_lens
    val containsUltraWideAngle = focalLengths.any { l -> (l * cropFactor) < 24 }

    if (containsTelephoto)
      deviceTypes.pushString("telephoto-camera")
    if (containsWideAngle)
      deviceTypes.pushString("wide-angle-camera")
    if (containsUltraWideAngle)
      deviceTypes.pushString("ultra-wide-angle-camera")

    return deviceTypes
  }

  private fun getFieldOfView(): Double {
    return 2 * atan(sensorSize.bigger / (focalLengths[0] * 2)) * (180 / PI)
  }

  private fun getVideoSizes(): List<Size> {
    return characteristics.getVideoSizes(cameraId, videoFormat)
  }
  private fun getPhotoSizes(): List<Size> {
    return characteristics.getPhotoSizes(ImageFormat.JPEG)
  }

  private fun getFormats(): ReadableArray {
    val array = Arguments.createArray()

    val videoSizes = getVideoSizes()
    val photoSizes = getPhotoSizes()

    videoSizes.forEach { videoSize ->
      val frameDuration = cameraConfig.getOutputMinFrameDuration(videoFormat, videoSize)
      val maxFps = (1.0 / (frameDuration.toDouble() / 1_000_000_000)).toInt()

      photoSizes.forEach { photoSize ->
        val map = buildFormatMap(photoSize, videoSize, Range(1, maxFps))
        array.pushMap(map)
      }
    }

    // TODO: Add high-speed video ranges (high-fps / slow-motion)

    return array
  }

  // Get available pixel formats for the given Size
  private fun createPixelFormats(size: Size): ReadableArray {
    val formats = cameraConfig.outputFormats
    val array = Arguments.createArray()
    formats.forEach { format ->
      val sizes = cameraConfig.getOutputSizes(format)
      val hasSize = sizes.any { it.width == size.width && it.height == size.height }
      if (hasSize) {
        array.pushString(PixelFormat.fromImageFormat(format).unionValue)
      }
    }
    return array
  }

  private fun buildFormatMap(photoSize: Size, videoSize: Size, fpsRange: Range<Int>): ReadableMap {
    val map = Arguments.createMap()
    map.putInt("photoHeight", photoSize.height)
    map.putInt("photoWidth", photoSize.width)
    map.putInt("videoHeight", videoSize.height)
    map.putInt("videoWidth", videoSize.width)
    map.putInt("minISO", isoRange.lower)
    map.putInt("maxISO", isoRange.upper)
    map.putInt("minFps", fpsRange.lower)
    map.putInt("maxFps", fpsRange.upper)
    map.putDouble("fieldOfView", getFieldOfView())
    map.putBoolean("supportsVideoHDR", supportsVideoHdr)
    map.putBoolean("supportsPhotoHDR", supportsPhotoHdr)
    map.putString("autoFocusSystem", "contrast-detection") // TODO: Is this wrong?
    map.putArray("videoStabilizationModes", createStabilizationModes())
    map.putArray("pixelFormats", createPixelFormats(videoSize))
    return map
  }

  // convert to React Native JS object (map)
  fun toMap(): ReadableMap {
    val map = Arguments.createMap()
    map.putString("id", cameraId)
    map.putArray("devices", getDeviceTypes())
    map.putString("position", lensFacing.unionValue)
    map.putString("name", name)
    map.putBoolean("hasFlash", hasFlash)
    map.putBoolean("hasTorch", hasFlash)
    map.putBoolean("isMultiCam", isMultiCam)
    map.putBoolean("supportsRawCapture", supportsRawCapture)
    map.putBoolean("supportsDepthCapture", supportsDepthCapture)
    map.putBoolean("supportsLowLightBoost", supportsLowLightBoost)
    map.putBoolean("supportsFocus", true) // I believe every device here supports focussing
    map.putDouble("minZoom", minZoom)
    map.putDouble("maxZoom", maxZoom)
    map.putDouble("neutralZoom", 1.0) // Zoom is always relative to 1.0 on Android
    map.putString("hardwareLevel", hardwareLevel.unionValue)
    map.putString("sensorOrientation", Orientation.fromRotationDegrees(sensorOrientation).unionValue)

    val array = Arguments.createArray()
    cameraConfig.outputFormats.forEach { f ->
      val str = when (f) {
        ImageFormat.YUV_420_888 -> "YUV_420_888"
        ImageFormat.YUV_422_888 -> "YUV_422_888"
        ImageFormat.YUV_444_888 -> "YUV_444_888"
        ImageFormat.JPEG -> "JPEG"
        ImageFormat.DEPTH16 -> "DEPTH16"
        ImageFormat.DEPTH_JPEG -> "DEPTH_JPEG"
        ImageFormat.FLEX_RGBA_8888 -> "FLEX_RGBA_8888"
        ImageFormat.FLEX_RGB_888 -> "FLEX_RGB_888"
        ImageFormat.YUY2 -> "YUY2"
        ImageFormat.Y8 -> "Y8"
        ImageFormat.YV12 -> "YV12"
        ImageFormat.HEIC -> "HEIC"
        ImageFormat.PRIVATE -> "PRIVATE"
        ImageFormat.RAW_PRIVATE -> "RAW_PRIVATE"
        ImageFormat.RAW_SENSOR -> "RAW_SENSOR"
        ImageFormat.RAW10 -> "RAW10"
        ImageFormat.RAW12 -> "RAW12"
        ImageFormat.NV16 -> "NV16"
        ImageFormat.NV21 -> "NV21"
        ImageFormat.UNKNOWN -> "UNKNOWN"
        ImageFormat.YCBCR_P010 -> "YCBCR_P010"
        else -> "unknown ($f)"
      }
      array.pushString(str)
    }
    map.putArray("pixelFormats", array)

    map.putArray("formats", getFormats())

    return map
  }
}
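As a sanity check on getFieldOfView() above: it computes 2·atan(sensorWidth / (2·focalLength)) in degrees, so a hypothetical 36 mm-wide sensor with a 35 mm lens yields roughly 54.4°, the familiar horizontal field of view of a 35 mm lens on full frame. The standalone form (values are assumptions for illustration):

import kotlin.math.PI
import kotlin.math.atan

// Same formula as getFieldOfView(), parameterized for a quick check
fun fieldOfViewDegrees(sensorWidthMm: Float, focalLengthMm: Float): Double =
  2 * atan(sensorWidthMm / (focalLengthMm * 2)) * (180 / PI)

fun main() {
  println(fieldOfViewDegrees(36f, 35f)) // ≈ 54.4
}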
@@ -0,0 +1,563 @@
package com.mrousavy.camera.core

import android.content.Context
import android.graphics.Point
import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraDevice
import android.hardware.camera2.CameraManager
import android.hardware.camera2.CameraMetadata
import android.hardware.camera2.CaptureRequest
import android.hardware.camera2.CaptureResult
import android.hardware.camera2.TotalCaptureResult
import android.hardware.camera2.params.MeteringRectangle
import android.media.Image
import android.os.Build
import android.util.Log
import android.util.Range
import android.util.Size
import com.mrousavy.camera.CameraNotReadyError
import com.mrousavy.camera.CameraQueues
import com.mrousavy.camera.CameraView
import com.mrousavy.camera.CaptureAbortedError
import com.mrousavy.camera.NoRecordingInProgressError
import com.mrousavy.camera.PhotoNotEnabledError
import com.mrousavy.camera.RecorderError
import com.mrousavy.camera.RecordingInProgressError
import com.mrousavy.camera.VideoNotEnabledError
import com.mrousavy.camera.extensions.SessionType
import com.mrousavy.camera.extensions.capture
import com.mrousavy.camera.extensions.createCaptureSession
import com.mrousavy.camera.extensions.createPhotoCaptureRequest
import com.mrousavy.camera.extensions.openCamera
import com.mrousavy.camera.extensions.tryClose
import com.mrousavy.camera.extensions.zoomed
import com.mrousavy.camera.frameprocessor.FrameProcessor
import com.mrousavy.camera.parsers.Flash
import com.mrousavy.camera.parsers.Orientation
import com.mrousavy.camera.parsers.QualityPrioritization
import com.mrousavy.camera.parsers.VideoCodec
import com.mrousavy.camera.parsers.VideoFileType
import com.mrousavy.camera.parsers.VideoStabilizationMode
import com.mrousavy.camera.core.outputs.CameraOutputs
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.launch
import kotlinx.coroutines.sync.Mutex
import kotlinx.coroutines.sync.withLock
import java.io.Closeable
import java.util.concurrent.CancellationException
import kotlin.coroutines.CoroutineContext

class CameraSession(private val context: Context,
                    private val cameraManager: CameraManager,
                    private val onInitialized: () -> Unit,
                    private val onError: (e: Throwable) -> Unit): CoroutineScope, Closeable, CameraOutputs.Callback, CameraManager.AvailabilityCallback() {
  companion object {
    private const val TAG = "CameraSession"

    // TODO: Samsung advertises 60 FPS but only allows 30 FPS for some reason.
    private val CAN_SET_FPS = !Build.MANUFACTURER.equals("samsung", true)
  }

  data class CapturedPhoto(val image: Image,
                           val metadata: TotalCaptureResult,
                           val orientation: Orientation,
                           val isMirrored: Boolean,
                           val format: Int): Closeable {
    override fun close() {
      image.close()
    }
  }

  // setInput(..)
  private var cameraId: String? = null

  // setOutputs(..)
  private var outputs: CameraOutputs? = null

  // setIsActive(..)
  private var isActive = false

  // configureFormat(..)
  private var fps: Int? = null
  private var videoStabilizationMode: VideoStabilizationMode? = null
  private var lowLightBoost: Boolean? = null
  private var hdr: Boolean? = null

  // zoom(..)
  private var zoom: Float = 1.0f

  private var captureSession: CameraCaptureSession? = null
  private var cameraDevice: CameraDevice? = null
  private var previewRequest: CaptureRequest.Builder? = null
  private val photoOutputSynchronizer = PhotoOutputSynchronizer()
  private val mutex = Mutex()
  private var isRunning = false
  private var enableTorch = false
  // Video Outputs
  private var recording: RecordingSession? = null
    set(value) {
      field = value
      updateVideoOutputs()
    }
  var frameProcessor: FrameProcessor? = null
    set(value) {
      field = value
      updateVideoOutputs()
    }

  override val coroutineContext: CoroutineContext = CameraQueues.cameraQueue.coroutineDispatcher

  init {
    cameraManager.registerAvailabilityCallback(this, CameraQueues.cameraQueue.handler)
  }

  override fun close() {
    cameraManager.unregisterAvailabilityCallback(this)
    photoOutputSynchronizer.clear()
    captureSession?.close()
    cameraDevice?.tryClose()
    outputs?.close()
    isRunning = false
  }

  val orientation: Orientation
    get() {
      val cameraId = cameraId ?: return Orientation.PORTRAIT
      val characteristics = cameraManager.getCameraCharacteristics(cameraId)
      val sensorRotation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION) ?: 0
      return Orientation.fromRotationDegrees(sensorRotation)
    }

  fun configureSession(cameraId: String,
                       preview: CameraOutputs.PreviewOutput? = null,
                       photo: CameraOutputs.PhotoOutput? = null,
                       video: CameraOutputs.VideoOutput? = null) {
    Log.i(TAG, "Configuring Session for Camera $cameraId...")
    val outputs = CameraOutputs(cameraId,
      cameraManager,
      preview,
      photo,
      video,
      hdr == true,
      this)
    if (this.cameraId == cameraId && this.outputs == outputs && isActive == isRunning) {
      Log.i(TAG, "Nothing changed in configuration, canceling..")
    }

    // 1. Close previous outputs
    this.outputs?.close()
    // 2. Assign new outputs
    this.outputs = outputs
    // 3. Update with existing render targets (surfaces)
    updateVideoOutputs()

    this.cameraId = cameraId
    launch {
      startRunning()
    }
  }

  fun configureFormat(fps: Int? = null,
                      videoStabilizationMode: VideoStabilizationMode? = null,
                      hdr: Boolean? = null,
                      lowLightBoost: Boolean? = null) {
    Log.i(TAG, "Setting Format (fps: $fps | videoStabilization: $videoStabilizationMode | hdr: $hdr | lowLightBoost: $lowLightBoost)...")
    this.fps = fps
    this.videoStabilizationMode = videoStabilizationMode
    this.hdr = hdr
    this.lowLightBoost = lowLightBoost

    var needsReconfiguration = false
    val currentOutputs = outputs
    if (currentOutputs != null && currentOutputs.enableHdr != hdr) {
      // Update existing HDR for Outputs
      this.outputs = CameraOutputs(currentOutputs.cameraId,
        cameraManager,
        currentOutputs.preview,
        currentOutputs.photo,
        currentOutputs.video,
        hdr,
        this)
      needsReconfiguration = true
    }
    launch {
      if (needsReconfiguration) startRunning()
      else updateRepeatingRequest()
    }
  }

  /**
   * Starts or stops the Camera.
   */
  fun setIsActive(isActive: Boolean) {
    Log.i(TAG, "Setting isActive: $isActive (isRunning: $isRunning)")
    this.isActive = isActive
    if (isActive == isRunning) return

    launch {
      if (isActive) {
        startRunning()
      } else {
        stopRunning()
      }
    }
  }

  private fun updateVideoOutputs() {
    val videoPipeline = outputs?.videoOutput?.videoPipeline ?: return
    val previewOutput = outputs?.previewOutput
    videoPipeline.setRecordingSessionOutput(this.recording)
    videoPipeline.setFrameProcessorOutput(this.frameProcessor)
  }

  suspend fun takePhoto(qualityPrioritization: QualityPrioritization,
                        flashMode: Flash,
                        enableShutterSound: Boolean,
                        enableRedEyeReduction: Boolean,
                        enableAutoStabilization: Boolean,
                        outputOrientation: Orientation): CapturedPhoto {
    val captureSession = captureSession ?: throw CameraNotReadyError()
    val outputs = outputs ?: throw CameraNotReadyError()

    val photoOutput = outputs.photoOutput ?: throw PhotoNotEnabledError()

    Log.i(TAG, "Photo capture 0/3 - preparing capture request (${photoOutput.size.width}x${photoOutput.size.height})...")

    val cameraCharacteristics = cameraManager.getCameraCharacteristics(captureSession.device.id)
    val orientation = outputOrientation.toSensorRelativeOrientation(cameraCharacteristics)
    val captureRequest = captureSession.device.createPhotoCaptureRequest(cameraManager,
      photoOutput.surface,
      zoom,
      qualityPrioritization,
      flashMode,
      enableRedEyeReduction,
      enableAutoStabilization,
      orientation)
    Log.i(TAG, "Photo capture 1/3 - starting capture...")
    val result = captureSession.capture(captureRequest, enableShutterSound)
    val timestamp = result[CaptureResult.SENSOR_TIMESTAMP]!!
    Log.i(TAG, "Photo capture 2/3 complete - received metadata with timestamp $timestamp")
    try {
      val image = photoOutputSynchronizer.await(timestamp)

      val isMirrored = cameraCharacteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT

      Log.i(TAG, "Photo capture 3/3 complete - received ${image.width} x ${image.height} image.")
      return CapturedPhoto(image, result, orientation, isMirrored, image.format)
    } catch (e: CancellationException) {
      throw CaptureAbortedError(false)
    }
  }

  override fun onPhotoCaptured(image: Image) {
    Log.i(CameraView.TAG, "Photo captured! ${image.width} x ${image.height}")
    photoOutputSynchronizer.set(image.timestamp, image)
  }
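takePhoto() above never touches the ImageReader directly; it correlates the capture metadata with the delivered Image through the sensor timestamp: the CaptureResult's SENSOR_TIMESTAMP is awaited on photoOutputSynchronizer, and onPhotoCaptured() later resolves that await with the Image carrying the same timestamp. PhotoOutputSynchronizer itself is not shown in this diff; a reduced sketch of the contract it would need (a CompletableDeferred-keyed map is one plausible shape, offered purely as an assumption):

import kotlinx.coroutines.CompletableDeferred

// Hypothetical reduction of the timestamp-keyed await/set contract.
// Not thread-safe as written; a real implementation would need synchronization.
class TimestampSynchronizer<T : Any> {
  private val pending = mutableMapOf<Long, CompletableDeferred<T>>()

  suspend fun await(timestamp: Long): T =
    pending.getOrPut(timestamp) { CompletableDeferred() }.await()

  fun set(timestamp: Long, value: T) {
    pending.getOrPut(timestamp) { CompletableDeferred() }.complete(value)
  }
}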
  suspend fun startRecording(enableAudio: Boolean,
                             codec: VideoCodec,
                             fileType: VideoFileType,
                             callback: (video: RecordingSession.Video) -> Unit,
                             onError: (error: RecorderError) -> Unit) {
    mutex.withLock {
      if (recording != null) throw RecordingInProgressError()
      val outputs = outputs ?: throw CameraNotReadyError()
      val videoOutput = outputs.videoOutput ?: throw VideoNotEnabledError()

      val recording = RecordingSession(context, videoOutput.size, enableAudio, fps, codec, orientation, fileType, callback, onError)
      recording.start()
      this.recording = recording
    }
  }

  suspend fun stopRecording() {
    mutex.withLock {
      val recording = recording ?: throw NoRecordingInProgressError()

      recording.stop()
      this.recording = null
    }
  }

  suspend fun pauseRecording() {
    mutex.withLock {
      val recording = recording ?: throw NoRecordingInProgressError()
      recording.pause()
    }
  }

  suspend fun resumeRecording() {
    mutex.withLock {
      val recording = recording ?: throw NoRecordingInProgressError()
      recording.resume()
    }
  }

  suspend fun setTorchMode(enableTorch: Boolean) {
    if (this.enableTorch != enableTorch) {
      this.enableTorch = enableTorch
      updateRepeatingRequest()
    }
  }

  fun setZoom(zoom: Float) {
    if (this.zoom != zoom) {
      this.zoom = zoom
      launch {
        updateRepeatingRequest()
      }
    }
  }

  suspend fun focus(x: Int, y: Int) {
    val captureSession = captureSession ?: throw CameraNotReadyError()
    val previewOutput = outputs?.previewOutput ?: throw CameraNotReadyError()
    val characteristics = cameraManager.getCameraCharacteristics(captureSession.device.id)
    val sensorSize = characteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)!!
    val previewSize = previewOutput.size
    val pX = x.toDouble() / previewSize.width * sensorSize.height()
    val pY = y.toDouble() / previewSize.height * sensorSize.width()
    val point = Point(pX.toInt(), pY.toInt())

    Log.i(TAG, "Focusing (${point.x}, ${point.y})...")
    focus(point)
  }
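Note the axis swap in focus(x, y) above: the preview's x coordinate is scaled by the sensor's height and y by its width, because the sensor's active array is typically mounted 90° rotated relative to a portrait preview. The same mapping in isolation (sizes are made-up example values):

// Mirrors the preview→sensor mapping in focus(x, y)
fun previewToSensor(x: Int, y: Int,
                    previewWidth: Int, previewHeight: Int,
                    sensorWidth: Int, sensorHeight: Int): Pair<Int, Int> {
  val pX = x.toDouble() / previewWidth * sensorHeight // preview x → sensor height axis
  val pY = y.toDouble() / previewHeight * sensorWidth // preview y → sensor width axis
  return pX.toInt() to pY.toInt()
}

// e.g. a tap at (540, 960) on a 1080x1920 preview with a 4000x3000 active array
// maps to (1500, 2000) in sensor coordinates.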
  override fun onCameraAvailable(cameraId: String) {
    super.onCameraAvailable(cameraId)
    Log.i(TAG, "Camera became available: $cameraId")
  }

  override fun onCameraUnavailable(cameraId: String) {
    super.onCameraUnavailable(cameraId)
    Log.i(TAG, "Camera became unavailable: $cameraId")
  }

  private suspend fun focus(point: Point) {
    mutex.withLock {
      val captureSession = captureSession ?: throw CameraNotReadyError()
      val request = previewRequest ?: throw CameraNotReadyError()

      val weight = MeteringRectangle.METERING_WEIGHT_MAX - 1
      val focusAreaTouch = MeteringRectangle(point, Size(150, 150), weight)

      // Quickly pause preview
      captureSession.stopRepeating()

      request.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_CANCEL)
      request.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_OFF)
      captureSession.capture(request.build(), null, null)

      // Add AF trigger with focus region
      val characteristics = cameraManager.getCameraCharacteristics(captureSession.device.id)
      val maxSupportedFocusRegions = characteristics.get(CameraCharacteristics.CONTROL_MAX_REGIONS_AE) ?: 0
      if (maxSupportedFocusRegions >= 1) {
        request.set(CaptureRequest.CONTROL_AF_REGIONS, arrayOf(focusAreaTouch))
      }
      request.set(CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_AUTO)
      request.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_AUTO)
      request.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_START)

      captureSession.capture(request.build(), false)

      // Resume preview
      request.set(CaptureRequest.CONTROL_AF_TRIGGER, CaptureRequest.CONTROL_AF_TRIGGER_IDLE)
      captureSession.setRepeatingRequest(request.build(), null, null)
    }
  }

  /**
   * Opens a [CameraDevice]. If there already is an open Camera for the given [cameraId], use that.
   */
  private suspend fun getCameraDevice(cameraId: String, onClosed: (error: Throwable) -> Unit): CameraDevice {
    val currentDevice = cameraDevice
    if (currentDevice?.id == cameraId) {
      // We already opened that device
      return currentDevice
    }
    // Close previous device
    cameraDevice?.tryClose()
    cameraDevice = null

    val device = cameraManager.openCamera(cameraId, { camera, reason ->
      Log.d(TAG, "Camera Closed ($cameraDevice == $camera)")
      if (cameraDevice == camera) {
        // The current CameraDevice has been closed, handle that!
        onClosed(reason)
        cameraDevice = null
      } else {
        // A new CameraDevice has been opened, we don't care about this one anymore.
      }
    }, CameraQueues.cameraQueue)

    // Cache device in memory
    cameraDevice = device
    return device
  }

  // Caches the result of outputs.hashCode() of the last getCaptureSession call
  private var lastOutputsHashCode: Int? = null

  private suspend fun getCaptureSession(cameraDevice: CameraDevice,
                                        outputs: CameraOutputs,
                                        onClosed: () -> Unit): CameraCaptureSession {
    val currentSession = captureSession
    if (currentSession?.device == cameraDevice && outputs.hashCode() == lastOutputsHashCode) {
      // We already opened a CameraCaptureSession on this device
      return currentSession
    }
    captureSession?.close()
    captureSession = null

    val session = cameraDevice.createCaptureSession(cameraManager, SessionType.REGULAR, outputs, { session ->
      Log.d(TAG, "Capture Session Closed ($captureSession == $session)")
      if (captureSession == session) {
        // The current CameraCaptureSession has been closed, handle that!
        onClosed()
        captureSession = null
      } else {
        // A new CameraCaptureSession has been opened, we don't care about this one anymore.
      }
    }, CameraQueues.cameraQueue)

    // Cache session in memory
    captureSession = session
    lastOutputsHashCode = outputs.hashCode()
    // New session initialized
    onInitialized()
    return session
  }

  private fun getPreviewCaptureRequest(fps: Int? = null,
                                       videoStabilizationMode: VideoStabilizationMode? = null,
                                       lowLightBoost: Boolean? = null,
                                       hdr: Boolean? = null,
                                       torch: Boolean? = null): CaptureRequest {
    val captureRequest = previewRequest ?: throw CameraNotReadyError()

    // FPS
    val fpsRange = if (fps != null && CAN_SET_FPS) Range(fps, fps) else Range(30, 30)
    captureRequest.set(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, fpsRange)

    // Video Stabilization
    captureRequest.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, videoStabilizationMode?.toDigitalStabilizationMode())
    captureRequest.set(CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE, videoStabilizationMode?.toOpticalStabilizationMode())

    // Night/HDR Mode
    val sceneMode = if (hdr == true) CaptureRequest.CONTROL_SCENE_MODE_HDR else if (lowLightBoost == true) CaptureRequest.CONTROL_SCENE_MODE_NIGHT else null
    captureRequest.set(CaptureRequest.CONTROL_SCENE_MODE, sceneMode)
    captureRequest.set(CaptureRequest.CONTROL_MODE, if (sceneMode != null) CaptureRequest.CONTROL_MODE_USE_SCENE_MODE else CaptureRequest.CONTROL_MODE_AUTO)

    // Zoom
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
      captureRequest.set(CaptureRequest.CONTROL_ZOOM_RATIO, zoom)
    } else {
      val cameraCharacteristics = cameraManager.getCameraCharacteristics(cameraId!!)
      val size = cameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)!!
      captureRequest.set(CaptureRequest.SCALER_CROP_REGION, size.zoomed(zoom))
    }

    // Torch Mode
    val torchMode = if (torch == true) CaptureRequest.FLASH_MODE_TORCH else CaptureRequest.FLASH_MODE_OFF
    captureRequest.set(CaptureRequest.FLASH_MODE, torchMode)

    return captureRequest.build()
  }

  private fun destroy() {
    Log.i(TAG, "Destroying session..")
    captureSession?.stopRepeating()
    captureSession?.close()
    captureSession = null

    cameraDevice?.close()
    cameraDevice = null

    isRunning = false
  }

  private suspend fun startRunning() {
    isRunning = false
    val cameraId = cameraId ?: return
    if (!isActive) return

    Log.i(TAG, "Starting Camera Session...")

    try {
      mutex.withLock {
        val outputs = outputs
        if (outputs == null || outputs.size == 0) {
          Log.i(TAG, "CameraSession doesn't have any Outputs, canceling..")
          destroy()
          return@withLock
        }

        // 1. Open Camera Device
        val camera = getCameraDevice(cameraId) { reason ->
          isRunning = false
          onError(reason)
        }

        // 2. Create capture session with outputs
|
||||
val session = getCaptureSession(camera, outputs) {
|
||||
isRunning = false
|
||||
}
|
||||
|
||||
// 3. Create request template
|
||||
val template = if (outputs.videoOutput != null) CameraDevice.TEMPLATE_RECORD else CameraDevice.TEMPLATE_PREVIEW
|
||||
val captureRequest = camera.createCaptureRequest(template)
|
||||
outputs.previewOutput?.let { output ->
|
||||
Log.i(TAG, "Adding output surface ${output.outputType}..")
|
||||
captureRequest.addTarget(output.surface)
|
||||
}
|
||||
outputs.videoOutput?.let { output ->
|
||||
Log.i(TAG, "Adding output surface ${output.outputType}..")
|
||||
captureRequest.addTarget(output.surface)
|
||||
}
|
||||
|
||||
Log.i(TAG, "Camera Session initialized! Starting repeating request..")
|
||||
isRunning = true
|
||||
this.previewRequest = captureRequest
|
||||
this.captureSession = session
|
||||
this.cameraDevice = camera
|
||||
}
|
||||
|
||||
updateRepeatingRequest()
|
||||
} catch (e: IllegalStateException) {
|
||||
Log.e(TAG, "Failed to start Camera Session, this session is already closed.", e)
|
||||
}
|
||||
}
|
||||
|
||||
private suspend fun updateRepeatingRequest() {
|
||||
mutex.withLock {
|
||||
val session = captureSession
|
||||
if (session == null) {
|
||||
// Not yet ready. Start session first, then it will update repeating request.
|
||||
startRunning()
|
||||
return
|
||||
}
|
||||
|
||||
val fps = fps
|
||||
val videoStabilizationMode = videoStabilizationMode
|
||||
val lowLightBoost = lowLightBoost
|
||||
val hdr = hdr
|
||||
|
||||
val repeatingRequest = getPreviewCaptureRequest(fps, videoStabilizationMode, lowLightBoost, hdr)
|
||||
Log.d(TAG, "Setting Repeating Request..")
|
||||
session.setRepeatingRequest(repeatingRequest, null, null)
|
||||
}
|
||||
}
|
||||
|
||||
private suspend fun stopRunning() {
|
||||
Log.i(TAG, "Stopping Camera Session...")
|
||||
try {
|
||||
mutex.withLock {
|
||||
destroy()
|
||||
Log.i(TAG, "Camera Session stopped!")
|
||||
}
|
||||
} catch (e: IllegalStateException) {
|
||||
Log.e(TAG, "Failed to stop Camera Session, this session is already closed.", e)
|
||||
}
|
||||
}
|
||||
}
|
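The pre-API-30 zoom branch above crops via SCALER_CROP_REGION. A quick worked example of that path with an assumed 4000x3000 active array, using the Rect.zoomed extension added later in this commit:

import android.graphics.Rect

// Worked example; sensor dimensions assumed for illustration.
val activeArray = Rect(0, 0, 4000, 3000)
val cropRegion = activeArray.zoomed(2f)
// -> Rect(1000, 750, 3000, 2250): a centered region at half the width and height,
//    which the HAL interprets as 2x zoom.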
@@ -0,0 +1,32 @@
package com.mrousavy.camera.core

import android.media.Image
import kotlinx.coroutines.CompletableDeferred

class PhotoOutputSynchronizer {
  private val photoOutputQueue = HashMap<Long, CompletableDeferred<Image>>()

  private operator fun get(key: Long): CompletableDeferred<Image> {
    if (!photoOutputQueue.containsKey(key)) {
      photoOutputQueue[key] = CompletableDeferred()
    }
    return photoOutputQueue[key]!!
  }

  suspend fun await(timestamp: Long): Image {
    val image = this[timestamp].await()
    photoOutputQueue.remove(timestamp)
    return image
  }

  fun set(timestamp: Long, image: Image) {
    this[timestamp].complete(image)
  }

  fun clear() {
    photoOutputQueue.forEach {
      it.value.cancel()
    }
    photoOutputQueue.clear()
  }
}
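A minimal usage sketch of the synchronizer; the producer/consumer wiring shown here is illustrative (in this commit the producer is the photo ImageReader callback in CameraOutputs):

import android.media.Image

// Illustrative only: one synchronizer shared between both sides.
val synchronizer = PhotoOutputSynchronizer()

// Producer (e.g. ImageReader callback): publish the Image under its timestamp.
fun onPhotoCaptured(image: Image) = synchronizer.set(image.timestamp, image)

// Consumer (e.g. takePhoto()): suspend until the Image with that timestamp arrives.
suspend fun awaitPhoto(sensorTimestamp: Long): Image = synchronizer.await(sensorTimestamp)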
@@ -0,0 +1,71 @@
package com.mrousavy.camera.core

import android.annotation.SuppressLint
import android.content.Context
import android.hardware.camera2.CameraManager
import android.util.Log
import android.util.Size
import android.view.Surface
import android.view.SurfaceHolder
import android.view.SurfaceView
import com.mrousavy.camera.extensions.getPreviewSize
import kotlin.math.roundToInt

@SuppressLint("ViewConstructor")
class PreviewView(context: Context,
                  cameraManager: CameraManager,
                  cameraId: String,
                  private val onSurfaceChanged: (surface: Surface?) -> Unit): SurfaceView(context) {
  private val targetSize: Size
  private val aspectRatio: Float
    get() = targetSize.width.toFloat() / targetSize.height.toFloat()

  init {
    val characteristics = cameraManager.getCameraCharacteristics(cameraId)
    targetSize = characteristics.getPreviewSize()

    Log.i(TAG, "Using Preview Size ${targetSize.width} x ${targetSize.height}.")
    holder.setFixedSize(targetSize.width, targetSize.height)
    holder.addCallback(object: SurfaceHolder.Callback {
      override fun surfaceCreated(holder: SurfaceHolder) {
        Log.i(TAG, "Surface created! ${holder.surface}")
        onSurfaceChanged(holder.surface)
      }

      override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {
        Log.i(TAG, "Surface resized! ${holder.surface} ($width x $height in format #$format)")
      }

      override fun surfaceDestroyed(holder: SurfaceHolder) {
        Log.i(TAG, "Surface destroyed! ${holder.surface}")
        onSurfaceChanged(null)
      }
    })
  }

  override fun onMeasure(widthMeasureSpec: Int, heightMeasureSpec: Int) {
    super.onMeasure(widthMeasureSpec, heightMeasureSpec)
    val width = MeasureSpec.getSize(widthMeasureSpec)
    val height = MeasureSpec.getSize(heightMeasureSpec)
    Log.d(TAG, "onMeasure($width, $height)")

    // Performs center-crop transformation of the camera frames
    val newWidth: Int
    val newHeight: Int
    val actualRatio = if (width > height) aspectRatio else 1f / aspectRatio
    if (width < height * actualRatio) {
      newHeight = height
      newWidth = (height * actualRatio).roundToInt()
    } else {
      newWidth = width
      newHeight = (width / actualRatio).roundToInt()
    }

    Log.d(TAG, "Measured dimensions set: $newWidth x $newHeight")
    setMeasuredDimension(newWidth, newHeight)
  }

  companion object {
    private const val TAG = "NativePreviewView"
  }
}
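A quick numeric check of the center-crop measurement above, with all sizes assumed for illustration:

import kotlin.math.roundToInt

// Worked example of onMeasure's math.
fun main() {
  val aspectRatio = 1920f / 1080f      // landscape 1080p preview stream
  val width = 1080                     // portrait view bounds
  val height = 2400
  val actualRatio = if (width > height) aspectRatio else 1f / aspectRatio  // 0.5625
  // width (1080) < height * actualRatio (1350), so the first branch applies:
  val newHeight = height                              // 2400
  val newWidth = (height * actualRatio).roundToInt()  // 1350
  println("$newWidth x $newHeight")  // measured wider than the view; the excess is cropped
}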
@@ -0,0 +1,135 @@
package com.mrousavy.camera.core

import android.content.Context
import android.media.ImageWriter
import android.media.MediaCodec
import android.media.MediaRecorder
import android.os.Build
import android.util.Log
import android.util.Size
import android.view.Surface
import com.mrousavy.camera.RecorderError
import com.mrousavy.camera.parsers.Orientation
import com.mrousavy.camera.parsers.VideoCodec
import com.mrousavy.camera.parsers.VideoFileType
import java.io.File

class RecordingSession(context: Context,
                       val size: Size,
                       private val enableAudio: Boolean,
                       private val fps: Int? = null,
                       private val codec: VideoCodec = VideoCodec.H264,
                       private val orientation: Orientation,
                       private val fileType: VideoFileType = VideoFileType.MP4,
                       private val callback: (video: Video) -> Unit,
                       private val onError: (error: RecorderError) -> Unit) {
  companion object {
    private const val TAG = "RecordingSession"
    // bits per second
    private const val VIDEO_BIT_RATE = 10_000_000
    private const val AUDIO_SAMPLING_RATE = 44_100
    private const val AUDIO_BIT_RATE = 16 * AUDIO_SAMPLING_RATE
    private const val AUDIO_CHANNELS = 1
  }

  data class Video(val path: String, val durationMs: Long)

  private val recorder: MediaRecorder
  private val outputFile: File
  private var startTime: Long? = null
  private var imageWriter: ImageWriter? = null
  val surface: Surface = MediaCodec.createPersistentInputSurface()

  init {
    outputFile = File.createTempFile("mrousavy", fileType.toExtension(), context.cacheDir)

    Log.i(TAG, "Creating RecordingSession for ${outputFile.absolutePath}")

    recorder = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) MediaRecorder(context) else MediaRecorder()

    if (enableAudio) recorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER)
    recorder.setVideoSource(MediaRecorder.VideoSource.SURFACE)

    recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4)
    recorder.setOutputFile(outputFile.absolutePath)
    recorder.setVideoEncodingBitRate(VIDEO_BIT_RATE)
    recorder.setVideoSize(size.height, size.width)
    if (fps != null) recorder.setVideoFrameRate(fps)

    Log.i(TAG, "Using $codec Video Codec..")
    recorder.setVideoEncoder(codec.toVideoCodec())
    if (enableAudio) {
      Log.i(TAG, "Adding Audio Channel..")
      recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC)
      recorder.setAudioEncodingBitRate(AUDIO_BIT_RATE)
      recorder.setAudioSamplingRate(AUDIO_SAMPLING_RATE)
      recorder.setAudioChannels(AUDIO_CHANNELS)
    }
    recorder.setInputSurface(surface)
    //recorder.setOrientationHint(orientation.toDegrees())

    recorder.setOnErrorListener { _, what, extra ->
      Log.e(TAG, "MediaRecorder Error: $what ($extra)")
      stop()
      val name = when (what) {
        MediaRecorder.MEDIA_RECORDER_ERROR_UNKNOWN -> "unknown"
        MediaRecorder.MEDIA_ERROR_SERVER_DIED -> "server-died"
        else -> "unknown"
      }
      onError(RecorderError(name, extra))
    }
    recorder.setOnInfoListener { _, what, extra ->
      Log.i(TAG, "MediaRecorder Info: $what ($extra)")
    }

    Log.i(TAG, "Created $this!")
  }

  fun start() {
    synchronized(this) {
      Log.i(TAG, "Starting RecordingSession..")
      recorder.prepare()
      recorder.start()
      startTime = System.currentTimeMillis()
    }
  }

  fun stop() {
    synchronized(this) {
      Log.i(TAG, "Stopping RecordingSession..")
      try {
        recorder.stop()
        recorder.release()

        imageWriter?.close()
        imageWriter = null
      } catch (e: Exception) {
        Log.e(TAG, "Failed to stop MediaRecorder!", e)
      }

      val stopTime = System.currentTimeMillis()
      val durationMs = stopTime - (startTime ?: stopTime)
      callback(Video(outputFile.absolutePath, durationMs))
    }
  }

  fun pause() {
    synchronized(this) {
      Log.i(TAG, "Pausing Recording Session..")
      recorder.pause()
    }
  }

  fun resume() {
    synchronized(this) {
      Log.i(TAG, "Resuming Recording Session..")
      recorder.resume()
    }
  }

  override fun toString(): String {
    val audio = if (enableAudio) "with audio" else "without audio"
    return "${size.width} x ${size.height} @ $fps FPS $codec $fileType $orientation RecordingSession ($audio)"
  }
}
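A hedged usage sketch (all types come from this commit; the context and surface wiring are illustrative):

import android.util.Log
import android.util.Size

// Illustrative only; a real caller must also route `session.surface`
// into the capture session (see VideoPipeline below).
val session = RecordingSession(
  context,
  size = Size(1920, 1080),
  enableAudio = true,
  fps = 30,
  orientation = Orientation.PORTRAIT,
  callback = { video -> Log.i("Recorder", "Saved ${video.path} (${video.durationMs}ms)") },
  onError = { error -> Log.e("Recorder", "Recording failed: $error") }
)
session.start()
// ... later:
session.stop()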
@@ -0,0 +1,168 @@
package com.mrousavy.camera.core

import android.graphics.ImageFormat
import android.graphics.SurfaceTexture
import android.media.ImageReader
import android.media.ImageWriter
import android.media.MediaRecorder
import android.util.Log
import android.view.Surface
import com.facebook.jni.HybridData
import com.mrousavy.camera.frameprocessor.Frame
import com.mrousavy.camera.frameprocessor.FrameProcessor
import com.mrousavy.camera.parsers.Orientation
import java.io.Closeable

/**
 * An OpenGL pipeline for streaming Camera Frames to one or more outputs.
 * Currently, [VideoPipeline] can stream to a [FrameProcessor] and a [MediaRecorder].
 *
 * @param [width] The width of the Frames to stream (> 0)
 * @param [height] The height of the Frames to stream (> 0)
 * @param [format] The format of the Frames to stream. ([ImageFormat.PRIVATE], [ImageFormat.YUV_420_888] or [ImageFormat.JPEG])
 */
@Suppress("KotlinJniMissingFunction")
class VideoPipeline(val width: Int,
                    val height: Int,
                    val format: Int = ImageFormat.PRIVATE,
                    private val isMirrored: Boolean = false): SurfaceTexture.OnFrameAvailableListener, Closeable {
  companion object {
    private const val MAX_IMAGES = 3
    private const val TAG = "VideoPipeline"
  }

  private val mHybridData: HybridData
  private var openGLTextureId: Int? = null
  private var transformMatrix = FloatArray(16)
  private var isActive = true

  // Output 1
  private var frameProcessor: FrameProcessor? = null
  private var imageReader: ImageReader? = null

  // Output 2
  private var recordingSession: RecordingSession? = null

  // Input
  private val surfaceTexture: SurfaceTexture
  val surface: Surface

  init {
    mHybridData = initHybrid(width, height)
    surfaceTexture = SurfaceTexture(false)
    surfaceTexture.setDefaultBufferSize(width, height)
    surfaceTexture.setOnFrameAvailableListener(this)
    surface = Surface(surfaceTexture)
  }

  override fun close() {
    synchronized(this) {
      isActive = false
      imageReader?.close()
      imageReader = null
      frameProcessor = null
      recordingSession = null
      surfaceTexture.release()
      mHybridData.resetNative()
    }
  }

  override fun onFrameAvailable(surfaceTexture: SurfaceTexture) {
    synchronized(this) {
      if (!isActive) return@synchronized

      // 1. Attach Surface to OpenGL context
      if (openGLTextureId == null) {
        openGLTextureId = getInputTextureId()
        surfaceTexture.attachToGLContext(openGLTextureId!!)
        Log.i(TAG, "Attached Texture to Context $openGLTextureId")
      }

      // 2. Prepare the OpenGL context (eglMakeCurrent)
      onBeforeFrame()

      // 3. Update the OpenGL texture
      surfaceTexture.updateTexImage()

      // 4. Get the transform matrix from the SurfaceTexture (rotations/scales applied by Camera)
      surfaceTexture.getTransformMatrix(transformMatrix)

      // 5. Draw it with applied rotation/mirroring
      onFrame(transformMatrix)
    }
  }

  private fun getImageReader(): ImageReader {
    val imageReader = ImageReader.newInstance(width, height, format, MAX_IMAGES)
    imageReader.setOnImageAvailableListener({ reader ->
      Log.i(TAG, "ImageReader::onImageAvailable!")
      val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener

      // TODO: Get correct orientation and isMirrored
      val frame = Frame(image, image.timestamp, Orientation.PORTRAIT, isMirrored)
      frame.incrementRefCount()
      frameProcessor?.call(frame)
      frame.decrementRefCount()
    }, null)
    return imageReader
  }

  /**
   * Configures the Pipeline to also call the given [FrameProcessor].
   * * If the [frameProcessor] is `null`, this output channel will be removed.
   * * If the [frameProcessor] is not `null`, the [VideoPipeline] will create Frames
   * using an [ImageWriter] and call the [FrameProcessor] with those Frames.
   */
  fun setFrameProcessorOutput(frameProcessor: FrameProcessor?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height FrameProcessor Output...")
      this.frameProcessor = frameProcessor

      if (frameProcessor != null) {
        if (this.imageReader == null) {
          // 1. Create new ImageReader that just calls the Frame Processor
          this.imageReader = getImageReader()
        }

        // 2. Configure OpenGL pipeline to stream Frames into the ImageReader's surface
        setFrameProcessorOutputSurface(imageReader!!.surface)
      } else {
        // 1. Configure OpenGL pipeline to stop streaming Frames into the ImageReader's surface
        removeFrameProcessorOutputSurface()

        // 2. Close the ImageReader
        this.imageReader?.close()
        this.imageReader = null
      }
    }
  }

  /**
   * Configures the Pipeline to also write Frames to a Surface from a [MediaRecorder].
   * * If the [recordingSession] is `null`, this output channel will be removed.
   * * If the [recordingSession] is not `null`, the [VideoPipeline] will write Frames to its Surface.
   */
  fun setRecordingSessionOutput(recordingSession: RecordingSession?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height RecordingSession Output...")
      if (recordingSession != null) {
        // Configure OpenGL pipeline to stream Frames into the Recording Session's surface
        setRecordingSessionOutputSurface(recordingSession.surface)
        this.recordingSession = recordingSession
      } else {
        // Configure OpenGL pipeline to stop streaming Frames into the Recording Session's surface
        removeRecordingSessionOutputSurface()
        this.recordingSession = null
      }
    }
  }

  private external fun getInputTextureId(): Int
  private external fun onBeforeFrame()
  private external fun onFrame(transformMatrix: FloatArray)
  private external fun setFrameProcessorOutputSurface(surface: Any)
  private external fun removeFrameProcessorOutputSurface()
  private external fun setRecordingSessionOutputSurface(surface: Any)
  private external fun removeRecordingSessionOutputSurface()
  private external fun initHybrid(width: Int, height: Int): HybridData
}
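How the pipeline is wired up, as an illustrative sketch (in this commit the actual wiring lives in CameraOutputs and CameraSession; `frameProcessor` and `recordingSession` are assumed to exist):

import android.graphics.ImageFormat

// Illustrative only:
val pipeline = VideoPipeline(1920, 1080, ImageFormat.PRIVATE, isMirrored = false)

// The Camera draws into pipeline.surface, so add it as a CaptureRequest target.
// Fan the stream out to both consumers:
pipeline.setFrameProcessorOutput(frameProcessor)     // -> JS Frame Processor
pipeline.setRecordingSessionOutput(recordingSession) // -> MediaRecorder input

// Detach and tear down when done:
pipeline.setRecordingSessionOutput(null)
pipeline.setFrameProcessorOutput(null)
pipeline.close()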
@@ -0,0 +1,130 @@
package com.mrousavy.camera.core.outputs

import android.graphics.ImageFormat
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraManager
import android.media.Image
import android.media.ImageReader
import android.util.Log
import android.util.Size
import android.view.Surface
import com.mrousavy.camera.CameraQueues
import com.mrousavy.camera.extensions.closestToOrMax
import com.mrousavy.camera.extensions.getPhotoSizes
import com.mrousavy.camera.extensions.getPreviewSize
import com.mrousavy.camera.extensions.getVideoSizes
import com.mrousavy.camera.core.VideoPipeline
import java.io.Closeable

class CameraOutputs(val cameraId: String,
                    cameraManager: CameraManager,
                    val preview: PreviewOutput? = null,
                    val photo: PhotoOutput? = null,
                    val video: VideoOutput? = null,
                    val enableHdr: Boolean? = false,
                    val callback: Callback): Closeable {
  companion object {
    private const val TAG = "CameraOutputs"
    const val PHOTO_OUTPUT_BUFFER_SIZE = 3
  }

  data class PreviewOutput(val surface: Surface)
  data class PhotoOutput(val targetSize: Size? = null,
                         val format: Int = ImageFormat.JPEG)
  data class VideoOutput(val targetSize: Size? = null,
                         val enableRecording: Boolean = false,
                         val enableFrameProcessor: Boolean? = false,
                         val format: Int = ImageFormat.PRIVATE)

  interface Callback {
    fun onPhotoCaptured(image: Image)
  }

  var previewOutput: SurfaceOutput? = null
    private set
  var photoOutput: ImageReaderOutput? = null
    private set
  var videoOutput: VideoPipelineOutput? = null
    private set

  val size: Int
    get() {
      var size = 0
      if (previewOutput != null) size++
      if (photoOutput != null) size++
      if (videoOutput != null) size++
      return size
    }

  override fun equals(other: Any?): Boolean {
    if (other !is CameraOutputs) return false
    return this.cameraId == other.cameraId
      && this.preview?.surface == other.preview?.surface
      && this.photo?.targetSize == other.photo?.targetSize
      && this.photo?.format == other.photo?.format
      && this.video?.enableRecording == other.video?.enableRecording
      && this.video?.targetSize == other.video?.targetSize
      && this.video?.format == other.video?.format
      && this.enableHdr == other.enableHdr
  }

  override fun hashCode(): Int {
    var result = cameraId.hashCode()
    result += (preview?.hashCode() ?: 0)
    result += (photo?.hashCode() ?: 0)
    result += (video?.hashCode() ?: 0)
    return result
  }

  override fun close() {
    photoOutput?.close()
    videoOutput?.close()
  }

  override fun toString(): String {
    val strings = arrayListOf<String>()
    previewOutput?.let { strings.add(it.toString()) }
    photoOutput?.let { strings.add(it.toString()) }
    videoOutput?.let { strings.add(it.toString()) }
    return strings.joinToString(", ", "[", "]")
  }

  init {
    val characteristics = cameraManager.getCameraCharacteristics(cameraId)
    val isMirrored = characteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT

    Log.i(TAG, "Preparing Outputs for Camera $cameraId...")

    // Preview output: Low resolution repeating images (SurfaceView)
    if (preview != null) {
      Log.i(TAG, "Adding native preview view output.")
      previewOutput = SurfaceOutput(preview.surface, characteristics.getPreviewSize(), SurfaceOutput.OutputType.PREVIEW)
    }

    // Photo output: High quality still images (takePhoto())
    if (photo != null) {
      val size = characteristics.getPhotoSizes(photo.format).closestToOrMax(photo.targetSize)

      val imageReader = ImageReader.newInstance(size.width, size.height, photo.format, PHOTO_OUTPUT_BUFFER_SIZE)
      imageReader.setOnImageAvailableListener({ reader ->
        val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener
        callback.onPhotoCaptured(image)
      }, CameraQueues.cameraQueue.handler)

      Log.i(TAG, "Adding ${size.width}x${size.height} photo output. (Format: ${photo.format})")
      photoOutput = ImageReaderOutput(imageReader, SurfaceOutput.OutputType.PHOTO)
    }

    // Video output: High resolution repeating images (startRecording() or useFrameProcessor())
    if (video != null) {
      val size = characteristics.getVideoSizes(cameraId, video.format).closestToOrMax(video.targetSize)
      val videoPipeline = VideoPipeline(size.width, size.height, video.format, isMirrored)

      Log.i(TAG, "Adding ${size.width}x${size.height} video output. (Format: ${video.format})")
      videoOutput = VideoPipelineOutput(videoPipeline, SurfaceOutput.OutputType.VIDEO)
    }

    Log.i(TAG, "Prepared $size Outputs for Camera $cameraId!")
  }
}
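An illustrative construction of the three output channels (`cameraManager` and `previewSurface` are assumed to exist):

import android.media.Image
import android.util.Size

// Illustrative only:
val outputs = CameraOutputs(
  cameraId = "0",
  cameraManager = cameraManager,
  preview = CameraOutputs.PreviewOutput(previewSurface),
  photo = CameraOutputs.PhotoOutput(targetSize = Size(4032, 3024)),
  video = CameraOutputs.VideoOutput(enableRecording = true, enableFrameProcessor = true),
  enableHdr = false,
  callback = object : CameraOutputs.Callback {
    override fun onPhotoCaptured(image: Image) {
      // e.g. hand the Image to a PhotoOutputSynchronizer keyed by image.timestamp
    }
  }
)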
@@ -0,0 +1,22 @@
package com.mrousavy.camera.core.outputs

import android.media.ImageReader
import android.util.Log
import android.util.Size
import java.io.Closeable

/**
 * A [SurfaceOutput] that uses an [ImageReader] as its surface.
 */
class ImageReaderOutput(private val imageReader: ImageReader,
                        outputType: OutputType,
                        dynamicRangeProfile: Long? = null): Closeable, SurfaceOutput(imageReader.surface, Size(imageReader.width, imageReader.height), outputType, dynamicRangeProfile) {
  override fun close() {
    Log.i(TAG, "Closing ${imageReader.width}x${imageReader.height} $outputType ImageReader..")
    imageReader.close()
  }

  override fun toString(): String {
    return "$outputType (${imageReader.width} x ${imageReader.height} in format #${imageReader.imageFormat})"
  }
}
@@ -0,0 +1,79 @@
package com.mrousavy.camera.core.outputs

import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraMetadata
import android.hardware.camera2.params.OutputConfiguration
import android.os.Build
import android.util.Log
import android.util.Size
import android.view.Surface
import androidx.annotation.RequiresApi
import java.io.Closeable

/**
 * A general-purpose Camera Output that writes to a [Surface]
 */
open class SurfaceOutput(val surface: Surface,
                         val size: Size,
                         val outputType: OutputType,
                         private val dynamicRangeProfile: Long? = null,
                         private val closeSurfaceOnEnd: Boolean = false): Closeable {
  companion object {
    const val TAG = "SurfaceOutput"

    private fun supportsOutputType(characteristics: CameraCharacteristics, outputType: OutputType): Boolean {
      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
        val availableUseCases = characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_STREAM_USE_CASES)
        if (availableUseCases != null) {
          if (availableUseCases.contains(outputType.toOutputType().toLong())) {
            return true
          }
        }
      }

      return false
    }
  }

  fun toOutputConfiguration(characteristics: CameraCharacteristics): OutputConfiguration {
    val result = OutputConfiguration(surface)
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      if (dynamicRangeProfile != null) {
        result.dynamicRangeProfile = dynamicRangeProfile
        Log.i(TAG, "Using dynamic range profile ${result.dynamicRangeProfile} for $outputType output.")
      }
      if (supportsOutputType(characteristics, outputType)) {
        result.streamUseCase = outputType.toOutputType().toLong()
        Log.i(TAG, "Using optimized stream use case ${result.streamUseCase} for $outputType output.")
      }
    }
    return result
  }

  override fun toString(): String {
    return "$outputType (${size.width} x ${size.height})"
  }

  override fun close() {
    if (closeSurfaceOnEnd) {
      surface.release()
    }
  }

  enum class OutputType {
    PHOTO,
    VIDEO,
    PREVIEW,
    VIDEO_AND_PREVIEW;

    @RequiresApi(Build.VERSION_CODES.TIRAMISU)
    fun toOutputType(): Int {
      return when(this) {
        PHOTO -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_STILL_CAPTURE
        VIDEO -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_RECORD
        PREVIEW -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW
        VIDEO_AND_PREVIEW -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW_VIDEO_STILL
      }
    }
  }
}
@@ -0,0 +1,22 @@
package com.mrousavy.camera.core.outputs

import android.util.Log
import android.util.Size
import com.mrousavy.camera.core.VideoPipeline
import java.io.Closeable

/**
 * A [SurfaceOutput] that uses a [VideoPipeline] as its surface.
 */
class VideoPipelineOutput(val videoPipeline: VideoPipeline,
                          outputType: OutputType,
                          dynamicRangeProfile: Long? = null): Closeable, SurfaceOutput(videoPipeline.surface, Size(videoPipeline.width, videoPipeline.height), outputType, dynamicRangeProfile) {
  override fun close() {
    Log.i(TAG, "Closing ${videoPipeline.width}x${videoPipeline.height} Video Pipeline..")
    videoPipeline.close()
  }

  override fun toString(): String {
    return "$outputType (${videoPipeline.width} x ${videoPipeline.height} in format #${videoPipeline.format})"
  }
}
@@ -0,0 +1,53 @@
package com.mrousavy.camera.extensions

import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CaptureFailure
import android.hardware.camera2.CaptureRequest
import android.hardware.camera2.TotalCaptureResult
import android.media.MediaActionSound
import com.mrousavy.camera.CameraQueues
import com.mrousavy.camera.CaptureAbortedError
import com.mrousavy.camera.UnknownCaptureError
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException
import kotlin.coroutines.suspendCoroutine

suspend fun CameraCaptureSession.capture(captureRequest: CaptureRequest, enableShutterSound: Boolean): TotalCaptureResult {
  return suspendCoroutine { continuation ->
    this.capture(captureRequest, object: CameraCaptureSession.CaptureCallback() {
      override fun onCaptureCompleted(
        session: CameraCaptureSession,
        request: CaptureRequest,
        result: TotalCaptureResult
      ) {
        super.onCaptureCompleted(session, request, result)

        continuation.resume(result)
      }

      override fun onCaptureStarted(session: CameraCaptureSession, request: CaptureRequest, timestamp: Long, frameNumber: Long) {
        super.onCaptureStarted(session, request, timestamp, frameNumber)

        if (enableShutterSound) {
          val mediaActionSound = MediaActionSound()
          mediaActionSound.play(MediaActionSound.SHUTTER_CLICK)
        }
      }

      override fun onCaptureFailed(
        session: CameraCaptureSession,
        request: CaptureRequest,
        failure: CaptureFailure
      ) {
        super.onCaptureFailed(session, request, failure)
        val wasImageCaptured = failure.wasImageCaptured()
        val error = when (failure.reason) {
          CaptureFailure.REASON_ERROR -> UnknownCaptureError(wasImageCaptured)
          CaptureFailure.REASON_FLUSHED -> CaptureAbortedError(wasImageCaptured)
          else -> UnknownCaptureError(wasImageCaptured)
        }
        continuation.resumeWithException(error)
      }
    }, CameraQueues.cameraQueue.handler)
  }
}
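A hedged usage sketch of the suspending capture extension; the error types come from this package:

import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CaptureRequest
import android.util.Log
import com.mrousavy.camera.CaptureAbortedError

// Illustrative only:
suspend fun captureOnce(session: CameraCaptureSession, request: CaptureRequest) {
  try {
    val result = session.capture(request, enableShutterSound = true)
    Log.i("Capture", "Captured frame #${result.frameNumber}")
  } catch (e: CaptureAbortedError) {
    Log.w("Capture", "Capture was aborted before an image was taken", e)
  }
}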
@@ -0,0 +1,68 @@
package com.mrousavy.camera.extensions

import android.content.res.Resources
import android.hardware.camera2.CameraCharacteristics
import android.media.CamcorderProfile
import android.os.Build
import android.util.Log
import android.util.Size
import android.view.SurfaceHolder
import android.view.SurfaceView

private fun getMaximumPreviewSize(): Size {
  // See https://developer.android.com/reference/android/hardware/camera2/params/StreamConfigurationMap
  // According to the Android Developer documentation, PREVIEW streams can have a resolution
  // of up to the phone's display's resolution, with a maximum of 1920x1080.
  val display1080p = Size(1920, 1080)
  val displaySize = Size(Resources.getSystem().displayMetrics.widthPixels, Resources.getSystem().displayMetrics.heightPixels)
  val isHighResScreen = displaySize.bigger >= display1080p.bigger || displaySize.smaller >= display1080p.smaller
  Log.i("PreviewSize", "Phone has a ${displaySize.width} x ${displaySize.height} screen.")
  return if (isHighResScreen) display1080p else displaySize
}

/**
 * Gets the maximum Preview Resolution this device is capable of streaming at. (For [SurfaceView])
 */
fun CameraCharacteristics.getPreviewSize(): Size {
  val config = this.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  val previewSize = getMaximumPreviewSize()
  val outputSizes = config.getOutputSizes(SurfaceHolder::class.java).sortedByDescending { it.width * it.height }
  return outputSizes.first { it.bigger <= previewSize.bigger && it.smaller <= previewSize.smaller }
}

private fun getMaximumVideoSize(cameraId: String): Size? {
  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
    val profiles = CamcorderProfile.getAll(cameraId, CamcorderProfile.QUALITY_HIGH)
    if (profiles != null) {
      val largestProfile = profiles.videoProfiles.filterNotNull().maxByOrNull { it.width * it.height }
      if (largestProfile != null) {
        return Size(largestProfile.width, largestProfile.height)
      }
    }
  }

  val cameraIdInt = cameraId.toIntOrNull()
  if (cameraIdInt != null) {
    val profile = CamcorderProfile.get(cameraIdInt, CamcorderProfile.QUALITY_HIGH)
    return Size(profile.videoFrameWidth, profile.videoFrameHeight)
  }

  return null
}

fun CameraCharacteristics.getVideoSizes(cameraId: String, format: Int): List<Size> {
  val config = this.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  val sizes = config.getOutputSizes(format) ?: emptyArray()
  val maxVideoSize = getMaximumVideoSize(cameraId)
  if (maxVideoSize != null) {
    return sizes.filter { it.bigger <= maxVideoSize.bigger }
  }
  return sizes.toList()
}

fun CameraCharacteristics.getPhotoSizes(format: Int): List<Size> {
  val config = this.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  val sizes = config.getOutputSizes(format) ?: emptyArray()
  val highResSizes = config.getHighResolutionOutputSizes(format) ?: emptyArray()
  return sizes.plus(highResSizes).toList()
}
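For example, picking a recording size capped by the device's CamcorderProfile (closestToOrMax is added later in this commit, in the Size extensions; `cameraManager` is assumed to exist):

import android.graphics.ImageFormat
import android.util.Size

// Illustrative only:
val characteristics = cameraManager.getCameraCharacteristics("0")
val videoSizes = characteristics.getVideoSizes("0", ImageFormat.PRIVATE)
val recordingSize = videoSizes.closestToOrMax(Size(1920, 1080)) // nearest to 1080p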
@@ -0,0 +1,95 @@
package com.mrousavy.camera.extensions

import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraDevice
import android.hardware.camera2.CameraManager
import android.hardware.camera2.params.OutputConfiguration
import android.hardware.camera2.params.SessionConfiguration
import android.os.Build
import android.util.Log
import androidx.annotation.RequiresApi
import com.mrousavy.camera.CameraQueues
import com.mrousavy.camera.CameraSessionCannotBeConfiguredError
import com.mrousavy.camera.core.outputs.CameraOutputs
import kotlinx.coroutines.suspendCancellableCoroutine
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException

enum class SessionType {
  REGULAR,
  HIGH_SPEED;

  @RequiresApi(Build.VERSION_CODES.P)
  fun toSessionType(): Int {
    return when(this) {
      REGULAR -> SessionConfiguration.SESSION_REGULAR
      HIGH_SPEED -> SessionConfiguration.SESSION_HIGH_SPEED
    }
  }
}

private const val TAG = "CreateCaptureSession"
private var sessionId = 1000

suspend fun CameraDevice.createCaptureSession(cameraManager: CameraManager,
                                              sessionType: SessionType,
                                              outputs: CameraOutputs,
                                              onClosed: (session: CameraCaptureSession) -> Unit,
                                              queue: CameraQueues.CameraQueue): CameraCaptureSession {
  return suspendCancellableCoroutine { continuation ->
    val characteristics = cameraManager.getCameraCharacteristics(id)
    val hardwareLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!!
    val sessionId = sessionId++
    Log.i(TAG, "Camera $id: Creating Capture Session #$sessionId... " +
      "Hardware Level: $hardwareLevel | Outputs: $outputs")

    val callback = object: CameraCaptureSession.StateCallback() {
      override fun onConfigured(session: CameraCaptureSession) {
        Log.i(TAG, "Camera $id: Capture Session #$sessionId configured!")
        continuation.resume(session)
      }

      override fun onConfigureFailed(session: CameraCaptureSession) {
        Log.e(TAG, "Camera $id: Failed to configure Capture Session #$sessionId!")
        continuation.resumeWithException(CameraSessionCannotBeConfiguredError(id, outputs))
      }

      override fun onClosed(session: CameraCaptureSession) {
        super.onClosed(session)
        Log.i(TAG, "Camera $id: Capture Session #$sessionId closed!")
        // explicitly invoke the lambda parameter, not this member function
        onClosed.invoke(session)
      }
    }

    val outputConfigurations = arrayListOf<OutputConfiguration>()
    outputs.previewOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    outputs.photoOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    outputs.videoOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    if (outputs.enableHdr == true && Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      val supportedProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
      val hdrProfile = supportedProfiles?.bestProfile ?: supportedProfiles?.supportedProfiles?.firstOrNull()
      if (hdrProfile != null) {
        Log.i(TAG, "Camera $id: Using HDR Profile $hdrProfile...")
        outputConfigurations.forEach { it.dynamicRangeProfile = hdrProfile }
      } else {
        Log.w(TAG, "Camera $id: HDR was enabled, but the device does not support any matching HDR profile!")
      }
    }

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      Log.i(TAG, "Using new API (>=28)")
      val config = SessionConfiguration(sessionType.toSessionType(), outputConfigurations, queue.executor, callback)
      this.createCaptureSession(config)
    } else {
      Log.i(TAG, "Using legacy API (<28)")
      this.createCaptureSessionByOutputConfigurations(outputConfigurations, callback, queue.handler)
    }
  }
}
@@ -0,0 +1,97 @@
package com.mrousavy.camera.extensions

import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraDevice
import android.hardware.camera2.CameraManager
import android.hardware.camera2.CaptureRequest
import android.os.Build
import android.view.Surface
import com.mrousavy.camera.parsers.Flash
import com.mrousavy.camera.parsers.Orientation
import com.mrousavy.camera.parsers.QualityPrioritization

private fun supportsSnapshotCapture(cameraCharacteristics: CameraCharacteristics): Boolean {
  // As per CameraDevice.TEMPLATE_VIDEO_SNAPSHOT in documentation:
  val hardwareLevel = cameraCharacteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!!
  if (hardwareLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) return false

  val capabilities = cameraCharacteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)!!
  val hasDepth = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT)
  val isBackwardsCompatible = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE)
  if (hasDepth && !isBackwardsCompatible) return false

  return true
}

fun CameraDevice.createPhotoCaptureRequest(cameraManager: CameraManager,
                                           surface: Surface,
                                           zoom: Float,
                                           qualityPrioritization: QualityPrioritization,
                                           flashMode: Flash,
                                           enableRedEyeReduction: Boolean,
                                           enableAutoStabilization: Boolean,
                                           orientation: Orientation): CaptureRequest {
  val cameraCharacteristics = cameraManager.getCameraCharacteristics(this.id)

  val template = if (qualityPrioritization == QualityPrioritization.SPEED && supportsSnapshotCapture(cameraCharacteristics)) {
    CameraDevice.TEMPLATE_VIDEO_SNAPSHOT
  } else {
    CameraDevice.TEMPLATE_STILL_CAPTURE
  }
  val captureRequest = this.createCaptureRequest(template)

  // TODO: Maybe we can even expose that prop directly?
  val jpegQuality = when (qualityPrioritization) {
    QualityPrioritization.SPEED -> 85
    QualityPrioritization.BALANCED -> 92
    QualityPrioritization.QUALITY -> 100
  }
  captureRequest.set(CaptureRequest.JPEG_QUALITY, jpegQuality.toByte())

  captureRequest.set(CaptureRequest.JPEG_ORIENTATION, orientation.toDegrees())

  // Set the Flash Mode
  when (flashMode) {
    Flash.OFF -> {
      captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON)
    }
    Flash.ON -> {
      captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_ALWAYS_FLASH)
    }
    Flash.AUTO -> {
      if (enableRedEyeReduction) {
        captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE)
      } else {
        captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH)
      }
    }
  }

  if (enableAutoStabilization) {
    // Enable optical or digital image stabilization
    val digitalStabilization = cameraCharacteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES)
    val hasDigitalStabilization = digitalStabilization?.contains(CameraCharacteristics.CONTROL_VIDEO_STABILIZATION_MODE_ON) ?: false

    val opticalStabilization = cameraCharacteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION)
    val hasOpticalStabilization = opticalStabilization?.contains(CameraCharacteristics.LENS_OPTICAL_STABILIZATION_MODE_ON) ?: false
    if (hasOpticalStabilization) {
      captureRequest.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE_OFF)
      captureRequest.set(CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE, CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE_ON)
    } else if (hasDigitalStabilization) {
      captureRequest.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE_ON)
    } else {
      // no stabilization mode is supported - ignore it
    }
  }

  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
    captureRequest.set(CaptureRequest.CONTROL_ZOOM_RATIO, zoom)
  } else {
    val size = cameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)!!
    captureRequest.set(CaptureRequest.SCALER_CROP_REGION, size.zoomed(zoom))
  }

  captureRequest.addTarget(surface)

  return captureRequest.build()
}
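A hedged end-to-end sketch combining this builder with the suspending capture extension above (`cameraDevice`, `captureSession` and `photoSurface` are assumed to exist):

// Illustrative only:
val request = cameraDevice.createPhotoCaptureRequest(
  cameraManager,
  surface = photoSurface,
  zoom = 1f,
  qualityPrioritization = QualityPrioritization.BALANCED,
  flashMode = Flash.AUTO,
  enableRedEyeReduction = false,
  enableAutoStabilization = true,
  orientation = Orientation.PORTRAIT
)
val result = captureSession.capture(request, enableShutterSound = true)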
@@ -0,0 +1,68 @@
package com.mrousavy.camera.extensions

import android.annotation.SuppressLint
import android.hardware.camera2.CameraDevice
import android.hardware.camera2.CameraManager
import android.os.Build
import android.util.Log
import com.mrousavy.camera.CameraCannotBeOpenedError
import com.mrousavy.camera.CameraDisconnectedError
import com.mrousavy.camera.CameraQueues
import com.mrousavy.camera.parsers.CameraDeviceError
import kotlinx.coroutines.suspendCancellableCoroutine
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException

private const val TAG = "CameraManager"

@SuppressLint("MissingPermission")
suspend fun CameraManager.openCamera(cameraId: String,
                                     onDisconnected: (camera: CameraDevice, reason: Throwable) -> Unit,
                                     queue: CameraQueues.CameraQueue): CameraDevice {
  return suspendCancellableCoroutine { continuation ->
    Log.i(TAG, "Camera $cameraId: Opening...")

    val callback = object: CameraDevice.StateCallback() {
      override fun onOpened(camera: CameraDevice) {
        Log.i(TAG, "Camera $cameraId: Opened!")
        continuation.resume(camera)
      }

      override fun onDisconnected(camera: CameraDevice) {
        Log.i(TAG, "Camera $cameraId: Disconnected!")
        if (continuation.isActive) {
          continuation.resumeWithException(CameraCannotBeOpenedError(cameraId, CameraDeviceError.DISCONNECTED))
        } else {
          onDisconnected(camera, CameraDisconnectedError(cameraId, CameraDeviceError.DISCONNECTED))
        }
        camera.tryClose()
      }

      override fun onError(camera: CameraDevice, errorCode: Int) {
        Log.e(TAG, "Camera $cameraId: Error! $errorCode")
        val error = CameraDeviceError.fromCameraDeviceError(errorCode)
        if (continuation.isActive) {
          continuation.resumeWithException(CameraCannotBeOpenedError(cameraId, error))
        } else {
          onDisconnected(camera, CameraDisconnectedError(cameraId, error))
        }
        camera.tryClose()
      }
    }

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      this.openCamera(cameraId, queue.executor, callback)
    } else {
      this.openCamera(cameraId, callback, queue.handler)
    }
  }
}

fun CameraDevice.tryClose() {
  try {
    Log.i(TAG, "Camera $id: Closing...")
    this.close()
  } catch (e: Throwable) {
    Log.e(TAG, "Camera $id: Failed to close!", e)
  }
}
@@ -0,0 +1,27 @@
package com.mrousavy.camera.extensions

import android.hardware.camera2.params.DynamicRangeProfiles
import android.os.Build
import androidx.annotation.RequiresApi

private fun Set<Long>.firstMatch(filter: Set<Long>): Long? {
  filter.forEach { f ->
    if (this.contains(f)) {
      return f
    }
  }
  return null
}

@RequiresApi(Build.VERSION_CODES.TIRAMISU)
private val bestProfiles = setOf(
  DynamicRangeProfiles.HDR10_PLUS,
  DynamicRangeProfiles.HDR10,
  DynamicRangeProfiles.HLG10
)

val DynamicRangeProfiles.bestProfile: Long?
  @RequiresApi(Build.VERSION_CODES.TIRAMISU)
  get() {
    return supportedProfiles.firstMatch(bestProfiles)
  }
@@ -0,0 +1,21 @@
package com.mrousavy.camera.extensions

import android.os.Handler
import java.util.concurrent.Semaphore

/**
 * Posts a Message to this Handler and blocks the calling Thread until the Handler finished executing the given job.
 */
fun Handler.postAndWait(job: () -> Unit) {
  val semaphore = Semaphore(0)

  this.post {
    try {
      job()
    } finally {
      semaphore.release()
    }
  }

  semaphore.acquire()
}
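A small usage sketch. One caution that follows from the Semaphore pattern above: never call this from the Handler's own Looper thread, or acquire() blocks the very thread that would run the job:

import android.os.Handler
import android.os.HandlerThread

// Illustrative only:
val thread = HandlerThread("camera-thread").apply { start() }
val handler = Handler(thread.looper)

handler.postAndWait {
  // runs on "camera-thread"; the caller blocks until this returns
  println("configured")
}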
@@ -0,0 +1,5 @@
package com.mrousavy.camera.extensions

fun <T> List<T>.containsAny(elements: List<T>): Boolean {
  return elements.any { element -> this.contains(element) }
}
@@ -0,0 +1,14 @@
package com.mrousavy.camera.extensions

import android.graphics.Rect

fun Rect.zoomed(zoomFactor: Float): Rect {
  val height = bottom - top
  val width = right - left

  // Inset each edge so the resulting crop region is (1 / zoomFactor) of the
  // original size, centered. (At zoomFactor == 1 the Rect is unchanged.)
  val left = this.left + (width * (1 - 1 / zoomFactor) / 2)
  val top = this.top + (height * (1 - 1 / zoomFactor) / 2)
  val right = this.right - (width * (1 - 1 / zoomFactor) / 2)
  val bottom = this.bottom - (height * (1 - 1 / zoomFactor) / 2)
  return Rect(left.toInt(), top.toInt(), right.toInt(), bottom.toInt())
}
@@ -0,0 +1,44 @@
package com.mrousavy.camera.extensions

import android.util.Size
import android.util.SizeF
import android.view.Surface
import kotlin.math.abs
import kotlin.math.max
import kotlin.math.min

fun List<Size>.closestToOrMax(size: Size?): Size {
  return if (size != null) {
    this.minBy { abs(it.width - size.width) + abs(it.height - size.height) }
  } else {
    this.maxBy { it.width * it.height }
  }
}

/**
 * Rotate by a given Surface Rotation
 */
fun Size.rotated(surfaceRotation: Int): Size {
  return when (surfaceRotation) {
    Surface.ROTATION_0 -> Size(width, height)
    Surface.ROTATION_90 -> Size(height, width)
    Surface.ROTATION_180 -> Size(width, height)
    Surface.ROTATION_270 -> Size(height, width)
    else -> Size(width, height)
  }
}

val Size.bigger: Int
  get() = max(width, height)
val Size.smaller: Int
  get() = min(width, height)

val SizeF.bigger: Float
  get() = max(this.width, this.height)
val SizeF.smaller: Float
  get() = min(this.width, this.height)

operator fun Size.compareTo(other: Size): Int {
  return (this.width * this.height).compareTo(other.width * other.height)
}
@@ -0,0 +1,20 @@
package com.mrousavy.camera.extensions

import android.view.View
import android.view.ViewGroup

// React does not trigger onLayout events for dynamically added views (`addView`).
// This fixes that.
// https://github.com/facebook/react-native/issues/17968#issuecomment-633308615
fun ViewGroup.installHierarchyFitter() {
  setOnHierarchyChangeListener(object : ViewGroup.OnHierarchyChangeListener {
    override fun onChildViewRemoved(parent: View?, child: View?) = Unit
    override fun onChildViewAdded(parent: View?, child: View?) {
      parent?.measure(
        View.MeasureSpec.makeMeasureSpec(measuredWidth, View.MeasureSpec.EXACTLY),
        View.MeasureSpec.makeMeasureSpec(measuredHeight, View.MeasureSpec.EXACTLY)
      )
      parent?.layout(0, 0, parent.measuredWidth, parent.measuredHeight)
    }
  })
}
@@ -0,0 +1,17 @@
package com.mrousavy.camera.extensions

import com.facebook.react.bridge.WritableMap

fun WritableMap.putInt(key: String, value: Int?) {
  if (value == null)
    this.putNull(key)
  else
    this.putInt(key, value)
}

fun WritableMap.putDouble(key: String, value: Double?) {
  if (value == null)
    this.putNull(key)
  else
    this.putDouble(key, value)
}
@@ -0,0 +1,147 @@
|
||||
package com.mrousavy.camera.frameprocessor;
|
||||
|
||||
import android.graphics.ImageFormat;
|
||||
import android.media.Image;
|
||||
import com.facebook.proguard.annotations.DoNotStrip;
|
||||
import com.mrousavy.camera.parsers.PixelFormat;
import com.mrousavy.camera.parsers.Orientation;

import java.nio.ByteBuffer;

public class Frame {
    private final Image image;
    private final boolean isMirrored;
    private final long timestamp;
    private final Orientation orientation;
    private int refCount = 0;

    public Frame(Image image, long timestamp, Orientation orientation, boolean isMirrored) {
        this.image = image;
        this.timestamp = timestamp;
        this.orientation = orientation;
        this.isMirrored = isMirrored;
    }

    public Image getImage() {
        return image;
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public int getWidth() {
        return image.getWidth();
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public int getHeight() {
        return image.getHeight();
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public boolean getIsValid() {
        try {
            // will throw an exception if the image is already closed
            image.getCropRect();
            // no exception thrown, image must still be valid.
            return true;
        } catch (Exception e) {
            // exception thrown, image has already been closed.
            return false;
        }
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public boolean getIsMirrored() {
        return isMirrored;
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public long getTimestamp() {
        return timestamp;
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public String getOrientation() {
        return orientation.getUnionValue();
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public String getPixelFormat() {
        PixelFormat format = PixelFormat.Companion.fromImageFormat(image.getFormat());
        return format.getUnionValue();
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public int getPlanesCount() {
        return image.getPlanes().length;
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public int getBytesPerRow() {
        return image.getPlanes()[0].getRowStride();
    }

    private static ByteBuffer byteArrayCache;

    @SuppressWarnings("unused")
    @DoNotStrip
    public ByteBuffer toByteBuffer() {
        switch (image.getFormat()) {
            case ImageFormat.YUV_420_888:
                ByteBuffer yBuffer = image.getPlanes()[0].getBuffer();
                ByteBuffer uBuffer = image.getPlanes()[1].getBuffer();
                ByteBuffer vBuffer = image.getPlanes()[2].getBuffer();
                int ySize = yBuffer.remaining();
                int uSize = uBuffer.remaining();
                int vSize = vBuffer.remaining();
                int totalSize = ySize + uSize + vSize;

                if (byteArrayCache != null) byteArrayCache.rewind();
                if (byteArrayCache == null || byteArrayCache.remaining() != totalSize) {
                    byteArrayCache = ByteBuffer.allocateDirect(totalSize);
                }

                byteArrayCache.put(yBuffer).put(uBuffer).put(vBuffer);

                return byteArrayCache;
            case ImageFormat.JPEG:
                return image.getPlanes()[0].getBuffer();
            default:
                throw new RuntimeException("Cannot convert Frame with Format " + image.getFormat() + " to byte array!");
        }
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public void incrementRefCount() {
        synchronized (this) {
            refCount++;
        }
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    public void decrementRefCount() {
        synchronized (this) {
            refCount--;
            if (refCount <= 0) {
                // If no reference is held on this Image, close it.
                image.close();
            }
        }
    }

    @SuppressWarnings("unused")
    @DoNotStrip
    private void close() {
        image.close();
    }
}
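A note on the ref-counting lifecycle above: the native pipeline and every Frame Processor share ownership of the underlying Image, which is only closed once each holder has called decrementRefCount(). A minimal Kotlin sketch of a consumer that keeps a Frame alive past the callback — the class, executor, and processFrameLater name are hypothetical, not part of this commit:

import java.util.concurrent.Executors

// Hypothetical consumer that defers work on a Frame to a background thread.
// incrementRefCount() keeps the underlying Image open until the deferred
// work finishes and decrementRefCount() drops the count back to zero.
class DeferredFrameConsumer {
  private val executor = Executors.newSingleThreadExecutor()

  fun processFrameLater(frame: Frame) {
    frame.incrementRefCount()
    executor.submit {
      try {
        val buffer = frame.toByteBuffer()
        // ... analyze the pixel data ...
      } finally {
        frame.decrementRefCount() // closes the Image if we were the last holder
      }
    }
  }
}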
@@ -0,0 +1,27 @@
package com.mrousavy.camera.frameprocessor;

import androidx.annotation.Keep;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;

import com.facebook.jni.HybridData;
import com.facebook.proguard.annotations.DoNotStrip;

/**
 * Represents a JS Frame Processor
 */
@SuppressWarnings("JavaJniMissingFunction") // we're using fbjni.
public final class FrameProcessor {
    /**
     * Call the JS Frame Processor function with the given Frame
     */
    public native void call(Frame frame);

    @DoNotStrip
    @Keep
    private final HybridData mHybridData;

    public FrameProcessor(HybridData hybridData) {
        mHybridData = hybridData;
    }
}
@@ -0,0 +1,25 @@
package com.mrousavy.camera.frameprocessor;

import androidx.annotation.Keep;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import com.facebook.proguard.annotations.DoNotStrip;
import java.util.Map;

/**
 * Declares a Frame Processor Plugin.
 */
@DoNotStrip
@Keep
public abstract class FrameProcessorPlugin {
    /**
     * The actual Frame Processor plugin callback. Called for every frame the ImageAnalyzer receives.
     * @param frame The Frame from the Camera. Don't call .close() on this, as VisionCamera handles that.
     * @return You can return any primitive, map or array you want. See the
     * <a href="https://react-native-vision-camera.com/docs/guides/frame-processors-plugins-overview#types">Types</a>
     * table for a list of supported types.
     */
    @DoNotStrip
    @Keep
    public abstract @Nullable Object callback(@NonNull Frame frame, @Nullable Map<String, Object> params);
}
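To illustrate the contract above, a hedged sketch of a plugin subclass in Kotlin — the class name and returned keys are made up for this example. The callback runs synchronously for every frame, so it should return quickly:

// Hypothetical example plugin: returns the frame's dimensions as a map.
// Do NOT call frame.close() here — VisionCamera manages the lifecycle.
class FrameSizePlugin : FrameProcessorPlugin() {
  override fun callback(frame: Frame, params: Map<String, Any>?): Any? {
    return hashMapOf(
      "width" to frame.width,
      "height" to frame.height,
      "pixelFormat" to frame.pixelFormat
    )
  }
}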
@@ -0,0 +1,35 @@
package com.mrousavy.camera.frameprocessor;

import androidx.annotation.Keep;
import androidx.annotation.Nullable;
import com.facebook.proguard.annotations.DoNotStrip;
import java.util.Map;
import java.util.HashMap;

@DoNotStrip
@Keep
public class FrameProcessorPluginRegistry {
    private static final Map<String, PluginInitializer> Plugins = new HashMap<>();

    @DoNotStrip
    @Keep
    public static void addFrameProcessorPlugin(String name, PluginInitializer pluginInitializer) {
        assert !Plugins.containsKey(name) : "Tried to add a Frame Processor Plugin with a name that already exists! " +
                "Either choose unique names, or remove the unused plugin. Name: " + name;
        Plugins.put(name, pluginInitializer);
    }

    @DoNotStrip
    @Keep
    public static FrameProcessorPlugin getPlugin(String name, Map<String, Object> options) {
        PluginInitializer initializer = Plugins.get(name);
        if (initializer == null) {
            return null;
        }
        return initializer.initializePlugin(options);
    }

    public interface PluginInitializer {
        FrameProcessorPlugin initializePlugin(@Nullable Map<String, Object> options);
    }
}
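Plugins register themselves by name, typically once at app startup. A sketch using the hypothetical FrameSizePlugin from above — the registration site is an assumption; only the addFrameProcessorPlugin call itself is defined by this file:

// Registers the plugin once at startup; the lambda is the PluginInitializer
// and receives the options passed from JS when the plugin is initialized.
FrameProcessorPluginRegistry.addFrameProcessorPlugin("getFrameSize") { options ->
  FrameSizePlugin()
}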
@@ -0,0 +1,6 @@
package com.mrousavy.camera.frameprocessor;

@SuppressWarnings("JavaJniMissingFunction") // we use fbjni
public class VisionCameraInstaller {
    public static native void install(VisionCameraProxy proxy);
}
@@ -0,0 +1,82 @@
package com.mrousavy.camera.frameprocessor

import android.util.Log
import androidx.annotation.Keep
import androidx.annotation.UiThread
import com.facebook.jni.HybridData
import com.facebook.proguard.annotations.DoNotStrip
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.UiThreadUtil
import com.facebook.react.turbomodule.core.CallInvokerHolderImpl
import com.facebook.react.uimanager.UIManagerHelper
import com.mrousavy.camera.CameraView
import com.mrousavy.camera.ViewNotFoundError
import java.lang.ref.WeakReference

@Suppress("KotlinJniMissingFunction") // we use fbjni.
class VisionCameraProxy(context: ReactApplicationContext) {
  companion object {
    const val TAG = "VisionCameraProxy"
    init {
      try {
        System.loadLibrary("VisionCamera")
      } catch (e: UnsatisfiedLinkError) {
        Log.e(TAG, "Failed to load VisionCamera C++ library!", e)
        throw e
      }
    }
  }

  @DoNotStrip
  @Keep
  private var mHybridData: HybridData
  private var mContext: WeakReference<ReactApplicationContext>
  private var mScheduler: VisionCameraScheduler

  init {
    val jsCallInvokerHolder = context.catalystInstance.jsCallInvokerHolder as CallInvokerHolderImpl
    val jsRuntimeHolder = context.javaScriptContextHolder.get()
    mScheduler = VisionCameraScheduler()
    mContext = WeakReference(context)
    mHybridData = initHybrid(jsRuntimeHolder, jsCallInvokerHolder, mScheduler)
  }

  @UiThread
  private fun findCameraViewById(viewId: Int): CameraView {
    Log.d(TAG, "Finding view $viewId...")
    val ctx = mContext.get()
    val view = if (ctx != null) UIManagerHelper.getUIManager(ctx, viewId)?.resolveView(viewId) as CameraView? else null
    Log.d(TAG, if (view != null) "Found view $viewId!" else "Couldn't find view $viewId!")
    return view ?: throw ViewNotFoundError(viewId)
  }

  @DoNotStrip
  @Keep
  fun setFrameProcessor(viewId: Int, frameProcessor: FrameProcessor) {
    UiThreadUtil.runOnUiThread {
      val view = findCameraViewById(viewId)
      view.frameProcessor = frameProcessor
    }
  }

  @DoNotStrip
  @Keep
  fun removeFrameProcessor(viewId: Int) {
    UiThreadUtil.runOnUiThread {
      val view = findCameraViewById(viewId)
      view.frameProcessor = null
    }
  }

  @DoNotStrip
  @Keep
  fun getFrameProcessorPlugin(name: String, options: Map<String, Any>): FrameProcessorPlugin {
    return FrameProcessorPluginRegistry.getPlugin(name, options)
  }

  // private C++ funcs
  private external fun initHybrid(
    jsContext: Long,
    jsCallInvokerHolder: CallInvokerHolderImpl,
    scheduler: VisionCameraScheduler
  ): HybridData
}
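Taken together with VisionCameraInstaller above: the proxy is constructed on the native side and handed to C++, which installs the JSI bindings into the JS runtime. A sketch of how a native module might wire this up — the function name and surrounding module are assumptions, not part of this diff:

import android.util.Log
import com.facebook.react.bridge.ReactApplicationContext

// Hypothetical wiring inside a React Native module:
fun installFrameProcessorBindings(reactContext: ReactApplicationContext): Boolean {
  return try {
    val proxy = VisionCameraProxy(reactContext)
    VisionCameraInstaller.install(proxy) // C++ installs the JSI bindings
    true
  } catch (e: Throwable) {
    Log.e("CameraModule", "Failed to install Frame Processor bindings!", e)
    false
  }
}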
@@ -0,0 +1,29 @@
package com.mrousavy.camera.frameprocessor;

import com.facebook.jni.HybridData;
import com.facebook.proguard.annotations.DoNotStrip;
import com.mrousavy.camera.CameraQueues;

import java.util.concurrent.ExecutorService;

@SuppressWarnings("JavaJniMissingFunction") // using fbjni here
public class VisionCameraScheduler {
    @SuppressWarnings({"unused", "FieldCanBeLocal"})
    @DoNotStrip
    private final HybridData mHybridData;

    public VisionCameraScheduler() {
        mHybridData = initHybrid();
    }

    private native HybridData initHybrid();
    private native void trigger();

    @SuppressWarnings("unused")
    @DoNotStrip
    private void scheduleTrigger() {
        CameraQueues.CameraQueue videoQueue = CameraQueues.Companion.getVideoQueue();
        // TODO: Make sure post(this::trigger) works.
        videoQueue.getHandler().post(this::trigger);
    }
}
@@ -0,0 +1,25 @@
package com.mrousavy.camera.parsers

import android.hardware.camera2.CameraDevice

enum class CameraDeviceError(override val unionValue: String): JSUnionValue {
  CAMERA_ALREADY_IN_USE("camera-already-in-use"),
  TOO_MANY_OPEN_CAMERAS("too-many-open-cameras"),
  CAMERA_IS_DISABLED_BY_ANDROID("camera-is-disabled-by-android"),
  UNKNOWN_CAMERA_DEVICE_ERROR("unknown-camera-device-error"),
  UNKNOWN_FATAL_CAMERA_SERVICE_ERROR("unknown-fatal-camera-service-error"),
  DISCONNECTED("camera-has-been-disconnected");

  companion object {
    fun fromCameraDeviceError(cameraDeviceError: Int): CameraDeviceError {
      return when (cameraDeviceError) {
        CameraDevice.StateCallback.ERROR_CAMERA_IN_USE -> CAMERA_ALREADY_IN_USE
        CameraDevice.StateCallback.ERROR_MAX_CAMERAS_IN_USE -> TOO_MANY_OPEN_CAMERAS
        CameraDevice.StateCallback.ERROR_CAMERA_DISABLED -> CAMERA_IS_DISABLED_BY_ANDROID
        CameraDevice.StateCallback.ERROR_CAMERA_DEVICE -> UNKNOWN_CAMERA_DEVICE_ERROR
        CameraDevice.StateCallback.ERROR_CAMERA_SERVICE -> UNKNOWN_FATAL_CAMERA_SERVICE_ERROR
        else -> UNKNOWN_CAMERA_DEVICE_ERROR
      }
    }
  }
}
@@ -0,0 +1,20 @@
package com.mrousavy.camera.parsers

import com.mrousavy.camera.InvalidTypeScriptUnionError

enum class Flash(override val unionValue: String): JSUnionValue {
  OFF("off"),
  ON("on"),
  AUTO("auto");

  companion object: JSUnionValue.Companion<Flash> {
    override fun fromUnionValue(unionValue: String?): Flash {
      return when (unionValue) {
        "off" -> OFF
        "on" -> ON
        "auto" -> AUTO
        else -> throw InvalidTypeScriptUnionError("flash", unionValue ?: "(null)")
      }
    }
  }
}
@@ -0,0 +1,24 @@
package com.mrousavy.camera.parsers

import android.hardware.camera2.CameraCharacteristics

enum class HardwareLevel(override val unionValue: String): JSUnionValue {
  LEGACY("legacy"),
  LIMITED("limited"),
  EXTERNAL("external"),
  FULL("full"),
  LEVEL_3("level-3");

  companion object {
    fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): HardwareLevel {
      return when (cameraCharacteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)) {
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY -> LEGACY
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED -> LIMITED
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL -> EXTERNAL
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_FULL -> FULL
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_3 -> LEVEL_3
        else -> LEGACY
      }
    }
  }
}
@@ -0,0 +1,9 @@
package com.mrousavy.camera.parsers

interface JSUnionValue {
  val unionValue: String

  interface Companion<T> {
    fun fromUnionValue(unionValue: String?): T?
  }
}
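The parser enums that follow all implement this interface: unionValue carries the string that crosses the JS bridge, and the optional Companion provides the typed parse in the other direction. A minimal sketch of a new union under this pattern — the Quality enum here is hypothetical:

// Hypothetical two-value union following the JSUnionValue pattern.
enum class Quality(override val unionValue: String): JSUnionValue {
  LOW("low"),
  HIGH("high");

  companion object: JSUnionValue.Companion<Quality> {
    override fun fromUnionValue(unionValue: String?): Quality? {
      return when (unionValue) {
        "low" -> LOW
        "high" -> HIGH
        else -> null
      }
    }
  }
}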
@@ -0,0 +1,20 @@
package com.mrousavy.camera.parsers

import android.hardware.camera2.CameraCharacteristics

enum class LensFacing(override val unionValue: String): JSUnionValue {
  BACK("back"),
  FRONT("front"),
  EXTERNAL("external");

  companion object {
    fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): LensFacing {
      return when (cameraCharacteristics.get(CameraCharacteristics.LENS_FACING)!!) {
        CameraCharacteristics.LENS_FACING_BACK -> BACK
        CameraCharacteristics.LENS_FACING_FRONT -> FRONT
        CameraCharacteristics.LENS_FACING_EXTERNAL -> EXTERNAL
        else -> EXTERNAL
      }
    }
  }
}
@@ -0,0 +1,56 @@
package com.mrousavy.camera.parsers

import android.hardware.camera2.CameraCharacteristics

enum class Orientation(override val unionValue: String): JSUnionValue {
  PORTRAIT("portrait"),
  LANDSCAPE_RIGHT("landscape-right"),
  PORTRAIT_UPSIDE_DOWN("portrait-upside-down"),
  LANDSCAPE_LEFT("landscape-left");

  fun toDegrees(): Int {
    return when (this) {
      PORTRAIT -> 0
      LANDSCAPE_RIGHT -> 90
      PORTRAIT_UPSIDE_DOWN -> 180
      LANDSCAPE_LEFT -> 270
    }
  }

  fun toSensorRelativeOrientation(cameraCharacteristics: CameraCharacteristics): Orientation {
    val sensorOrientation = cameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION)!!

    // Convert target orientation to rotation degrees (0, 90, 180, 270)
    var rotationDegrees = this.toDegrees()

    // Reverse device orientation for front-facing cameras
    val facingFront = cameraCharacteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT
    if (facingFront) rotationDegrees = -rotationDegrees

    // Rotate sensor rotation by target rotation
    val newRotationDegrees = (sensorOrientation + rotationDegrees + 360) % 360

    return fromRotationDegrees(newRotationDegrees)
  }

  companion object: JSUnionValue.Companion<Orientation> {
    override fun fromUnionValue(unionValue: String?): Orientation? {
      return when (unionValue) {
        "portrait" -> PORTRAIT
        "landscape-right" -> LANDSCAPE_RIGHT
        "portrait-upside-down" -> PORTRAIT_UPSIDE_DOWN
        "landscape-left" -> LANDSCAPE_LEFT
        else -> null
      }
    }

    fun fromRotationDegrees(rotationDegrees: Int): Orientation {
      return when (rotationDegrees) {
        in 45..135 -> LANDSCAPE_RIGHT
        in 135..225 -> PORTRAIT_UPSIDE_DOWN
        in 225..315 -> LANDSCAPE_LEFT
        else -> PORTRAIT
      }
    }
  }
}
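To make the rotation arithmetic in toSensorRelativeOrientation concrete: for a front camera whose sensor is mounted at 270° with the device held landscape-left (270°), the device rotation is negated (−270), so (270 + (−270) + 360) % 360 = 0, which parses back to portrait. A standalone restatement of the same computation, with plain values instead of CameraCharacteristics:

// Re-statement of the arithmetic above, for a worked example.
fun sensorRelativeDegrees(sensorOrientation: Int, deviceDegrees: Int, facingFront: Boolean): Int {
  val rotation = if (facingFront) -deviceDegrees else deviceDegrees
  return (sensorOrientation + rotation + 360) % 360
}

// sensorRelativeDegrees(270, 270, facingFront = true) == 0  -> portrait
// sensorRelativeDegrees(90, 0, facingFront = false) == 90   -> landscape-right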
@@ -0,0 +1,19 @@
package com.mrousavy.camera.parsers

import android.content.pm.PackageManager

enum class PermissionStatus(override val unionValue: String): JSUnionValue {
  DENIED("denied"),
  NOT_DETERMINED("not-determined"),
  GRANTED("granted");

  companion object {
    fun fromPermissionStatus(status: Int): PermissionStatus {
      return when (status) {
        PackageManager.PERMISSION_DENIED -> DENIED
        PackageManager.PERMISSION_GRANTED -> GRANTED
        else -> NOT_DETERMINED
      }
    }
  }
}
@@ -0,0 +1,50 @@
package com.mrousavy.camera.parsers

import android.graphics.ImageFormat
import com.mrousavy.camera.PixelFormatNotSupportedError

@Suppress("FoldInitializerAndIfToElvis")
enum class PixelFormat(override val unionValue: String): JSUnionValue {
  YUV("yuv"),
  RGB("rgb"),
  DNG("dng"),
  NATIVE("native"),
  UNKNOWN("unknown");

  fun toImageFormat(): Int {
    val result = when (this) {
      YUV -> ImageFormat.YUV_420_888
      RGB -> ImageFormat.JPEG
      DNG -> ImageFormat.RAW_SENSOR
      NATIVE -> ImageFormat.PRIVATE
      UNKNOWN -> null
    }
    if (result == null) {
      throw PixelFormatNotSupportedError(this.unionValue)
    }
    return result
  }

  companion object: JSUnionValue.Companion<PixelFormat> {
    fun fromImageFormat(imageFormat: Int): PixelFormat {
      return when (imageFormat) {
        ImageFormat.YUV_420_888 -> YUV
        ImageFormat.JPEG, ImageFormat.DEPTH_JPEG -> RGB
        ImageFormat.RAW_SENSOR -> DNG
        ImageFormat.PRIVATE -> NATIVE
        else -> UNKNOWN
      }
    }

    override fun fromUnionValue(unionValue: String?): PixelFormat? {
      return when (unionValue) {
        "yuv" -> YUV
        "rgb" -> RGB
        "dng" -> DNG
        "native" -> NATIVE
        "unknown" -> UNKNOWN
        else -> null
      }
    }
  }
}
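A quick round-trip through the mapping above — note the asymmetry: RGB serializes to ImageFormat.JPEG, while both JPEG and DEPTH_JPEG parse back to RGB, and UNKNOWN has no Android equivalent so toImageFormat() throws:

import android.graphics.ImageFormat

// Round-trip sketch of the PixelFormat mapping.
val format = PixelFormat.fromImageFormat(ImageFormat.YUV_420_888) // YUV
val imageFormat = format.toImageFormat()                          // ImageFormat.YUV_420_888
// PixelFormat.UNKNOWN.toImageFormat() throws PixelFormatNotSupportedError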
@@ -0,0 +1,18 @@
package com.mrousavy.camera.parsers

enum class QualityPrioritization(override val unionValue: String): JSUnionValue {
  SPEED("speed"),
  BALANCED("balanced"),
  QUALITY("quality");

  companion object: JSUnionValue.Companion<QualityPrioritization> {
    override fun fromUnionValue(unionValue: String?): QualityPrioritization {
      return when (unionValue) {
        "speed" -> SPEED
        "balanced" -> BALANCED
        "quality" -> QUALITY
        else -> BALANCED
      }
    }
  }
}
@@ -0,0 +1,16 @@
package com.mrousavy.camera.parsers

enum class Torch(override val unionValue: String): JSUnionValue {
  OFF("off"),
  ON("on");

  companion object: JSUnionValue.Companion<Torch> {
    override fun fromUnionValue(unionValue: String?): Torch {
      return when (unionValue) {
        "off" -> OFF
        "on" -> ON
        else -> OFF
      }
    }
  }
}
@@ -0,0 +1,25 @@
package com.mrousavy.camera.parsers

import android.media.MediaRecorder

enum class VideoCodec(override val unionValue: String): JSUnionValue {
  H264("h264"),
  H265("h265");

  fun toVideoCodec(): Int {
    return when (this) {
      H264 -> MediaRecorder.VideoEncoder.H264
      H265 -> MediaRecorder.VideoEncoder.HEVC
    }
  }

  companion object: JSUnionValue.Companion<VideoCodec> {
    override fun fromUnionValue(unionValue: String?): VideoCodec {
      return when (unionValue) {
        "h264" -> H264
        "h265" -> H265
        else -> H264
      }
    }
  }
}
@@ -0,0 +1,25 @@
package com.mrousavy.camera.parsers

import com.mrousavy.camera.InvalidTypeScriptUnionError

enum class VideoFileType(override val unionValue: String): JSUnionValue {
  MOV("mov"),
  MP4("mp4");

  fun toExtension(): String {
    return when (this) {
      MOV -> ".mov"
      MP4 -> ".mp4"
    }
  }

  companion object: JSUnionValue.Companion<VideoFileType> {
    override fun fromUnionValue(unionValue: String?): VideoFileType {
      return when (unionValue) {
        "mov" -> MOV
        "mp4" -> MP4
        else -> throw InvalidTypeScriptUnionError("fileType", unionValue ?: "(null)")
      }
    }
  }
}
@@ -0,0 +1,59 @@
package com.mrousavy.camera.parsers

import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_OFF
import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_ON
import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_OFF
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_ON

enum class VideoStabilizationMode(override val unionValue: String): JSUnionValue {
  OFF("off"),
  STANDARD("standard"),
  CINEMATIC("cinematic"),
  CINEMATIC_EXTENDED("cinematic-extended");

  fun toDigitalStabilizationMode(): Int {
    return when (this) {
      OFF -> CONTROL_VIDEO_STABILIZATION_MODE_OFF
      STANDARD -> CONTROL_VIDEO_STABILIZATION_MODE_ON
      CINEMATIC -> CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION
      else -> CONTROL_VIDEO_STABILIZATION_MODE_OFF
    }
  }

  fun toOpticalStabilizationMode(): Int {
    return when (this) {
      OFF -> LENS_OPTICAL_STABILIZATION_MODE_OFF
      CINEMATIC_EXTENDED -> LENS_OPTICAL_STABILIZATION_MODE_ON
      else -> LENS_OPTICAL_STABILIZATION_MODE_OFF
    }
  }

  companion object: JSUnionValue.Companion<VideoStabilizationMode> {
    override fun fromUnionValue(unionValue: String?): VideoStabilizationMode? {
      return when (unionValue) {
        "off" -> OFF
        "standard" -> STANDARD
        "cinematic" -> CINEMATIC
        "cinematic-extended" -> CINEMATIC_EXTENDED
        else -> null
      }
    }

    fun fromDigitalVideoStabilizationMode(stabilizationMode: Int): VideoStabilizationMode {
      return when (stabilizationMode) {
        CONTROL_VIDEO_STABILIZATION_MODE_OFF -> OFF
        CONTROL_VIDEO_STABILIZATION_MODE_ON -> STANDARD
        CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION -> CINEMATIC
        else -> OFF
      }
    }

    fun fromOpticalVideoStabilizationMode(stabilizationMode: Int): VideoStabilizationMode {
      return when (stabilizationMode) {
        LENS_OPTICAL_STABILIZATION_MODE_OFF -> OFF
        LENS_OPTICAL_STABILIZATION_MODE_ON -> CINEMATIC_EXTENDED
        else -> OFF
      }
    }
  }
}
@@ -0,0 +1,22 @@
package com.mrousavy.camera.utils

import com.facebook.react.bridge.*

private fun makeErrorCauseMap(throwable: Throwable): ReadableMap {
  val map = Arguments.createMap()
  map.putString("message", throwable.message)
  map.putString("stacktrace", throwable.stackTraceToString())
  if (throwable.cause != null) {
    map.putMap("cause", makeErrorCauseMap(throwable.cause!!))
  }
  return map
}

fun makeErrorMap(code: String? = null, message: String? = null, throwable: Throwable? = null, userInfo: WritableMap? = null): ReadableMap {
  val map = Arguments.createMap()
  map.putString("code", code)
  map.putString("message", message)
  map.putMap("cause", if (throwable != null) makeErrorCauseMap(throwable) else null)
  map.putMap("userInfo", userInfo)
  return map
}
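For reference, a sketch of the structure this produces for a throwable with a nested cause — the keys are exactly the ones put above; the error code and messages are hypothetical:

// Hypothetical call site:
val error = makeErrorMap(
  code = "session/camera-not-ready",
  message = "The camera is not ready yet!",
  throwable = RuntimeException("outer", IllegalStateException("inner"))
)
// Resulting structure (conceptually):
// {
//   code: "session/camera-not-ready",
//   message: "The camera is not ready yet!",
//   cause: { message: "outer", stacktrace: "...", cause: { message: "inner", stacktrace: "..." } },
//   userInfo: null
// }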
@@ -0,0 +1,16 @@
package com.mrousavy.camera.utils

import com.facebook.react.bridge.Promise
import com.mrousavy.camera.CameraError
import com.mrousavy.camera.UnknownCameraError

inline fun withPromise(promise: Promise, closure: () -> Any?) {
  try {
    val result = closure()
    promise.resolve(result)
  } catch (e: Throwable) {
    e.printStackTrace()
    val error = if (e is CameraError) e else UnknownCameraError(e)
    promise.reject("${error.domain}/${error.id}", error.message, error.cause)
  }
}
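Usage sketch: module methods wrap their body in withPromise so any CameraError rejects with a stable "domain/id" code and everything else falls back to UnknownCameraError. The method below is hypothetical; only withPromise itself comes from this file:

import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactMethod

// Hypothetical React method using withPromise:
@ReactMethod
fun getAvailableVideoCodecs(promise: Promise) {
  withPromise(promise) {
    // Any Throwable thrown here is converted into a promise rejection.
    val array = Arguments.createArray()
    array.pushString("h264")
    array.pushString("h265")
    array
  }
}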