Init ExtensionsManager and ProcessCameraProvider before checking Extension availability (#48)

* Init ExtensionsManager and ProcessCameraProvider before checking Extension availability

* Remove withSuspendablePromise

* Async init ProcessCameraProvider

* Remove that unnecessary Future caching again

* Post `update` on previewView

Fixes "previewView.display must not be null!" error
Marc Rousavy 2021-03-12 10:45:23 +01:00 committed by GitHub
parent c4d7d81c36
commit d85126d883
3 changed files with 171 additions and 171 deletions
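
For orientation, here is a minimal sketch (not taken verbatim from the diff) of the initialization pattern this commit applies: ExtensionsManager.init() and ProcessCameraProvider.getInstance() both return ListenableFutures, and the HDR/Night extenders only answer isExtensionAvailable() reliably after both have completed, so they are awaited before the check. The suspend helper name isHdrAvailable is invented here purely for illustration; the real check lives in CameraViewModule.getAvailableCameraDevices() below.

import android.content.Context
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageCapture
import androidx.camera.extensions.ExtensionsManager
import androidx.camera.extensions.HdrImageCaptureExtender
import androidx.camera.lifecycle.ProcessCameraProvider
import kotlinx.coroutines.guava.await

// Hypothetical helper for illustration only, not part of the library or of this commit.
suspend fun isHdrAvailable(context: Context, cameraSelector: CameraSelector): Boolean {
  // Both calls return ListenableFutures; awaiting them ensures the extensions
  // runtime and the camera provider are initialized before the query below.
  ExtensionsManager.init(context).await()
  ProcessCameraProvider.getInstance(context).await()

  val hdrExtender = HdrImageCaptureExtender.create(ImageCapture.Builder())
  return hdrExtender.isExtensionAvailable(cameraSelector)
}

The previewView.post { ... } wrapper added to CameraView.update() follows the same idea of deferring work until its dependency is ready: the update only runs once the view can be used, which is what fixes the "previewView.display must not be null!" error.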


@@ -100,6 +100,7 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
   private val reactContext: ReactContext
     get() = context as ReactContext

+  @Suppress("JoinDeclarationAndAssignment")
   internal val previewView: PreviewView
   private val cameraExecutor = Executors.newSingleThreadExecutor()
   internal val takePhotoExecutor = Executors.newSingleThreadExecutor()
@@ -192,30 +193,35 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
   /**
    * Invalidate all React Props and reconfigure the device
    */
-  fun update(changedProps: ArrayList<String>) = GlobalScope.launch(Dispatchers.Main) {
-    try {
-      val shouldReconfigureSession = changedProps.containsAny(propsThatRequireSessionReconfiguration)
-      val shouldReconfigureZoom = shouldReconfigureSession || changedProps.contains("zoom")
-      val shouldReconfigureTorch = shouldReconfigureSession || changedProps.contains("torch")
-
-      if (changedProps.contains("isActive")) {
-        updateLifecycleState()
-      }
-      if (shouldReconfigureSession) {
-        configureSession()
-      }
-      if (shouldReconfigureZoom) {
-        val scaled = (zoom.toFloat() * (maxZoom - minZoom)) + minZoom
-        camera!!.cameraControl.setZoomRatio(scaled)
-      }
-      if (shouldReconfigureTorch) {
-        camera!!.cameraControl.enableTorch(torch == "on")
-      }
-      if (changedProps.contains("enableZoomGesture")) {
-        setOnTouchListener(if (enableZoomGesture) touchEventListener else null)
-      }
-    } catch (e: CameraError) {
-      invokeOnError(e)
-    }
+  fun update(changedProps: ArrayList<String>) = previewView.post {
+    // TODO: Does this introduce too much overhead?
+    // I need to .post on the previewView because it might've not been initialized yet
+    // I need to use GlobalScope.launch because of the suspend fun [configureSession]
+    GlobalScope.launch(Dispatchers.Main) {
+      try {
+        val shouldReconfigureSession = changedProps.containsAny(propsThatRequireSessionReconfiguration)
+        val shouldReconfigureZoom = shouldReconfigureSession || changedProps.contains("zoom")
+        val shouldReconfigureTorch = shouldReconfigureSession || changedProps.contains("torch")
+
+        if (changedProps.contains("isActive")) {
+          updateLifecycleState()
+        }
+        if (shouldReconfigureSession) {
+          configureSession()
+        }
+        if (shouldReconfigureZoom) {
+          val scaled = (zoom.toFloat() * (maxZoom - minZoom)) + minZoom
+          camera!!.cameraControl.setZoomRatio(scaled)
+        }
+        if (shouldReconfigureTorch) {
+          camera!!.cameraControl.enableTorch(torch == "on")
+        }
+        if (changedProps.contains("enableZoomGesture")) {
+          setOnTouchListener(if (enableZoomGesture) touchEventListener else null)
+        }
+      } catch (e: CameraError) {
+        invokeOnError(e)
+      }
+    }
   }
@@ -225,7 +231,8 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
   @SuppressLint("UnsafeExperimentalUsageError", "RestrictedApi")
   private suspend fun configureSession() {
     try {
-      Log.d(REACT_CLASS, "Configuring session...")
+      val startTime = System.currentTimeMillis()
+      Log.i(REACT_CLASS, "Configuring session...")
       if (ContextCompat.checkSelfPermission(context, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
         throw MicrophonePermissionError()
       }
@@ -236,12 +243,12 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
         throw NoCameraDeviceError()
       }
       if (format != null)
-        Log.d(REACT_CLASS, "Configuring session with Camera ID $cameraId and custom format...")
+        Log.i(REACT_CLASS, "Configuring session with Camera ID $cameraId and custom format...")
       else
-        Log.d(REACT_CLASS, "Configuring session with Camera ID $cameraId and default format options...")
+        Log.i(REACT_CLASS, "Configuring session with Camera ID $cameraId and default format options...")

       // Used to bind the lifecycle of cameras to the lifecycle owner
-      val cameraProvider = ProcessCameraProvider.getInstance(context).await()
+      val cameraProvider = ProcessCameraProvider.getInstance(reactContext).await()

       val cameraSelector = CameraSelector.Builder().byID(cameraId!!).build()
@@ -257,7 +264,7 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
       if (format == null) {
         // let CameraX automatically find best resolution for the target aspect ratio
-        Log.d(REACT_CLASS, "No custom format has been set, CameraX will automatically determine best configuration...")
+        Log.i(REACT_CLASS, "No custom format has been set, CameraX will automatically determine best configuration...")

         val aspectRatio = aspectRatio(previewView.width, previewView.height)
         previewBuilder.setTargetAspectRatio(aspectRatio)
         imageCaptureBuilder.setTargetAspectRatio(aspectRatio)
@@ -265,7 +272,7 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
       } else {
         // User has selected a custom format={}. Use that
         val format = DeviceFormat(format!!)
-        Log.d(REACT_CLASS, "Using custom format - photo: ${format.photoSize}, video: ${format.videoSize} @ $fps FPS")
+        Log.i(REACT_CLASS, "Using custom format - photo: ${format.photoSize}, video: ${format.videoSize} @ $fps FPS")
         previewBuilder.setDefaultResolution(format.photoSize)
         imageCaptureBuilder.setDefaultResolution(format.photoSize)
         videoCaptureBuilder.setDefaultResolution(format.photoSize)
@@ -275,7 +282,7 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
         // Camera supports the given FPS (frame rate range)
         val frameDuration = (1.0 / fps.toDouble()).toLong() * 1_000_000_000
-        Log.d(REACT_CLASS, "Setting AE_TARGET_FPS_RANGE to $fps-$fps, and SENSOR_FRAME_DURATION to $frameDuration")
+        Log.i(REACT_CLASS, "Setting AE_TARGET_FPS_RANGE to $fps-$fps, and SENSOR_FRAME_DURATION to $frameDuration")

         Camera2Interop.Extender(previewBuilder)
           .setCaptureRequestOption(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, Range(fps, fps))
           .setCaptureRequestOption(CaptureRequest.SENSOR_FRAME_DURATION, frameDuration)
@@ -333,7 +340,8 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner {
       minZoom = camera!!.cameraInfo.zoomState.value?.minZoomRatio ?: 1f
       maxZoom = camera!!.cameraInfo.zoomState.value?.maxZoomRatio ?: 1f

-      Log.d(REACT_CLASS, "Session configured! Camera: ${camera!!}")
+      val duration = System.currentTimeMillis() - startTime
+      Log.i(REACT_CLASS, "Session configured in $duration ms! Camera: ${camera!!}")
       invokeOnInitialized()
     } catch (exc: Throwable) {
       throw when (exc) {


@@ -11,17 +11,18 @@ import android.os.Build
 import android.util.Log
 import androidx.camera.core.CameraSelector
 import androidx.camera.core.ImageCapture
+import androidx.camera.extensions.ExtensionsManager
 import androidx.camera.extensions.HdrImageCaptureExtender
 import androidx.camera.extensions.NightImageCaptureExtender
+import androidx.camera.lifecycle.ProcessCameraProvider
 import androidx.core.content.ContextCompat
 import com.facebook.react.bridge.*
 import com.facebook.react.modules.core.PermissionAwareActivity
 import com.facebook.react.modules.core.PermissionListener
 import com.mrousavy.camera.parsers.*
 import com.mrousavy.camera.utils.*
-import kotlinx.coroutines.Dispatchers
-import kotlinx.coroutines.GlobalScope
-import kotlinx.coroutines.launch
+import kotlinx.coroutines.*
+import kotlinx.coroutines.guava.await

 class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
   companion object {
@@ -109,152 +110,154 @@ class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBase
   }

   // TODO: This uses the Camera2 API to list all characteristics of a camera device and therefore doesn't work with Camera1. Find a way to use CameraX for this
   // https://issuetracker.google.com/issues/179925896
   @ReactMethod
   fun getAvailableCameraDevices(promise: Promise) {
-    withPromise(promise) {
-      val manager = reactApplicationContext.getSystemService(Context.CAMERA_SERVICE) as? CameraManager
-        ?: throw CameraManagerUnavailableError()
-
-      val cameraDevices: WritableArray = Arguments.createArray()
-
-      manager.cameraIdList.forEach loop@{ id ->
-        val cameraSelector = CameraSelector.Builder().byID(id).build()
-        // TODO: ImageCapture.Builder - I'm not setting the target resolution, does that matter?
-        val imageCaptureBuilder = ImageCapture.Builder()
-
-        val characteristics = manager.getCameraCharacteristics(id)
-        val hardwareLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!!
-
-        // Filters out cameras that are LEGACY hardware level. Those don't support Preview + Photo Capture + Video Capture at the same time.
-        if (hardwareLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
-          Log.i(
-            REACT_CLASS,
-            "Skipping Camera #$id because it does not meet the minimum requirements for react-native-vision-camera. " +
-              "See the tables at https://developer.android.com/reference/android/hardware/camera2/CameraDevice#regular-capture for more information."
-          )
-          return@loop
-        }
-
-        val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)!!
-        val isMultiCam = Build.VERSION.SDK_INT >= Build.VERSION_CODES.P &&
-          capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA)
-        val deviceTypes = characteristics.getDeviceTypes()
-
-        val cameraConfig = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
-        val lensFacing = characteristics.get(CameraCharacteristics.LENS_FACING)!!
-        val hasFlash = characteristics.get(CameraCharacteristics.FLASH_INFO_AVAILABLE)!!
-        val maxScalerZoom = characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_MAX_DIGITAL_ZOOM)!!
-        val supportsDepthCapture = Build.VERSION.SDK_INT >= Build.VERSION_CODES.M &&
-          capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT)
-        val supportsRawCapture = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_RAW)
-        val isoRange = characteristics.get(CameraCharacteristics.SENSOR_INFO_SENSITIVITY_RANGE)
-        val stabilizationModes = characteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES)!! // only digital, no optical
-        val zoomRange = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R)
-          characteristics.get(CameraCharacteristics.CONTROL_ZOOM_RATIO_RANGE)
-        else null
-        val name = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P)
-          characteristics.get(CameraCharacteristics.INFO_VERSION)
-        else null
-        val fpsRanges = characteristics.get(CameraCharacteristics.CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES)!!
-
-        var supportsHdr = false
-        var supportsLowLightBoost = false
-        try {
-          val hdrExtension = HdrImageCaptureExtender.create(imageCaptureBuilder)
-          supportsHdr = hdrExtension.isExtensionAvailable(cameraSelector)
-          val nightExtension = NightImageCaptureExtender.create(imageCaptureBuilder)
-          supportsLowLightBoost = nightExtension.isExtensionAvailable(cameraSelector)
-        } catch (e: Throwable) {
-          // error on checking availability. falls back to "false"
-          Log.e(REACT_CLASS, "Failed to check HDR/Night Mode extension availability.", e)
-        }
-
-        val fieldOfView = characteristics.getFieldOfView()
-
-        val map = Arguments.createMap()
-        val formats = Arguments.createArray()
-        map.putString("id", id)
-        map.putArray("devices", deviceTypes)
-        map.putString("position", parseLensFacing(lensFacing))
-        map.putString("name", name ?: "${parseLensFacing(lensFacing)} ($id)")
-        map.putBoolean("hasFlash", hasFlash)
-        map.putBoolean("hasTorch", hasFlash)
-        map.putBoolean("isMultiCam", isMultiCam)
-        map.putBoolean("supportsRawCapture", supportsRawCapture)
-        map.putBoolean("supportsDepthCapture", supportsDepthCapture)
-        map.putBoolean("supportsLowLightBoost", supportsLowLightBoost)
-        if (zoomRange != null) {
-          map.putDouble("minZoom", zoomRange.lower.toDouble())
-          map.putDouble("maxZoom", zoomRange.upper.toDouble())
-        } else {
-          map.putDouble("minZoom", 1.0)
-          map.putDouble("maxZoom", maxScalerZoom.toDouble())
-        }
-        map.putDouble("neutralZoom", characteristics.neutralZoomPercent.toDouble())
-
-        val maxImageOutputSize = cameraConfig.getOutputSizes(ImageReader::class.java).maxByOrNull { it.width * it.height }!!
-
-        // TODO: Should I really check MediaRecorder::class instead of SurfaceView::class?
-        // Recording should always be done in the most efficient format, which is the format native to the camera framework
-        cameraConfig.getOutputSizes(MediaRecorder::class.java).forEach { size ->
-          val isHighestPhotoQualitySupported = areUltimatelyEqual(size, maxImageOutputSize)
-
-          // Get the number of seconds that each frame will take to process
-          val secondsPerFrame = cameraConfig.getOutputMinFrameDuration(MediaRecorder::class.java, size) / 1_000_000_000.0
-
-          val frameRateRanges = Arguments.createArray()
-          if (secondsPerFrame > 0) {
-            val fps = (1.0 / secondsPerFrame).toInt()
-            val frameRateRange = Arguments.createMap()
-            frameRateRange.putInt("minFrameRate", 1)
-            frameRateRange.putInt("maxFrameRate", fps)
-            frameRateRanges.pushMap(frameRateRange)
-          }
-          fpsRanges.forEach { range ->
-            val frameRateRange = Arguments.createMap()
-            frameRateRange.putInt("minFrameRate", range.lower)
-            frameRateRange.putInt("maxFrameRate", range.upper)
-            frameRateRanges.pushMap(frameRateRange)
-          }
-
-          // TODO Revisit getAvailableCameraDevices (colorSpaces, more than YUV?)
-          val colorSpaces = Arguments.createArray()
-          colorSpaces.pushString("yuv")
-
-          // TODO Revisit getAvailableCameraDevices (more accurate video stabilization modes)
-          val videoStabilizationModes = Arguments.createArray()
-          if (stabilizationModes.contains(CameraCharacteristics.CONTROL_VIDEO_STABILIZATION_MODE_OFF))
-            videoStabilizationModes.pushString("off")
-          if (stabilizationModes.contains(CameraCharacteristics.CONTROL_VIDEO_STABILIZATION_MODE_ON)) {
-            videoStabilizationModes.pushString("auto")
-            videoStabilizationModes.pushString("standard")
-          }
-
-          val format = Arguments.createMap()
-          format.putDouble("photoHeight", size.height.toDouble())
-          format.putDouble("photoWidth", size.width.toDouble())
-          format.putDouble("videoHeight", size.height.toDouble()) // TODO: Revisit getAvailableCameraDevices (videoHeight == photoHeight?)
-          format.putDouble("videoWidth", size.width.toDouble()) // TODO: Revisit getAvailableCameraDevices (videoWidth == photoWidth?)
-          format.putBoolean("isHighestPhotoQualitySupported", isHighestPhotoQualitySupported)
-          format.putInt("maxISO", isoRange?.upper)
-          format.putInt("minISO", isoRange?.lower)
-          format.putDouble("fieldOfView", fieldOfView) // TODO: Revisit getAvailableCameraDevices (is fieldOfView accurate?)
-          format.putDouble("maxZoom", (zoomRange?.upper ?: maxScalerZoom).toDouble())
-          format.putArray("colorSpaces", colorSpaces)
-          format.putBoolean("supportsVideoHDR", false) // TODO: supportsVideoHDR
-          format.putBoolean("supportsPhotoHDR", supportsHdr)
-          format.putArray("frameRateRanges", frameRateRanges)
-          format.putString("autoFocusSystem", "none") // TODO: Revisit getAvailableCameraDevices (autoFocusSystem) (CameraCharacteristics.CONTROL_AF_AVAILABLE_MODES or CameraCharacteristics.LENS_INFO_FOCUS_DISTANCE_CALIBRATION)
-          format.putArray("videoStabilizationModes", videoStabilizationModes)
-          formats.pushMap(format)
-        }
-
-        map.putArray("formats", formats)
-        cameraDevices.pushMap(map)
-      }
-
-      return@withPromise cameraDevices
-    }
+    val startTime = System.currentTimeMillis()
+    GlobalScope.launch(Dispatchers.Main) {
+      withPromise(promise) {
+        // I need to init those because the HDR/Night Mode Extension expects them to be initialized
+        val extensionsManager = ExtensionsManager.init(reactApplicationContext).await()
+        val processCameraProvider = ProcessCameraProvider.getInstance(reactApplicationContext).await()
+
+        val manager = reactApplicationContext.getSystemService(Context.CAMERA_SERVICE) as? CameraManager
+          ?: throw CameraManagerUnavailableError()
+
+        val cameraDevices: WritableArray = Arguments.createArray()
+
+        manager.cameraIdList.forEach loop@{ id ->
+          val cameraSelector = CameraSelector.Builder().byID(id).build()
+          // TODO: ImageCapture.Builder - I'm not setting the target resolution, does that matter?
+          val imageCaptureBuilder = ImageCapture.Builder()
+
+          val characteristics = manager.getCameraCharacteristics(id)
+          val hardwareLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!!
+
+          // Filters out cameras that are LEGACY hardware level. Those don't support Preview + Photo Capture + Video Capture at the same time.
+          if (hardwareLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
+            Log.i(
+              REACT_CLASS,
+              "Skipping Camera #$id because it does not meet the minimum requirements for react-native-vision-camera. " +
+                "See the tables at https://developer.android.com/reference/android/hardware/camera2/CameraDevice#regular-capture for more information."
+            )
+            return@loop
+          }
+
+          val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)!!
+          val isMultiCam = Build.VERSION.SDK_INT >= Build.VERSION_CODES.P &&
+            capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA)
+          val deviceTypes = characteristics.getDeviceTypes()

+          val cameraConfig = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
+          val lensFacing = characteristics.get(CameraCharacteristics.LENS_FACING)!!
+          val hasFlash = characteristics.get(CameraCharacteristics.FLASH_INFO_AVAILABLE)!!
+          val maxScalerZoom = characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_MAX_DIGITAL_ZOOM)!!
+          val supportsDepthCapture = Build.VERSION.SDK_INT >= Build.VERSION_CODES.M &&
+            capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT)
+          val supportsRawCapture = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_RAW)
+          val isoRange = characteristics.get(CameraCharacteristics.SENSOR_INFO_SENSITIVITY_RANGE)
+          val stabilizationModes = characteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES)!! // only digital, no optical
+          val zoomRange = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R)
+            characteristics.get(CameraCharacteristics.CONTROL_ZOOM_RATIO_RANGE)
+          else null
+          val name = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P)
+            characteristics.get(CameraCharacteristics.INFO_VERSION)
+          else null
+          val fpsRanges = characteristics.get(CameraCharacteristics.CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES)!!
+
+          val hdrExtension = HdrImageCaptureExtender.create(imageCaptureBuilder)
+          val supportsHdr = hdrExtension.isExtensionAvailable(cameraSelector)
+          val nightExtension = NightImageCaptureExtender.create(imageCaptureBuilder)
+          val supportsLowLightBoost = nightExtension.isExtensionAvailable(cameraSelector)
+
+          val fieldOfView = characteristics.getFieldOfView()
+
+          val map = Arguments.createMap()
+          val formats = Arguments.createArray()
+          map.putString("id", id)
+          map.putArray("devices", deviceTypes)
+          map.putString("position", parseLensFacing(lensFacing))
+          map.putString("name", name ?: "${parseLensFacing(lensFacing)} ($id)")
+          map.putBoolean("hasFlash", hasFlash)
+          map.putBoolean("hasTorch", hasFlash)
+          map.putBoolean("isMultiCam", isMultiCam)
+          map.putBoolean("supportsRawCapture", supportsRawCapture)
+          map.putBoolean("supportsDepthCapture", supportsDepthCapture)
+          map.putBoolean("supportsLowLightBoost", supportsLowLightBoost)
+          if (zoomRange != null) {
+            map.putDouble("minZoom", zoomRange.lower.toDouble())
+            map.putDouble("maxZoom", zoomRange.upper.toDouble())
+          } else {
+            map.putDouble("minZoom", 1.0)
+            map.putDouble("maxZoom", maxScalerZoom.toDouble())
+          }
+          map.putDouble("neutralZoom", characteristics.neutralZoomPercent.toDouble())
+
+          val maxImageOutputSize = cameraConfig.getOutputSizes(ImageReader::class.java).maxByOrNull { it.width * it.height }!!
+
+          // TODO: Should I really check MediaRecorder::class instead of SurfaceView::class?
+          // Recording should always be done in the most efficient format, which is the format native to the camera framework
+          cameraConfig.getOutputSizes(MediaRecorder::class.java).forEach { size ->
+            val isHighestPhotoQualitySupported = areUltimatelyEqual(size, maxImageOutputSize)
+
+            // Get the number of seconds that each frame will take to process
+            val secondsPerFrame = cameraConfig.getOutputMinFrameDuration(MediaRecorder::class.java, size) / 1_000_000_000.0
+
+            val frameRateRanges = Arguments.createArray()
+            if (secondsPerFrame > 0) {
+              val fps = (1.0 / secondsPerFrame).toInt()
+              val frameRateRange = Arguments.createMap()
+              frameRateRange.putInt("minFrameRate", 1)
+              frameRateRange.putInt("maxFrameRate", fps)
+              frameRateRanges.pushMap(frameRateRange)
+            }
+            fpsRanges.forEach { range ->
+              val frameRateRange = Arguments.createMap()
+              frameRateRange.putInt("minFrameRate", range.lower)
+              frameRateRange.putInt("maxFrameRate", range.upper)
+              frameRateRanges.pushMap(frameRateRange)
+            }
+
+            // TODO Revisit getAvailableCameraDevices (colorSpaces, more than YUV?)
+            val colorSpaces = Arguments.createArray()
+            colorSpaces.pushString("yuv")
+
+            // TODO Revisit getAvailableCameraDevices (more accurate video stabilization modes)
+            val videoStabilizationModes = Arguments.createArray()
+            if (stabilizationModes.contains(CameraCharacteristics.CONTROL_VIDEO_STABILIZATION_MODE_OFF))
+              videoStabilizationModes.pushString("off")
+            if (stabilizationModes.contains(CameraCharacteristics.CONTROL_VIDEO_STABILIZATION_MODE_ON)) {
+              videoStabilizationModes.pushString("auto")
+              videoStabilizationModes.pushString("standard")
+            }
+
+            val format = Arguments.createMap()
+            format.putDouble("photoHeight", size.height.toDouble())
+            format.putDouble("photoWidth", size.width.toDouble())
+            format.putDouble("videoHeight", size.height.toDouble()) // TODO: Revisit getAvailableCameraDevices (videoHeight == photoHeight?)
+            format.putDouble("videoWidth", size.width.toDouble()) // TODO: Revisit getAvailableCameraDevices (videoWidth == photoWidth?)
+            format.putBoolean("isHighestPhotoQualitySupported", isHighestPhotoQualitySupported)
+            format.putInt("maxISO", isoRange?.upper)
+            format.putInt("minISO", isoRange?.lower)
+            format.putDouble("fieldOfView", fieldOfView) // TODO: Revisit getAvailableCameraDevices (is fieldOfView accurate?)
+            format.putDouble("maxZoom", (zoomRange?.upper ?: maxScalerZoom).toDouble())
+            format.putArray("colorSpaces", colorSpaces)
+            format.putBoolean("supportsVideoHDR", false) // TODO: supportsVideoHDR
+            format.putBoolean("supportsPhotoHDR", supportsHdr)
+            format.putArray("frameRateRanges", frameRateRanges)
+            format.putString("autoFocusSystem", "none") // TODO: Revisit getAvailableCameraDevices (autoFocusSystem) (CameraCharacteristics.CONTROL_AF_AVAILABLE_MODES or CameraCharacteristics.LENS_INFO_FOCUS_DISTANCE_CALIBRATION)
+            format.putArray("videoStabilizationModes", videoStabilizationModes)
+            formats.pushMap(format)
+          }
+
+          map.putArray("formats", formats)
+          cameraDevices.pushMap(map)
+        }
+
+        val difference = System.currentTimeMillis() - startTime
+        Log.w(REACT_CLASS, "CameraViewModule::getAvailableCameraDevices took: $difference ms")
+        return@withPromise cameraDevices
+      }
+    }
   }


@@ -14,14 +14,3 @@ inline fun withPromise(promise: Promise, closure: () -> Any?) {
     promise.reject("${error.domain}/${error.id}", error.message, error.cause)
   }
 }
-
-inline fun withSuspendablePromise(promise: Promise, closure: () -> Any?) {
-  try {
-    val result = closure()
-    promise.resolve(result)
-  } catch (e: Throwable) {
-    e.printStackTrace()
-    val error = if (e is CameraError) e else UnknownCameraError(e)
-    promise.reject("${error.domain}/${error.id}", error.message, error.cause)
-  }
-}