feat: New JS API for useCameraDevice
and useCameraFormat
and much faster getAvailableCameraDevices()
(#1784)
* Update podfile * Update useCameraFormat.ts * Update API * Delete FormatFilter.md * Format CameraViewManager.m ObjC style * Make `getAvailableCameraDevices` synchronous/blocking * Create some docs * fix: Fix HardwareLevel types * fix: Use new device/format API * Use 60 FPS format as an example * Replace `Camera.getAvailableCameraDevices` with new `CameraDevices` API/Module * Fix Lint * KTLint options * Use continuation indent of 8 * Use 2 spaces for indent * Update .editorconfig * Format code * Update .editorconfig * Format more * Update VideoStabilizationMode.kt * fix: Expose `CameraDevicesManager` to ObjC * Update CameraPage.tsx * fix: `requiresMainQueueSetup() -> false` * Always prefer higher resolution * Update CameraDevicesManager.swift * Update CameraPage.tsx * Also filter pixelFormat * fix: Add AVFoundation import
This commit is contained in:
parent
9eed89aac6
commit
977b859e46
@ -57,17 +57,6 @@ For debugging purposes you can use the `id` or `name` properties to log and comp
* For a single Wide-Angle camera, this would be `["wide-angle-camera"]`
* For a Triple-Camera, this would be `["wide-angle-camera", "ultra-wide-angle-camera", "telephoto-camera"]`

You can use the helper function `parsePhysicalDeviceTypes` to convert a list of physical devices to a single device descriptor type which can also describe virtual devices:

```ts
console.log(device.devices)
// --> ["wide-angle-camera", "ultra-wide-angle-camera", "telephoto-camera"]

const deviceType = parsePhysicalDeviceTypes(device.devices)
console.log(deviceType)
// --> "triple-camera"
```

Always choose a camera device that is best fitted for your use-case; so you might filter out any cameras that do not support flash, have low zoom values, are not on the back side of the phone, do not contain a format with high resolution or fps, and more.

:::caution
|
@ -1,6 +1,15 @@
|
||||
[*.{kt,kts}]
|
||||
indent_size=2
|
||||
indent_style=space
|
||||
indent_size=2
|
||||
continuation_indent_size=4
|
||||
insert_final_newline=true
|
||||
max_line_length=off
|
||||
disabled_rules=no-wildcard-imports
|
||||
max_line_length=140
|
||||
ktlint_code_style=android_studio
|
||||
ktlint_standard=enabled
|
||||
ktlint_experimental=enabled
|
||||
ktlint_standard_filename=disabled # dont require PascalCase filenames
|
||||
ktlint_standard_no-wildcard-imports=disabled # allow .* imports
|
||||
ktlint_function_signature_rule_force_multiline_when_parameter_count_greater_or_equal_than=5
|
||||
ktlint_function_signature_body_expression_wrapping=multiline
|
||||
ij_kotlin_allow_trailing_comma_on_call_site=false
|
||||
ij_kotlin_allow_trailing_comma=false
|
||||
|
@ -0,0 +1,84 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.content.Context
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.util.Log
|
||||
import com.facebook.react.bridge.Arguments
|
||||
import com.facebook.react.bridge.ReactApplicationContext
|
||||
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||
import com.facebook.react.bridge.ReactMethod
|
||||
import com.facebook.react.bridge.ReadableArray
|
||||
import com.facebook.react.modules.core.DeviceEventManagerModule
|
||||
import com.mrousavy.camera.core.CameraDeviceDetails
|
||||
|
||||
class CameraDevicesManager(private val reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||
companion object {
|
||||
private const val TAG = "CameraDevices"
|
||||
}
|
||||
private val cameraManager = reactContext.getSystemService(Context.CAMERA_SERVICE) as CameraManager
|
||||
|
||||
private val callback = object : CameraManager.AvailabilityCallback() {
|
||||
private var devices = cameraManager.cameraIdList.toMutableList()
|
||||
|
||||
// Check if device is still physically connected (even if onCameraUnavailable() is called)
|
||||
private fun isDeviceConnected(cameraId: String): Boolean =
|
||||
try {
|
||||
cameraManager.getCameraCharacteristics(cameraId)
|
||||
true
|
||||
} catch (_: Throwable) {
|
||||
false
|
||||
}
|
||||
|
||||
override fun onCameraAvailable(cameraId: String) {
|
||||
Log.i(TAG, "Camera #$cameraId: Available!")
|
||||
if (!devices.contains(cameraId)) {
|
||||
devices.add(cameraId)
|
||||
sendAvailableDevicesChangedEvent()
|
||||
}
|
||||
}
|
||||
|
||||
override fun onCameraUnavailable(cameraId: String) {
|
||||
Log.i(TAG, "Camera #$cameraId: Unavailable!")
|
||||
if (devices.contains(cameraId) && !isDeviceConnected(cameraId)) {
|
||||
devices.remove(cameraId)
|
||||
sendAvailableDevicesChangedEvent()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override fun getName(): String = TAG
|
||||
|
||||
override fun initialize() {
|
||||
cameraManager.registerAvailabilityCallback(callback, null)
|
||||
}
|
||||
|
||||
override fun invalidate() {
|
||||
cameraManager.unregisterAvailabilityCallback(callback)
|
||||
super.invalidate()
|
||||
}
|
||||
|
||||
private fun getDevicesJson(): ReadableArray {
|
||||
val devices = Arguments.createArray()
|
||||
cameraManager.cameraIdList.forEach { cameraId ->
|
||||
val device = CameraDeviceDetails(cameraManager, cameraId)
|
||||
devices.pushMap(device.toMap())
|
||||
}
|
||||
return devices
|
||||
}
|
||||
|
||||
fun sendAvailableDevicesChangedEvent() {
|
||||
val eventEmitter = reactContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
|
||||
eventEmitter.emit("CameraDevicesChanged", getDevicesJson())
|
||||
}
|
||||
|
||||
override fun hasConstants(): Boolean = true
|
||||
|
||||
override fun getConstants(): MutableMap<String, Any> = mutableMapOf("availableCameraDevices" to getDevicesJson())
|
||||
|
||||
// Required for NativeEventEmitter, this is just a dummy implementation:
|
||||
@ReactMethod
|
||||
fun addListener(eventName: String) {}
|
||||
|
||||
@ReactMethod
|
||||
fun removeListeners(count: Int) {}
|
||||
}
|
@ -6,11 +6,11 @@ import com.facebook.react.bridge.ReactApplicationContext
|
||||
import com.facebook.react.uimanager.ViewManager
|
||||
|
||||
class CameraPackage : ReactPackage {
|
||||
override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
|
||||
return listOf(CameraViewModule(reactContext))
|
||||
}
|
||||
override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
|
||||
listOf(
|
||||
CameraViewModule(reactContext),
|
||||
CameraDevicesManager(reactContext)
|
||||
)
|
||||
|
||||
override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
|
||||
return listOf(CameraViewManager())
|
||||
}
|
||||
override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> = listOf(CameraViewManager())
|
||||
}
|
||||
|
@ -2,10 +2,10 @@ package com.mrousavy.camera
|
||||
|
||||
import android.os.Handler
|
||||
import android.os.HandlerThread
|
||||
import java.util.concurrent.Executor
|
||||
import kotlinx.coroutines.CoroutineDispatcher
|
||||
import kotlinx.coroutines.android.asCoroutineDispatcher
|
||||
import kotlinx.coroutines.asExecutor
|
||||
import java.util.concurrent.Executor
|
||||
|
||||
class CameraQueues {
|
||||
companion object {
|
||||
@ -32,4 +32,3 @@ class CameraQueues {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -5,10 +5,10 @@ import android.annotation.SuppressLint
|
||||
import android.content.pm.PackageManager
|
||||
import androidx.core.content.ContextCompat
|
||||
import com.facebook.react.bridge.*
|
||||
import com.mrousavy.camera.core.RecordingSession
|
||||
import com.mrousavy.camera.parsers.Torch
|
||||
import com.mrousavy.camera.parsers.VideoCodec
|
||||
import com.mrousavy.camera.parsers.VideoFileType
|
||||
import com.mrousavy.camera.core.RecordingSession
|
||||
import com.mrousavy.camera.utils.makeErrorMap
|
||||
import java.util.*
|
||||
|
||||
|
@ -15,10 +15,9 @@ import com.mrousavy.camera.core.CameraSession
|
||||
import com.mrousavy.camera.parsers.Flash
|
||||
import com.mrousavy.camera.parsers.QualityPrioritization
|
||||
import com.mrousavy.camera.utils.*
|
||||
import kotlinx.coroutines.*
|
||||
import java.io.File
|
||||
import java.io.FileOutputStream
|
||||
import java.io.OutputStream
|
||||
import kotlinx.coroutines.*
|
||||
|
||||
private const val TAG = "CameraView.takePhoto"
|
||||
|
||||
@ -36,12 +35,14 @@ suspend fun CameraView.takePhoto(optionsMap: ReadableMap): WritableMap {
|
||||
val flashMode = Flash.fromUnionValue(flash)
|
||||
val qualityPrioritizationMode = QualityPrioritization.fromUnionValue(qualityPrioritization)
|
||||
|
||||
val photo = cameraSession.takePhoto(qualityPrioritizationMode,
|
||||
flashMode,
|
||||
enableShutterSound,
|
||||
enableAutoRedEyeReduction,
|
||||
enableAutoStabilization,
|
||||
outputOrientation)
|
||||
val photo = cameraSession.takePhoto(
|
||||
qualityPrioritizationMode,
|
||||
flashMode,
|
||||
enableShutterSound,
|
||||
enableAutoRedEyeReduction,
|
||||
enableAutoStabilization,
|
||||
outputOrientation
|
||||
)
|
||||
|
||||
photo.use {
|
||||
Log.i(TAG, "Successfully captured ${photo.image.width} x ${photo.image.height} photo!")
|
||||
@ -83,10 +84,12 @@ private fun writePhotoToFile(photo: CameraSession.CapturedPhoto, file: File) {
|
||||
}
|
||||
}
|
||||
|
||||
private suspend fun savePhotoToFile(context: Context,
|
||||
cameraCharacteristics: CameraCharacteristics,
|
||||
photo: CameraSession.CapturedPhoto): String {
|
||||
return withContext(Dispatchers.IO) {
|
||||
private suspend fun savePhotoToFile(
|
||||
context: Context,
|
||||
cameraCharacteristics: CameraCharacteristics,
|
||||
photo: CameraSession.CapturedPhoto
|
||||
): String =
|
||||
withContext(Dispatchers.IO) {
|
||||
when (photo.format) {
|
||||
// When the format is JPEG or DEPTH JPEG we can simply save the bytes as-is
|
||||
ImageFormat.JPEG, ImageFormat.DEPTH_JPEG -> {
|
||||
@ -111,8 +114,8 @@ private suspend fun savePhotoToFile(context: Context,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private fun createFile(context: Context, extension: String): File {
|
||||
return File.createTempFile("mrousavy", extension, context.cacheDir).apply { deleteOnExit() }
|
||||
}
|
||||
private fun createFile(context: Context, extension: String): File =
|
||||
File.createTempFile("mrousavy", extension, context.cacheDir).apply {
|
||||
deleteOnExit()
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ import androidx.core.content.ContextCompat
|
||||
import com.facebook.react.bridge.ReadableMap
|
||||
import com.mrousavy.camera.core.CameraSession
|
||||
import com.mrousavy.camera.core.PreviewView
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import com.mrousavy.camera.extensions.containsAny
|
||||
import com.mrousavy.camera.extensions.installHierarchyFitter
|
||||
import com.mrousavy.camera.frameprocessor.FrameProcessor
|
||||
@ -22,7 +23,6 @@ import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.PixelFormat
|
||||
import com.mrousavy.camera.parsers.Torch
|
||||
import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import kotlinx.coroutines.CoroutineScope
|
||||
import kotlinx.coroutines.Dispatchers
|
||||
import kotlinx.coroutines.launch
|
||||
@ -42,7 +42,8 @@ class CameraView(context: Context) : FrameLayout(context) {
|
||||
const val TAG = "CameraView"
|
||||
|
||||
private val propsThatRequirePreviewReconfiguration = arrayListOf("cameraId")
|
||||
private val propsThatRequireSessionReconfiguration = arrayListOf("cameraId", "format", "photo", "video", "enableFrameProcessor", "pixelFormat")
|
||||
private val propsThatRequireSessionReconfiguration =
|
||||
arrayListOf("cameraId", "format", "photo", "video", "enableFrameProcessor", "pixelFormat")
|
||||
private val propsThatRequireFormatReconfiguration = arrayListOf("fps", "hdr", "videoStabilizationMode", "lowLightBoost")
|
||||
}
|
||||
|
||||
@ -52,18 +53,21 @@ class CameraView(context: Context) : FrameLayout(context) {
|
||||
var enableDepthData = false
|
||||
var enableHighQualityPhotos: Boolean? = null
|
||||
var enablePortraitEffectsMatteDelivery = false
|
||||
|
||||
// use-cases
|
||||
var photo: Boolean? = null
|
||||
var video: Boolean? = null
|
||||
var audio: Boolean? = null
|
||||
var enableFrameProcessor = false
|
||||
var pixelFormat: PixelFormat = PixelFormat.NATIVE
|
||||
|
||||
// props that require format reconfiguring
|
||||
var format: ReadableMap? = null
|
||||
var fps: Int? = null
|
||||
var videoStabilizationMode: VideoStabilizationMode? = null
|
||||
var hdr: Boolean? = null // nullable bool
|
||||
var lowLightBoost: Boolean? = null // nullable bool
|
||||
|
||||
// other props
|
||||
var isActive = false
|
||||
var torch: Torch = Torch.OFF
|
||||
@ -129,7 +133,7 @@ class CameraView(context: Context) : FrameLayout(context) {
|
||||
Log.i(TAG, "Props changed: $changedProps")
|
||||
try {
|
||||
val shouldReconfigurePreview = changedProps.containsAny(propsThatRequirePreviewReconfiguration)
|
||||
val shouldReconfigureSession = shouldReconfigurePreview || changedProps.containsAny(propsThatRequireSessionReconfiguration)
|
||||
val shouldReconfigureSession = shouldReconfigurePreview || changedProps.containsAny(propsThatRequireSessionReconfiguration)
|
||||
val shouldReconfigureFormat = shouldReconfigureSession || changedProps.containsAny(propsThatRequireFormatReconfiguration)
|
||||
val shouldReconfigureZoom = shouldReconfigureSession || changedProps.contains("zoom")
|
||||
val shouldReconfigureTorch = shouldReconfigureSession || changedProps.contains("torch")
|
||||
@ -182,10 +186,14 @@ class CameraView(context: Context) : FrameLayout(context) {
|
||||
val previewOutput = CameraOutputs.PreviewOutput(previewSurface)
|
||||
val photoOutput = if (photo == true) {
|
||||
CameraOutputs.PhotoOutput(targetPhotoSize)
|
||||
} else null
|
||||
} else {
|
||||
null
|
||||
}
|
||||
val videoOutput = if (video == true || enableFrameProcessor) {
|
||||
CameraOutputs.VideoOutput(targetVideoSize, video == true, enableFrameProcessor, pixelFormat.toImageFormat())
|
||||
} else null
|
||||
} else {
|
||||
null
|
||||
}
|
||||
|
||||
cameraSession.configureSession(cameraId, previewOutput, photoOutput, videoOutput)
|
||||
} catch (e: Throwable) {
|
||||
@ -215,13 +223,16 @@ class CameraView(context: Context) : FrameLayout(context) {
|
||||
@SuppressLint("ClickableViewAccessibility")
|
||||
private fun updateZoomGesture() {
|
||||
if (enableZoomGesture) {
|
||||
val scaleGestureDetector = ScaleGestureDetector(context, object: ScaleGestureDetector.SimpleOnScaleGestureListener() {
|
||||
override fun onScale(detector: ScaleGestureDetector): Boolean {
|
||||
zoom *= detector.scaleFactor
|
||||
cameraSession.setZoom(zoom)
|
||||
return true
|
||||
val scaleGestureDetector = ScaleGestureDetector(
|
||||
context,
|
||||
object : ScaleGestureDetector.SimpleOnScaleGestureListener() {
|
||||
override fun onScale(detector: ScaleGestureDetector): Boolean {
|
||||
zoom *= detector.scaleFactor
|
||||
cameraSession.setZoom(zoom)
|
||||
return true
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
setOnTouchListener { _, event ->
|
||||
scaleGestureDetector.onTouchEvent(event)
|
||||
}
|
||||
|
@ -12,9 +12,7 @@ import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
|
||||
@Suppress("unused")
|
||||
class CameraViewManager : ViewGroupManager<CameraView>() {
|
||||
public override fun createViewInstance(context: ThemedReactContext): CameraView {
|
||||
return CameraView(context)
|
||||
}
|
||||
public override fun createViewInstance(context: ThemedReactContext): CameraView = CameraView(context)
|
||||
|
||||
override fun onAfterUpdateTransaction(view: CameraView) {
|
||||
super.onAfterUpdateTransaction(view)
|
||||
@ -23,101 +21,110 @@ class CameraViewManager : ViewGroupManager<CameraView>() {
|
||||
cameraViewTransactions.remove(view)
|
||||
}
|
||||
|
||||
override fun getExportedCustomDirectEventTypeConstants(): MutableMap<String, Any>? {
|
||||
return MapBuilder.builder<String, Any>()
|
||||
override fun getExportedCustomDirectEventTypeConstants(): MutableMap<String, Any>? =
|
||||
MapBuilder.builder<String, Any>()
|
||||
.put("cameraViewReady", MapBuilder.of("registrationName", "onViewReady"))
|
||||
.put("cameraInitialized", MapBuilder.of("registrationName", "onInitialized"))
|
||||
.put("cameraError", MapBuilder.of("registrationName", "onError"))
|
||||
.build()
|
||||
}
|
||||
|
||||
override fun getName(): String {
|
||||
return TAG
|
||||
}
|
||||
override fun getName(): String = TAG
|
||||
|
||||
@ReactProp(name = "cameraId")
|
||||
fun setCameraId(view: CameraView, cameraId: String) {
|
||||
if (view.cameraId != cameraId)
|
||||
if (view.cameraId != cameraId) {
|
||||
addChangedPropToTransaction(view, "cameraId")
|
||||
}
|
||||
view.cameraId = cameraId
|
||||
}
|
||||
|
||||
@ReactProp(name = "photo")
|
||||
fun setPhoto(view: CameraView, photo: Boolean?) {
|
||||
if (view.photo != photo)
|
||||
if (view.photo != photo) {
|
||||
addChangedPropToTransaction(view, "photo")
|
||||
}
|
||||
view.photo = photo
|
||||
}
|
||||
|
||||
@ReactProp(name = "video")
|
||||
fun setVideo(view: CameraView, video: Boolean?) {
|
||||
if (view.video != video)
|
||||
if (view.video != video) {
|
||||
addChangedPropToTransaction(view, "video")
|
||||
}
|
||||
view.video = video
|
||||
}
|
||||
|
||||
@ReactProp(name = "audio")
|
||||
fun setAudio(view: CameraView, audio: Boolean?) {
|
||||
if (view.audio != audio)
|
||||
if (view.audio != audio) {
|
||||
addChangedPropToTransaction(view, "audio")
|
||||
}
|
||||
view.audio = audio
|
||||
}
|
||||
|
||||
@ReactProp(name = "enableFrameProcessor")
|
||||
fun setEnableFrameProcessor(view: CameraView, enableFrameProcessor: Boolean) {
|
||||
if (view.enableFrameProcessor != enableFrameProcessor)
|
||||
if (view.enableFrameProcessor != enableFrameProcessor) {
|
||||
addChangedPropToTransaction(view, "enableFrameProcessor")
|
||||
}
|
||||
view.enableFrameProcessor = enableFrameProcessor
|
||||
}
|
||||
|
||||
@ReactProp(name = "pixelFormat")
|
||||
fun setPixelFormat(view: CameraView, pixelFormat: String?) {
|
||||
val newPixelFormat = PixelFormat.fromUnionValue(pixelFormat)
|
||||
if (view.pixelFormat != newPixelFormat)
|
||||
if (view.pixelFormat != newPixelFormat) {
|
||||
addChangedPropToTransaction(view, "pixelFormat")
|
||||
}
|
||||
view.pixelFormat = newPixelFormat ?: PixelFormat.NATIVE
|
||||
}
|
||||
|
||||
@ReactProp(name = "enableDepthData")
|
||||
fun setEnableDepthData(view: CameraView, enableDepthData: Boolean) {
|
||||
if (view.enableDepthData != enableDepthData)
|
||||
if (view.enableDepthData != enableDepthData) {
|
||||
addChangedPropToTransaction(view, "enableDepthData")
|
||||
}
|
||||
view.enableDepthData = enableDepthData
|
||||
}
|
||||
|
||||
@ReactProp(name = "enableZoomGesture")
|
||||
fun setEnableZoomGesture(view: CameraView, enableZoomGesture: Boolean) {
|
||||
if (view.enableZoomGesture != enableZoomGesture)
|
||||
if (view.enableZoomGesture != enableZoomGesture) {
|
||||
addChangedPropToTransaction(view, "enableZoomGesture")
|
||||
}
|
||||
view.enableZoomGesture = enableZoomGesture
|
||||
}
|
||||
|
||||
@ReactProp(name = "videoStabilizationMode")
|
||||
fun setVideoStabilizationMode(view: CameraView, videoStabilizationMode: String?) {
|
||||
val newMode = VideoStabilizationMode.fromUnionValue(videoStabilizationMode)
|
||||
if (view.videoStabilizationMode != newMode)
|
||||
if (view.videoStabilizationMode != newMode) {
|
||||
addChangedPropToTransaction(view, "videoStabilizationMode")
|
||||
}
|
||||
view.videoStabilizationMode = newMode
|
||||
}
|
||||
|
||||
@ReactProp(name = "enableHighQualityPhotos")
|
||||
fun setEnableHighQualityPhotos(view: CameraView, enableHighQualityPhotos: Boolean?) {
|
||||
if (view.enableHighQualityPhotos != enableHighQualityPhotos)
|
||||
if (view.enableHighQualityPhotos != enableHighQualityPhotos) {
|
||||
addChangedPropToTransaction(view, "enableHighQualityPhotos")
|
||||
}
|
||||
view.enableHighQualityPhotos = enableHighQualityPhotos
|
||||
}
|
||||
|
||||
@ReactProp(name = "enablePortraitEffectsMatteDelivery")
|
||||
fun setEnablePortraitEffectsMatteDelivery(view: CameraView, enablePortraitEffectsMatteDelivery: Boolean) {
|
||||
if (view.enablePortraitEffectsMatteDelivery != enablePortraitEffectsMatteDelivery)
|
||||
if (view.enablePortraitEffectsMatteDelivery != enablePortraitEffectsMatteDelivery) {
|
||||
addChangedPropToTransaction(view, "enablePortraitEffectsMatteDelivery")
|
||||
}
|
||||
view.enablePortraitEffectsMatteDelivery = enablePortraitEffectsMatteDelivery
|
||||
}
|
||||
|
||||
@ReactProp(name = "format")
|
||||
fun setFormat(view: CameraView, format: ReadableMap?) {
|
||||
if (view.format != format)
|
||||
if (view.format != format) {
|
||||
addChangedPropToTransaction(view, "format")
|
||||
}
|
||||
view.format = format
|
||||
}
|
||||
|
||||
@ -126,53 +133,60 @@ class CameraViewManager : ViewGroupManager<CameraView>() {
|
||||
// of type "Int?" the react bridge throws an error.
|
||||
@ReactProp(name = "fps", defaultInt = -1)
|
||||
fun setFps(view: CameraView, fps: Int) {
|
||||
if (view.fps != fps)
|
||||
if (view.fps != fps) {
|
||||
addChangedPropToTransaction(view, "fps")
|
||||
}
|
||||
view.fps = if (fps > 0) fps else null
|
||||
}
|
||||
|
||||
@ReactProp(name = "hdr")
|
||||
fun setHdr(view: CameraView, hdr: Boolean?) {
|
||||
if (view.hdr != hdr)
|
||||
if (view.hdr != hdr) {
|
||||
addChangedPropToTransaction(view, "hdr")
|
||||
}
|
||||
view.hdr = hdr
|
||||
}
|
||||
|
||||
@ReactProp(name = "lowLightBoost")
|
||||
fun setLowLightBoost(view: CameraView, lowLightBoost: Boolean?) {
|
||||
if (view.lowLightBoost != lowLightBoost)
|
||||
if (view.lowLightBoost != lowLightBoost) {
|
||||
addChangedPropToTransaction(view, "lowLightBoost")
|
||||
}
|
||||
view.lowLightBoost = lowLightBoost
|
||||
}
|
||||
|
||||
@ReactProp(name = "isActive")
|
||||
fun setIsActive(view: CameraView, isActive: Boolean) {
|
||||
if (view.isActive != isActive)
|
||||
if (view.isActive != isActive) {
|
||||
addChangedPropToTransaction(view, "isActive")
|
||||
}
|
||||
view.isActive = isActive
|
||||
}
|
||||
|
||||
@ReactProp(name = "torch")
|
||||
fun setTorch(view: CameraView, torch: String) {
|
||||
val newMode = Torch.fromUnionValue(torch)
|
||||
if (view.torch != newMode)
|
||||
if (view.torch != newMode) {
|
||||
addChangedPropToTransaction(view, "torch")
|
||||
}
|
||||
view.torch = newMode
|
||||
}
|
||||
|
||||
@ReactProp(name = "zoom")
|
||||
fun setZoom(view: CameraView, zoom: Double) {
|
||||
val zoomFloat = zoom.toFloat()
|
||||
if (view.zoom != zoomFloat)
|
||||
if (view.zoom != zoomFloat) {
|
||||
addChangedPropToTransaction(view, "zoom")
|
||||
}
|
||||
view.zoom = zoomFloat
|
||||
}
|
||||
|
||||
@ReactProp(name = "orientation")
|
||||
fun setOrientation(view: CameraView, orientation: String?) {
|
||||
val newMode = Orientation.fromUnionValue(orientation)
|
||||
if (view.orientation != newMode)
|
||||
if (view.orientation != newMode) {
|
||||
addChangedPropToTransaction(view, "orientation")
|
||||
}
|
||||
view.orientation = newMode
|
||||
}
|
||||
|
||||
|
@ -1,9 +1,7 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.Manifest
|
||||
import android.content.Context
|
||||
import android.content.pm.PackageManager
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.util.Log
|
||||
import androidx.core.content.ContextCompat
|
||||
import com.facebook.react.bridge.*
|
||||
@ -11,22 +9,21 @@ import com.facebook.react.module.annotations.ReactModule
|
||||
import com.facebook.react.modules.core.PermissionAwareActivity
|
||||
import com.facebook.react.modules.core.PermissionListener
|
||||
import com.facebook.react.uimanager.UIManagerHelper
|
||||
import com.mrousavy.camera.core.CameraDeviceDetails
|
||||
import com.mrousavy.camera.frameprocessor.VisionCameraInstaller
|
||||
import com.mrousavy.camera.frameprocessor.VisionCameraProxy
|
||||
import com.mrousavy.camera.parsers.*
|
||||
import com.mrousavy.camera.utils.*
|
||||
import kotlinx.coroutines.*
|
||||
import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
import kotlin.coroutines.suspendCoroutine
|
||||
import kotlinx.coroutines.*
|
||||
|
||||
@ReactModule(name = CameraViewModule.TAG)
|
||||
@Suppress("unused")
|
||||
class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJavaModule(reactContext) {
|
||||
class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||
companion object {
|
||||
const val TAG = "CameraView"
|
||||
var RequestCode = 10
|
||||
var sharedRequestCode = 10
|
||||
}
|
||||
|
||||
private val coroutineScope = CoroutineScope(Dispatchers.Default) // TODO: or Dispatchers.Main?
|
||||
@ -38,25 +35,32 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
|
||||
}
|
||||
}
|
||||
|
||||
override fun getName(): String {
|
||||
return TAG
|
||||
}
|
||||
override fun getName(): String = TAG
|
||||
|
||||
private suspend fun findCameraView(viewId: Int): CameraView {
|
||||
return suspendCoroutine { continuation ->
|
||||
private suspend fun findCameraView(viewId: Int): CameraView =
|
||||
suspendCoroutine { continuation ->
|
||||
UiThreadUtil.runOnUiThread {
|
||||
Log.d(TAG, "Finding view $viewId...")
|
||||
val view = if (reactApplicationContext != null) UIManagerHelper.getUIManager(reactApplicationContext, viewId)?.resolveView(viewId) as CameraView? else null
|
||||
Log.d(TAG, if (reactApplicationContext != null) "Found view $viewId!" else "Couldn't find view $viewId!")
|
||||
if (view != null) continuation.resume(view)
|
||||
else continuation.resumeWithException(ViewNotFoundError(viewId))
|
||||
val view = if (reactApplicationContext != null) {
|
||||
UIManagerHelper.getUIManager(
|
||||
reactApplicationContext,
|
||||
viewId
|
||||
)?.resolveView(viewId) as CameraView?
|
||||
} else {
|
||||
null
|
||||
}
|
||||
Log.d(TAG, if (reactApplicationContext != null) "Found view $viewId!" else "Couldn't find view $viewId!")
|
||||
if (view != null) {
|
||||
continuation.resume(view)
|
||||
} else {
|
||||
continuation.resumeWithException(ViewNotFoundError(viewId))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ReactMethod(isBlockingSynchronousMethod = true)
|
||||
fun installFrameProcessorBindings(): Boolean {
|
||||
return try {
|
||||
fun installFrameProcessorBindings(): Boolean =
|
||||
try {
|
||||
val proxy = VisionCameraProxy(reactApplicationContext)
|
||||
VisionCameraInstaller.install(proxy)
|
||||
true
|
||||
@ -64,7 +68,6 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
|
||||
Log.e(TAG, "Failed to install Frame Processor JSI Bindings!", e)
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
@ReactMethod
|
||||
fun takePhoto(viewTag: Int, options: ReadableMap, promise: Promise) {
|
||||
@ -87,7 +90,8 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
|
||||
val map = makeErrorMap("${error.domain}/${error.id}", error.message, error)
|
||||
onRecordCallback(null, map)
|
||||
} catch (error: Throwable) {
|
||||
val map = makeErrorMap("capture/unknown", "An unknown error occurred while trying to start a video recording! ${error.message}", error)
|
||||
val map =
|
||||
makeErrorMap("capture/unknown", "An unknown error occurred while trying to start a video recording! ${error.message}", error)
|
||||
onRecordCallback(null, map)
|
||||
}
|
||||
}
|
||||
@ -137,22 +141,6 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
|
||||
}
|
||||
}
|
||||
|
||||
@ReactMethod
|
||||
fun getAvailableCameraDevices(promise: Promise) {
|
||||
coroutineScope.launch {
|
||||
withPromise(promise) {
|
||||
val manager = reactApplicationContext.getSystemService(Context.CAMERA_SERVICE) as CameraManager
|
||||
|
||||
val devices = Arguments.createArray()
|
||||
manager.cameraIdList.forEach { cameraId ->
|
||||
val device = CameraDeviceDetails(manager, cameraId)
|
||||
devices.pushMap(device.toMap())
|
||||
}
|
||||
promise.resolve(devices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private fun canRequestPermission(permission: String): Boolean {
|
||||
val activity = currentActivity as? PermissionAwareActivity
|
||||
return activity?.shouldShowRequestPermissionRationale(permission) ?: false
|
||||
@ -182,7 +170,7 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
|
||||
fun requestCameraPermission(promise: Promise) {
|
||||
val activity = reactApplicationContext.currentActivity
|
||||
if (activity is PermissionAwareActivity) {
|
||||
val currentRequestCode = RequestCode++
|
||||
val currentRequestCode = sharedRequestCode++
|
||||
val listener = PermissionListener { requestCode: Int, _: Array<String>, grantResults: IntArray ->
|
||||
if (requestCode == currentRequestCode) {
|
||||
val permissionStatus = if (grantResults.isNotEmpty()) grantResults[0] else PackageManager.PERMISSION_DENIED
|
||||
@ -202,7 +190,7 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
|
||||
fun requestMicrophonePermission(promise: Promise) {
|
||||
val activity = reactApplicationContext.currentActivity
|
||||
if (activity is PermissionAwareActivity) {
|
||||
val currentRequestCode = RequestCode++
|
||||
val currentRequestCode = sharedRequestCode++
|
||||
val listener = PermissionListener { requestCode: Int, _: Array<String>, grantResults: IntArray ->
|
||||
if (requestCode == currentRequestCode) {
|
||||
val permissionStatus = if (grantResults.isNotEmpty()) grantResults[0] else PackageManager.PERMISSION_DENIED
|
||||
|
@ -1,7 +1,7 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import com.mrousavy.camera.parsers.CameraDeviceError
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import com.mrousavy.camera.parsers.CameraDeviceError
|
||||
|
||||
abstract class CameraError(
|
||||
// example: "permission"
|
||||
@ -16,30 +16,53 @@ abstract class CameraError(
|
||||
val CameraError.code: String
|
||||
get() = "$domain/$id"
|
||||
|
||||
class MicrophonePermissionError : CameraError("permission", "microphone-permission-denied", "The Microphone permission was denied! If you want to record Video without sound, pass `audio={false}`.")
|
||||
class MicrophonePermissionError :
|
||||
CameraError(
|
||||
"permission",
|
||||
"microphone-permission-denied",
|
||||
"The Microphone permission was denied! If you want to record Video without sound, pass `audio={false}`."
|
||||
)
|
||||
class CameraPermissionError : CameraError("permission", "camera-permission-denied", "The Camera permission was denied!")
|
||||
|
||||
class InvalidTypeScriptUnionError(unionName: String, unionValue: String) : CameraError("parameter", "invalid-parameter", "The given value for $unionName could not be parsed! (Received: $unionValue)")
|
||||
class InvalidTypeScriptUnionError(unionName: String, unionValue: String) :
|
||||
CameraError("parameter", "invalid-parameter", "The given value for $unionName could not be parsed! (Received: $unionValue)")
|
||||
|
||||
class NoCameraDeviceError : CameraError("device", "no-device", "No device was set! Use `getAvailableCameraDevices()` to select a suitable Camera device.")
|
||||
class PixelFormatNotSupportedError(format: String) : CameraError("device", "pixel-format-not-supported", "The pixelFormat $format is not supported on the given Camera Device!")
|
||||
class NoCameraDeviceError :
|
||||
CameraError("device", "no-device", "No device was set! Use `getAvailableCameraDevices()` to select a suitable Camera device.")
|
||||
class PixelFormatNotSupportedError(format: String) :
|
||||
CameraError("device", "pixel-format-not-supported", "The pixelFormat $format is not supported on the given Camera Device!")
|
||||
|
||||
class CameraNotReadyError : CameraError("session", "camera-not-ready", "The Camera is not ready yet! Wait for the onInitialized() callback!")
|
||||
class CameraCannotBeOpenedError(cameraId: String, error: CameraDeviceError) : CameraError("session", "camera-cannot-be-opened", "The given Camera device (id: $cameraId) could not be opened! Error: $error")
|
||||
class CameraSessionCannotBeConfiguredError(cameraId: String, outputs: CameraOutputs) : CameraError("session", "cannot-create-session", "Failed to create a Camera Session for Camera $cameraId! Outputs: $outputs")
|
||||
class CameraDisconnectedError(cameraId: String, error: CameraDeviceError) : CameraError("session", "camera-has-been-disconnected", "The given Camera device (id: $cameraId) has been disconnected! Error: $error")
|
||||
class CameraNotReadyError :
|
||||
CameraError("session", "camera-not-ready", "The Camera is not ready yet! Wait for the onInitialized() callback!")
|
||||
class CameraCannotBeOpenedError(cameraId: String, error: CameraDeviceError) :
|
||||
CameraError("session", "camera-cannot-be-opened", "The given Camera device (id: $cameraId) could not be opened! Error: $error")
|
||||
class CameraSessionCannotBeConfiguredError(cameraId: String, outputs: CameraOutputs) :
|
||||
CameraError("session", "cannot-create-session", "Failed to create a Camera Session for Camera $cameraId! Outputs: $outputs")
|
||||
class CameraDisconnectedError(cameraId: String, error: CameraDeviceError) :
|
||||
CameraError("session", "camera-has-been-disconnected", "The given Camera device (id: $cameraId) has been disconnected! Error: $error")
|
||||
|
||||
class VideoNotEnabledError : CameraError("capture", "video-not-enabled", "Video capture is disabled! Pass `video={true}` to enable video recordings.")
|
||||
class PhotoNotEnabledError : CameraError("capture", "photo-not-enabled", "Photo capture is disabled! Pass `photo={true}` to enable photo capture.")
|
||||
class CaptureAbortedError(wasImageCaptured: Boolean) : CameraError("capture", "aborted", "The image capture was aborted! Was Image captured: $wasImageCaptured")
|
||||
class UnknownCaptureError(wasImageCaptured: Boolean) : CameraError("capture", "unknown", "An unknown error occurred while trying to capture an Image! Was Image captured: $wasImageCaptured")
|
||||
class VideoNotEnabledError :
|
||||
CameraError("capture", "video-not-enabled", "Video capture is disabled! Pass `video={true}` to enable video recordings.")
|
||||
class PhotoNotEnabledError :
|
||||
CameraError("capture", "photo-not-enabled", "Photo capture is disabled! Pass `photo={true}` to enable photo capture.")
|
||||
class CaptureAbortedError(wasImageCaptured: Boolean) :
|
||||
CameraError("capture", "aborted", "The image capture was aborted! Was Image captured: $wasImageCaptured")
|
||||
class UnknownCaptureError(wasImageCaptured: Boolean) :
|
||||
CameraError("capture", "unknown", "An unknown error occurred while trying to capture an Image! Was Image captured: $wasImageCaptured")
|
||||
|
||||
class RecorderError(name: String, extra: Int) : CameraError("capture", "recorder-error", "An error occured while recording a video! $name $extra")
|
||||
class RecorderError(name: String, extra: Int) :
|
||||
CameraError("capture", "recorder-error", "An error occured while recording a video! $name $extra")
|
||||
|
||||
class NoRecordingInProgressError : CameraError("capture", "no-recording-in-progress", "There was no active video recording in progress! Did you call stopRecording() twice?")
|
||||
class RecordingInProgressError : CameraError("capture", "recording-in-progress", "There is already an active video recording in progress! Did you call startRecording() twice?")
|
||||
class NoRecordingInProgressError :
|
||||
CameraError("capture", "no-recording-in-progress", "There was no active video recording in progress! Did you call stopRecording() twice?")
|
||||
class RecordingInProgressError :
|
||||
CameraError(
|
||||
"capture",
|
||||
"recording-in-progress",
|
||||
"There is already an active video recording in progress! Did you call startRecording() twice?"
|
||||
)
|
||||
|
||||
class ViewNotFoundError(viewId: Int) : CameraError("system", "view-not-found", "The given view (ID $viewId) was not found in the view manager.")
|
||||
class ViewNotFoundError(viewId: Int) :
|
||||
CameraError("system", "view-not-found", "The given view (ID $viewId) was not found in the view manager.")
|
||||
|
||||
class UnknownCameraError(cause: Throwable?) : CameraError("unknown", "unknown", cause?.message ?: "An unknown camera error occured.", cause)
|
||||
|
||||
|
@ -14,10 +14,10 @@ import com.facebook.react.bridge.ReadableMap
|
||||
import com.mrousavy.camera.extensions.bigger
|
||||
import com.mrousavy.camera.extensions.getPhotoSizes
|
||||
import com.mrousavy.camera.extensions.getVideoSizes
|
||||
import com.mrousavy.camera.parsers.PixelFormat
|
||||
import com.mrousavy.camera.parsers.HardwareLevel
|
||||
import com.mrousavy.camera.parsers.LensFacing
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.PixelFormat
|
||||
import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
import kotlin.math.PI
|
||||
import kotlin.math.atan
|
||||
@ -29,50 +29,64 @@ class CameraDeviceDetails(private val cameraManager: CameraManager, private val
|
||||
private val extensions = getSupportedExtensions()
|
||||
|
||||
// device characteristics
|
||||
private val isMultiCam = capabilities.contains(11 /* TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA */)
|
||||
private val supportsDepthCapture = capabilities.contains(8 /* TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT */)
|
||||
private val isMultiCam = capabilities.contains(11) // TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA
|
||||
private val supportsDepthCapture = capabilities.contains(8) // TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT
|
||||
private val supportsRawCapture = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_RAW)
|
||||
private val supportsLowLightBoost = extensions.contains(4 /* TODO: CameraExtensionCharacteristics.EXTENSION_NIGHT */)
|
||||
private val supportsLowLightBoost = extensions.contains(4) // TODO: CameraExtensionCharacteristics.EXTENSION_NIGHT
|
||||
private val lensFacing = LensFacing.fromCameraCharacteristics(characteristics)
|
||||
private val hasFlash = characteristics.get(CameraCharacteristics.FLASH_INFO_AVAILABLE) ?: false
|
||||
private val focalLengths = characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_FOCAL_LENGTHS) ?: floatArrayOf(35f /* 35mm default */)
|
||||
private val focalLengths =
|
||||
characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_FOCAL_LENGTHS)
|
||||
// 35mm is the film standard sensor size
|
||||
?: floatArrayOf(35f)
|
||||
private val sensorSize = characteristics.get(CameraCharacteristics.SENSOR_INFO_PHYSICAL_SIZE)!!
|
||||
private val sensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION)!!
|
||||
private val name = (if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) characteristics.get(CameraCharacteristics.INFO_VERSION)
|
||||
else null) ?: "$lensFacing (${cameraId})"
|
||||
private val name = (
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
|
||||
characteristics.get(CameraCharacteristics.INFO_VERSION)
|
||||
} else {
|
||||
null
|
||||
}
|
||||
) ?: "$lensFacing ($cameraId)"
|
||||
|
||||
// "formats" (all possible configurations for this device)
|
||||
private val zoomRange = (if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) characteristics.get(CameraCharacteristics.CONTROL_ZOOM_RATIO_RANGE)
|
||||
else null) ?: Range(1f, characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_MAX_DIGITAL_ZOOM) ?: 1f)
|
||||
private val zoomRange = (
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
|
||||
characteristics.get(CameraCharacteristics.CONTROL_ZOOM_RATIO_RANGE)
|
||||
} else {
|
||||
null
|
||||
}
|
||||
) ?: Range(1f, characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_MAX_DIGITAL_ZOOM) ?: 1f)
|
||||
private val minZoom = zoomRange.lower.toDouble()
|
||||
private val maxZoom = zoomRange.upper.toDouble()
|
||||
|
||||
private val cameraConfig = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
|
||||
private val isoRange = characteristics.get(CameraCharacteristics.SENSOR_INFO_SENSITIVITY_RANGE) ?: Range(0, 0)
|
||||
private val digitalStabilizationModes = characteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES) ?: IntArray(0)
|
||||
private val opticalStabilizationModes = characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION) ?: IntArray(0)
|
||||
private val supportsPhotoHdr = extensions.contains(3 /* TODO: CameraExtensionCharacteristics.EXTENSION_HDR */)
|
||||
private val digitalStabilizationModes =
|
||||
characteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES) ?: IntArray(0)
|
||||
private val opticalStabilizationModes =
|
||||
characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION) ?: IntArray(0)
|
||||
private val supportsPhotoHdr = extensions.contains(3) // TODO: CameraExtensionCharacteristics.EXTENSION_HDR
|
||||
private val supportsVideoHdr = getHasVideoHdr()
|
||||
|
||||
private val videoFormat = ImageFormat.YUV_420_888
|
||||
|
||||
// get extensions (HDR, Night Mode, ..)
|
||||
private fun getSupportedExtensions(): List<Int> {
|
||||
return if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
|
||||
private fun getSupportedExtensions(): List<Int> =
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
|
||||
val extensions = cameraManager.getCameraExtensionCharacteristics(cameraId)
|
||||
extensions.supportedExtensions
|
||||
} else {
|
||||
emptyList()
|
||||
}
|
||||
}
|
||||
|
||||
private fun getHasVideoHdr(): Boolean {
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
|
||||
if (capabilities.contains(CameraMetadata.REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT)) {
|
||||
val availableProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
|
||||
?: DynamicRangeProfiles(LongArray(0))
|
||||
return availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HLG10)
|
||||
|| availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HDR10)
|
||||
return availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HLG10) ||
|
||||
availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HDR10)
|
||||
}
|
||||
}
|
||||
return false
|
||||
@ -117,16 +131,10 @@ class CameraDeviceDetails(private val cameraManager: CameraManager, private val
|
||||
return deviceTypes
|
||||
}
|
||||
|
||||
private fun getFieldOfView(): Double {
|
||||
return 2 * atan(sensorSize.bigger / (focalLengths[0] * 2)) * (180 / PI)
|
||||
}
|
||||
private fun getFieldOfView(): Double = 2 * atan(sensorSize.bigger / (focalLengths[0] * 2)) * (180 / PI)
|
||||
|
||||
private fun getVideoSizes(): List<Size> {
|
||||
return characteristics.getVideoSizes(cameraId, videoFormat)
|
||||
}
|
||||
private fun getPhotoSizes(): List<Size> {
|
||||
return characteristics.getPhotoSizes(ImageFormat.JPEG)
|
||||
}
|
||||
private fun getVideoSizes(): List<Size> = characteristics.getVideoSizes(cameraId, videoFormat)
|
||||
private fun getPhotoSizes(): List<Size> = characteristics.getPhotoSizes(ImageFormat.JPEG)
|
||||
|
||||
private fun getFormats(): ReadableArray {
|
||||
val array = Arguments.createArray()
|
||||
|
@ -25,6 +25,7 @@ import com.mrousavy.camera.PhotoNotEnabledError
|
||||
import com.mrousavy.camera.RecorderError
|
||||
import com.mrousavy.camera.RecordingInProgressError
|
||||
import com.mrousavy.camera.VideoNotEnabledError
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import com.mrousavy.camera.extensions.capture
|
||||
import com.mrousavy.camera.extensions.createCaptureSession
|
||||
import com.mrousavy.camera.extensions.createPhotoCaptureRequest
|
||||
@ -37,19 +38,23 @@ import com.mrousavy.camera.parsers.QualityPrioritization
|
||||
import com.mrousavy.camera.parsers.VideoCodec
|
||||
import com.mrousavy.camera.parsers.VideoFileType
|
||||
import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import java.io.Closeable
|
||||
import java.util.concurrent.CancellationException
|
||||
import kotlin.coroutines.CoroutineContext
|
||||
import kotlinx.coroutines.CoroutineScope
|
||||
import kotlinx.coroutines.launch
|
||||
import kotlinx.coroutines.sync.Mutex
|
||||
import kotlinx.coroutines.sync.withLock
|
||||
import java.io.Closeable
|
||||
import java.util.concurrent.CancellationException
|
||||
import kotlin.coroutines.CoroutineContext
|
||||
|
||||
class CameraSession(private val context: Context,
|
||||
private val cameraManager: CameraManager,
|
||||
private val onInitialized: () -> Unit,
|
||||
private val onError: (e: Throwable) -> Unit): CoroutineScope, Closeable, CameraOutputs.Callback, CameraManager.AvailabilityCallback() {
|
||||
class CameraSession(
|
||||
private val context: Context,
|
||||
private val cameraManager: CameraManager,
|
||||
private val onInitialized: () -> Unit,
|
||||
private val onError: (e: Throwable) -> Unit
|
||||
) : CameraManager.AvailabilityCallback(),
|
||||
CoroutineScope,
|
||||
Closeable,
|
||||
CameraOutputs.Callback {
|
||||
companion object {
|
||||
private const val TAG = "CameraSession"
|
||||
|
||||
@ -57,11 +62,13 @@ class CameraSession(private val context: Context,
|
||||
private val CAN_SET_FPS = !Build.MANUFACTURER.equals("samsung", true)
|
||||
}
|
||||
|
||||
data class CapturedPhoto(val image: Image,
|
||||
val metadata: TotalCaptureResult,
|
||||
val orientation: Orientation,
|
||||
val isMirrored: Boolean,
|
||||
val format: Int): Closeable {
|
||||
data class CapturedPhoto(
|
||||
val image: Image,
|
||||
val metadata: TotalCaptureResult,
|
||||
val orientation: Orientation,
|
||||
val isMirrored: Boolean,
|
||||
val format: Int
|
||||
) : Closeable {
|
||||
override fun close() {
|
||||
image.close()
|
||||
}
|
||||
@ -92,6 +99,7 @@ class CameraSession(private val context: Context,
|
||||
private val mutex = Mutex()
|
||||
private var isRunning = false
|
||||
private var enableTorch = false
|
||||
|
||||
// Video Outputs
|
||||
private var recording: RecordingSession? = null
|
||||
set(value) {
|
||||
@ -127,18 +135,22 @@ class CameraSession(private val context: Context,
|
||||
return Orientation.fromRotationDegrees(sensorRotation)
|
||||
}
|
||||
|
||||
fun configureSession(cameraId: String,
|
||||
preview: CameraOutputs.PreviewOutput? = null,
|
||||
photo: CameraOutputs.PhotoOutput? = null,
|
||||
video: CameraOutputs.VideoOutput? = null) {
|
||||
fun configureSession(
|
||||
cameraId: String,
|
||||
preview: CameraOutputs.PreviewOutput? = null,
|
||||
photo: CameraOutputs.PhotoOutput? = null,
|
||||
video: CameraOutputs.VideoOutput? = null
|
||||
) {
|
||||
Log.i(TAG, "Configuring Session for Camera $cameraId...")
|
||||
val outputs = CameraOutputs(cameraId,
|
||||
val outputs = CameraOutputs(
|
||||
cameraId,
|
||||
cameraManager,
|
||||
preview,
|
||||
photo,
|
||||
video,
|
||||
hdr == true,
|
||||
this)
|
||||
this
|
||||
)
|
||||
if (this.cameraId == cameraId && this.outputs == outputs && isActive == isRunning) {
|
||||
Log.i(TAG, "Nothing changed in configuration, canceling..")
|
||||
}
|
||||
@ -156,10 +168,12 @@ class CameraSession(private val context: Context,
|
||||
}
|
||||
}
|
||||
|
||||
fun configureFormat(fps: Int? = null,
|
||||
videoStabilizationMode: VideoStabilizationMode? = null,
|
||||
hdr: Boolean? = null,
|
||||
lowLightBoost: Boolean? = null) {
|
||||
fun configureFormat(
|
||||
fps: Int? = null,
|
||||
videoStabilizationMode: VideoStabilizationMode? = null,
|
||||
hdr: Boolean? = null,
|
||||
lowLightBoost: Boolean? = null
|
||||
) {
|
||||
Log.i(TAG, "Setting Format (fps: $fps | videoStabilization: $videoStabilizationMode | hdr: $hdr | lowLightBoost: $lowLightBoost)...")
|
||||
this.fps = fps
|
||||
this.videoStabilizationMode = videoStabilizationMode
|
||||
@ -170,18 +184,23 @@ class CameraSession(private val context: Context,
|
||||
val currentOutputs = outputs
|
||||
if (currentOutputs != null && currentOutputs.enableHdr != hdr) {
|
||||
// Update existing HDR for Outputs
|
||||
this.outputs = CameraOutputs(currentOutputs.cameraId,
|
||||
this.outputs = CameraOutputs(
|
||||
currentOutputs.cameraId,
|
||||
cameraManager,
|
||||
currentOutputs.preview,
|
||||
currentOutputs.photo,
|
||||
currentOutputs.video,
|
||||
hdr,
|
||||
this)
|
||||
this
|
||||
)
|
||||
needsReconfiguration = true
|
||||
}
|
||||
launch {
|
||||
if (needsReconfiguration) startRunning()
|
||||
else updateRepeatingRequest()
|
||||
if (needsReconfiguration) {
|
||||
startRunning()
|
||||
} else {
|
||||
updateRepeatingRequest()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -208,12 +227,14 @@ class CameraSession(private val context: Context,
|
||||
videoPipeline.setFrameProcessorOutput(this.frameProcessor)
|
||||
}
|
||||
|
||||
suspend fun takePhoto(qualityPrioritization: QualityPrioritization,
|
||||
flashMode: Flash,
|
||||
enableShutterSound: Boolean,
|
||||
enableRedEyeReduction: Boolean,
|
||||
enableAutoStabilization: Boolean,
|
||||
outputOrientation: Orientation): CapturedPhoto {
|
||||
suspend fun takePhoto(
|
||||
qualityPrioritization: QualityPrioritization,
|
||||
flashMode: Flash,
|
||||
enableShutterSound: Boolean,
|
||||
enableRedEyeReduction: Boolean,
|
||||
enableAutoStabilization: Boolean,
|
||||
outputOrientation: Orientation
|
||||
): CapturedPhoto {
|
||||
val captureSession = captureSession ?: throw CameraNotReadyError()
|
||||
val outputs = outputs ?: throw CameraNotReadyError()
|
||||
|
||||
@ -223,14 +244,16 @@ class CameraSession(private val context: Context,
|
||||
|
||||
val cameraCharacteristics = cameraManager.getCameraCharacteristics(captureSession.device.id)
|
||||
val orientation = outputOrientation.toSensorRelativeOrientation(cameraCharacteristics)
|
||||
val captureRequest = captureSession.device.createPhotoCaptureRequest(cameraManager,
|
||||
photoOutput.surface,
|
||||
zoom,
|
||||
qualityPrioritization,
|
||||
flashMode,
|
||||
enableRedEyeReduction,
|
||||
enableAutoStabilization,
|
||||
orientation)
|
||||
val captureRequest = captureSession.device.createPhotoCaptureRequest(
|
||||
cameraManager,
|
||||
photoOutput.surface,
|
||||
zoom,
|
||||
qualityPrioritization,
|
||||
flashMode,
|
||||
enableRedEyeReduction,
|
||||
enableAutoStabilization,
|
||||
orientation
|
||||
)
|
||||
Log.i(TAG, "Photo capture 1/3 - starting capture...")
|
||||
val result = captureSession.capture(captureRequest, enableShutterSound)
|
||||
val timestamp = result[CaptureResult.SENSOR_TIMESTAMP]!!
|
||||
@ -252,11 +275,13 @@ class CameraSession(private val context: Context,
|
||||
photoOutputSynchronizer.set(image.timestamp, image)
|
||||
}
|
||||
|
||||
suspend fun startRecording(enableAudio: Boolean,
|
||||
codec: VideoCodec,
|
||||
fileType: VideoFileType,
|
||||
callback: (video: RecordingSession.Video) -> Unit,
|
||||
onError: (error: RecorderError) -> Unit) {
|
||||
suspend fun startRecording(
|
||||
enableAudio: Boolean,
|
||||
codec: VideoCodec,
|
||||
fileType: VideoFileType,
|
||||
callback: (video: RecordingSession.Video) -> Unit,
|
||||
onError: (error: RecorderError) -> Unit
|
||||
) {
|
||||
mutex.withLock {
|
||||
if (recording != null) throw RecordingInProgressError()
|
||||
val outputs = outputs ?: throw CameraNotReadyError()
|
||||
@ -396,9 +421,7 @@ class CameraSession(private val context: Context,
|
||||
// Caches the result of outputs.hashCode() of the last getCaptureSession call
|
||||
private var lastOutputsHashCode: Int? = null
|
||||
|
||||
private suspend fun getCaptureSession(cameraDevice: CameraDevice,
|
||||
outputs: CameraOutputs,
|
||||
onClosed: () -> Unit): CameraCaptureSession {
|
||||
private suspend fun getCaptureSession(cameraDevice: CameraDevice, outputs: CameraOutputs, onClosed: () -> Unit): CameraCaptureSession {
|
||||
val currentSession = captureSession
|
||||
if (currentSession?.device == cameraDevice && outputs.hashCode() == lastOutputsHashCode) {
|
||||
// We already opened a CameraCaptureSession on this device
|
||||
@ -426,11 +449,13 @@ class CameraSession(private val context: Context,
|
||||
return session
|
||||
}
|
||||
|
||||
private fun getPreviewCaptureRequest(fps: Int? = null,
|
||||
videoStabilizationMode: VideoStabilizationMode? = null,
|
||||
lowLightBoost: Boolean? = null,
|
||||
hdr: Boolean? = null,
|
||||
torch: Boolean? = null): CaptureRequest {
|
||||
private fun getPreviewCaptureRequest(
|
||||
fps: Int? = null,
|
||||
videoStabilizationMode: VideoStabilizationMode? = null,
|
||||
lowLightBoost: Boolean? = null,
|
||||
hdr: Boolean? = null,
|
||||
torch: Boolean? = null
|
||||
): CaptureRequest {
|
||||
val captureRequest = previewRequest ?: throw CameraNotReadyError()
|
||||
|
||||
// FPS
|
||||
@ -442,9 +467,16 @@ class CameraSession(private val context: Context,
|
||||
captureRequest.set(CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE, videoStabilizationMode?.toOpticalStabilizationMode())
|
||||
|
||||
// Night/HDR Mode
|
||||
val sceneMode = if (hdr == true) CaptureRequest.CONTROL_SCENE_MODE_HDR else if (lowLightBoost == true) CaptureRequest.CONTROL_SCENE_MODE_NIGHT else null
|
||||
val sceneMode = if (hdr ==
|
||||
true
|
||||
) {
|
||||
CaptureRequest.CONTROL_SCENE_MODE_HDR
|
||||
} else if (lowLightBoost == true) CaptureRequest.CONTROL_SCENE_MODE_NIGHT else null
|
||||
captureRequest.set(CaptureRequest.CONTROL_SCENE_MODE, sceneMode)
|
||||
captureRequest.set(CaptureRequest.CONTROL_MODE, if (sceneMode != null) CaptureRequest.CONTROL_MODE_USE_SCENE_MODE else CaptureRequest.CONTROL_MODE_AUTO)
|
||||
captureRequest.set(
|
||||
CaptureRequest.CONTROL_MODE,
|
||||
if (sceneMode != null) CaptureRequest.CONTROL_MODE_USE_SCENE_MODE else CaptureRequest.CONTROL_MODE_AUTO
|
||||
)
|
||||
|
||||
// Zoom
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
|
||||
|
@ -1,4 +1,4 @@
|
||||
package com.mrousavy.camera.core;
|
||||
package com.mrousavy.camera.core
|
||||
|
||||
import android.media.Image
|
||||
import kotlinx.coroutines.CompletableDeferred
|
||||
|
@ -12,10 +12,12 @@ import com.mrousavy.camera.extensions.getPreviewSize
|
||||
import kotlin.math.roundToInt
|
||||
|
||||
@SuppressLint("ViewConstructor")
|
||||
class PreviewView(context: Context,
|
||||
cameraManager: CameraManager,
|
||||
cameraId: String,
|
||||
private val onSurfaceChanged: (surface: Surface?) -> Unit): SurfaceView(context) {
|
||||
class PreviewView(
|
||||
context: Context,
|
||||
cameraManager: CameraManager,
|
||||
cameraId: String,
|
||||
private val onSurfaceChanged: (surface: Surface?) -> Unit
|
||||
) : SurfaceView(context) {
|
||||
private val targetSize: Size
|
||||
private val aspectRatio: Float
|
||||
get() = targetSize.width.toFloat() / targetSize.height.toFloat()
|
||||
@ -26,7 +28,7 @@ class PreviewView(context: Context,
|
||||
|
||||
Log.i(TAG, "Using Preview Size ${targetSize.width} x ${targetSize.height}.")
|
||||
holder.setFixedSize(targetSize.width, targetSize.height)
|
||||
holder.addCallback(object: SurfaceHolder.Callback {
|
||||
holder.addCallback(object : SurfaceHolder.Callback {
|
||||
override fun surfaceCreated(holder: SurfaceHolder) {
|
||||
Log.i(TAG, "Surface created! ${holder.surface}")
|
||||
onSurfaceChanged(holder.surface)
|
||||
|
@ -14,17 +14,20 @@ import com.mrousavy.camera.parsers.VideoCodec
|
||||
import com.mrousavy.camera.parsers.VideoFileType
|
||||
import java.io.File
|
||||
|
||||
class RecordingSession(context: Context,
|
||||
val size: Size,
|
||||
private val enableAudio: Boolean,
|
||||
private val fps: Int? = null,
|
||||
private val codec: VideoCodec = VideoCodec.H264,
|
||||
private val orientation: Orientation,
|
||||
private val fileType: VideoFileType = VideoFileType.MP4,
|
||||
private val callback: (video: Video) -> Unit,
|
||||
private val onError: (error: RecorderError) -> Unit) {
|
||||
class RecordingSession(
|
||||
context: Context,
|
||||
val size: Size,
|
||||
private val enableAudio: Boolean,
|
||||
private val fps: Int? = null,
|
||||
private val codec: VideoCodec = VideoCodec.H264,
|
||||
private val orientation: Orientation,
|
||||
private val fileType: VideoFileType = VideoFileType.MP4,
|
||||
private val callback: (video: Video) -> Unit,
|
||||
private val onError: (error: RecorderError) -> Unit
|
||||
) {
|
||||
companion object {
|
||||
private const val TAG = "RecordingSession"
|
||||
|
||||
// bits per second
|
||||
private const val VIDEO_BIT_RATE = 10_000_000
|
||||
private const val AUDIO_SAMPLING_RATE = 44_100
|
||||
@ -67,7 +70,7 @@ class RecordingSession(context: Context,
|
||||
recorder.setAudioChannels(AUDIO_CHANNELS)
|
||||
}
|
||||
recorder.setInputSurface(surface)
|
||||
//recorder.setOrientationHint(orientation.toDegrees())
|
||||
// recorder.setOrientationHint(orientation.toDegrees())
|
||||
|
||||
recorder.setOnErrorListener { _, what, extra ->
|
||||
Log.e(TAG, "MediaRecorder Error: $what ($extra)")
|
||||
|
@ -3,7 +3,6 @@ package com.mrousavy.camera.core
|
||||
import android.graphics.ImageFormat
|
||||
import android.media.ImageReader
|
||||
import android.media.ImageWriter
|
||||
import android.media.MediaRecorder
|
||||
import android.util.Log
|
||||
import android.view.Surface
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
@ -13,10 +12,9 @@ import com.mrousavy.camera.parsers.Orientation
|
||||
import java.io.Closeable
|
||||
|
||||
@Suppress("JoinDeclarationAndAssignment")
|
||||
class VideoPipeline(val width: Int,
|
||||
val height: Int,
|
||||
val format: Int = ImageFormat.PRIVATE,
|
||||
private val isMirrored: Boolean = false): ImageReader.OnImageAvailableListener, Closeable {
|
||||
class VideoPipeline(val width: Int, val height: Int, val format: Int = ImageFormat.PRIVATE, private val isMirrored: Boolean = false) :
|
||||
ImageReader.OnImageAvailableListener,
|
||||
Closeable {
|
||||
companion object {
|
||||
private const val MAX_IMAGES = 3
|
||||
private const val TAG = "VideoPipeline"
|
||||
|
@ -9,32 +9,35 @@ import android.util.Log
|
||||
import android.util.Size
|
||||
import android.view.Surface
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.core.VideoPipeline
|
||||
import com.mrousavy.camera.extensions.closestToOrMax
|
||||
import com.mrousavy.camera.extensions.getPhotoSizes
|
||||
import com.mrousavy.camera.extensions.getPreviewSize
|
||||
import com.mrousavy.camera.extensions.getVideoSizes
|
||||
import com.mrousavy.camera.core.VideoPipeline
|
||||
import java.io.Closeable
|
||||
|
||||
class CameraOutputs(val cameraId: String,
|
||||
cameraManager: CameraManager,
|
||||
val preview: PreviewOutput? = null,
|
||||
val photo: PhotoOutput? = null,
|
||||
val video: VideoOutput? = null,
|
||||
val enableHdr: Boolean? = false,
|
||||
val callback: Callback): Closeable {
|
||||
class CameraOutputs(
|
||||
val cameraId: String,
|
||||
cameraManager: CameraManager,
|
||||
val preview: PreviewOutput? = null,
|
||||
val photo: PhotoOutput? = null,
|
||||
val video: VideoOutput? = null,
|
||||
val enableHdr: Boolean? = false,
|
||||
val callback: Callback
|
||||
) : Closeable {
|
||||
companion object {
|
||||
private const val TAG = "CameraOutputs"
|
||||
const val PHOTO_OUTPUT_BUFFER_SIZE = 3
|
||||
}
|
||||
|
||||
data class PreviewOutput(val surface: Surface)
|
||||
data class PhotoOutput(val targetSize: Size? = null,
|
||||
val format: Int = ImageFormat.JPEG)
|
||||
data class VideoOutput(val targetSize: Size? = null,
|
||||
val enableRecording: Boolean = false,
|
||||
val enableFrameProcessor: Boolean? = false,
|
||||
val format: Int = ImageFormat.PRIVATE)
|
||||
data class PhotoOutput(val targetSize: Size? = null, val format: Int = ImageFormat.JPEG)
|
||||
data class VideoOutput(
|
||||
val targetSize: Size? = null,
|
||||
val enableRecording: Boolean = false,
|
||||
val enableFrameProcessor: Boolean? = false,
|
||||
val format: Int = ImageFormat.PRIVATE
|
||||
)
|
||||
|
||||
interface Callback {
|
||||
fun onPhotoCaptured(image: Image)
|
||||
@ -58,14 +61,14 @@ class CameraOutputs(val cameraId: String,
|
||||
|
||||
override fun equals(other: Any?): Boolean {
|
||||
if (other !is CameraOutputs) return false
|
||||
return this.cameraId == other.cameraId
|
||||
&& this.preview?.surface == other.preview?.surface
|
||||
&& this.photo?.targetSize == other.photo?.targetSize
|
||||
&& this.photo?.format == other.photo?.format
|
||||
&& this.video?.enableRecording == other.video?.enableRecording
|
||||
&& this.video?.targetSize == other.video?.targetSize
|
||||
&& this.video?.format == other.video?.format
|
||||
&& this.enableHdr == other.enableHdr
|
||||
return this.cameraId == other.cameraId &&
|
||||
this.preview?.surface == other.preview?.surface &&
|
||||
this.photo?.targetSize == other.photo?.targetSize &&
|
||||
this.photo?.format == other.photo?.format &&
|
||||
this.video?.enableRecording == other.video?.enableRecording &&
|
||||
this.video?.targetSize == other.video?.targetSize &&
|
||||
this.video?.format == other.video?.format &&
|
||||
this.enableHdr == other.enableHdr
|
||||
}
|
||||
|
||||
override fun hashCode(): Int {
|
||||
|
@ -5,15 +5,18 @@ import android.util.Log
|
||||
import android.util.Size
|
||||
import java.io.Closeable
|
||||
|
||||
class ImageReaderOutput(private val imageReader: ImageReader,
|
||||
outputType: OutputType,
|
||||
dynamicRangeProfile: Long? = null): Closeable, SurfaceOutput(imageReader.surface, Size(imageReader.width, imageReader.height), outputType, dynamicRangeProfile) {
|
||||
class ImageReaderOutput(private val imageReader: ImageReader, outputType: OutputType, dynamicRangeProfile: Long? = null) :
|
||||
SurfaceOutput(
|
||||
imageReader.surface,
|
||||
Size(imageReader.width, imageReader.height),
|
||||
outputType,
|
||||
dynamicRangeProfile
|
||||
),
|
||||
Closeable {
|
||||
override fun close() {
|
||||
Log.i(TAG, "Closing ${imageReader.width}x${imageReader.height} $outputType ImageReader..")
|
||||
imageReader.close()
|
||||
}
|
||||
|
||||
override fun toString(): String {
|
||||
return "$outputType (${imageReader.width} x ${imageReader.height} in format #${imageReader.imageFormat})"
|
||||
}
|
||||
override fun toString(): String = "$outputType (${imageReader.width} x ${imageReader.height} in format #${imageReader.imageFormat})"
|
||||
}
|
||||
|
@ -10,11 +10,13 @@ import android.view.Surface
|
||||
import androidx.annotation.RequiresApi
|
||||
import java.io.Closeable
|
||||
|
||||
open class SurfaceOutput(val surface: Surface,
|
||||
val size: Size,
|
||||
val outputType: OutputType,
|
||||
private val dynamicRangeProfile: Long? = null,
|
||||
private val closeSurfaceOnEnd: Boolean = false): Closeable {
|
||||
open class SurfaceOutput(
|
||||
val surface: Surface,
|
||||
val size: Size,
|
||||
val outputType: OutputType,
|
||||
private val dynamicRangeProfile: Long? = null,
|
||||
private val closeSurfaceOnEnd: Boolean = false
|
||||
) : Closeable {
|
||||
companion object {
|
||||
const val TAG = "SurfaceOutput"
|
||||
|
||||
@ -47,9 +49,7 @@ open class SurfaceOutput(val surface: Surface,
|
||||
return result
|
||||
}
|
||||
|
||||
override fun toString(): String {
|
||||
return "$outputType (${size.width} x ${size.height})"
|
||||
}
|
||||
override fun toString(): String = "$outputType (${size.width} x ${size.height})"
|
||||
|
||||
override fun close() {
|
||||
if (closeSurfaceOnEnd) {
|
||||
@ -64,13 +64,12 @@ open class SurfaceOutput(val surface: Surface,
|
||||
VIDEO_AND_PREVIEW;
|
||||
|
||||
@RequiresApi(Build.VERSION_CODES.TIRAMISU)
|
||||
fun toOutputType(): Int {
|
||||
return when(this) {
|
||||
fun toOutputType(): Int =
|
||||
when (this) {
|
||||
PHOTO -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_STILL_CAPTURE
|
||||
VIDEO -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_RECORD
|
||||
PREVIEW -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW
|
||||
VIDEO_AND_PREVIEW -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW_VIDEO_STILL
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -5,15 +5,18 @@ import android.util.Size
|
||||
import com.mrousavy.camera.core.VideoPipeline
|
||||
import java.io.Closeable
|
||||
|
||||
class VideoPipelineOutput(val videoPipeline: VideoPipeline,
|
||||
outputType: OutputType,
|
||||
dynamicRangeProfile: Long? = null): Closeable, SurfaceOutput(videoPipeline.surface, Size(videoPipeline.width, videoPipeline.height), outputType, dynamicRangeProfile) {
|
||||
class VideoPipelineOutput(val videoPipeline: VideoPipeline, outputType: OutputType, dynamicRangeProfile: Long? = null) :
|
||||
SurfaceOutput(
|
||||
videoPipeline.surface,
|
||||
Size(videoPipeline.width, videoPipeline.height),
|
||||
outputType,
|
||||
dynamicRangeProfile
|
||||
),
|
||||
Closeable {
|
||||
override fun close() {
|
||||
Log.i(TAG, "Closing ${videoPipeline.width}x${videoPipeline.height} Video Pipeline..")
|
||||
videoPipeline.close()
|
||||
}
|
||||
|
||||
override fun toString(): String {
|
||||
return "$outputType (${videoPipeline.width} x ${videoPipeline.height} in format #${videoPipeline.format})"
|
||||
}
|
||||
override fun toString(): String = "$outputType (${videoPipeline.width} x ${videoPipeline.height} in format #${videoPipeline.format})"
|
||||
}
|
||||
|
@ -12,42 +12,37 @@ import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
import kotlin.coroutines.suspendCoroutine
|
||||
|
||||
suspend fun CameraCaptureSession.capture(captureRequest: CaptureRequest, enableShutterSound: Boolean): TotalCaptureResult {
|
||||
return suspendCoroutine { continuation ->
|
||||
this.capture(captureRequest, object: CameraCaptureSession.CaptureCallback() {
|
||||
override fun onCaptureCompleted(
|
||||
session: CameraCaptureSession,
|
||||
request: CaptureRequest,
|
||||
result: TotalCaptureResult
|
||||
) {
|
||||
super.onCaptureCompleted(session, request, result)
|
||||
suspend fun CameraCaptureSession.capture(captureRequest: CaptureRequest, enableShutterSound: Boolean): TotalCaptureResult =
|
||||
suspendCoroutine { continuation ->
|
||||
this.capture(
|
||||
captureRequest,
|
||||
object : CameraCaptureSession.CaptureCallback() {
|
||||
override fun onCaptureCompleted(session: CameraCaptureSession, request: CaptureRequest, result: TotalCaptureResult) {
|
||||
super.onCaptureCompleted(session, request, result)
|
||||
|
||||
continuation.resume(result)
|
||||
}
|
||||
|
||||
override fun onCaptureStarted(session: CameraCaptureSession, request: CaptureRequest, timestamp: Long, frameNumber: Long) {
|
||||
super.onCaptureStarted(session, request, timestamp, frameNumber)
|
||||
|
||||
if (enableShutterSound) {
|
||||
val mediaActionSound = MediaActionSound()
|
||||
mediaActionSound.play(MediaActionSound.SHUTTER_CLICK)
|
||||
continuation.resume(result)
|
||||
}
|
||||
}
|
||||
|
||||
override fun onCaptureFailed(
|
||||
session: CameraCaptureSession,
|
||||
request: CaptureRequest,
|
||||
failure: CaptureFailure
|
||||
) {
|
||||
super.onCaptureFailed(session, request, failure)
|
||||
val wasImageCaptured = failure.wasImageCaptured()
|
||||
val error = when (failure.reason) {
|
||||
CaptureFailure.REASON_ERROR -> UnknownCaptureError(wasImageCaptured)
|
||||
CaptureFailure.REASON_FLUSHED -> CaptureAbortedError(wasImageCaptured)
|
||||
else -> UnknownCaptureError(wasImageCaptured)
|
||||
override fun onCaptureStarted(session: CameraCaptureSession, request: CaptureRequest, timestamp: Long, frameNumber: Long) {
|
||||
super.onCaptureStarted(session, request, timestamp, frameNumber)
|
||||
|
||||
if (enableShutterSound) {
|
||||
val mediaActionSound = MediaActionSound()
|
||||
mediaActionSound.play(MediaActionSound.SHUTTER_CLICK)
|
||||
}
|
||||
}
|
||||
continuation.resumeWithException(error)
|
||||
}
|
||||
}, CameraQueues.cameraQueue.handler)
|
||||
|
||||
override fun onCaptureFailed(session: CameraCaptureSession, request: CaptureRequest, failure: CaptureFailure) {
|
||||
super.onCaptureFailed(session, request, failure)
|
||||
val wasImageCaptured = failure.wasImageCaptured()
|
||||
val error = when (failure.reason) {
|
||||
CaptureFailure.REASON_ERROR -> UnknownCaptureError(wasImageCaptured)
|
||||
CaptureFailure.REASON_FLUSHED -> CaptureAbortedError(wasImageCaptured)
|
||||
else -> UnknownCaptureError(wasImageCaptured)
|
||||
}
|
||||
continuation.resumeWithException(error)
|
||||
}
|
||||
},
|
||||
CameraQueues.cameraQueue.handler
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -8,29 +8,33 @@ import android.hardware.camera2.params.OutputConfiguration
|
||||
import android.hardware.camera2.params.SessionConfiguration
|
||||
import android.os.Build
|
||||
import android.util.Log
|
||||
import androidx.annotation.RequiresApi
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.CameraSessionCannotBeConfiguredError
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import kotlinx.coroutines.suspendCancellableCoroutine
|
||||
import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
import kotlinx.coroutines.suspendCancellableCoroutine
|
||||
|
||||
private const val TAG = "CreateCaptureSession"
|
||||
private var sessionId = 1000
|
||||
|
||||
suspend fun CameraDevice.createCaptureSession(cameraManager: CameraManager,
|
||||
outputs: CameraOutputs,
|
||||
onClosed: (session: CameraCaptureSession) -> Unit,
|
||||
queue: CameraQueues.CameraQueue): CameraCaptureSession {
|
||||
return suspendCancellableCoroutine { continuation ->
|
||||
suspend fun CameraDevice.createCaptureSession(
|
||||
cameraManager: CameraManager,
|
||||
outputs: CameraOutputs,
|
||||
onClosed: (session: CameraCaptureSession) -> Unit,
|
||||
queue: CameraQueues.CameraQueue
|
||||
): CameraCaptureSession =
|
||||
suspendCancellableCoroutine { continuation ->
|
||||
val characteristics = cameraManager.getCameraCharacteristics(id)
|
||||
val hardwareLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!!
|
||||
val sessionId = sessionId++
|
||||
Log.i(TAG, "Camera $id: Creating Capture Session #$sessionId... " +
|
||||
"Hardware Level: $hardwareLevel} | Outputs: $outputs")
|
||||
Log.i(
|
||||
TAG,
|
||||
"Camera $id: Creating Capture Session #$sessionId... " +
|
||||
"Hardware Level: $hardwareLevel} | Outputs: $outputs"
|
||||
)
|
||||
|
||||
val callback = object: CameraCaptureSession.StateCallback() {
|
||||
val callback = object : CameraCaptureSession.StateCallback() {
|
||||
override fun onConfigured(session: CameraCaptureSession) {
|
||||
Log.i(TAG, "Camera $id: Capture Session #$sessionId configured!")
|
||||
continuation.resume(session)
|
||||
@ -78,4 +82,3 @@ suspend fun CameraDevice.createCaptureSession(cameraManager: CameraManager,
|
||||
this.createCaptureSessionByOutputConfigurations(outputConfigurations, callback, queue.handler)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -23,14 +23,16 @@ private fun supportsSnapshotCapture(cameraCharacteristics: CameraCharacteristics
|
||||
return true
|
||||
}
|
||||
|
||||
fun CameraDevice.createPhotoCaptureRequest(cameraManager: CameraManager,
|
||||
surface: Surface,
|
||||
zoom: Float,
|
||||
qualityPrioritization: QualityPrioritization,
|
||||
flashMode: Flash,
|
||||
enableRedEyeReduction: Boolean,
|
||||
enableAutoStabilization: Boolean,
|
||||
orientation: Orientation): CaptureRequest {
|
||||
fun CameraDevice.createPhotoCaptureRequest(
|
||||
cameraManager: CameraManager,
|
||||
surface: Surface,
|
||||
zoom: Float,
|
||||
qualityPrioritization: QualityPrioritization,
|
||||
flashMode: Flash,
|
||||
enableRedEyeReduction: Boolean,
|
||||
enableAutoStabilization: Boolean,
|
||||
orientation: Orientation
|
||||
): CaptureRequest {
|
||||
val cameraCharacteristics = cameraManager.getCameraCharacteristics(this.id)
|
||||
|
||||
val template = if (qualityPrioritization == QualityPrioritization.SPEED && supportsSnapshotCapture(cameraCharacteristics)) {
|
||||
|
@ -9,20 +9,22 @@ import com.mrousavy.camera.CameraCannotBeOpenedError
|
||||
import com.mrousavy.camera.CameraDisconnectedError
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.parsers.CameraDeviceError
|
||||
import kotlinx.coroutines.suspendCancellableCoroutine
|
||||
import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
import kotlinx.coroutines.suspendCancellableCoroutine
|
||||
|
||||
private const val TAG = "CameraManager"
|
||||
|
||||
@SuppressLint("MissingPermission")
|
||||
suspend fun CameraManager.openCamera(cameraId: String,
|
||||
onDisconnected: (camera: CameraDevice, reason: Throwable) -> Unit,
|
||||
queue: CameraQueues.CameraQueue): CameraDevice {
|
||||
return suspendCancellableCoroutine { continuation ->
|
||||
suspend fun CameraManager.openCamera(
|
||||
cameraId: String,
|
||||
onDisconnected: (camera: CameraDevice, reason: Throwable) -> Unit,
|
||||
queue: CameraQueues.CameraQueue
|
||||
): CameraDevice =
|
||||
suspendCancellableCoroutine { continuation ->
|
||||
Log.i(TAG, "Camera $cameraId: Opening...")
|
||||
|
||||
val callback = object: CameraDevice.StateCallback() {
|
||||
val callback = object : CameraDevice.StateCallback() {
|
||||
override fun onOpened(camera: CameraDevice) {
|
||||
Log.i(TAG, "Camera $cameraId: Opened!")
|
||||
continuation.resume(camera)
|
||||
@ -56,4 +58,3 @@ suspend fun CameraManager.openCamera(cameraId: String,
|
||||
this.openCamera(cameraId, callback, queue.handler)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,3 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
fun <T> List<T>.containsAny(elements: List<T>): Boolean {
|
||||
return elements.any { element -> this.contains(element) }
|
||||
}
|
||||
fun <T> List<T>.containsAny(elements: List<T>): Boolean = elements.any { element -> this.contains(element) }
|
||||
|
@ -7,23 +7,21 @@ import kotlin.math.abs
|
||||
import kotlin.math.max
|
||||
import kotlin.math.min
|
||||
|
||||
fun List<Size>.closestToOrMax(size: Size?): Size {
|
||||
return if (size != null) {
|
||||
fun List<Size>.closestToOrMax(size: Size?): Size =
|
||||
if (size != null) {
|
||||
this.minBy { abs(it.width - size.width) + abs(it.height - size.height) }
|
||||
} else {
|
||||
this.maxBy { it.width * it.height }
|
||||
}
|
||||
}
|
||||
|
||||
fun Size.rotated(surfaceRotation: Int): Size {
|
||||
return when (surfaceRotation) {
|
||||
fun Size.rotated(surfaceRotation: Int): Size =
|
||||
when (surfaceRotation) {
|
||||
Surface.ROTATION_0 -> Size(width, height)
|
||||
Surface.ROTATION_90 -> Size(height, width)
|
||||
Surface.ROTATION_180 -> Size(width, height)
|
||||
Surface.ROTATION_270 -> Size(height, width)
|
||||
else -> Size(width, height)
|
||||
}
|
||||
}
|
||||
|
||||
val Size.bigger: Int
|
||||
get() = max(width, height)
|
||||
@ -35,7 +33,4 @@ val SizeF.bigger: Float
|
||||
val SizeF.smaller: Float
|
||||
get() = min(this.width, this.height)
|
||||
|
||||
operator fun Size.compareTo(other: Size): Int {
|
||||
return (this.width * this.height).compareTo(other.width * other.height)
|
||||
}
|
||||
|
||||
operator fun Size.compareTo(other: Size): Int = (this.width * this.height).compareTo(other.width * other.height)
|
||||
|
@ -26,6 +26,7 @@ class VisionCameraProxy(context: ReactApplicationContext) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@DoNotStrip
|
||||
@Keep
|
||||
private var mHybridData: HybridData
|
||||
@ -45,7 +46,7 @@ class VisionCameraProxy(context: ReactApplicationContext) {
|
||||
Log.d(TAG, "Finding view $viewId...")
|
||||
val ctx = mContext.get()
|
||||
val view = if (ctx != null) UIManagerHelper.getUIManager(ctx, viewId)?.resolveView(viewId) as CameraView? else null
|
||||
Log.d(TAG, if (view != null) "Found view $viewId!" else "Couldn't find view $viewId!")
|
||||
Log.d(TAG, if (view != null) "Found view $viewId!" else "Couldn't find view $viewId!")
|
||||
return view ?: throw ViewNotFoundError(viewId)
|
||||
}
|
||||
|
||||
@ -69,12 +70,9 @@ class VisionCameraProxy(context: ReactApplicationContext) {
|
||||
|
||||
@DoNotStrip
|
||||
@Keep
|
||||
fun getFrameProcessorPlugin(name: String, options: Map<String, Any>): FrameProcessorPlugin {
|
||||
return FrameProcessorPluginRegistry.getPlugin(name, options)
|
||||
}
|
||||
fun getFrameProcessorPlugin(name: String, options: Map<String, Any>): FrameProcessorPlugin =
|
||||
FrameProcessorPluginRegistry.getPlugin(name, options)
|
||||
|
||||
// private C++ funcs
|
||||
private external fun initHybrid(jsContext: Long,
|
||||
jsCallInvokerHolder: CallInvokerHolderImpl,
|
||||
scheduler: VisionCameraScheduler): HybridData
|
||||
private external fun initHybrid(jsContext: Long, jsCallInvokerHolder: CallInvokerHolderImpl, scheduler: VisionCameraScheduler): HybridData
|
||||
}
|
||||
|
@ -2,7 +2,7 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraDevice
|
||||
|
||||
enum class CameraDeviceError(override val unionValue: String): JSUnionValue {
|
||||
enum class CameraDeviceError(override val unionValue: String) : JSUnionValue {
|
||||
CAMERA_ALREADY_IN_USE("camera-already-in-use"),
|
||||
TOO_MANY_OPEN_CAMERAS("too-many-open-cameras"),
|
||||
CAMERA_IS_DISABLED_BY_ANDROID("camera-is-disabled-by-android"),
|
||||
@ -11,8 +11,8 @@ enum class CameraDeviceError(override val unionValue: String): JSUnionValue {
|
||||
DISCONNECTED("camera-has-been-disconnected");
|
||||
|
||||
companion object {
|
||||
fun fromCameraDeviceError(cameraDeviceError: Int): CameraDeviceError {
|
||||
return when (cameraDeviceError) {
|
||||
fun fromCameraDeviceError(cameraDeviceError: Int): CameraDeviceError =
|
||||
when (cameraDeviceError) {
|
||||
CameraDevice.StateCallback.ERROR_CAMERA_IN_USE -> CAMERA_ALREADY_IN_USE
|
||||
CameraDevice.StateCallback.ERROR_MAX_CAMERAS_IN_USE -> TOO_MANY_OPEN_CAMERAS
|
||||
CameraDevice.StateCallback.ERROR_CAMERA_DISABLED -> CAMERA_IS_DISABLED_BY_ANDROID
|
||||
@ -20,6 +20,5 @@ enum class CameraDeviceError(override val unionValue: String): JSUnionValue {
|
||||
CameraDevice.StateCallback.ERROR_CAMERA_SERVICE -> UNKNOWN_FATAL_CAMERA_SERVICE_ERROR
|
||||
else -> UNKNOWN_CAMERA_DEVICE_ERROR
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,19 +2,18 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import com.mrousavy.camera.InvalidTypeScriptUnionError
|
||||
|
||||
enum class Flash(override val unionValue: String): JSUnionValue {
|
||||
enum class Flash(override val unionValue: String) : JSUnionValue {
|
||||
OFF("off"),
|
||||
ON("on"),
|
||||
AUTO("auto");
|
||||
|
||||
companion object: JSUnionValue.Companion<Flash> {
|
||||
override fun fromUnionValue(unionValue: String?): Flash {
|
||||
return when (unionValue) {
|
||||
companion object : JSUnionValue.Companion<Flash> {
|
||||
override fun fromUnionValue(unionValue: String?): Flash =
|
||||
when (unionValue) {
|
||||
"off" -> OFF
|
||||
"on" -> ON
|
||||
"auto" -> AUTO
|
||||
else -> throw InvalidTypeScriptUnionError("flash", unionValue ?: "(null)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,16 +2,16 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
|
||||
enum class HardwareLevel(override val unionValue: String): JSUnionValue {
|
||||
enum class HardwareLevel(override val unionValue: String) : JSUnionValue {
|
||||
LEGACY("legacy"),
|
||||
LIMITED("limited"),
|
||||
EXTERNAL("external"),
|
||||
EXTERNAL("limited"),
|
||||
FULL("full"),
|
||||
LEVEL_3("level-3");
|
||||
LEVEL_3("full");
|
||||
|
||||
companion object {
|
||||
fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): HardwareLevel {
|
||||
return when (cameraCharacteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)) {
|
||||
fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): HardwareLevel =
|
||||
when (cameraCharacteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)) {
|
||||
CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY -> LEGACY
|
||||
CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED -> LIMITED
|
||||
CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL -> EXTERNAL
|
||||
@ -19,6 +19,5 @@ enum class HardwareLevel(override val unionValue: String): JSUnionValue {
|
||||
CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_3 -> LEVEL_3
|
||||
else -> LEGACY
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,19 +2,18 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
|
||||
enum class LensFacing(override val unionValue: String): JSUnionValue {
|
||||
enum class LensFacing(override val unionValue: String) : JSUnionValue {
|
||||
BACK("back"),
|
||||
FRONT("front"),
|
||||
EXTERNAL("external");
|
||||
|
||||
companion object {
|
||||
fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): LensFacing {
|
||||
return when (cameraCharacteristics.get(CameraCharacteristics.LENS_FACING)!!) {
|
||||
fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): LensFacing =
|
||||
when (cameraCharacteristics.get(CameraCharacteristics.LENS_FACING)!!) {
|
||||
CameraCharacteristics.LENS_FACING_BACK -> BACK
|
||||
CameraCharacteristics.LENS_FACING_FRONT -> FRONT
|
||||
CameraCharacteristics.LENS_FACING_EXTERNAL -> EXTERNAL
|
||||
else -> EXTERNAL
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,20 +2,19 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
|
||||
enum class Orientation(override val unionValue: String): JSUnionValue {
|
||||
enum class Orientation(override val unionValue: String) : JSUnionValue {
|
||||
PORTRAIT("portrait"),
|
||||
LANDSCAPE_RIGHT("landscape-right"),
|
||||
PORTRAIT_UPSIDE_DOWN("portrait-upside-down"),
|
||||
LANDSCAPE_LEFT("landscape-left");
|
||||
|
||||
fun toDegrees(): Int {
|
||||
return when(this) {
|
||||
fun toDegrees(): Int =
|
||||
when (this) {
|
||||
PORTRAIT -> 0
|
||||
LANDSCAPE_RIGHT -> 90
|
||||
PORTRAIT_UPSIDE_DOWN -> 180
|
||||
LANDSCAPE_LEFT -> 270
|
||||
}
|
||||
}
|
||||
|
||||
fun toSensorRelativeOrientation(cameraCharacteristics: CameraCharacteristics): Orientation {
|
||||
val sensorOrientation = cameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION)!!
|
||||
@ -33,24 +32,22 @@ enum class Orientation(override val unionValue: String): JSUnionValue {
|
||||
return fromRotationDegrees(newRotationDegrees)
|
||||
}
|
||||
|
||||
companion object: JSUnionValue.Companion<Orientation> {
|
||||
override fun fromUnionValue(unionValue: String?): Orientation? {
|
||||
return when (unionValue) {
|
||||
companion object : JSUnionValue.Companion<Orientation> {
|
||||
override fun fromUnionValue(unionValue: String?): Orientation? =
|
||||
when (unionValue) {
|
||||
"portrait" -> PORTRAIT
|
||||
"landscape-right" -> LANDSCAPE_RIGHT
|
||||
"portrait-upside-down" -> PORTRAIT_UPSIDE_DOWN
|
||||
"landscape-left" -> LANDSCAPE_LEFT
|
||||
else -> null
|
||||
}
|
||||
}
|
||||
|
||||
fun fromRotationDegrees(rotationDegrees: Int): Orientation {
|
||||
return when (rotationDegrees) {
|
||||
fun fromRotationDegrees(rotationDegrees: Int): Orientation =
|
||||
when (rotationDegrees) {
|
||||
in 45..135 -> LANDSCAPE_RIGHT
|
||||
in 135..225 -> PORTRAIT_UPSIDE_DOWN
|
||||
in 225..315 -> LANDSCAPE_LEFT
|
||||
else -> PORTRAIT
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,18 +2,17 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import android.content.pm.PackageManager
|
||||
|
||||
enum class PermissionStatus(override val unionValue: String): JSUnionValue {
|
||||
enum class PermissionStatus(override val unionValue: String) : JSUnionValue {
|
||||
DENIED("denied"),
|
||||
NOT_DETERMINED("not-determined"),
|
||||
GRANTED("granted");
|
||||
|
||||
companion object {
|
||||
fun fromPermissionStatus(status: Int): PermissionStatus {
|
||||
return when (status) {
|
||||
fun fromPermissionStatus(status: Int): PermissionStatus =
|
||||
when (status) {
|
||||
PackageManager.PERMISSION_DENIED -> DENIED
|
||||
PackageManager.PERMISSION_GRANTED -> GRANTED
|
||||
else -> NOT_DETERMINED
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4,7 +4,7 @@ import android.graphics.ImageFormat
|
||||
import com.mrousavy.camera.PixelFormatNotSupportedError
|
||||
|
||||
@Suppress("FoldInitializerAndIfToElvis")
|
||||
enum class PixelFormat(override val unionValue: String): JSUnionValue {
|
||||
enum class PixelFormat(override val unionValue: String) : JSUnionValue {
|
||||
YUV("yuv"),
|
||||
RGB("rgb"),
|
||||
DNG("dng"),
|
||||
@ -25,19 +25,18 @@ enum class PixelFormat(override val unionValue: String): JSUnionValue {
|
||||
return result
|
||||
}
|
||||
|
||||
companion object: JSUnionValue.Companion<PixelFormat> {
|
||||
fun fromImageFormat(imageFormat: Int): PixelFormat {
|
||||
return when (imageFormat) {
|
||||
companion object : JSUnionValue.Companion<PixelFormat> {
|
||||
fun fromImageFormat(imageFormat: Int): PixelFormat =
|
||||
when (imageFormat) {
|
||||
ImageFormat.YUV_420_888 -> YUV
|
||||
ImageFormat.JPEG, ImageFormat.DEPTH_JPEG -> RGB
|
||||
ImageFormat.RAW_SENSOR -> DNG
|
||||
ImageFormat.PRIVATE -> NATIVE
|
||||
else -> UNKNOWN
|
||||
}
|
||||
}
|
||||
|
||||
override fun fromUnionValue(unionValue: String?): PixelFormat? {
|
||||
return when (unionValue) {
|
||||
override fun fromUnionValue(unionValue: String?): PixelFormat? =
|
||||
when (unionValue) {
|
||||
"yuv" -> YUV
|
||||
"rgb" -> RGB
|
||||
"dng" -> DNG
|
||||
@ -45,6 +44,5 @@ enum class PixelFormat(override val unionValue: String): JSUnionValue {
|
||||
"unknown" -> UNKNOWN
|
||||
else -> null
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,18 +1,17 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
enum class QualityPrioritization(override val unionValue: String): JSUnionValue {
|
||||
enum class QualityPrioritization(override val unionValue: String) : JSUnionValue {
|
||||
SPEED("speed"),
|
||||
BALANCED("balanced"),
|
||||
QUALITY("quality");
|
||||
|
||||
companion object: JSUnionValue.Companion<QualityPrioritization> {
|
||||
override fun fromUnionValue(unionValue: String?): QualityPrioritization {
|
||||
return when (unionValue) {
|
||||
companion object : JSUnionValue.Companion<QualityPrioritization> {
|
||||
override fun fromUnionValue(unionValue: String?): QualityPrioritization =
|
||||
when (unionValue) {
|
||||
"speed" -> SPEED
|
||||
"balanced" -> BALANCED
|
||||
"quality" -> QUALITY
|
||||
else -> BALANCED
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,16 +1,15 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
enum class Torch(override val unionValue: String): JSUnionValue {
|
||||
enum class Torch(override val unionValue: String) : JSUnionValue {
|
||||
OFF("off"),
|
||||
ON("on");
|
||||
|
||||
companion object: JSUnionValue.Companion<Torch> {
|
||||
override fun fromUnionValue(unionValue: String?): Torch {
|
||||
return when (unionValue) {
|
||||
companion object : JSUnionValue.Companion<Torch> {
|
||||
override fun fromUnionValue(unionValue: String?): Torch =
|
||||
when (unionValue) {
|
||||
"off" -> OFF
|
||||
"on" -> ON
|
||||
else -> OFF
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,24 +2,22 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import android.media.MediaRecorder
|
||||
|
||||
enum class VideoCodec(override val unionValue: String): JSUnionValue {
|
||||
enum class VideoCodec(override val unionValue: String) : JSUnionValue {
|
||||
H264("h264"),
|
||||
H265("h265");
|
||||
|
||||
fun toVideoCodec(): Int {
|
||||
return when (this) {
|
||||
fun toVideoCodec(): Int =
|
||||
when (this) {
|
||||
H264 -> MediaRecorder.VideoEncoder.H264
|
||||
H265 -> MediaRecorder.VideoEncoder.HEVC
|
||||
}
|
||||
}
|
||||
|
||||
companion object: JSUnionValue.Companion<VideoCodec> {
|
||||
override fun fromUnionValue(unionValue: String?): VideoCodec {
|
||||
return when (unionValue) {
|
||||
companion object : JSUnionValue.Companion<VideoCodec> {
|
||||
override fun fromUnionValue(unionValue: String?): VideoCodec =
|
||||
when (unionValue) {
|
||||
"h264" -> H264
|
||||
"h265" -> H265
|
||||
else -> H264
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,24 +2,22 @@ package com.mrousavy.camera.parsers
|
||||
|
||||
import com.mrousavy.camera.InvalidTypeScriptUnionError
|
||||
|
||||
enum class VideoFileType(override val unionValue: String): JSUnionValue {
|
||||
enum class VideoFileType(override val unionValue: String) : JSUnionValue {
|
||||
MOV("mov"),
|
||||
MP4("mp4");
|
||||
|
||||
fun toExtension(): String {
|
||||
return when (this) {
|
||||
fun toExtension(): String =
|
||||
when (this) {
|
||||
MOV -> ".mov"
|
||||
MP4 -> ".mp4"
|
||||
}
|
||||
}
|
||||
|
||||
companion object: JSUnionValue.Companion<VideoFileType> {
|
||||
override fun fromUnionValue(unionValue: String?): VideoFileType {
|
||||
return when (unionValue) {
|
||||
companion object : JSUnionValue.Companion<VideoFileType> {
|
||||
override fun fromUnionValue(unionValue: String?): VideoFileType =
|
||||
when (unionValue) {
|
||||
"mov" -> MOV
|
||||
"mp4" -> MP4
|
||||
else -> throw InvalidTypeScriptUnionError("fileType", unionValue ?: "(null)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -6,54 +6,49 @@ import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_
|
||||
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_OFF
|
||||
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_ON
|
||||
|
||||
enum class VideoStabilizationMode(override val unionValue: String): JSUnionValue {
|
||||
enum class VideoStabilizationMode(override val unionValue: String) : JSUnionValue {
|
||||
OFF("off"),
|
||||
STANDARD("standard"),
|
||||
CINEMATIC("cinematic"),
|
||||
CINEMATIC_EXTENDED("cinematic-extended");
|
||||
|
||||
fun toDigitalStabilizationMode(): Int {
|
||||
return when (this) {
|
||||
fun toDigitalStabilizationMode(): Int =
|
||||
when (this) {
|
||||
OFF -> CONTROL_VIDEO_STABILIZATION_MODE_OFF
|
||||
STANDARD -> CONTROL_VIDEO_STABILIZATION_MODE_ON
|
||||
CINEMATIC -> 2 /* CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION */
|
||||
CINEMATIC -> 2 // TODO: CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION
|
||||
else -> CONTROL_VIDEO_STABILIZATION_MODE_OFF
|
||||
}
|
||||
}
|
||||
|
||||
fun toOpticalStabilizationMode(): Int {
|
||||
return when (this) {
|
||||
fun toOpticalStabilizationMode(): Int =
|
||||
when (this) {
|
||||
OFF -> LENS_OPTICAL_STABILIZATION_MODE_OFF
|
||||
CINEMATIC_EXTENDED -> LENS_OPTICAL_STABILIZATION_MODE_ON
|
||||
else -> LENS_OPTICAL_STABILIZATION_MODE_OFF
|
||||
}
|
||||
}
|
||||
|
||||
companion object: JSUnionValue.Companion<VideoStabilizationMode> {
|
||||
override fun fromUnionValue(unionValue: String?): VideoStabilizationMode? {
|
||||
return when (unionValue) {
|
||||
companion object : JSUnionValue.Companion<VideoStabilizationMode> {
|
||||
override fun fromUnionValue(unionValue: String?): VideoStabilizationMode? =
|
||||
when (unionValue) {
|
||||
"off" -> OFF
|
||||
"standard" -> STANDARD
|
||||
"cinematic" -> CINEMATIC
|
||||
"cinematic-extended" -> CINEMATIC_EXTENDED
|
||||
else -> null
|
||||
}
|
||||
}
|
||||
|
||||
fun fromDigitalVideoStabilizationMode(stabiliazionMode: Int): VideoStabilizationMode {
|
||||
return when (stabiliazionMode) {
|
||||
fun fromDigitalVideoStabilizationMode(stabiliazionMode: Int): VideoStabilizationMode =
|
||||
when (stabiliazionMode) {
|
||||
CONTROL_VIDEO_STABILIZATION_MODE_OFF -> OFF
|
||||
CONTROL_VIDEO_STABILIZATION_MODE_ON -> STANDARD
|
||||
CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION -> CINEMATIC
|
||||
else -> OFF
|
||||
}
|
||||
}
|
||||
fun fromOpticalVideoStabilizationMode(stabiliazionMode: Int): VideoStabilizationMode {
|
||||
return when (stabiliazionMode) {
|
||||
fun fromOpticalVideoStabilizationMode(stabiliazionMode: Int): VideoStabilizationMode =
|
||||
when (stabiliazionMode) {
|
||||
LENS_OPTICAL_STABILIZATION_MODE_OFF -> OFF
|
||||
LENS_OPTICAL_STABILIZATION_MODE_ON -> CINEMATIC_EXTENDED
|
||||
else -> OFF
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,16 +1,8 @@
|
||||
import * as React from 'react';
|
||||
import { useRef, useState, useMemo, useCallback } from 'react';
|
||||
import { useRef, useState, useCallback } from 'react';
|
||||
import { StyleSheet, Text, View } from 'react-native';
|
||||
import { PinchGestureHandler, PinchGestureHandlerGestureEvent, TapGestureHandler } from 'react-native-gesture-handler';
|
||||
import {
|
||||
CameraDeviceFormat,
|
||||
CameraRuntimeError,
|
||||
PhotoFile,
|
||||
sortFormats,
|
||||
useCameraDevices,
|
||||
useFrameProcessor,
|
||||
VideoFile,
|
||||
} from 'react-native-vision-camera';
|
||||
import { CameraRuntimeError, PhotoFile, useCameraDevice, useCameraFormat, useFrameProcessor, VideoFile } from 'react-native-vision-camera';
|
||||
import { Camera } from 'react-native-vision-camera';
|
||||
import { CONTENT_SPACING, MAX_ZOOM_FACTOR, SAFE_AREA_PADDING } from './Constants';
|
||||
import Reanimated, { Extrapolate, interpolate, useAnimatedGestureHandler, useAnimatedProps, useSharedValue } from 'react-native-reanimated';
|
||||
@ -53,59 +45,24 @@ export function CameraPage({ navigation }: Props): React.ReactElement {
|
||||
const [enableNightMode, setEnableNightMode] = useState(false);
|
||||
|
||||
// camera format settings
|
||||
const devices = useCameraDevices();
|
||||
const device = devices[cameraPosition];
|
||||
const formats = useMemo<CameraDeviceFormat[]>(() => {
|
||||
if (device?.formats == null) return [];
|
||||
return device.formats.sort(sortFormats);
|
||||
}, [device?.formats]);
|
||||
const device = useCameraDevice(cameraPosition);
|
||||
const format = useCameraFormat(device, {
|
||||
fps: {
|
||||
target: 60,
|
||||
priority: 1,
|
||||
},
|
||||
});
|
||||
|
||||
//#region Memos
|
||||
const [is60Fps, setIs60Fps] = useState(true);
|
||||
const fps = useMemo(() => {
|
||||
if (!is60Fps) return 30;
|
||||
const [targetFps, setTargetFps] = useState(30);
|
||||
const fps = Math.min(format?.maxFps ?? 1, targetFps);
|
||||
|
||||
if (enableNightMode && !device?.supportsLowLightBoost) {
|
||||
// User has enabled Night Mode, but Night Mode is not natively supported, so we simulate it by lowering the frame rate.
|
||||
return 30;
|
||||
}
|
||||
|
||||
const supportsHdrAt60Fps = formats.some((f) => f.supportsVideoHDR && f.maxFps >= 60);
|
||||
if (enableHdr && !supportsHdrAt60Fps) {
|
||||
// User has enabled HDR, but HDR is not supported at 60 FPS.
|
||||
return 30;
|
||||
}
|
||||
|
||||
const supports60Fps = formats.some((f) => f.maxFps >= 60);
|
||||
if (!supports60Fps) {
|
||||
// 60 FPS is not supported by any format.
|
||||
return 30;
|
||||
}
|
||||
// If nothing blocks us from using it, we default to 60 FPS.
|
||||
return 60;
|
||||
}, [device?.supportsLowLightBoost, enableHdr, enableNightMode, formats, is60Fps]);
|
||||
|
||||
const supportsCameraFlipping = useMemo(() => devices.back != null && devices.front != null, [devices.back, devices.front]);
|
||||
const supportsFlash = device?.hasFlash ?? false;
|
||||
const supportsHdr = useMemo(() => formats.some((f) => f.supportsVideoHDR || f.supportsPhotoHDR), [formats]);
|
||||
const supports60Fps = useMemo(() => formats.some((f) => f.maxFps >= 60), [formats]);
|
||||
const canToggleNightMode = enableNightMode
|
||||
? true // it's enabled so you have to be able to turn it off again
|
||||
: (device?.supportsLowLightBoost ?? false) || fps > 30; // either we have native support, or we can lower the FPS
|
||||
const supportsHdr = format?.supportsPhotoHDR;
|
||||
const supports60Fps = (format?.maxFps ?? 0) >= 60;
|
||||
const canToggleNightMode = device?.supportsLowLightBoost ?? false;
|
||||
//#endregion
|
||||
|
||||
const format = useMemo(() => {
|
||||
let result = formats;
|
||||
if (enableHdr) {
|
||||
// We only filter by HDR capable formats if HDR is set to true.
|
||||
// Otherwise we ignore the `supportsVideoHDR` property and accept formats which support HDR `true` or `false`
|
||||
result = result.filter((f) => f.supportsVideoHDR || f.supportsPhotoHDR);
|
||||
}
|
||||
|
||||
// find the first format that includes the given FPS
|
||||
return result.find((f) => f.maxFps >= fps);
|
||||
}, [formats, fps, enableHdr]);
|
||||
|
||||
//#region Animated Zoom
|
||||
// This just maps the zoom factor to a percentage value.
|
||||
// so e.g. for [min, neutr., max] values [1, 2, 128] this would result in [0, 0.0081, 1]
|
||||
@ -249,22 +206,17 @@ export function CameraPage({ navigation }: Props): React.ReactElement {
|
||||
<StatusBarBlurBackground />
|
||||
|
||||
<View style={styles.rightButtonRow}>
|
||||
{supportsCameraFlipping && (
|
||||
<PressableOpacity style={styles.button} onPress={onFlipCameraPressed} disabledOpacity={0.4}>
|
||||
<IonIcon name="camera-reverse" color="white" size={24} />
|
||||
</PressableOpacity>
|
||||
)}
|
||||
<PressableOpacity style={styles.button} onPress={onFlipCameraPressed} disabledOpacity={0.4}>
|
||||
<IonIcon name="camera-reverse" color="white" size={24} />
|
||||
</PressableOpacity>
|
||||
{supportsFlash && (
|
||||
<PressableOpacity style={styles.button} onPress={onFlashPressed} disabledOpacity={0.4}>
|
||||
<IonIcon name={flash === 'on' ? 'flash' : 'flash-off'} color="white" size={24} />
|
||||
</PressableOpacity>
|
||||
)}
|
||||
{supports60Fps && (
|
||||
<PressableOpacity style={styles.button} onPress={() => setIs60Fps(!is60Fps)}>
|
||||
<Text style={styles.text}>
|
||||
{is60Fps ? '60' : '30'}
|
||||
{'\n'}FPS
|
||||
</Text>
|
||||
<PressableOpacity style={styles.button} onPress={() => setTargetFps((t) => (t === 30 ? 60 : 30))}>
|
||||
<Text style={styles.text}>{`${targetFps} FPS`}</Text>
|
||||
</PressableOpacity>
|
||||
)}
|
||||
{supportsHdr && (
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
#import <Foundation/Foundation.h>
|
||||
|
||||
#import <React/RCTEventEmitter.h>
|
||||
#import <React/RCTFPSGraph.h>
|
||||
#import <React/RCTLog.h>
|
||||
#import <React/RCTUIManager.h>
|
||||
|
15
package/ios/CameraDevicesManager.m
Normal file
15
package/ios/CameraDevicesManager.m
Normal file
@ -0,0 +1,15 @@
|
||||
//
|
||||
// CameraDevicesManager.m
|
||||
// VisionCamera
|
||||
//
|
||||
// Created by Marc Rousavy on 19.09.23.
|
||||
// Copyright © 2023 mrousavy. All rights reserved.
|
||||
//
|
||||
|
||||
#import <Foundation/Foundation.h>
|
||||
#import <React/RCTEventEmitter.h>
|
||||
#import <React/RCTUtils.h>
|
||||
|
||||
@interface RCT_EXTERN_REMAP_MODULE (CameraDevices, CameraDevicesManager, RCTEventEmitter)
|
||||
|
||||
@end
|
83
package/ios/CameraDevicesManager.swift
Normal file
83
package/ios/CameraDevicesManager.swift
Normal file
@ -0,0 +1,83 @@
|
||||
//
|
||||
// CameraDevicesManager.swift
|
||||
// VisionCamera
|
||||
//
|
||||
// Created by Marc Rousavy on 19.09.23.
|
||||
// Copyright © 2023 mrousavy. All rights reserved.
|
||||
//
|
||||
|
||||
import AVFoundation
|
||||
import Foundation
|
||||
|
||||
@objc(CameraDevicesManager)
|
||||
class CameraDevicesManager: RCTEventEmitter {
|
||||
private let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: getAllDeviceTypes(),
|
||||
mediaType: .video,
|
||||
position: .unspecified)
|
||||
private var observer: NSKeyValueObservation?
|
||||
private let devicesChangedEventName = "CameraDevicesChanged"
|
||||
|
||||
override init() {
|
||||
super.init()
|
||||
observer = discoverySession.observe(\.devices) { _, _ in
|
||||
self.sendEvent(withName: self.devicesChangedEventName, body: self.getDevicesJson())
|
||||
}
|
||||
}
|
||||
|
||||
override func invalidate() {
|
||||
observer?.invalidate()
|
||||
}
|
||||
|
||||
override func supportedEvents() -> [String]! {
|
||||
return [devicesChangedEventName]
|
||||
}
|
||||
|
||||
override class func requiresMainQueueSetup() -> Bool {
|
||||
return false
|
||||
}
|
||||
|
||||
override func constantsToExport() -> [AnyHashable: Any]! {
|
||||
return [
|
||||
"availableCameraDevices": getDevicesJson(),
|
||||
]
|
||||
}
|
||||
|
||||
private func getDevicesJson() -> [[String: Any]] {
|
||||
return discoverySession.devices.map {
|
||||
return [
|
||||
"id": $0.uniqueID,
|
||||
"devices": $0.physicalDevices.map(\.deviceType.descriptor),
|
||||
"position": $0.position.descriptor,
|
||||
"name": $0.localizedName,
|
||||
"hasFlash": $0.hasFlash,
|
||||
"hasTorch": $0.hasTorch,
|
||||
"minZoom": $0.minAvailableVideoZoomFactor,
|
||||
"neutralZoom": $0.neutralZoomFactor,
|
||||
"maxZoom": $0.maxAvailableVideoZoomFactor,
|
||||
"isMultiCam": $0.isMultiCam,
|
||||
"supportsDepthCapture": false, // TODO: supportsDepthCapture
|
||||
"supportsRawCapture": false, // TODO: supportsRawCapture
|
||||
"supportsLowLightBoost": $0.isLowLightBoostSupported,
|
||||
"supportsFocus": $0.isFocusPointOfInterestSupported,
|
||||
"hardwareLevel": "full",
|
||||
"sensorOrientation": "portrait", // TODO: Sensor Orientation?
|
||||
"formats": $0.formats.map { format -> [String: Any] in
|
||||
format.toDictionary()
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
private static func getAllDeviceTypes() -> [AVCaptureDevice.DeviceType] {
|
||||
var deviceTypes: [AVCaptureDevice.DeviceType] = []
|
||||
if #available(iOS 13.0, *) {
|
||||
deviceTypes.append(.builtInTripleCamera)
|
||||
deviceTypes.append(.builtInDualWideCamera)
|
||||
deviceTypes.append(.builtInUltraWideCamera)
|
||||
}
|
||||
deviceTypes.append(.builtInDualCamera)
|
||||
deviceTypes.append(.builtInWideAngleCamera)
|
||||
deviceTypes.append(.builtInTelephotoCamera)
|
||||
return deviceTypes
|
||||
}
|
||||
}
|
@ -19,7 +19,8 @@ RCT_EXTERN_METHOD(getMicrophonePermissionStatus : (RCTPromiseResolveBlock)resolv
|
||||
RCT_EXTERN_METHOD(requestCameraPermission : (RCTPromiseResolveBlock)resolve reject : (RCTPromiseRejectBlock)reject);
|
||||
RCT_EXTERN_METHOD(requestMicrophonePermission : (RCTPromiseResolveBlock)resolve reject : (RCTPromiseRejectBlock)reject);
|
||||
|
||||
RCT_EXTERN_METHOD(getAvailableCameraDevices : (RCTPromiseResolveBlock)resolve reject : (RCTPromiseRejectBlock)reject);
|
||||
RCT_EXTERN__BLOCKING_SYNCHRONOUS_METHOD(getAvailableCameraDevices);
|
||||
RCT_EXTERN__BLOCKING_SYNCHRONOUS_METHOD(installFrameProcessorBindings);
|
||||
|
||||
// Camera View Properties
|
||||
RCT_EXPORT_VIEW_PROPERTY(isActive, BOOL);
|
||||
@ -75,7 +76,4 @@ RCT_EXTERN_METHOD(focus
|
||||
: (RCTPromiseResolveBlock)resolve reject
|
||||
: (RCTPromiseRejectBlock)reject);
|
||||
|
||||
// Static Methods
|
||||
RCT_EXTERN__BLOCKING_SYNCHRONOUS_METHOD(installFrameProcessorBindings);
|
||||
|
||||
@end
|
||||
|
@ -79,38 +79,6 @@ final class CameraViewManager: RCTViewManager {
|
||||
component.focus(point: CGPoint(x: x.doubleValue, y: y.doubleValue), promise: promise)
|
||||
}
|
||||
|
||||
@objc
|
||||
final func getAvailableCameraDevices(_ resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
|
||||
withPromise(resolve: resolve, reject: reject) {
|
||||
let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: getAllDeviceTypes(),
|
||||
mediaType: .video,
|
||||
position: .unspecified)
|
||||
return discoverySession.devices.map {
|
||||
return [
|
||||
"id": $0.uniqueID,
|
||||
"devices": $0.physicalDevices.map(\.deviceType.descriptor),
|
||||
"position": $0.position.descriptor,
|
||||
"name": $0.localizedName,
|
||||
"hasFlash": $0.hasFlash,
|
||||
"hasTorch": $0.hasTorch,
|
||||
"minZoom": $0.minAvailableVideoZoomFactor,
|
||||
"neutralZoom": $0.neutralZoomFactor,
|
||||
"maxZoom": $0.maxAvailableVideoZoomFactor,
|
||||
"isMultiCam": $0.isMultiCam,
|
||||
"supportsDepthCapture": false, // TODO: supportsDepthCapture
|
||||
"supportsRawCapture": false, // TODO: supportsRawCapture
|
||||
"supportsLowLightBoost": $0.isLowLightBoostSupported,
|
||||
"supportsFocus": $0.isFocusPointOfInterestSupported,
|
||||
"hardwareLevel": "full",
|
||||
"sensorOrientation": "portrait", // TODO: Sensor Orientation?
|
||||
"formats": $0.formats.map { format -> [String: Any] in
|
||||
format.toDictionary()
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@objc
|
||||
final func getCameraPermissionStatus(_ resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
|
||||
withPromise(resolve: resolve, reject: reject) {
|
||||
@ -150,17 +118,4 @@ final class CameraViewManager: RCTViewManager {
|
||||
return bridge.uiManager.view(forReactTag: tag) as! CameraView
|
||||
// swiftlint:enable force_cast
|
||||
}
|
||||
|
||||
private final func getAllDeviceTypes() -> [AVCaptureDevice.DeviceType] {
|
||||
var deviceTypes: [AVCaptureDevice.DeviceType] = []
|
||||
if #available(iOS 13.0, *) {
|
||||
deviceTypes.append(.builtInTripleCamera)
|
||||
deviceTypes.append(.builtInDualWideCamera)
|
||||
deviceTypes.append(.builtInUltraWideCamera)
|
||||
}
|
||||
deviceTypes.append(.builtInDualCamera)
|
||||
deviceTypes.append(.builtInWideAngleCamera)
|
||||
deviceTypes.append(.builtInTelephotoCamera)
|
||||
return deviceTypes
|
||||
}
|
||||
}
|
||||
|
@ -17,7 +17,7 @@ extension AVCaptureDevice.Position {
|
||||
case .front:
|
||||
return "front"
|
||||
case .unspecified:
|
||||
return "unspecified"
|
||||
return "external"
|
||||
@unknown default:
|
||||
fatalError("AVCaptureDevice.Position has unknown state.")
|
||||
}
|
||||
|
@ -11,6 +11,8 @@
|
||||
B80E06A0266632F000728644 /* AVAudioSession+updateCategory.swift in Sources */ = {isa = PBXBuildFile; fileRef = B80E069F266632F000728644 /* AVAudioSession+updateCategory.swift */; };
|
||||
B81BE1BF26B936FF002696CC /* AVCaptureDevice.Format+videoDimensions.swift in Sources */ = {isa = PBXBuildFile; fileRef = B81BE1BE26B936FF002696CC /* AVCaptureDevice.Format+videoDimensions.swift */; };
|
||||
B83D5EE729377117000AFD2F /* PreviewView.swift in Sources */ = {isa = PBXBuildFile; fileRef = B83D5EE629377117000AFD2F /* PreviewView.swift */; };
|
||||
B8446E4D2ABA147C00E56077 /* CameraDevicesManager.swift in Sources */ = {isa = PBXBuildFile; fileRef = B8446E4C2ABA147C00E56077 /* CameraDevicesManager.swift */; };
|
||||
B8446E502ABA14C900E56077 /* CameraDevicesManager.m in Sources */ = {isa = PBXBuildFile; fileRef = B8446E4F2ABA14C900E56077 /* CameraDevicesManager.m */; };
|
||||
B84760A62608EE7C004C3180 /* FrameHostObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = B84760A52608EE7C004C3180 /* FrameHostObject.mm */; };
|
||||
B84760DF2608F57D004C3180 /* CameraQueues.swift in Sources */ = {isa = PBXBuildFile; fileRef = B84760DE2608F57D004C3180 /* CameraQueues.swift */; };
|
||||
B85F7AE92A77BB680089C539 /* FrameProcessorPlugin.m in Sources */ = {isa = PBXBuildFile; fileRef = B85F7AE82A77BB680089C539 /* FrameProcessorPlugin.m */; };
|
||||
@ -85,6 +87,8 @@
|
||||
B81BE1BE26B936FF002696CC /* AVCaptureDevice.Format+videoDimensions.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "AVCaptureDevice.Format+videoDimensions.swift"; sourceTree = "<group>"; };
|
||||
B81D41EF263C86F900B041FD /* JSINSObjectConversion.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = JSINSObjectConversion.h; sourceTree = "<group>"; };
|
||||
B83D5EE629377117000AFD2F /* PreviewView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PreviewView.swift; sourceTree = "<group>"; };
|
||||
B8446E4C2ABA147C00E56077 /* CameraDevicesManager.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CameraDevicesManager.swift; sourceTree = "<group>"; };
|
||||
B8446E4F2ABA14C900E56077 /* CameraDevicesManager.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = CameraDevicesManager.m; sourceTree = "<group>"; };
|
||||
B84760A22608EE38004C3180 /* FrameHostObject.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = FrameHostObject.h; sourceTree = "<group>"; };
|
||||
B84760A52608EE7C004C3180 /* FrameHostObject.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = FrameHostObject.mm; sourceTree = "<group>"; };
|
||||
B84760DE2608F57D004C3180 /* CameraQueues.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CameraQueues.swift; sourceTree = "<group>"; };
|
||||
@ -181,6 +185,8 @@
|
||||
B86400512784A23400E9D2CA /* CameraView+Orientation.swift */,
|
||||
B887515F25E0102000DB86D6 /* CameraViewManager.m */,
|
||||
B887518125E0102000DB86D6 /* CameraViewManager.swift */,
|
||||
B8446E4F2ABA14C900E56077 /* CameraDevicesManager.m */,
|
||||
B8446E4C2ABA147C00E56077 /* CameraDevicesManager.swift */,
|
||||
B8DB3BC9263DC4D8004C18D7 /* RecordingSession.swift */,
|
||||
B83D5EE629377117000AFD2F /* PreviewView.swift */,
|
||||
B887515C25E0102000DB86D6 /* PhotoCaptureDelegate.swift */,
|
||||
@ -407,11 +413,13 @@
|
||||
B887519F25E0102000DB86D6 /* AVCaptureDevice.DeviceType+descriptor.swift in Sources */,
|
||||
B8D22CDC2642DB4D00234472 /* AVAssetWriterInputPixelBufferAdaptor+initWithVideoSettings.swift in Sources */,
|
||||
B84760DF2608F57D004C3180 /* CameraQueues.swift in Sources */,
|
||||
B8446E502ABA14C900E56077 /* CameraDevicesManager.m in Sources */,
|
||||
B887519025E0102000DB86D6 /* AVCaptureDevice.Format+matchesFilter.swift in Sources */,
|
||||
B887518F25E0102000DB86D6 /* AVCapturePhotoOutput+mirror.swift in Sources */,
|
||||
B88751A425E0102000DB86D6 /* AVCaptureDevice.Format.AutoFocusSystem+descriptor.swift in Sources */,
|
||||
B8DB3BCC263DC97E004C18D7 /* AVFileType+descriptor.swift in Sources */,
|
||||
B88751A025E0102000DB86D6 /* AVAuthorizationStatus+descriptor.swift in Sources */,
|
||||
B8446E4D2ABA147C00E56077 /* CameraDevicesManager.swift in Sources */,
|
||||
B80C0E00260BDDF7001699AB /* FrameProcessorPluginRegistry.m in Sources */,
|
||||
B887519C25E0102000DB86D6 /* AVCaptureDevice.TorchMode+descriptor.swift in Sources */,
|
||||
B8994E6C263F03E100069589 /* JSINSObjectConversion.mm in Sources */,
|
||||
|
@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
if which ktlint >/dev/null; then
|
||||
cd android && ktlint -F ./**/*.kt*
|
||||
cd android && ktlint --color --relative --editorconfig=./.editorconfig -F ./**/*.kt*
|
||||
else
|
||||
echo "warning: KTLint not installed, download from https://github.com/pinterest/ktlint"
|
||||
fi
|
||||
|
@ -9,6 +9,8 @@ import type { PhotoFile, TakePhotoOptions } from './PhotoFile';
|
||||
import type { Point } from './Point';
|
||||
import type { RecordVideoOptions, VideoFile } from './VideoFile';
|
||||
import { VisionCameraProxy } from './FrameProcessorPlugins';
|
||||
import { CameraDevices } from './CameraDevices';
|
||||
import type { EmitterSubscription } from 'react-native';
|
||||
|
||||
//#region Types
|
||||
export type CameraPermissionStatus = 'granted' | 'not-determined' | 'denied' | 'restricted';
|
||||
@ -37,7 +39,7 @@ type RefType = React.Component<NativeCameraViewProps> & Readonly<NativeMethods>;
|
||||
*
|
||||
* The `<Camera>` component's most important (and therefore _required_) properties are:
|
||||
*
|
||||
* * {@linkcode CameraProps.device | device}: Specifies the {@linkcode CameraDevice} to use. Get a {@linkcode CameraDevice} by using the {@linkcode useCameraDevices | useCameraDevices()} hook, or manually by using the {@linkcode Camera.getAvailableCameraDevices Camera.getAvailableCameraDevices()} function.
|
||||
* * {@linkcode CameraProps.device | device}: Specifies the {@linkcode CameraDevice} to use. Get a {@linkcode CameraDevice} by using the {@linkcode useCameraDevice | useCameraDevice()} hook, or manually by using the {@linkcode CameraDevices.getAvailableCameraDevices CameraDevices.getAvailableCameraDevices()} function.
|
||||
* * {@linkcode CameraProps.isActive | isActive}: A boolean value that specifies whether the Camera should actively stream video frames or not. This can be compared to a Video component, where `isActive` specifies whether the video is paused or not. If you fully unmount the `<Camera>` component instead of using `isActive={false}`, the Camera will take a bit longer to start again.
|
||||
*
|
||||
* @example
|
||||
@ -116,12 +118,6 @@ export class Camera extends React.PureComponent<CameraProps> {
|
||||
/**
|
||||
* Start a new video recording.
|
||||
*
|
||||
* Records in the following formats:
|
||||
* * **iOS**: QuickTime (`.mov`)
|
||||
* * **Android**: MPEG4 (`.mp4`)
|
||||
*
|
||||
* @blocking This function is synchronized/blocking.
|
||||
*
|
||||
* @throws {@linkcode CameraCaptureError} When any kind of error occured while starting the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
|
||||
*
|
||||
* @example
|
||||
@ -144,8 +140,8 @@ export class Camera extends React.PureComponent<CameraProps> {
|
||||
if (error != null) return onRecordingError(error);
|
||||
if (video != null) return onRecordingFinished(video);
|
||||
};
|
||||
// TODO: Use TurboModules to either make this a sync invokation, or make it async.
|
||||
try {
|
||||
// TODO: Use TurboModules to make this awaitable.
|
||||
CameraModule.startRecording(this.handle, passThroughOptions, onRecordCallback);
|
||||
} catch (e) {
|
||||
throw tryParseNativeCameraError(e);
|
||||
@ -231,8 +227,8 @@ export class Camera extends React.PureComponent<CameraProps> {
|
||||
|
||||
/**
|
||||
* Focus the camera to a specific point in the coordinate system.
|
||||
* @param {Point} point The point to focus to. This should be relative to the Camera view's coordinate system,
|
||||
* and expressed in Pixel on iOS and Points on Android.
|
||||
* @param {Point} point The point to focus to. This should be relative
|
||||
* to the Camera view's coordinate system and is expressed in points.
|
||||
* * `(0, 0)` means **top left**.
|
||||
* * `(CameraView.width, CameraView.height)` means **bottom right**.
|
||||
*
|
||||
@ -257,28 +253,32 @@ export class Camera extends React.PureComponent<CameraProps> {
|
||||
//#endregion
|
||||
|
||||
//#region Static Functions (NativeModule)
|
||||
|
||||
/**
|
||||
* Get a list of all available camera devices on the current phone.
|
||||
*
|
||||
* @throws {@linkcode CameraRuntimeError} When any kind of error occured while getting all available camera devices. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
|
||||
* If you use Hooks, use the `useCameraDevices()` hook instead.
|
||||
*
|
||||
* * For Camera Devices attached to the phone, it is safe to assume that this will never change.
|
||||
* * For external Camera Devices (USB cameras, Mac continuity cameras, etc.) the available Camera Devices could change over time when the external Camera device gets plugged in or plugged out, so use {@link addCameraDevicesChangedListener | addCameraDevicesChangedListener(...)} to listen for such changes.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const devices = await Camera.getAvailableCameraDevices()
|
||||
* const filtered = devices.filter((d) => matchesMyExpectations(d))
|
||||
* const sorted = devices.sort(sortDevicesByAmountOfCameras)
|
||||
* return {
|
||||
* back: sorted.find((d) => d.position === "back"),
|
||||
* front: sorted.find((d) => d.position === "front")
|
||||
* }
|
||||
* const devices = Camera.getAvailableCameraDevices()
|
||||
* const backCameras = devices.filter((d) => d.position === "back")
|
||||
* const frontCameras = devices.filter((d) => d.position === "front")
|
||||
* ```
|
||||
*/
|
||||
public static async getAvailableCameraDevices(): Promise<CameraDevice[]> {
|
||||
try {
|
||||
return await CameraModule.getAvailableCameraDevices();
|
||||
} catch (e) {
|
||||
throw tryParseNativeCameraError(e);
|
||||
}
|
||||
public static getAvailableCameraDevices(): CameraDevice[] {
|
||||
return CameraDevices.getAvailableCameraDevices();
|
||||
}
|
||||
/**
|
||||
* Adds a listener that gets called everytime the Camera Devices change, for example
|
||||
* when an external Camera Device (USB or continuity Camera) gets plugged in or plugged out.
|
||||
*
|
||||
* If you use Hooks, use the `useCameraDevices()` hook instead.
|
||||
*/
|
||||
public static addCameraDevicesChangedListener(listener: (newDevices: CameraDevice[]) => void): EmitterSubscription {
|
||||
return CameraDevices.addCameraDevicesChangedListener(listener);
|
||||
}
|
||||
/**
|
||||
* Gets the current Camera Permission Status. Check this before mounting the Camera to ensure
|
||||
|
@ -1,48 +1,30 @@
|
||||
import type { CameraPosition } from './CameraPosition';
|
||||
import { Orientation } from './Orientation';
|
||||
import type { PixelFormat } from './PixelFormat';
|
||||
|
||||
/**
|
||||
* Represents the camera device position.
|
||||
*
|
||||
* * `"back"`: Indicates that the device is physically located on the back of the system hardware
|
||||
* * `"front"`: Indicates that the device is physically located on the front of the system hardware
|
||||
* * `"external"`: The camera device is an external camera, and has no fixed facing relative to the device's screen.
|
||||
*/
|
||||
export type CameraPosition = 'front' | 'back' | 'external';
|
||||
|
||||
/**
|
||||
* Indentifiers for a physical camera (one that actually exists on the back/front of the device)
|
||||
*
|
||||
* * `"ultra-wide-angle-camera"`: A built-in camera with a shorter focal length than that of a wide-angle camera. (focal length between below 24mm)
|
||||
* * `"wide-angle-camera"`: A built-in wide-angle camera. (focal length between 24mm and 35mm)
|
||||
* * `"telephoto-camera"`: A built-in camera device with a longer focal length than a wide-angle camera. (focal length between above 85mm)
|
||||
*
|
||||
* Some Camera devices consist of multiple physical devices. They can be interpreted as _logical devices_, for example:
|
||||
*
|
||||
* * `"ultra-wide-angle-camera"` + `"wide-angle-camera"` = **dual wide-angle camera**.
|
||||
* * `"wide-angle-camera"` + `"telephoto-camera"` = **dual camera**.
|
||||
* * `"ultra-wide-angle-camera"` + `"wide-angle-camera"` + `"telephoto-camera"` = **triple camera**.
|
||||
*/
|
||||
export type PhysicalCameraDeviceType = 'ultra-wide-angle-camera' | 'wide-angle-camera' | 'telephoto-camera';
|
||||
|
||||
/**
|
||||
* Indentifiers for a logical camera (Combinations of multiple physical cameras to create a single logical camera).
|
||||
*
|
||||
* * `"dual-camera"`: A combination of wide-angle and telephoto cameras that creates a capture device.
|
||||
* * `"dual-wide-camera"`: A device that consists of two cameras of fixed focal length, one ultrawide angle and one wide angle.
|
||||
* * `"triple-camera"`: A device that consists of three cameras of fixed focal length, one ultrawide angle, one wide angle, and one telephoto.
|
||||
*/
|
||||
export type LogicalCameraDeviceType = 'dual-camera' | 'dual-wide-camera' | 'triple-camera';
|
||||
|
||||
/**
|
||||
* Parses an array of physical device types into a single {@linkcode PhysicalCameraDeviceType} or {@linkcode LogicalCameraDeviceType}, depending what matches.
|
||||
* @method
|
||||
*/
|
||||
export const parsePhysicalDeviceTypes = (
|
||||
physicalDeviceTypes: PhysicalCameraDeviceType[],
|
||||
): PhysicalCameraDeviceType | LogicalCameraDeviceType => {
|
||||
if (physicalDeviceTypes.length === 1) {
|
||||
// @ts-expect-error for very obvious reasons
|
||||
return physicalDeviceTypes[0];
|
||||
}
|
||||
|
||||
const hasWide = physicalDeviceTypes.includes('wide-angle-camera');
|
||||
const hasUltra = physicalDeviceTypes.includes('ultra-wide-angle-camera');
|
||||
const hasTele = physicalDeviceTypes.includes('telephoto-camera');
|
||||
|
||||
if (hasTele && hasWide && hasUltra) return 'triple-camera';
|
||||
if (hasWide && hasUltra) return 'dual-wide-camera';
|
||||
if (hasWide && hasTele) return 'dual-camera';
|
||||
|
||||
throw new Error(`Invalid physical device type combination! ${physicalDeviceTypes.join(' + ')}`);
|
||||
};
|
||||
|
||||
/**
|
||||
* Indicates a format's autofocus system.
|
||||
*
|
||||
|
25
package/src/CameraDevices.ts
Normal file
25
package/src/CameraDevices.ts
Normal file
@ -0,0 +1,25 @@
|
||||
import { NativeModules, NativeEventEmitter } from 'react-native';
|
||||
import { CameraDevice } from './CameraDevice';
|
||||
|
||||
const CameraDevicesManager = NativeModules.CameraDevices as {
|
||||
getConstants: () => {
|
||||
availableCameraDevices: CameraDevice[];
|
||||
};
|
||||
};
|
||||
|
||||
const constants = CameraDevicesManager.getConstants();
|
||||
let devices = constants.availableCameraDevices;
|
||||
|
||||
const DEVICES_CHANGED_NAME = 'CameraDevicesChanged';
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const eventEmitter = new NativeEventEmitter(CameraDevicesManager as any);
|
||||
eventEmitter.addListener(DEVICES_CHANGED_NAME, (newDevices: CameraDevice[]) => {
|
||||
devices = newDevices;
|
||||
});
|
||||
|
||||
export const CameraDevices = {
|
||||
getAvailableCameraDevices: () => devices,
|
||||
addCameraDevicesChangedListener: (callback: (newDevices: CameraDevice[]) => void) => {
|
||||
return eventEmitter.addListener(DEVICES_CHANGED_NAME, callback);
|
||||
},
|
||||
};
|
@ -1,13 +0,0 @@
|
||||
/**
|
||||
* Represents the camera device position.
|
||||
*
|
||||
* * `"back"`: Indicates that the device is physically located on the back of the system hardware
|
||||
* * `"front"`: Indicates that the device is physically located on the front of the system hardware
|
||||
*
|
||||
* #### iOS only
|
||||
* * `"unspecified"`: Indicates that the device's position relative to the system hardware is unspecified
|
||||
*
|
||||
* #### Android only
|
||||
* * `"external"`: The camera device is an external camera, and has no fixed facing relative to the device's screen. (Android only)
|
||||
*/
|
||||
export type CameraPosition = 'front' | 'back' | 'unspecified' | 'external';
|
43
package/src/devices/Filter.ts
Normal file
43
package/src/devices/Filter.ts
Normal file
@ -0,0 +1,43 @@
|
||||
export interface Filter<T> {
|
||||
/**
|
||||
* The target value for this specific requirement
|
||||
*/
|
||||
target: T;
|
||||
/**
|
||||
* The priority of this requirement.
|
||||
* Filters with higher priority can take precedence over filters with lower priority.
|
||||
*
|
||||
* For example, if we have two formats:
|
||||
* ```json
|
||||
* [
|
||||
* videoWidth: 3840,
|
||||
* videoHeight: 2160,
|
||||
* maxFps: 30,
|
||||
* ...
|
||||
* ],
|
||||
* [
|
||||
* videoWidth: 1920,
|
||||
* videoHeight: 1080,
|
||||
* maxFps: 60,
|
||||
* ...
|
||||
* ]
|
||||
* ```
|
||||
* And your filter looks like this:
|
||||
* ```json
|
||||
* {
|
||||
* fps: { target: 60, priority: 1 }
|
||||
* videoSize: { target: { width: 4000, height: 2000 }, priority: 3 }
|
||||
* }
|
||||
* ```
|
||||
* The 4k format will be chosen since the `videoSize` filter has a higher priority (2) than the `fps` filter (1).
|
||||
*
|
||||
* To choose the 60 FPS format instead, use a higher priority for the `fps` filter:
|
||||
* ```json
|
||||
* {
|
||||
* fps: { target: 60, priority: 2 }
|
||||
* videoSize: { target: { width: 4000, height: 2000 }, priority: 1 }
|
||||
* }
|
||||
* ```
|
||||
*/
|
||||
priority: number;
|
||||
}
|
62
package/src/devices/getCameraDevice.ts
Normal file
62
package/src/devices/getCameraDevice.ts
Normal file
@ -0,0 +1,62 @@
|
||||
import { CameraDevice, CameraPosition, PhysicalCameraDeviceType } from '../CameraDevice';
|
||||
import { CameraRuntimeError } from '../CameraError';
|
||||
|
||||
export interface DeviceFilter {
|
||||
/**
|
||||
* The desired physical devices your camera device should have.
|
||||
*
|
||||
* Many modern phones have multiple Camera devices on one side and can combine those physical camera devices to one logical camera device.
|
||||
* For example, the iPhone 11 has two physical camera devices, the `ultra-wide-angle-camera` ("fish-eye") and the normal `wide-angle-camera`. You can either use one of those devices individually, or use a combined logical camera device which can smoothly switch over between the two physical cameras depending on the current `zoom` level.
|
||||
* When the user is at 0.5x-1x zoom, the `ultra-wide-angle-camera` can be used to offer a fish-eye zoom-out effect, and anything above 1x will smoothly switch over to the `wide-angle-camera`.
|
||||
*
|
||||
* **Note:** Devices with less phyiscal devices (`['wide-angle-camera']`) are usually faster to start-up than more complex
|
||||
* devices (`['ultra-wide-angle-camera', 'wide-angle-camera', 'telephoto-camera']`), but don't offer zoom switch-over capabilities.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* // This device is simpler, so it starts up faster.
|
||||
* getCameraDevice({ physicalDevices: ['wide-angle-camera'] })
|
||||
* // This device is more complex, so it starts up slower, but you can switch between devices on 0.5x, 1x and 2x zoom.
|
||||
* getCameraDevice({ physicalDevices: ['ultra-wide-angle-camera', 'wide-angle-camera', 'telephoto-camera'] })
|
||||
* ```
|
||||
*/
|
||||
physicalDevices?: PhysicalCameraDeviceType[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the best matching Camera device that satisfies your requirements using a sorting filter.
|
||||
* @param devices All available Camera Devices this function will use for filtering. To get devices, use `Camera.getAvailableCameraDevices()`.
|
||||
* @param filter The filter you want to use. The device that matches your filter the closest will be returned.
|
||||
* @returns The device that matches your filter the closest.
|
||||
*/
|
||||
export function getCameraDevice(devices: CameraDevice[], position: CameraPosition, filter: DeviceFilter = {}): CameraDevice {
|
||||
const filtered = devices.filter((d) => d.position === position);
|
||||
const sortedDevices = filtered.sort((left, right) => {
|
||||
let leftPoints = 0;
|
||||
let rightPoints = 0;
|
||||
|
||||
// prefer higher hardware-level
|
||||
if (left.hardwareLevel === 'full') leftPoints += 4;
|
||||
if (right.hardwareLevel === 'full') rightPoints += 4;
|
||||
|
||||
// compare devices. two possible scenarios:
|
||||
// 1. user wants all cameras ([ultra-wide, wide, tele]) to zoom. prefer those devices that have all 3 cameras.
|
||||
// 2. user wants only one ([wide]) for faster performance. prefer those devices that only have one camera, if they have more, we rank them lower.
|
||||
if (filter.physicalDevices != null) {
|
||||
for (const device of left.devices) {
|
||||
if (filter.physicalDevices.includes(device)) leftPoints += 1;
|
||||
else leftPoints -= 1;
|
||||
}
|
||||
for (const device of right.devices) {
|
||||
if (filter.physicalDevices.includes(device)) rightPoints += 1;
|
||||
else rightPoints -= 1;
|
||||
}
|
||||
}
|
||||
|
||||
return leftPoints - rightPoints;
|
||||
});
|
||||
|
||||
const device = sortedDevices[0];
|
||||
if (device == null) throw new CameraRuntimeError('device/invalid-device', 'No Camera Device could be found!');
|
||||
return device;
|
||||
}
|
153
package/src/devices/getCameraFormat.ts
Normal file
153
package/src/devices/getCameraFormat.ts
Normal file
@ -0,0 +1,153 @@
|
||||
import type { CameraDevice, CameraDeviceFormat, VideoStabilizationMode } from '../CameraDevice';
|
||||
import { CameraRuntimeError } from '../CameraError';
|
||||
import { PixelFormat } from '../PixelFormat';
|
||||
import { Filter } from './Filter';
|
||||
|
||||
interface Size {
|
||||
width: number;
|
||||
height: number;
|
||||
}
|
||||
|
||||
/**
 * A filter describing the Camera format you want.
 *
 * Each criterion is a `Filter<T>` carrying a `target` value and a `priority`,
 * which `getCameraFormat` uses to rank the device's formats against each other.
 * All criteria are optional; unset criteria fall back to a sensible default
 * (e.g. resolutions default to "higher is better").
 */
export interface FormatFilter {
  /**
   * The target resolution of the video (and frame processor) output pipeline.
   * If no format supports the given resolution, the format closest to this value will be used.
   */
  videoResolution?: Filter<Size>;
  /**
   * The target resolution of the photo output pipeline.
   * If no format supports the given resolution, the format closest to this value will be used.
   */
  photoResolution?: Filter<Size>;
  /**
   * The target aspect ratio of the video (and preview) output, expressed as a factor: `width / height`.
   *
   * In most cases, you want this to be as close to the screen's aspect ratio as possible (usually ~9:16).
   *
   * @example
   * ```ts
   * const screen = Dimensions.get('screen')
   * targetVideoAspectRatio: screen.width / screen.height
   * ```
   */
  videoAspectRatio?: Filter<number>;
  /**
   * The target aspect ratio of the photo output, expressed as a factor: `width / height`.
   *
   * In most cases, you want this to be the same as `targetVideoAspectRatio`, which you often want
   * to be as close to the screen's aspect ratio as possible (usually ~9:16).
   *
   * @example
   * ```ts
   * const screen = Dimensions.get('screen')
   * targetPhotoAspectRatio: screen.width / screen.height
   * ```
   */
  photoAspectRatio?: Filter<number>;
  /**
   * The target FPS you want to record video at.
   * If the FPS requirements can not be met, the format closest to this value will be used.
   */
  fps?: Filter<number>;
  /**
   * The target video stabilization mode you want to use.
   * If no format supports the target video stabilization mode, the best other matching format will be used.
   */
  videoStabilizationMode?: Filter<VideoStabilizationMode>;
  /**
   * The target pixel format you want to use.
   * If no format supports the target pixel format, the best other matching format will be used.
   */
  pixelFormat?: Filter<PixelFormat>;
}
|
||||
|
||||
/**
|
||||
* Get the best matching Camera format for the given device that satisfies your requirements using a sorting filter. By default, formats are sorted by highest to lowest resolution.
|
||||
* @param device The Camera Device you're currently using
|
||||
* @param filter The filter you want to use. The format that matches your filter the closest will be returned
|
||||
* @returns The format that matches your filter the closest.
|
||||
*/
|
||||
export function getCameraFormat(device: CameraDevice, filter: FormatFilter): CameraDeviceFormat {
|
||||
const copy = [...device.formats];
|
||||
const sortedFormats = copy.sort((left, right) => {
|
||||
let leftPoints = 0;
|
||||
let rightPoints = 0;
|
||||
|
||||
const leftVideoResolution = left.videoWidth * left.videoHeight;
|
||||
const rightVideoResolution = right.videoWidth * right.videoHeight;
|
||||
if (filter.videoResolution != null) {
|
||||
// Find video resolution closest to the filter (ignoring orientation)
|
||||
const targetResolution = filter.videoResolution.target.width * filter.videoResolution.target.height;
|
||||
const leftDiff = Math.abs(leftVideoResolution - targetResolution);
|
||||
const rightDiff = Math.abs(rightVideoResolution - targetResolution);
|
||||
if (leftDiff < rightDiff) leftPoints += filter.videoResolution.priority;
|
||||
else if (rightDiff < leftDiff) rightPoints += filter.videoResolution.priority;
|
||||
} else {
|
||||
// No filter is set, so just prefer higher resolutions
|
||||
if (leftVideoResolution > rightVideoResolution) leftPoints++;
|
||||
else if (rightVideoResolution > leftVideoResolution) rightPoints++;
|
||||
}
|
||||
|
||||
const leftPhotoResolution = left.photoWidth * left.photoHeight;
|
||||
const rightPhotoResolution = right.photoWidth * right.photoHeight;
|
||||
if (filter.photoResolution != null) {
|
||||
// Find closest photo resolution to the filter (ignoring orientation)
|
||||
const targetResolution = filter.photoResolution.target.width * filter.photoResolution.target.height;
|
||||
const leftDiff = Math.abs(leftPhotoResolution - targetResolution);
|
||||
const rightDiff = Math.abs(rightPhotoResolution - targetResolution);
|
||||
if (leftDiff < rightDiff) leftPoints += filter.photoResolution.priority;
|
||||
else if (rightDiff < leftDiff) rightPoints += filter.photoResolution.priority;
|
||||
} else {
|
||||
// No filter is set, so just prefer higher resolutions
|
||||
if (leftPhotoResolution > rightPhotoResolution) leftPoints++;
|
||||
else if (rightPhotoResolution > leftPhotoResolution) rightPoints++;
|
||||
}
|
||||
|
||||
// Find closest aspect ratio (video)
|
||||
if (filter.videoAspectRatio != null) {
|
||||
const leftAspect = left.videoWidth / right.videoHeight;
|
||||
const rightAspect = right.videoWidth / right.videoHeight;
|
||||
const leftDiff = Math.abs(leftAspect - filter.videoAspectRatio.target);
|
||||
const rightDiff = Math.abs(rightAspect - filter.videoAspectRatio.target);
|
||||
if (leftDiff < rightDiff) leftPoints += filter.videoAspectRatio.priority;
|
||||
else if (rightDiff < leftDiff) rightPoints += filter.videoAspectRatio.priority;
|
||||
}
|
||||
|
||||
// Find closest aspect ratio (photo)
|
||||
if (filter.photoAspectRatio != null) {
|
||||
const leftAspect = left.photoWidth / right.photoHeight;
|
||||
const rightAspect = right.photoWidth / right.photoHeight;
|
||||
const leftDiff = Math.abs(leftAspect - filter.photoAspectRatio.target);
|
||||
const rightDiff = Math.abs(rightAspect - filter.photoAspectRatio.target);
|
||||
if (leftDiff < rightDiff) leftPoints += filter.photoAspectRatio.priority;
|
||||
else if (rightDiff < leftDiff) rightPoints += filter.photoAspectRatio.priority;
|
||||
}
|
||||
|
||||
// Find closest max FPS
|
||||
if (filter.fps != null) {
|
||||
const leftDiff = Math.abs(left.maxFps - filter.fps.target);
|
||||
const rightDiff = Math.abs(right.maxFps - filter.fps.target);
|
||||
if (leftDiff < rightDiff) leftPoints += filter.fps.priority;
|
||||
else if (rightDiff < leftDiff) rightPoints += filter.fps.priority;
|
||||
}
|
||||
|
||||
// Find video stabilization mode
|
||||
if (filter.videoStabilizationMode != null) {
|
||||
if (left.videoStabilizationModes.includes(filter.videoStabilizationMode.target)) leftPoints++;
|
||||
if (right.videoStabilizationModes.includes(filter.videoStabilizationMode.target)) rightPoints++;
|
||||
}
|
||||
|
||||
// Find pixel format
|
||||
if (filter.pixelFormat != null) {
|
||||
if (left.pixelFormats.includes(filter.pixelFormat.target)) leftPoints++;
|
||||
if (right.pixelFormats.includes(filter.pixelFormat.target)) rightPoints++;
|
||||
}
|
||||
|
||||
return rightPoints - leftPoints;
|
||||
});
|
||||
|
||||
const format = sortedFormats[0];
|
||||
if (format == null)
|
||||
throw new CameraRuntimeError('device/invalid-device', `The given Camera Device (${device.id}) does not have any formats!`);
|
||||
return format;
|
||||
}
|
29
package/src/hooks/useCameraDevice.ts
Normal file
29
package/src/hooks/useCameraDevice.ts
Normal file
@ -0,0 +1,29 @@
|
||||
import { useMemo } from 'react';
|
||||
import { CameraDevice, CameraPosition } from '../CameraDevice';
|
||||
import { getCameraDevice, DeviceFilter } from '../devices/getCameraDevice';
|
||||
import { useCameraDevices } from './useCameraDevices';
|
||||
|
||||
/**
|
||||
* Get the best matching Camera device that best satisfies your requirements using a sorting filter.
|
||||
* @param position The position of the Camera device relative to the phone.
|
||||
* @param filter The filter you want to use. The Camera device that matches your filter the closest will be returned
|
||||
* @returns The Camera device that matches your filter the closest.
|
||||
* @example
|
||||
* ```ts
|
||||
* const [position, setPosition] = useState<CameraPosition>('back')
|
||||
* const device = useCameraDevice(position, {
|
||||
* physicalDevices: ['wide-angle-camera']
|
||||
* })
|
||||
* ```
|
||||
*/
|
||||
export function useCameraDevice(position: CameraPosition, filter?: DeviceFilter): CameraDevice | undefined {
|
||||
const devices = useCameraDevices();
|
||||
|
||||
const device = useMemo(
|
||||
() => getCameraDevice(devices, position, filter),
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
[devices, position, JSON.stringify(filter)],
|
||||
);
|
||||
|
||||
return device;
|
||||
}
|
@ -1,78 +1,23 @@
|
||||
import { useEffect, useState } from 'react';
|
||||
import type { CameraPosition } from '../CameraPosition';
|
||||
import { sortDevices } from '../utils/FormatFilter';
|
||||
import { Camera } from '../Camera';
|
||||
import { CameraDevice, LogicalCameraDeviceType, parsePhysicalDeviceTypes, PhysicalCameraDeviceType } from '../CameraDevice';
|
||||
|
||||
export type CameraDevices = {
|
||||
[key in CameraPosition]: CameraDevice | undefined;
|
||||
};
|
||||
const DefaultCameraDevices: CameraDevices = {
|
||||
back: undefined,
|
||||
external: undefined,
|
||||
front: undefined,
|
||||
unspecified: undefined,
|
||||
};
|
||||
import type { CameraDevice } from '../CameraDevice';
|
||||
import { CameraDevices } from '../CameraDevices';
|
||||
|
||||
/**
|
||||
* Gets the best available {@linkcode CameraDevice}. Devices with more cameras are preferred.
|
||||
* Get all available Camera Devices this phone has.
|
||||
*
|
||||
* @returns The best matching {@linkcode CameraDevice}.
|
||||
* @throws {@linkcode CameraRuntimeError} if no device was found.
|
||||
* @example
|
||||
* ```tsx
|
||||
* const device = useCameraDevice()
|
||||
* // ...
|
||||
* return <Camera device={device} />
|
||||
* ```
|
||||
* Camera Devices attached to this phone (`back` or `front`) are always available,
|
||||
* while `external` devices might be plugged in or out at any point,
|
||||
* so the result of this function might update over time.
|
||||
*/
|
||||
export function useCameraDevices(): CameraDevices;
|
||||
|
||||
/**
|
||||
* Gets a {@linkcode CameraDevice} for the requested device type.
|
||||
*
|
||||
* @param {PhysicalCameraDeviceType | LogicalCameraDeviceType} deviceType Specifies a device type which will be used as a device filter.
|
||||
* @returns A {@linkcode CameraDevice} for the requested device type.
|
||||
* @throws {@linkcode CameraRuntimeError} if no device was found.
|
||||
* @example
|
||||
* ```tsx
|
||||
* const device = useCameraDevice('wide-angle-camera')
|
||||
* // ...
|
||||
* return <Camera device={device} />
|
||||
* ```
|
||||
*/
|
||||
export function useCameraDevices(deviceType: PhysicalCameraDeviceType | LogicalCameraDeviceType): CameraDevices;
|
||||
|
||||
export function useCameraDevices(deviceType?: PhysicalCameraDeviceType | LogicalCameraDeviceType): CameraDevices {
|
||||
const [cameraDevices, setCameraDevices] = useState<CameraDevices>(DefaultCameraDevices);
|
||||
export function useCameraDevices(): CameraDevice[] {
|
||||
const [devices, setDevices] = useState(() => CameraDevices.getAvailableCameraDevices());
|
||||
|
||||
useEffect(() => {
|
||||
let isMounted = true;
|
||||
const listener = CameraDevices.addCameraDevicesChangedListener((newDevices) => {
|
||||
setDevices(newDevices);
|
||||
});
|
||||
return () => listener.remove();
|
||||
}, []);
|
||||
|
||||
const loadDevice = async (): Promise<void> => {
|
||||
let devices = await Camera.getAvailableCameraDevices();
|
||||
if (!isMounted) return;
|
||||
|
||||
devices = devices.sort(sortDevices);
|
||||
if (deviceType != null) {
|
||||
devices = devices.filter((d) => {
|
||||
const parsedType = parsePhysicalDeviceTypes(d.devices);
|
||||
return parsedType === deviceType;
|
||||
});
|
||||
}
|
||||
setCameraDevices({
|
||||
back: devices.find((d) => d.position === 'back'),
|
||||
external: devices.find((d) => d.position === 'external'),
|
||||
front: devices.find((d) => d.position === 'front'),
|
||||
unspecified: devices.find((d) => d.position === 'unspecified'),
|
||||
});
|
||||
};
|
||||
loadDevice();
|
||||
|
||||
return () => {
|
||||
isMounted = false;
|
||||
};
|
||||
}, [deviceType]);
|
||||
|
||||
return cameraDevices;
|
||||
return devices;
|
||||
}
|
||||
|
@ -1,16 +1,27 @@
|
||||
import { useMemo } from 'react';
|
||||
import type { CameraDevice, CameraDeviceFormat } from '../CameraDevice';
|
||||
import { sortFormats } from '../utils/FormatFilter';
|
||||
import { CameraDevice, CameraDeviceFormat } from '../CameraDevice';
|
||||
import { FormatFilter, getCameraFormat } from '../devices/getCameraFormat';
|
||||
|
||||
/**
|
||||
* Returns the best format for the given camera device.
|
||||
*
|
||||
* This function tries to choose a format with the highest possible photo-capture resolution and best matching aspect ratio.
|
||||
*
|
||||
* @param {CameraDevice} device The Camera Device
|
||||
*
|
||||
* @returns The best matching format for the given camera device, or `undefined` if the camera device is `undefined`.
|
||||
* Get the best matching Camera format for the given device that satisfies your requirements using a sorting filter. By default, formats are sorted by highest to lowest resolution.
|
||||
* @param device The Camera Device you're currently using
|
||||
* @param filter The filter you want to use. The format that matches your filter the closest will be returned
|
||||
* @returns The format that matches your filter the closest.
|
||||
* @example
|
||||
* ```ts
|
||||
* const device = useCameraDevice(...)
|
||||
* const format = useCameraFormat(device, {
|
||||
* videoResolution: { target: { width: 3048, height: 2160 }, priority: 2 },
|
||||
* fps: { target: 60, priority: 1 }
|
||||
* })
|
||||
* ```
|
||||
*/
|
||||
export function useCameraFormat(device?: CameraDevice): CameraDeviceFormat | undefined {
|
||||
return useMemo(() => device?.formats.sort(sortFormats)[0], [device?.formats]);
|
||||
export function useCameraFormat(device: CameraDevice | undefined, filter: FormatFilter): CameraDeviceFormat | undefined {
|
||||
const format = useMemo(() => {
|
||||
if (device == null) return undefined;
|
||||
return getCameraFormat(device, filter);
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [device, JSON.stringify(filter)]);
|
||||
|
||||
return format;
|
||||
}
|
||||
|
@ -1,19 +1,16 @@
|
||||
export * from './Camera';
|
||||
export * from './CameraDevice';
|
||||
export * from './CameraError';
|
||||
export * from './CameraPosition';
|
||||
export * from './CameraProps';
|
||||
export { Frame } from './Frame';
|
||||
export * from './FrameProcessorPlugins';
|
||||
export * from './CameraProps';
|
||||
export * from './PhotoFile';
|
||||
export * from './PixelFormat';
|
||||
export * from './Point';
|
||||
export * from './TemporaryFile';
|
||||
export * from './VideoFile';
|
||||
|
||||
export * from './hooks/useCameraDevices';
|
||||
export * from './hooks/useCameraDevice';
|
||||
export * from './hooks/useCameraFormat';
|
||||
export * from './devices/getCameraFormat';
|
||||
export * from './hooks/useFrameProcessor';
|
||||
|
||||
export * from './utils/FormatFilter';
|
||||
|
@ -1,93 +0,0 @@
|
||||
import { Dimensions } from 'react-native';
|
||||
import type { CameraDevice, CameraDeviceFormat } from '../CameraDevice';
|
||||
|
||||
/**
|
||||
* Compares two devices by the following criteria:
|
||||
* * `wide-angle-camera`s are ranked higher than others
|
||||
* * Devices with more physical cameras are ranked higher than ones with less. (e.g. "Triple Camera" > "Wide-Angle Camera")
|
||||
*
|
||||
* > Note that this makes the `sort()` function descending, so the first element (`[0]`) is the "best" device.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const devices = camera.devices.sort(sortDevices)
|
||||
* const bestDevice = devices[0]
|
||||
* ```
|
||||
* @method
|
||||
*/
|
||||
export const sortDevices = (left: CameraDevice, right: CameraDevice): number => {
|
||||
let leftPoints = 0;
|
||||
let rightPoints = 0;
|
||||
|
||||
const leftHasWideAngle = left.devices.includes('wide-angle-camera');
|
||||
const rightHasWideAngle = right.devices.includes('wide-angle-camera');
|
||||
if (leftHasWideAngle) leftPoints += 2;
|
||||
if (rightHasWideAngle) rightPoints += 2;
|
||||
|
||||
if (left.isMultiCam) leftPoints += 2;
|
||||
if (right.isMultiCam) rightPoints += 2;
|
||||
|
||||
if (left.hardwareLevel === 'full') leftPoints += 3;
|
||||
if (right.hardwareLevel === 'full') rightPoints += 3;
|
||||
if (left.hardwareLevel === 'limited') leftPoints += 1;
|
||||
if (right.hardwareLevel === 'limited') rightPoints += 1;
|
||||
|
||||
if (left.hasFlash) leftPoints += 1;
|
||||
if (right.hasFlash) rightPoints += 1;
|
||||
|
||||
const leftMaxResolution = left.formats.reduce(
|
||||
(prev, curr) => Math.max(prev, curr.videoHeight * curr.videoWidth + curr.photoHeight * curr.photoWidth),
|
||||
0,
|
||||
);
|
||||
const rightMaxResolution = right.formats.reduce(
|
||||
(prev, curr) => Math.max(prev, curr.videoHeight * curr.videoWidth + curr.photoHeight * curr.photoWidth),
|
||||
0,
|
||||
);
|
||||
if (leftMaxResolution > rightMaxResolution) leftPoints += 3;
|
||||
if (rightMaxResolution > leftMaxResolution) rightPoints += 3;
|
||||
|
||||
// telephoto cameras often have very poor quality.
|
||||
const leftHasTelephoto = left.devices.includes('telephoto-camera');
|
||||
const rightHasTelephoto = right.devices.includes('telephoto-camera');
|
||||
if (leftHasTelephoto) leftPoints -= 2;
|
||||
if (rightHasTelephoto) rightPoints -= 2;
|
||||
|
||||
if (left.devices.length > right.devices.length) leftPoints += 1;
|
||||
if (right.devices.length > left.devices.length) rightPoints += 1;
|
||||
|
||||
return rightPoints - leftPoints;
|
||||
};
|
||||
|
||||
// Window size captured once at module load time.
const SCREEN_SIZE = {
  width: Dimensions.get('window').width,
  height: Dimensions.get('window').height,
};
// Aspect ratio (width / height) of the window; `sortFormats` uses this to
// prefer formats whose aspect ratio matches the screen.
const SCREEN_ASPECT_RATIO = SCREEN_SIZE.width / SCREEN_SIZE.height;
|
||||
|
||||
/**
|
||||
* Sort formats by resolution and aspect ratio difference (to the Screen size).
|
||||
*
|
||||
* > Note that this makes the `sort()` function descending, so the first element (`[0]`) is the "best" device.
|
||||
*/
|
||||
export const sortFormats = (left: CameraDeviceFormat, right: CameraDeviceFormat): number => {
|
||||
let leftPoints = 0,
|
||||
rightPoints = 0;
|
||||
|
||||
// we downscale the points so much that we are in smaller number ranges for future calculations
|
||||
// e.g. for 4k (4096), this adds 8 points.
|
||||
leftPoints += Math.round(left.photoWidth / 500);
|
||||
rightPoints += Math.round(right.photoWidth / 500);
|
||||
// e.g. for 4k (4096), this adds 8 points.
|
||||
leftPoints += Math.round(left.videoWidth / 500);
|
||||
rightPoints += Math.round(right.videoWidth / 500);
|
||||
|
||||
// we downscale the points here as well, so if left has 16:9 and right has 21:9, this roughly
|
||||
// adds 5 points. If the difference is smaller, e.g. 16:9 vs 17:9, this roughly adds a little
|
||||
// bit over 1 point, just enough to overrule the FPS below.
|
||||
const leftAspectRatioDiff = left.photoHeight / left.photoWidth - SCREEN_ASPECT_RATIO;
|
||||
const rightAspectRatioDiff = right.photoHeight / right.photoWidth - SCREEN_ASPECT_RATIO;
|
||||
leftPoints -= Math.abs(leftAspectRatioDiff) * 10;
|
||||
rightPoints -= Math.abs(rightAspectRatioDiff) * 10;
|
||||
|
||||
return rightPoints - leftPoints;
|
||||
};
|
Loading…
Reference in New Issue
Block a user