From 72a1fad78e3a5ecdb120d8c9c9b549ad4423c5a8 Mon Sep 17 00:00:00 2001 From: Marc Rousavy Date: Mon, 7 Jun 2021 13:08:40 +0200 Subject: [PATCH] feat: Separate usecases (decouple microphone, video, photo) (#168) * Add props * add props (iOS) * Add use-cases conditionally * Update CameraView+RecordVideo.swift * Update RecordingSession.swift * reconfigure on change * Throw correct errors * Check for audio permission * Move `#if` outward * Throw appropriate errors * Update CameraView+RecordVideo.swift * fix Splashscreen * Dynamic filePath * Fix video extension * add `avci` and `m4v` file types * Fix RecordVideo errors * Fix audio setup * Enable `photo`, `video` and `audio` * Check for `video={true}` in frameProcessor * format * Remove unused DispatchQueue * Update docs * Add `supportsPhotoAndVideoCapture` * Fix view manager * Fix error not being propagated * Catch normal errors too * Update DEVICES.mdx * Update CAPTURING.mdx * Update classdocs --- .../mrousavy/camera/CameraView+RecordVideo.kt | 14 +- .../mrousavy/camera/CameraView+TakePhoto.kt | 12 +- .../java/com/mrousavy/camera/CameraView.kt | 29 ++-- .../com/mrousavy/camera/CameraViewManager.kt | 21 +++ .../com/mrousavy/camera/CameraViewModule.kt | 23 ++- .../main/java/com/mrousavy/camera/Errors.kt | 5 +- docs/docs/guides/CAPTURING.mdx | 28 +++- docs/docs/guides/DEVICES.mdx | 10 +- example/src/CameraPage.tsx | 3 + example/src/Splash.tsx | 12 +- ios/CameraError.swift | 12 +- ios/CameraQueues.swift | 1 - ios/CameraView+AVAudioSession.swift | 41 ++++-- ios/CameraView+AVCaptureSession.swift | 56 +++---- ios/CameraView+RecordVideo.swift | 139 ++++++++++-------- ios/CameraView+TakePhoto.swift | 9 +- ios/CameraView.swift | 24 ++- ios/CameraViewManager.m | 4 + ios/CameraViewManager.swift | 1 + ios/Frame Processor/FrameHostObject.mm | 4 - ios/Parsers/AVFileType+descriptor.swift | 30 +++- ios/Parsers/EnumParserError.swift | 4 - ios/React Utils/Callback.swift | 38 +++++ ios/RecordingSession.swift | 4 +- ios/VisionCamera.xcodeproj/project.pbxproj | 4 + src/Camera.tsx | 7 + src/CameraDevice.ts | 22 +++ src/CameraError.ts | 2 + src/CameraProps.ts | 16 ++ src/Frame.ts | 4 - 30 files changed, 412 insertions(+), 167 deletions(-) create mode 100644 ios/React Utils/Callback.swift diff --git a/android/src/main/java/com/mrousavy/camera/CameraView+RecordVideo.kt b/android/src/main/java/com/mrousavy/camera/CameraView+RecordVideo.kt index 8296103..bca7a36 100644 --- a/android/src/main/java/com/mrousavy/camera/CameraView+RecordVideo.kt +++ b/android/src/main/java/com/mrousavy/camera/CameraView+RecordVideo.kt @@ -15,10 +15,18 @@ data class TemporaryFile(val path: String) @SuppressLint("RestrictedApi", "MissingPermission") suspend fun CameraView.startRecording(options: ReadableMap, onRecordCallback: Callback): TemporaryFile { if (videoCapture == null) { - throw CameraNotReadyError() + if (video == true) { + throw CameraNotReadyError() + } else { + throw VideoNotEnabledError() + } } - if (ContextCompat.checkSelfPermission(context, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) { - throw MicrophonePermissionError() + + // check audio permission + if (audio == true) { + if (ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) { + throw MicrophonePermissionError() + } } if (options.hasKey("flash")) { diff --git a/android/src/main/java/com/mrousavy/camera/CameraView+TakePhoto.kt b/android/src/main/java/com/mrousavy/camera/CameraView+TakePhoto.kt index 9a3fbd5..8f5820d 100644 --- 
a/android/src/main/java/com/mrousavy/camera/CameraView+TakePhoto.kt +++ b/android/src/main/java/com/mrousavy/camera/CameraView+TakePhoto.kt @@ -19,11 +19,17 @@ import kotlin.system.measureTimeMillis suspend fun CameraView.takePhoto(options: ReadableMap): WritableMap = coroutineScope { val startFunc = System.nanoTime() Log.d(CameraView.TAG, "takePhoto() called") - val imageCapture = imageCapture ?: throw CameraNotReadyError() + if (imageCapture == null) { + if (photo == true) { + throw CameraNotReadyError() + } else { + throw PhotoNotEnabledError() + } + } if (options.hasKey("flash")) { val flashMode = options.getString("flash") - imageCapture.flashMode = when (flashMode) { + imageCapture!!.flashMode = when (flashMode) { "on" -> ImageCapture.FLASH_MODE_ON "off" -> ImageCapture.FLASH_MODE_OFF "auto" -> ImageCapture.FLASH_MODE_AUTO @@ -61,7 +67,7 @@ suspend fun CameraView.takePhoto(options: ReadableMap): WritableMap = coroutineS async(coroutineContext) { Log.d(CameraView.TAG, "Taking picture...") val startCapture = System.nanoTime() - val pic = imageCapture.takePicture(takePhotoExecutor) + val pic = imageCapture!!.takePicture(takePhotoExecutor) val endCapture = System.nanoTime() Log.i(CameraView.TAG_PERF, "Finished image capture in ${(endCapture - startCapture) / 1_000_000}ms") pic diff --git a/android/src/main/java/com/mrousavy/camera/CameraView.kt b/android/src/main/java/com/mrousavy/camera/CameraView.kt index ea56f24..98d19ab 100644 --- a/android/src/main/java/com/mrousavy/camera/CameraView.kt +++ b/android/src/main/java/com/mrousavy/camera/CameraView.kt @@ -68,6 +68,10 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner { var enableDepthData = false var enableHighResolutionCapture: Boolean? = null var enablePortraitEffectsMatteDelivery = false + // use-cases + var photo: Boolean? = null + var video: Boolean? = null + var audio: Boolean? = null // props that require format reconfiguring var format: ReadableMap? = null var fps: Int? = null @@ -220,9 +224,6 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner { if (ContextCompat.checkSelfPermission(context, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) { throw CameraPermissionError() } - if (ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) { - throw MicrophonePermissionError() - } if (cameraId == null) { throw NoCameraDeviceError() } @@ -249,7 +250,7 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner { if (format == null) { // let CameraX automatically find best resolution for the target aspect ratio Log.i(TAG, "No custom format has been set, CameraX will automatically determine best configuration...") - val aspectRatio = aspectRatio(previewView.width, previewView.height) + val aspectRatio = aspectRatio(previewView.height, previewView.width) // flipped because it's in sensor orientation. previewBuilder.setTargetAspectRatio(aspectRatio) imageCaptureBuilder.setTargetAspectRatio(aspectRatio) videoCaptureBuilder.setTargetAspectRatio(aspectRatio) @@ -257,7 +258,8 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner { // User has selected a custom format={}. Use that val format = DeviceFormat(format!!) 
Log.i(TAG, "Using custom format - photo: ${format.photoSize}, video: ${format.videoSize} @ $fps FPS") - previewBuilder.setDefaultResolution(format.photoSize) + val aspectRatio = aspectRatio(format.photoSize.width, format.photoSize.height) + previewBuilder.setTargetAspectRatio(aspectRatio) imageCaptureBuilder.setDefaultResolution(format.photoSize) videoCaptureBuilder.setDefaultResolution(format.photoSize) @@ -311,14 +313,23 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner { } val preview = previewBuilder.build() - imageCapture = imageCaptureBuilder.build() - videoCapture = videoCaptureBuilder.build() // Unbind use cases before rebinding + videoCapture = null + imageCapture = null cameraProvider.unbindAll() // Bind use cases to camera - camera = cameraProvider.bindToLifecycle(this, cameraSelector, preview, imageCapture!!, videoCapture!!) + val useCases = ArrayList() + if (video == true) { + videoCapture = videoCaptureBuilder.build() + useCases.add(videoCapture!!) + } + if (photo == true) { + imageCapture = imageCaptureBuilder.build() + useCases.add(imageCapture!!) + } + camera = cameraProvider.bindToLifecycle(this, cameraSelector, preview, *useCases.toTypedArray()) preview.setSurfaceProvider(previewView.surfaceProvider) minZoom = camera!!.cameraInfo.zoomState.value?.minZoomRatio ?: 1f @@ -371,7 +382,7 @@ class CameraView(context: Context) : FrameLayout(context), LifecycleOwner { const val TAG = "CameraView" const val TAG_PERF = "CameraView.performance" - private val propsThatRequireSessionReconfiguration = arrayListOf("cameraId", "format", "fps", "hdr", "lowLightBoost") + private val propsThatRequireSessionReconfiguration = arrayListOf("cameraId", "format", "fps", "hdr", "lowLightBoost", "photo", "video") private val arrayListOfZoom = arrayListOf("zoom") } diff --git a/android/src/main/java/com/mrousavy/camera/CameraViewManager.kt b/android/src/main/java/com/mrousavy/camera/CameraViewManager.kt index 1be554b..c93b191 100644 --- a/android/src/main/java/com/mrousavy/camera/CameraViewManager.kt +++ b/android/src/main/java/com/mrousavy/camera/CameraViewManager.kt @@ -23,6 +23,27 @@ class CameraViewManager : SimpleViewManager() { view.cameraId = cameraId } + @ReactProp(name = "photo") + fun setPhoto(view: CameraView, photo: Boolean?) { + if (view.photo != photo) + addChangedPropToTransaction(view, "photo") + view.photo = photo + } + + @ReactProp(name = "video") + fun setVideo(view: CameraView, video: Boolean?) { + if (view.video != video) + addChangedPropToTransaction(view, "video") + view.video = video + } + + @ReactProp(name = "audio") + fun setAudio(view: CameraView, audio: Boolean?) { + if (view.audio != audio) + addChangedPropToTransaction(view, "audio") + view.audio = audio + } + @ReactProp(name = "enableDepthData") fun setEnableDepthData(view: CameraView, enableDepthData: Boolean) { if (view.enableDepthData != enableDepthData) diff --git a/android/src/main/java/com/mrousavy/camera/CameraViewModule.kt b/android/src/main/java/com/mrousavy/camera/CameraViewModule.kt index e9b2b8e..9ca2aed 100644 --- a/android/src/main/java/com/mrousavy/camera/CameraViewModule.kt +++ b/android/src/main/java/com/mrousavy/camera/CameraViewModule.kt @@ -63,11 +63,19 @@ class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBase } // TODO: startRecording() cannot be awaited, because I can't have a Promise and a onRecordedCallback in the same function. 
Hopefully TurboModules allows that - @ReactMethod(isBlockingSynchronousMethod = true) + @ReactMethod fun startRecording(viewTag: Int, options: ReadableMap, onRecordCallback: Callback) { GlobalScope.launch(Dispatchers.Main) { val view = findCameraView(viewTag) - view.startRecording(options, onRecordCallback) + try { + view.startRecording(options, onRecordCallback) + } catch (error: CameraError) { + val map = makeErrorMap("${error.domain}/${error.id}", error.message, error) + onRecordCallback(null, map) + } catch (error: Throwable) { + val map = makeErrorMap("capture/unknown", "An unknown error occurred while trying to start a video recording!", error) + onRecordCallback(null, map) + } } } @@ -115,16 +123,6 @@ class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBase val characteristics = manager.getCameraCharacteristics(id) val hardwareLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!! - // Filters out cameras that are LEGACY hardware level. Those don't support Preview + Photo Capture + Video Capture at the same time. - if (hardwareLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) { - Log.i( - REACT_CLASS, - "Skipping Camera #$id because it does not meet the minimum requirements for react-native-vision-camera. " + - "See the tables at https://developer.android.com/reference/android/hardware/camera2/CameraDevice#regular-capture for more information." - ) - return@loop - } - val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)!! val isMultiCam = Build.VERSION.SDK_INT >= Build.VERSION_CODES.P && capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) @@ -162,6 +160,7 @@ class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBase map.putBoolean("hasFlash", hasFlash) map.putBoolean("hasTorch", hasFlash) map.putBoolean("isMultiCam", isMultiCam) + map.putBoolean("supportsPhotoAndVideoCapture", hardwareLevel != CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) map.putBoolean("supportsRawCapture", supportsRawCapture) map.putBoolean("supportsDepthCapture", supportsDepthCapture) map.putBoolean("supportsLowLightBoost", supportsLowLightBoost) diff --git a/android/src/main/java/com/mrousavy/camera/Errors.kt b/android/src/main/java/com/mrousavy/camera/Errors.kt index cff2187..35ba9bf 100644 --- a/android/src/main/java/com/mrousavy/camera/Errors.kt +++ b/android/src/main/java/com/mrousavy/camera/Errors.kt @@ -30,7 +30,7 @@ abstract class CameraError( val CameraError.code: String get() = "$domain/$id" -class MicrophonePermissionError : CameraError("permission", "microphone-permission-denied", "The Microphone permission was denied!") +class MicrophonePermissionError : CameraError("permission", "microphone-permission-denied", "The Microphone permission was denied! If you want to record Video without sound, pass `audio={false}`.") class CameraPermissionError : CameraError("permission", "camera-permission-denied", "The Camera permission was denied!") class InvalidTypeScriptUnionError(unionName: String, unionValue: String) : CameraError("parameter", "invalid-parameter", "The given value for $unionName could not be parsed! (Received: $unionValue)") @@ -52,6 +52,9 @@ class LowLightBoostNotContainedInFormatError() : CameraError( class CameraNotReadyError : CameraError("session", "camera-not-ready", "The Camera is not ready yet!
Wait for the onInitialized() callback!") +class VideoNotEnabledError : CameraError("capture", "video-not-enabled", "Video capture is disabled! Pass `video={true}` to enable video recordings.") +class PhotoNotEnabledError : CameraError("capture", "photo-not-enabled", "Photo capture is disabled! Pass `photo={true}` to enable photo capture.") + class InvalidFormatError(format: Int) : CameraError("capture", "invalid-photo-format", "The Photo has an invalid format! Expected ${ImageFormat.YUV_420_888}, actual: $format") class VideoEncoderError(message: String, cause: Throwable? = null) : CameraError("capture", "encoder-error", message, cause) class VideoMuxerError(message: String, cause: Throwable? = null) : CameraError("capture", "muxer-error", message, cause) diff --git a/docs/docs/guides/CAPTURING.mdx b/docs/docs/guides/CAPTURING.mdx index 4a20bdd..55b1fb2 100644 --- a/docs/docs/guides/CAPTURING.mdx +++ b/docs/docs/guides/CAPTURING.mdx @@ -40,7 +40,13 @@ The most important actions are: ## Taking Photos -To take a photo, simply use the Camera's [`takePhoto(...)`](../api/classes/camera.camera-1#takephoto) function: +To take a photo you first have to enable photo capture: + +```tsx +<Camera +  {...props} +  photo={true} +/> +``` + +Then, simply use the Camera's [`takePhoto(...)`](../api/classes/camera.camera-1#takephoto) function: ```ts const photo = await camera.current.takePhoto({ @@ -71,9 +77,23 @@ const snapshot = await camera.current.takeSnapshot({ While taking snapshots is faster than taking photos, the resulting image has way lower quality. You can combine both functions to create a snapshot to present to the user at first, then deliver the actual high-res photo afterwards. ::: +:::note +The `takeSnapshot` function also works with `photo={false}`. For this reason, devices that do not support photo and video capture at the same time can use `video={true}` and fall back to snapshot capture for photos. +::: + ## Recording Videos -To start a video recording, use the Camera's [`startRecording(...)`](../api/classes/camera.camera-1#startrecording) function: +To start a video recording you first have to enable video capture: + +```tsx +<Camera +  {...props} +  video={true} +  audio={true} +/> +``` + +Then, simply use the Camera's [`startRecording(...)`](../api/classes/camera.camera-1#startrecording) function: ```ts camera.current.startRecording({ @@ -85,10 +105,6 @@ camera.current.startRecording({ For any error that occurred _while recording the video_, the `onRecordingError` callback will be invoked with a [`CaptureError`](../api/classes/cameraerror.cameracaptureerror) and the recording is therefore cancelled. -:::note -Due to limitations of the React Native Bridge, this function can not be awaited. This means, any errors thrown while trying to start the recording (e.g. `capture/recording-in-progress`) can only be caught synchronously (`isBlockingSynchronousMethod`). This will change with the upcoming React Native Re-Architecture. -::: - To stop the video recording, you can call [`stopRecording(...)`](../api/classes/camera.camera-1#stoprecording): ```ts diff --git a/docs/docs/guides/DEVICES.mdx b/docs/docs/guides/DEVICES.mdx index d237fb1..2f78975 100644 --- a/docs/docs/guides/DEVICES.mdx +++ b/docs/docs/guides/DEVICES.mdx @@ -60,7 +60,7 @@ The `CameraDevice` type also contains other useful information describing a came Make sure to be careful when filtering out unneeded camera devices, since not every phone supports all camera device types. Some phones don't even have front-cameras. You always want to have a camera device, even when it's not the one that has the best features.
::: -### `useCameraDevices` hook +### The `useCameraDevices` hook The react-native-vision-camera library provides a hook to make camera device selection a lot easier. @@ -100,6 +100,14 @@ function App() { } ``` +### The `supportsPhotoAndVideoCapture` prop + +Camera devices provide the `supportsPhotoAndVideoCapture` property which determines whether the device allows enabling photo- and video-capture at the same time. While every iOS device supports this feature, there are some older Android devices which only allow enabling one of the two at a time - either photo capture or video capture. (Those are `LEGACY` devices, see [this table](https://developer.android.com/reference/android/hardware/camera2/CameraDevice#regular-capture).) + +:::note +If you need photo- and video-capture for devices where `supportsPhotoAndVideoCapture` is `false`, you can fall back to snapshot capture (see [**"Taking Snapshots"**](https://cuvent.github.io/react-native-vision-camera/docs/guides/capturing#taking-snapshots)) instead. +::: + ### The `isActive` prop The Camera's `isActive` property can be used to _pause_ the session (`isActive={false}`) while still keeping the session "warm". This is more desirable than completely unmounting the camera, since _resuming_ the session (`isActive={true}`) will be **much faster** than re-mounting the camera view. diff --git a/example/src/CameraPage.tsx b/example/src/CameraPage.tsx index b7b5e43..fe6a9df 100644 --- a/example/src/CameraPage.tsx +++ b/example/src/CameraPage.tsx @@ -214,6 +214,9 @@ export const CameraPage: NavigationFunctionComponent = ({ componentId }) => { onError={onError} enableZoomGesture={false} animatedProps={cameraAnimatedProps} + photo={true} + video={true} + audio={true} // frameProcessor={frameProcessor} // frameProcessorFps={1} /> diff --git a/example/src/Splash.tsx b/example/src/Splash.tsx index 8a6654f..a920a3b 100644 --- a/example/src/Splash.tsx +++ b/example/src/Splash.tsx @@ -1,5 +1,5 @@ import React, { useCallback, useEffect, useState } from 'react'; -import type { ImageRequireSource } from 'react-native'; +import { ImageRequireSource, Linking } from 'react-native'; import { StyleSheet, View, Text, Image } from 'react-native'; import { Navigation, NavigationFunctionComponent } from 'react-native-navigation'; @@ -17,6 +17,8 @@ export const Splash: NavigationFunctionComponent = ({ componentId }) => { console.log('Requesting microphone permission...'); const permission = await Camera.requestMicrophonePermission(); console.log(`Microphone permission status: ${permission}`); + + if (permission === 'denied') Linking.openSettings(); setMicrophonePermissionStatus(permission); }, []); @@ -24,6 +26,8 @@ export const Splash: NavigationFunctionComponent = ({ componentId }) => { console.log('Requesting camera permission...'); const permission = await Camera.requestCameraPermission(); console.log(`Camera permission status: ${permission}`); + + if (permission === 'denied') Linking.openSettings(); setCameraPermissionStatus(permission); }, []); @@ -43,14 +47,14 @@ export const Splash: NavigationFunctionComponent = ({ componentId }) => { }, []); useEffect(() => { - if (cameraPermissionStatus === 'authorized' && microphonePermissionStatus === 'authorized') { + if (cameraPermissionStatus === 'authorized' && microphonePermissionStatus !== 'not-determined') { Navigation.setRoot({ root: { stack: { children: [ { component: { - name: 'Home', + name: 'CameraPage', }, }, ], @@ -73,7 +77,7 @@ export const Splash: NavigationFunctionComponent = ({ componentId }) => { )} -
{microphonePermissionStatus !== 'authorized' && ( + {microphonePermissionStatus === 'not-determined' && ( Vision Camera needs Microphone permission. diff --git a/ios/CameraError.swift b/ios/CameraError.swift index 64ec592..154f241 100644 --- a/ios/CameraError.swift +++ b/ios/CameraError.swift @@ -21,7 +21,7 @@ enum PermissionError: String { var message: String { switch self { case .microphone: - return "The Microphone permission was denied!" + return "The Microphone permission was denied! If you want to record Videos without sound, pass `audio={false}`." case .camera: return "The Camera permission was denied!" } @@ -186,6 +186,8 @@ enum CaptureError { case createTempFileError case createRecorderError(message: String? = nil) case invalidPhotoCodec + case videoNotEnabled + case photoNotEnabled case unknown(message: String? = nil) var code: String { @@ -204,6 +206,10 @@ return "create-recorder-error" case .invalidPhotoCodec: return "invalid-photo-codec" + case .videoNotEnabled: + return "video-not-enabled" + case .photoNotEnabled: + return "photo-not-enabled" case .unknown: return "unknown" } @@ -225,6 +231,10 @@ return "Failed to create a temporary file!" case let .createRecorderError(message: message): return "Failed to create the AVAssetWriter (Recorder)! \(message ?? "(no additional message)")" + case .videoNotEnabled: + return "Video capture is disabled! Pass `video={true}` to enable video recordings." + case .photoNotEnabled: + return "Photo capture is disabled! Pass `photo={true}` to enable photo capture." case let .unknown(message: message): return message ?? "An unknown error occurred while capturing a video/photo." } diff --git a/ios/CameraQueues.swift b/ios/CameraQueues.swift index 4a02286..9b31e91 100644 --- a/ios/CameraQueues.swift +++ b/ios/CameraQueues.swift @@ -23,7 +23,6 @@ public class CameraQueues: NSObject { autoreleaseFrequency: .inherit, target: nil) - // TODO: Is it a good idea to use a separate queue for audio output processing? /// The serial execution queue for output processing of audio buffers. @objc public static let audioQueue = DispatchQueue(label: "com.mrousavy.vision.audio-queue", qos: .userInteractive, diff --git a/ios/CameraView+AVAudioSession.swift b/ios/CameraView+AVAudioSession.swift index eff11ec..489d081 100644 --- a/ios/CameraView+AVAudioSession.swift +++ b/ios/CameraView+AVAudioSession.swift @@ -25,6 +25,15 @@ extension CameraView { } audioCaptureSession.automaticallyConfiguresApplicationAudioSession = false + let enableAudio = audio?.boolValue == true + + // check microphone permission + if enableAudio { + let audioPermissionStatus = AVCaptureDevice.authorizationStatus(for: .audio) + if audioPermissionStatus != .authorized { + return invokeOnError(.permission(.microphone)) + } + } // Audio Input do { @@ -32,15 +41,17 @@ extension CameraView { audioCaptureSession.removeInput(audioDeviceInput) self.audioDeviceInput = nil } - ReactLogger.log(level: .info, message: "Adding Audio input...") - guard let audioDevice = AVCaptureDevice.default(for: .audio) else { - return invokeOnError(.device(.microphoneUnavailable)) + if enableAudio { + ReactLogger.log(level: .info, message: "Adding Audio input...") + guard let audioDevice = AVCaptureDevice.default(for: .audio) else { - return invokeOnError(.device(.microphoneUnavailable)) + } + audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice) + guard audioCaptureSession.canAddInput(audioDeviceInput!)
else { + return invokeOnError(.parameter(.unsupportedInput(inputDescriptor: "audio-input"))) + } + audioCaptureSession.addInput(audioDeviceInput!) } - audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice) - guard audioCaptureSession.canAddInput(audioDeviceInput!) else { - return invokeOnError(.parameter(.unsupportedInput(inputDescriptor: "audio-input"))) - } - audioCaptureSession.addInput(audioDeviceInput!) } catch let error as NSError { return invokeOnError(.device(.microphoneUnavailable), cause: error) } @@ -50,13 +61,15 @@ extension CameraView { audioCaptureSession.removeOutput(audioOutput) self.audioOutput = nil } - ReactLogger.log(level: .info, message: "Adding Audio Data output...") - audioOutput = AVCaptureAudioDataOutput() - guard audioCaptureSession.canAddOutput(audioOutput!) else { - return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "audio-output"))) + if enableAudio { + ReactLogger.log(level: .info, message: "Adding Audio Data output...") + audioOutput = AVCaptureAudioDataOutput() + guard audioCaptureSession.canAddOutput(audioOutput!) else { + return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "audio-output"))) + } + audioOutput!.setSampleBufferDelegate(self, queue: audioQueue) + audioCaptureSession.addOutput(audioOutput!) } - audioOutput!.setSampleBufferDelegate(self, queue: audioQueue) - audioCaptureSession.addOutput(audioOutput!) } /** diff --git a/ios/CameraView+AVCaptureSession.swift b/ios/CameraView+AVCaptureSession.swift index f675f4d..4c2ce5f 100644 --- a/ios/CameraView+AVCaptureSession.swift +++ b/ios/CameraView+AVCaptureSession.swift @@ -84,21 +84,23 @@ extension CameraView { captureSession.removeOutput(photoOutput) self.photoOutput = nil } - ReactLogger.log(level: .info, message: "Adding Photo output...") - photoOutput = AVCapturePhotoOutput() - photoOutput!.isDepthDataDeliveryEnabled = photoOutput!.isDepthDataDeliverySupported && enableDepthData - if let enableHighResolutionCapture = self.enableHighResolutionCapture?.boolValue { - photoOutput!.isHighResolutionCaptureEnabled = enableHighResolutionCapture - } - if #available(iOS 12.0, *) { - photoOutput!.isPortraitEffectsMatteDeliveryEnabled = photoOutput!.isPortraitEffectsMatteDeliverySupported && self.enablePortraitEffectsMatteDelivery - } - guard captureSession.canAddOutput(photoOutput!) else { - return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "photo-output"))) - } - captureSession.addOutput(photoOutput!) - if videoDeviceInput!.device.position == .front { - photoOutput!.mirror() + if photo?.boolValue == true { + ReactLogger.log(level: .info, message: "Adding Photo output...") + photoOutput = AVCapturePhotoOutput() + photoOutput!.isDepthDataDeliveryEnabled = photoOutput!.isDepthDataDeliverySupported && enableDepthData + if let enableHighResolutionCapture = self.enableHighResolutionCapture?.boolValue { + photoOutput!.isHighResolutionCaptureEnabled = enableHighResolutionCapture + } + if #available(iOS 12.0, *) { + photoOutput!.isPortraitEffectsMatteDeliveryEnabled = photoOutput!.isPortraitEffectsMatteDeliverySupported && self.enablePortraitEffectsMatteDelivery + } + guard captureSession.canAddOutput(photoOutput!) else { + return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "photo-output"))) + } + captureSession.addOutput(photoOutput!) 
+ if videoDeviceInput!.device.position == .front { + photoOutput!.mirror() + } } // Video Output + Frame Processor @@ -106,16 +108,18 @@ captureSession.removeOutput(videoOutput) self.videoOutput = nil } - ReactLogger.log(level: .info, message: "Adding Video Data output...") - videoOutput = AVCaptureVideoDataOutput() - guard captureSession.canAddOutput(videoOutput!) else { - return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "video-output"))) - } - videoOutput!.setSampleBufferDelegate(self, queue: videoQueue) - videoOutput!.alwaysDiscardsLateVideoFrames = true - captureSession.addOutput(videoOutput!) - if videoDeviceInput!.device.position == .front { - videoOutput!.mirror() + if video?.boolValue == true { + ReactLogger.log(level: .info, message: "Adding Video Data output...") + videoOutput = AVCaptureVideoDataOutput() + guard captureSession.canAddOutput(videoOutput!) else { + return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "video-output"))) + } + videoOutput!.setSampleBufferDelegate(self, queue: videoQueue) + videoOutput!.alwaysDiscardsLateVideoFrames = true + captureSession.addOutput(videoOutput!) + if videoDeviceInput!.device.position == .front { + videoOutput!.mirror() + } } invokeOnInitialized() @@ -223,7 +227,7 @@ extension CameraView { if isActive { // restart capture session after an error occurred - queue.async { + cameraQueue.async { self.captureSession.startRunning() } } diff --git a/ios/CameraView+RecordVideo.swift b/ios/CameraView+RecordVideo.swift index 8a958d2..4f2c6d3 100644 --- a/ios/CameraView+RecordVideo.swift +++ b/ios/CameraView+RecordVideo.swift @@ -16,72 +16,95 @@ extension CameraView: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAud /** Starts a video + audio recording with a custom Asset Writer. */ - func startRecording(options: NSDictionary, callback: @escaping RCTResponseSenderBlock) { + func startRecording(options: NSDictionary, callback jsCallbackFunc: @escaping RCTResponseSenderBlock) { cameraQueue.async { ReactLogger.log(level: .info, message: "Starting Video recording...") + let callback = Callback(jsCallbackFunc) - do { - let errorPointer = ErrorPointer(nilLiteral: ()) - guard let tempFilePath = RCTTempFilePath("mov", errorPointer) else { - return callback([NSNull(), makeReactError(.capture(.createTempFileError), cause: errorPointer?.pointee)]) + var fileType = AVFileType.mov + if let fileTypeOption = options["fileType"] as? String { + guard let parsed = try? AVFileType(withString: fileTypeOption) else { + return callback.reject(error: .parameter(.invalid(unionName: "fileType", receivedValue: fileTypeOption))) } + fileType = parsed + } - let tempURL = URL(string: "file://\(tempFilePath)")! - if let flashMode = options["flash"] as? String { - // use the torch as the video's flash - self.setTorchMode(flashMode) + let errorPointer = ErrorPointer(nilLiteral: ()) + let fileExtension = fileType.descriptor ?? "mov" + guard let tempFilePath = RCTTempFilePath(fileExtension, errorPointer) else { + return callback.reject(error: .capture(.createTempFileError), cause: errorPointer?.pointee) + } + + ReactLogger.log(level: .info, message: "File path: \(tempFilePath)") + let tempURL = URL(string: "file://\(tempFilePath)")! + + if let flashMode = options["flash"] as?
String { + // use the torch as the video's flash + self.setTorchMode(flashMode) + } + + guard let videoOutput = self.videoOutput else { + if self.video?.boolValue == true { + return callback.reject(error: .session(.cameraNotReady)) + } else { + return callback.reject(error: .capture(.videoNotEnabled)) + } } - var fileType = AVFileType.mov - if let fileTypeOption = options["fileType"] as? String { - fileType = AVFileType(withString: fileTypeOption) - } + // TODO: The startRecording() func cannot be async because RN doesn't allow + // both a callback and a Promise in a single function. Wait for TurboModules? + // This means that any errors that occur in this function have to be delegated through + // the callback, but I'd prefer for them to throw for the original function instead. - // TODO: The startRecording() func cannot be async because RN doesn't allow - // both a callback and a Promise in a single function. Wait for TurboModules? - // This means that any errors that occur in this function have to be delegated through - // the callback, but I'd prefer for them to throw for the original function instead. + let enableAudio = self.audio?.boolValue == true - let onFinish = { (status: AVAssetWriter.Status, error: Error?) -> Void in - defer { - self.recordingSession = nil + let onFinish = { (status: AVAssetWriter.Status, error: Error?) -> Void in + defer { + self.recordingSession = nil + if enableAudio { self.audioQueue.async { self.deactivateAudioSession() } } - ReactLogger.log(level: .info, message: "RecordingSession finished with status \(status.descriptor).") - if let error = error { - let description = (error as NSError).description - return callback([NSNull(), CameraError.capture(.unknown(message: "An unknown recording error occured! \(description)"))]) + } + ReactLogger.log(level: .info, message: "RecordingSession finished with status \(status.descriptor).") + if let error = error as NSError? { + let description = error.description + return callback.reject(error: .capture(.unknown(message: "An unknown recording error occurred!
\(description)")), cause: error) + } else { + if status == .completed { + return callback.resolve([ + "path": self.recordingSession!.url.absoluteString, + "duration": self.recordingSession!.duration, + ]) } else { - if status == .completed { - return callback([[ - "path": self.recordingSession!.url.absoluteString, - "duration": self.recordingSession!.duration, - ], NSNull()]) - } else { - return callback([NSNull(), CameraError.unknown(message: "AVAssetWriter completed with status: \(status.descriptor)")]) - } + return callback.reject(error: .unknown(message: "AVAssetWriter completed with status: \(status.descriptor)")) } } + } + do { self.recordingSession = try RecordingSession(url: tempURL, fileType: fileType, completion: onFinish) + } catch let error as NSError { + return callback.reject(error: .capture(.createRecorderError(message: nil)), cause: error) + } - // Init Video - guard let videoOutput = self.videoOutput, - let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: fileType), - !videoSettings.isEmpty else { - throw CameraError.capture(.createRecorderError(message: "Failed to get video settings!")) - } - self.recordingSession!.initializeVideoWriter(withSettings: videoSettings, - isVideoMirrored: self.videoOutput!.isMirrored) + // Init Video + guard let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: fileType), + !videoSettings.isEmpty else { + return callback.reject(error: .capture(.createRecorderError(message: "Failed to get video settings!"))) + } + self.recordingSession!.initializeVideoWriter(withSettings: videoSettings, + isVideoMirrored: self.videoOutput!.isMirrored) - // Init Audio (optional, async) + // Init Audio (optional, async) + if enableAudio { self.audioQueue.async { // Activate Audio Session (blocking) self.activateAudioSession() + guard let recordingSession = self.recordingSession else { // recording has already been cancelled return @@ -95,10 +118,10 @@ extension CameraView: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAud recordingSession.start() self.isRecording = true } - } catch EnumParserError.invalidValue { - return callback([NSNull(), EnumParserError.invalidValue]) - } catch let error as NSError { - return callback([NSNull(), makeReactError(.capture(.createTempFileError), cause: error)]) + } else { + // start recording session without audio. 
+ self.recordingSession!.start() + self.isRecording = true } } } @@ -175,8 +198,8 @@ extension CameraView: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAud } } - public final func captureOutput(_ captureOutput: AVCaptureOutput, didDrop buffer: CMSampleBuffer, from _: AVCaptureConnection) { - #if DEBUG + #if DEBUG + public final func captureOutput(_ captureOutput: AVCaptureOutput, didDrop buffer: CMSampleBuffer, from _: AVCaptureConnection) { if frameProcessorCallback != nil && !hasLoggedFrameDropWarning && captureOutput is AVCaptureVideoDataOutput { let reason = findFrameDropReason(inBuffer: buffer) ReactLogger.log(level: .warning, @@ -185,16 +208,16 @@ extension CameraView: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAud alsoLogToJS: true) hasLoggedFrameDropWarning = true } - #endif - } - - private final func findFrameDropReason(inBuffer buffer: CMSampleBuffer) -> String { - var mode: CMAttachmentMode = 0 - guard let reason = CMGetAttachment(buffer, - key: kCMSampleBufferAttachmentKey_DroppedFrameReason, - attachmentModeOut: &mode) else { - return "unknown" } - return String(describing: reason) - } + + private final func findFrameDropReason(inBuffer buffer: CMSampleBuffer) -> String { + var mode: CMAttachmentMode = 0 + guard let reason = CMGetAttachment(buffer, + key: kCMSampleBufferAttachmentKey_DroppedFrameReason, + attachmentModeOut: &mode) else { + return "unknown" + } + return String(describing: reason) + } + #endif } diff --git a/ios/CameraView+TakePhoto.swift b/ios/CameraView+TakePhoto.swift index 4021a29..35e13f9 100644 --- a/ios/CameraView+TakePhoto.swift +++ b/ios/CameraView+TakePhoto.swift @@ -25,8 +25,13 @@ struct TakePhotoOptions { extension CameraView { func takePhoto(options: NSDictionary, promise: Promise) { cameraQueue.async { - guard let photoOutput = self.photoOutput, let videoDeviceInput = self.videoDeviceInput else { - return promise.reject(error: .session(.cameraNotReady)) + guard let photoOutput = self.photoOutput, + let videoDeviceInput = self.videoDeviceInput else { + if self.photo?.boolValue == true { + return promise.reject(error: .session(.cameraNotReady)) + } else { + return promise.reject(error: .capture(.photoNotEnabled)) + } } var photoSettings = AVCapturePhotoSettings() diff --git a/ios/CameraView.swift b/ios/CameraView.swift index 01d2717..1229a33 100644 --- a/ios/CameraView.swift +++ b/ios/CameraView.swift @@ -15,7 +15,6 @@ import UIKit // // CameraView+RecordVideo // TODO: Better startRecording()/stopRecording() (promise + callback, wait for TurboModules/JSI) -// TODO: videoStabilizationMode // CameraView+TakePhoto // TODO: Photo HDR @@ -24,7 +23,9 @@ private let propsThatRequireReconfiguration = ["cameraId", "enableDepthData", "enableHighResolutionCapture", "enablePortraitEffectsMatteDelivery", - "preset"] + "preset", + "photo", + "video"] private let propsThatRequireDeviceReconfiguration = ["fps", "hdr", "lowLightBoost", @@ -42,6 +43,10 @@ public final class CameraView: UIView { @objc var enableHighResolutionCapture: NSNumber? // nullable bool @objc var enablePortraitEffectsMatteDelivery = false @objc var preset: String? + // use cases + @objc var photo: NSNumber? // nullable bool + @objc var video: NSNumber? // nullable bool + @objc var audio: NSNumber? // nullable bool // props that require format reconfiguring @objc var format: NSDictionary? @objc var fps: NSNumber? 
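The nullable `photo`, `video` and `audio` props declared above are the JS-facing switches for the decoupled use-cases: each one decides whether the corresponding output (photo output, video data output, audio session) is configured at all, and capture calls for a disabled use-case now fail with `capture/photo-not-enabled` or `capture/video-not-enabled`. A minimal TypeScript sketch of a photo-only consumer follows; the `PhotoOnlyCamera` component, its `device` prop and the missing trigger wiring for `onCapturePress` are illustrative assumptions, not part of this patch.

```tsx
import * as React from 'react';
import { useCallback, useRef } from 'react';
import { Camera, CameraCaptureError } from 'react-native-vision-camera';
import type { CameraDevice } from 'react-native-vision-camera';

export function PhotoOnlyCamera({ device }: { device: CameraDevice }): React.ReactElement {
  const camera = useRef<Camera>(null);

  const onCapturePress = useCallback(async () => {
    try {
      // Succeeds because photo={true} is set below; with photo unset or false,
      // this promise rejects with the new "capture/photo-not-enabled" code.
      const photo = await camera.current?.takePhoto({ flash: 'off' });
      console.log(`Photo saved at ${photo?.path}`);
    } catch (e) {
      if (e instanceof CameraCaptureError) console.warn(e.code, e.message);
    }
  }, []);

  // video and audio stay disabled, so no video data output is bound and no
  // audio session is configured for this Camera instance.
  return <Camera ref={camera} device={device} isActive={true} photo={true} />;
}
```

Leaving `video` and `audio` unset keeps the video data output and the audio session out of the capture session entirely, which is exactly the decoupling this patch introduces.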
@@ -71,8 +76,6 @@ public final class CameraView: UIView { // pragma MARK: Internal Properties internal var isReady = false - /// The serial execution queue for the camera preview layer (input stream) as well as output processing (take photo and record video) - internal let queue = DispatchQueue(label: "com.mrousavy.camera-queue", qos: .userInteractive, attributes: [], autoreleaseFrequency: .inherit, target: nil) // Capture Session internal let captureSession = AVCaptureSession() internal let audioCaptureSession = AVCaptureSession() @@ -130,10 +133,6 @@ public final class CameraView: UIView { selector: #selector(audioSessionInterrupted), name: AVAudioSession.interruptionNotification, object: AVAudioSession.sharedInstance) - - audioQueue.async { - self.configureAudioSession() - } } @available(*, unavailable) @@ -159,6 +158,7 @@ public final class CameraView: UIView { let shouldReconfigure = changedProps.contains { propsThatRequireReconfiguration.contains($0) } let shouldReconfigureFormat = shouldReconfigure || changedProps.contains("format") let shouldReconfigureDevice = shouldReconfigureFormat || changedProps.contains { propsThatRequireDeviceReconfiguration.contains($0) } + let shouldReconfigureAudioSession = changedProps.contains("audio") let willReconfigure = shouldReconfigure || shouldReconfigureFormat || shouldReconfigureDevice @@ -168,6 +168,7 @@ public final class CameraView: UIView { let shouldUpdateVideoStabilization = willReconfigure || changedProps.contains("videoStabilizationMode") if shouldReconfigure || + shouldReconfigureAudioSession || shouldCheckActive || shouldUpdateTorch || shouldUpdateZoom || @@ -214,6 +215,13 @@ public final class CameraView: UIView { } } } + + // Audio Configuration + if shouldReconfigureAudioSession { + audioQueue.async { + self.configureAudioSession() + } + } } } diff --git a/ios/CameraViewManager.m b/ios/CameraViewManager.m index 536b24a..ce1ccfd 100644 --- a/ios/CameraViewManager.m +++ b/ios/CameraViewManager.m @@ -27,6 +27,10 @@ RCT_EXPORT_VIEW_PROPERTY(cameraId, NSString); RCT_EXPORT_VIEW_PROPERTY(enableDepthData, BOOL); RCT_EXPORT_VIEW_PROPERTY(enableHighResolutionCapture, NSNumber); // nullable bool RCT_EXPORT_VIEW_PROPERTY(enablePortraitEffectsMatteDelivery, BOOL); +// use cases +RCT_EXPORT_VIEW_PROPERTY(photo, NSNumber); // nullable bool +RCT_EXPORT_VIEW_PROPERTY(video, NSNumber); // nullable bool +RCT_EXPORT_VIEW_PROPERTY(audio, NSNumber); // nullable bool // device format RCT_EXPORT_VIEW_PROPERTY(format, NSDictionary); RCT_EXPORT_VIEW_PROPERTY(fps, NSNumber); diff --git a/ios/CameraViewManager.swift b/ios/CameraViewManager.swift index 3890e8a..5d2ff41 100644 --- a/ios/CameraViewManager.swift +++ b/ios/CameraViewManager.swift @@ -99,6 +99,7 @@ final class CameraViewManager: RCTViewManager { "neutralZoom": $0.neutralZoomFactor, "maxZoom": $0.maxAvailableVideoZoomFactor, "isMultiCam": $0.isMultiCam, + "supportsPhotoAndVideoCapture": true, "supportsDepthCapture": false, // TODO: supportsDepthCapture "supportsRawCapture": false, // TODO: supportsRawCapture "supportsLowLightBoost": $0.isLowLightBoostSupported, diff --git a/ios/Frame Processor/FrameHostObject.mm b/ios/Frame Processor/FrameHostObject.mm index 0e0af40..48c6cd6 100644 --- a/ios/Frame Processor/FrameHostObject.mm +++ b/ios/Frame Processor/FrameHostObject.mm @@ -75,10 +75,6 @@ jsi::Value FrameHostObject::get(jsi::Runtime& runtime, const jsi::PropNameID& pr auto planesCount = CVPixelBufferGetPlaneCount(imageBuffer); return jsi::Value((double) planesCount); } - if (name == "buffer") { - 
// TODO: Actually return the pixels of the buffer. Not sure if this will be a huge performance hit or not - return jsi::Array(runtime, 0); - } return jsi::Value::undefined(); } diff --git a/ios/Parsers/AVFileType+descriptor.swift b/ios/Parsers/AVFileType+descriptor.swift index 811216e..f5f4dbb 100644 --- a/ios/Parsers/AVFileType+descriptor.swift +++ b/ios/Parsers/AVFileType+descriptor.swift @@ -10,11 +10,33 @@ import AVFoundation import Foundation extension AVFileType { - init(withString string: String) { - self.init(rawValue: string) + init(withString string: String) throws { + switch string { + case "mov": + self = .mov + case "mp4": + self = .mp4 + case "avci": + self = .avci + case "m4v": + self = .m4v + default: + throw EnumParserError.invalidValue + } } - var descriptor: String { - return rawValue + var descriptor: String? { + switch self { + case .mov: + return "mov" + case .mp4: + return "mp4" + case .avci: + return "avci" + case .m4v: + return "m4v" + default: + return nil + } } } diff --git a/ios/Parsers/EnumParserError.swift b/ios/Parsers/EnumParserError.swift index 0f665f8..12debe3 100644 --- a/ios/Parsers/EnumParserError.swift +++ b/ios/Parsers/EnumParserError.swift @@ -20,8 +20,4 @@ enum EnumParserError: Error { Raised when the descriptor does not match any of the possible values. */ case invalidValue - /** - Raised when no descriptor for the given enum is available. - */ - case noDescriptorAvailable } diff --git a/ios/React Utils/Callback.swift b/ios/React Utils/Callback.swift new file mode 100644 index 0000000..8c8957d --- /dev/null +++ b/ios/React Utils/Callback.swift @@ -0,0 +1,38 @@ +// +// Callback.swift +// VisionCamera +// +// Created by Marc Rousavy on 07.06.21. +// Copyright © 2021 mrousavy. All rights reserved. +// + +import Foundation + +/** + Represents a callback to JavaScript. Syntax is the same as with Promise. + */ +class Callback { + init(_ callback: @escaping RCTResponseSenderBlock) { + self.callback = callback + } + + func reject(error: CameraError, cause: NSError?) { + callback([NSNull(), makeReactError(error, cause: cause)]) + } + + func reject(error: CameraError) { + reject(error: error, cause: nil) + } + + func resolve(_ value: Any?) { + callback([value, NSNull()]) + } + + func resolve() { + resolve(nil) + } + + // MARK: Private + + private let callback: RCTResponseSenderBlock +} diff --git a/ios/RecordingSession.swift b/ios/RecordingSession.swift index 68735c5..999225d 100644 --- a/ios/RecordingSession.swift +++ b/ios/RecordingSession.swift @@ -11,7 +11,7 @@ import Foundation // MARK: - BufferType -enum BufferType: String { +enum BufferType { case audio case video } @@ -112,7 +112,7 @@ class RecordingSession { } guard let initialTimestamp = initialTimestamp else { ReactLogger.log(level: .error, - message: "A \(bufferType.rawValue) frame arrived, but initialTimestamp was nil. Is this RecordingSession running?", + message: "A frame arrived, but initialTimestamp was nil. 
Is this RecordingSession running?", alsoLogToJS: true) return } diff --git a/ios/VisionCamera.xcodeproj/project.pbxproj b/ios/VisionCamera.xcodeproj/project.pbxproj index f3fa056..a942b8b 100644 --- a/ios/VisionCamera.xcodeproj/project.pbxproj +++ b/ios/VisionCamera.xcodeproj/project.pbxproj @@ -54,6 +54,7 @@ B88B47472667C8E00091F538 /* AVCaptureSession+setVideoStabilizationMode.swift in Sources */ = {isa = PBXBuildFile; fileRef = B88B47462667C8E00091F538 /* AVCaptureSession+setVideoStabilizationMode.swift */; }; B8994E6C263F03E100069589 /* JSIUtils.mm in Sources */ = {isa = PBXBuildFile; fileRef = B8994E6B263F03E100069589 /* JSIUtils.mm */; }; B8A751D82609E4B30011C623 /* FrameProcessorRuntimeManager.mm in Sources */ = {isa = PBXBuildFile; fileRef = B8A751D72609E4B30011C623 /* FrameProcessorRuntimeManager.mm */; }; + B8BD3BA2266E22D2006C80A2 /* Callback.swift in Sources */ = {isa = PBXBuildFile; fileRef = B8BD3BA1266E22D2006C80A2 /* Callback.swift */; }; B8D22CDC2642DB4D00234472 /* AVAssetWriterInputPixelBufferAdaptor+initWithVideoSettings.swift in Sources */ = {isa = PBXBuildFile; fileRef = B8D22CDB2642DB4D00234472 /* AVAssetWriterInputPixelBufferAdaptor+initWithVideoSettings.swift */; }; B8DB3BC8263DC28C004C18D7 /* AVAssetWriter.Status+descriptor.swift in Sources */ = {isa = PBXBuildFile; fileRef = B8DB3BC7263DC28C004C18D7 /* AVAssetWriter.Status+descriptor.swift */; }; B8DB3BCA263DC4D8004C18D7 /* RecordingSession.swift in Sources */ = {isa = PBXBuildFile; fileRef = B8DB3BC9263DC4D8004C18D7 /* RecordingSession.swift */; }; @@ -132,6 +133,7 @@ B8994E6B263F03E100069589 /* JSIUtils.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = JSIUtils.mm; sourceTree = "<group>"; }; B8A751D62609E4980011C623 /* FrameProcessorRuntimeManager.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = FrameProcessorRuntimeManager.h; sourceTree = "<group>"; }; B8A751D72609E4B30011C623 /* FrameProcessorRuntimeManager.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = FrameProcessorRuntimeManager.mm; sourceTree = "<group>"; }; + B8BD3BA1266E22D2006C80A2 /* Callback.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Callback.swift; sourceTree = "<group>"; }; B8D22CDB2642DB4D00234472 /* AVAssetWriterInputPixelBufferAdaptor+initWithVideoSettings.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "AVAssetWriterInputPixelBufferAdaptor+initWithVideoSettings.swift"; sourceTree = "<group>"; }; B8DB3BC7263DC28C004C18D7 /* AVAssetWriter.Status+descriptor.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "AVAssetWriter.Status+descriptor.swift"; sourceTree = "<group>"; }; B8DB3BC9263DC4D8004C18D7 /* RecordingSession.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RecordingSession.swift; sourceTree = "<group>"; }; @@ -212,6 +214,7 @@ B887516E25E0102000DB86D6 /* MakeReactError.swift */, B887516F25E0102000DB86D6 /* ReactLogger.swift */, B887517025E0102000DB86D6 /* Promise.swift */, + B8BD3BA1266E22D2006C80A2 /* Callback.swift */, B82FBA942614B69D00909718 /* RCTBridge+runOnJS.h */, B82FBA952614B69D00909718 /* RCTBridge+runOnJS.mm */, B81D41EF263C86F900B041FD /* JSIUtils.h */, @@ -391,6 +394,7 @@ B88751A725E0102000DB86D6 /* CameraView+Zoom.swift in Sources */, B887518525E0102000DB86D6 /* PhotoCaptureDelegate.swift in Sources */, B887518B25E0102000DB86D6 /* AVCaptureDevice.Format+isBetterThan.swift in Sources */, + B8BD3BA2266E22D2006C80A2 /* Callback.swift in Sources */,
B84760A62608EE7C004C3180 /* FrameHostObject.mm in Sources */, B8103E1C25FF553B007A1684 /* FrameProcessorUtils.mm in Sources */, B887518E25E0102000DB86D6 /* AVFrameRateRange+includes.swift in Sources */, diff --git a/src/Camera.tsx b/src/Camera.tsx index 43c79f2..93a6dab 100644 --- a/src/Camera.tsx +++ b/src/Camera.tsx @@ -358,6 +358,13 @@ export class Camera extends React.PureComponent { this.assertFrameProcessorsEnabled(); // frameProcessor argument changed. Update native to reflect the change. if (this.props.frameProcessor != null) { + if (this.props.video !== true) { + throw new CameraCaptureError( + 'capture/video-not-enabled', + 'Video capture is disabled! Pass `video={true}` to enable frame processors.', + ); + } + // 1. Spawn threaded JSI Runtime (if not already done) // 2. Add video data output to Camera stream (if not already done) // 3. Workletize the frameProcessor and prepare it for being called with frames diff --git a/src/CameraDevice.ts b/src/CameraDevice.ts index 238e28a..441883e 100644 --- a/src/CameraDevice.ts +++ b/src/CameraDevice.ts @@ -250,6 +250,28 @@ export interface CameraDevice { * See [the Camera Formats documentation](https://cuvent.github.io/react-native-vision-camera/docs/guides/formats) for more information about Camera Formats. */ formats: CameraDeviceFormat[]; + /** + * Whether this camera device supports enabling photo and video capture at the same time. + * + * * On **iOS** devices this value is always `true`. + * * On newer **Android** devices this value is always `true`. + * * On older **Android** devices this value is `true` if the device's hardware level is `LIMITED` or above, and `false` if it is `LEGACY`. (See [this table](https://developer.android.com/reference/android/hardware/camera2/CameraDevice#regular-capture)) + * + * If the device does not allow enabling `photo` and `video` capture at the same time, you might want to fall back to **snapshot capture** (See ["Taking Snapshots"](https://cuvent.github.io/react-native-vision-camera/docs/guides/capturing#taking-snapshots)) instead: + * + * @example + * ```tsx + * const captureMode = device.supportsPhotoAndVideoCapture ? "photo" : "snapshot" + * return ( + *   <Camera photo={captureMode === "photo"} video={true} /> + * ) + * ``` + */ + supportsPhotoAndVideoCapture: boolean; /** * Whether this camera device supports low light boost.
*/ diff --git a/src/CameraError.ts b/src/CameraError.ts index af89db0..4fca071 100644 --- a/src/CameraError.ts +++ b/src/CameraError.ts @@ -37,6 +37,8 @@ export type CaptureError = | 'capture/invalid-photo-codec' | 'capture/not-bound-error' | 'capture/capture-type-not-supported' + | 'capture/video-not-enabled' + | 'capture/photo-not-enabled' | 'capture/unknown'; export type SystemError = 'system/no-camera-manager'; export type UnknownError = 'unknown/unknown'; diff --git a/src/CameraProps.ts b/src/CameraProps.ts index e628286..ba83688 100644 --- a/src/CameraProps.ts +++ b/src/CameraProps.ts @@ -34,6 +34,22 @@ export interface CameraProps extends ViewProps { */ isActive: boolean; + //#region Use-cases + /** + * * Enables **photo capture** with the `takePhoto` function (see ["Taking Photos"](https://cuvent.github.io/react-native-vision-camera/docs/guides/capturing#taking-photos)) + */ + photo?: boolean; + /** + * * Enables **video capture** with the `startRecording` function (see ["Recording Videos"](https://cuvent.github.io/react-native-vision-camera/docs/guides/capturing/#recording-videos)) + * * Enables **frame processing** (see ["Frame Processors"](https://cuvent.github.io/react-native-vision-camera/docs/guides/frame-processors)) + */ + video?: boolean; + /** + * * Enables **audio capture** for video recordings (see ["Recording Videos"](https://cuvent.github.io/react-native-vision-camera/docs/guides/capturing/#recording-videos)) + */ + audio?: boolean; + //#endregion + //#region Common Props (torch, zoom) /** * Set the current torch mode. diff --git a/src/Frame.ts b/src/Frame.ts index eb2606a..87cd3f4 100644 --- a/src/Frame.ts +++ b/src/Frame.ts @@ -2,10 +2,6 @@ * A single frame, as seen by the camera. */ export interface Frame { - /** - * The raw pixel buffer. - */ - buffer: unknown[]; /** * Whether the underlying buffer is still valid or not. The buffer will be released after the frame processor returns. */
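As a closing illustration of the new API surface, here is a hedged end-to-end sketch of the video use-case in TypeScript. `startRecording` is no longer marked `isBlockingSynchronousMethod`, so start-up failures such as `capture/video-not-enabled` are delivered through `onRecordingError` rather than thrown synchronously. The component, the `Button` wiring and the `device` prop are illustrative, and the `fileType` value assumes the `"mov" | "mp4" | "avci" | "m4v"` set accepted by the new `AVFileType` parser.

```tsx
import * as React from 'react';
import { useRef } from 'react';
import { Button, View } from 'react-native';
import { Camera } from 'react-native-vision-camera';
import type { CameraDevice } from 'react-native-vision-camera';

export function VideoCamera({ device }: { device: CameraDevice }): React.ReactElement {
  const camera = useRef<Camera>(null);

  const onStartPress = () => {
    // Start-up failures (e.g. "capture/video-not-enabled") arrive here through
    // onRecordingError; startRecording() itself no longer throws synchronously.
    camera.current?.startRecording({
      flash: 'on',
      fileType: 'mp4', // assumed to be one of "mov" | "mp4" | "avci" | "m4v"
      onRecordingFinished: (video) => console.log(`Recorded ${video.duration}s to ${video.path}`),
      onRecordingError: (error) => console.error(error.code, error.message),
    });
  };

  return (
    <View>
      {/* audio={true} requires the Microphone permission; pass audio={false} to record without sound. */}
      <Camera ref={camera} device={device} isActive={true} video={true} audio={true} />
      <Button title="Start" onPress={onStartPress} />
      <Button title="Stop" onPress={() => void camera.current?.stopRecording()} />
    </View>
  );
}
```

Passing `audio={false}` (or omitting it) records without sound and skips the Microphone permission requirement, matching the updated `MicrophonePermissionError` message.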