diff --git a/ios/CameraView+AVAudioSession.swift b/ios/CameraView+AVAudioSession.swift
index 489d081..3e47459 100644
--- a/ios/CameraView+AVAudioSession.swift
+++ b/ios/CameraView+AVAudioSession.swift
@@ -31,7 +31,8 @@ extension CameraView {
     if enableAudio {
       let audioPermissionStatus = AVCaptureDevice.authorizationStatus(for: .audio)
       if audioPermissionStatus != .authorized {
-        return invokeOnError(.permission(.microphone))
+        invokeOnError(.permission(.microphone))
+        return
       }
     }
 
@@ -44,16 +45,19 @@ extension CameraView {
       if enableAudio {
         ReactLogger.log(level: .info, message: "Adding Audio input...")
         guard let audioDevice = AVCaptureDevice.default(for: .audio) else {
-          return invokeOnError(.device(.microphoneUnavailable))
+          invokeOnError(.device(.microphoneUnavailable))
+          return
         }
         audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice)
         guard audioCaptureSession.canAddInput(audioDeviceInput!) else {
-          return invokeOnError(.parameter(.unsupportedInput(inputDescriptor: "audio-input")))
+          invokeOnError(.parameter(.unsupportedInput(inputDescriptor: "audio-input")))
+          return
         }
         audioCaptureSession.addInput(audioDeviceInput!)
       }
     } catch let error as NSError {
-      return invokeOnError(.device(.microphoneUnavailable), cause: error)
+      invokeOnError(.device(.microphoneUnavailable), cause: error)
+      return
     }
 
     // Audio Output
@@ -65,7 +69,8 @@
     ReactLogger.log(level: .info, message: "Adding Audio Data output...")
     audioOutput = AVCaptureAudioDataOutput()
     guard audioCaptureSession.canAddOutput(audioOutput!) else {
-      return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "audio-output")))
+      invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "audio-output")))
+      return
     }
     audioOutput!.setSampleBufferDelegate(self, queue: audioQueue)
     audioCaptureSession.addOutput(audioOutput!)
diff --git a/ios/CameraView+AVCaptureSession.swift b/ios/CameraView+AVCaptureSession.swift
index 4c2ce5f..50de6ea 100644
--- a/ios/CameraView+AVCaptureSession.swift
+++ b/ios/CameraView+AVCaptureSession.swift
@@ -23,11 +23,13 @@ extension CameraView {
     isReady = false
 
     #if targetEnvironment(simulator)
-      return invokeOnError(.device(.notAvailableOnSimulator))
+      invokeOnError(.device(.notAvailableOnSimulator))
+      return
     #endif
 
     guard cameraId != nil else {
-      return invokeOnError(.device(.noDevice))
+      invokeOnError(.device(.noDevice))
+      return
     }
     let cameraId = self.cameraId! as String
 
@@ -43,9 +45,11 @@
      do {
        sessionPreset = try AVCaptureSession.Preset(withString: preset)
      } catch let EnumParserError.unsupportedOS(supportedOnOS: os) {
-        return invokeOnError(.parameter(.unsupportedOS(unionName: "Preset", receivedValue: preset, supportedOnOs: os)))
+        invokeOnError(.parameter(.unsupportedOS(unionName: "Preset", receivedValue: preset, supportedOnOs: os)))
+        return
      } catch {
-        return invokeOnError(.parameter(.invalid(unionName: "Preset", receivedValue: preset)))
+        invokeOnError(.parameter(.invalid(unionName: "Preset", receivedValue: preset)))
+        return
      }
      if sessionPreset != nil {
        if captureSession.canSetSessionPreset(sessionPreset!) {
@@ -66,15 +70,18 @@
      }
      ReactLogger.log(level: .info, message: "Adding Video input...")
      guard let videoDevice = AVCaptureDevice(uniqueID: cameraId) else {
-        return invokeOnError(.device(.invalid))
+        invokeOnError(.device(.invalid))
+        return
      }
      videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
      guard captureSession.canAddInput(videoDeviceInput!) else {
-        return invokeOnError(.parameter(.unsupportedInput(inputDescriptor: "video-input")))
+        invokeOnError(.parameter(.unsupportedInput(inputDescriptor: "video-input")))
+        return
      }
      captureSession.addInput(videoDeviceInput!)
    } catch {
-      return invokeOnError(.device(.invalid))
+      invokeOnError(.device(.invalid))
+      return
    }
 
    // pragma MARK: Capture Session Outputs
@@ -95,7 +102,8 @@
        photoOutput!.isPortraitEffectsMatteDeliveryEnabled = photoOutput!.isPortraitEffectsMatteDeliverySupported && self.enablePortraitEffectsMatteDelivery
      }
      guard captureSession.canAddOutput(photoOutput!) else {
-        return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "photo-output")))
+        invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "photo-output")))
+        return
      }
      captureSession.addOutput(photoOutput!)
      if videoDeviceInput!.device.position == .front {
@@ -112,7 +120,8 @@
      ReactLogger.log(level: .info, message: "Adding Video Data output...")
      videoOutput = AVCaptureVideoDataOutput()
      guard captureSession.canAddOutput(videoOutput!) else {
-        return invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "video-output")))
+        invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "video-output")))
+        return
      }
      videoOutput!.setSampleBufferDelegate(self, queue: videoQueue)
      videoOutput!.alwaysDiscardsLateVideoFrames = true
@@ -135,7 +144,8 @@
   final func configureDevice() {
     ReactLogger.log(level: .info, message: "Configuring Device...")
     guard let device = videoDeviceInput?.device else {
-      return invokeOnError(.session(.cameraNotReady))
+      invokeOnError(.session(.cameraNotReady))
+      return
     }
 
     do {
@@ -151,7 +161,8 @@
      }
      if hdr != nil {
        if hdr == true && !device.activeFormat.isVideoHDRSupported {
-          return invokeOnError(.format(.invalidHdr))
+          invokeOnError(.format(.invalidHdr))
+          return
        }
        if !device.automaticallyAdjustsVideoHDREnabled {
          if device.isVideoHDREnabled != hdr!.boolValue {
@@ -161,7 +172,8 @@
      }
      if lowLightBoost != nil {
        if lowLightBoost == true && !device.isLowLightBoostSupported {
-          return invokeOnError(.device(.lowLightBoostNotSupported))
+          invokeOnError(.device(.lowLightBoostNotSupported))
+          return
        }
        if device.automaticallyEnablesLowLightBoostWhenAvailable != lowLightBoost!.boolValue {
          device.automaticallyEnablesLowLightBoostWhenAvailable = lowLightBoost!.boolValue
@@ -174,7 +186,8 @@
      device.unlockForConfiguration()
      ReactLogger.log(level: .info, message: "Device successfully configured!")
    } catch let error as NSError {
-      return invokeOnError(.device(.configureError), cause: error)
+      invokeOnError(.device(.configureError), cause: error)
+      return
    }
  }
 
@@ -190,7 +203,8 @@
      return
    }
    guard let device = videoDeviceInput?.device else {
-      return invokeOnError(.session(.cameraNotReady))
+      invokeOnError(.session(.cameraNotReady))
+      return
    }
 
    if device.activeFormat.matchesFilter(filter) {
@@ -201,7 +215,8 @@
    // get matching format
    let matchingFormats = device.formats.filter { $0.matchesFilter(filter) }.sorted { $0.isBetterThan($1) }
    guard let format = matchingFormats.first else {
-      return invokeOnError(.format(.invalidFormat))
+      invokeOnError(.format(.invalidFormat))
+      return
    }
 
    do {
@@ -210,7 +225,8 @@
      device.unlockForConfiguration()
      ReactLogger.log(level: .info, message: "Format successfully configured!")
    } catch let error as NSError {
-      return invokeOnError(.device(.configureError), cause: error)
+      invokeOnError(.device(.configureError), cause: error)
+      return
    }
  }
 
diff --git a/ios/CameraView+RecordVideo.swift b/ios/CameraView+RecordVideo.swift
index 8588688..15ae538 100644
--- a/ios/CameraView+RecordVideo.swift
+++ b/ios/CameraView+RecordVideo.swift
@@ -24,7 +24,8 @@ extension CameraView: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAud
     var fileType = AVFileType.mov
     if let fileTypeOption = options["fileType"] as? String {
       guard let parsed = try? AVFileType(withString: fileTypeOption) else {
-        return callback.reject(error: .parameter(.invalid(unionName: "fileType", receivedValue: fileTypeOption)))
+        callback.reject(error: .parameter(.invalid(unionName: "fileType", receivedValue: fileTypeOption)))
+        return
       }
       fileType = parsed
     }
@@ -32,7 +33,8 @@
     let errorPointer = ErrorPointer(nilLiteral: ())
     let fileExtension = fileType.descriptor ?? "mov"
     guard let tempFilePath = RCTTempFilePath(fileExtension, errorPointer) else {
-      return callback.reject(error: .capture(.createTempFileError), cause: errorPointer?.pointee)
+      callback.reject(error: .capture(.createTempFileError), cause: errorPointer?.pointee)
+      return
     }
 
     ReactLogger.log(level: .info, message: "File path: \(tempFilePath)")
@@ -45,9 +47,11 @@
 
      guard let videoOutput = self.videoOutput else {
        if self.video?.boolValue == true {
-          return callback.reject(error: .session(.cameraNotReady))
+          callback.reject(error: .session(.cameraNotReady))
+          return
        } else {
-          return callback.reject(error: .capture(.videoNotEnabled))
+          callback.reject(error: .capture(.videoNotEnabled))
+          return
        }
      }
 
@@ -58,7 +62,7 @@
 
      let enableAudio = self.audio?.boolValue == true
 
-      let onFinish = { (status: AVAssetWriter.Status, error: Error?) -> Void in
+      let onFinish = { (status: AVAssetWriter.Status, error: Error?) in
        defer {
          self.recordingSession = nil
          if enableAudio {
@@ -69,16 +73,15 @@
        }
        ReactLogger.log(level: .info, message: "RecordingSession finished with status \(status.descriptor).")
        if let error = error as NSError? {
-          let description = error.description
-          return callback.reject(error: .capture(.unknown(message: "An unknown recording error occured! \(description)")), cause: error)
+          callback.reject(error: .capture(.unknown(message: "An unknown recording error occured! \(error.description)")), cause: error)
        } else {
          if status == .completed {
-            return callback.resolve([
+            callback.resolve([
              "path": self.recordingSession!.url.absoluteString,
              "duration": self.recordingSession!.duration,
            ])
          } else {
-            return callback.reject(error: .unknown(message: "AVAssetWriter completed with status: \(status.descriptor)"))
+            callback.reject(error: .unknown(message: "AVAssetWriter completed with status: \(status.descriptor)"))
          }
        }
      }
@@ -88,13 +91,15 @@
                                                   fileType: fileType,
                                                   completion: onFinish)
      } catch let error as NSError {
-        return callback.reject(error: .capture(.createRecorderError(message: nil)), cause: error)
+        callback.reject(error: .capture(.createRecorderError(message: nil)), cause: error)
+        return
      }
 
      // Init Video
      guard let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: fileType),
            !videoSettings.isEmpty else {
-        return callback.reject(error: .capture(.createRecorderError(message: "Failed to get video settings!")))
+        callback.reject(error: .capture(.createRecorderError(message: "Failed to get video settings!")))
+        return
      }
      self.recordingSession!.initializeVideoWriter(withSettings: videoSettings,
                                                   isVideoMirrored: self.videoOutput!.isMirrored)
@@ -172,7 +177,8 @@
    // Video Recording runs in the same queue
    if isRecording {
      guard let recordingSession = recordingSession else {
-        return invokeOnError(.capture(.unknown(message: "isRecording was true but the RecordingSession was null!")))
+        invokeOnError(.capture(.unknown(message: "isRecording was true but the RecordingSession was null!")))
+        return
      }
 
      switch captureOutput {
diff --git a/ios/CameraView+TakePhoto.swift b/ios/CameraView+TakePhoto.swift
index 35e13f9..f53b957 100644
--- a/ios/CameraView+TakePhoto.swift
+++ b/ios/CameraView+TakePhoto.swift
@@ -28,26 +28,31 @@ extension CameraView {
      guard let photoOutput = self.photoOutput, let videoDeviceInput = self.videoDeviceInput else {
        if self.photo?.boolValue == true {
-          return promise.reject(error: .session(.cameraNotReady))
+          promise.reject(error: .session(.cameraNotReady))
+          return
        } else {
-          return promise.reject(error: .capture(.photoNotEnabled))
+          promise.reject(error: .capture(.photoNotEnabled))
+          return
        }
      }
 
      var photoSettings = AVCapturePhotoSettings()
      if let photoCodecString = options["photoCodec"] as? String {
        guard let photoCodec = AVVideoCodecType(withString: photoCodecString) else {
-          return promise.reject(error: .capture(.invalidPhotoCodec))
+          promise.reject(error: .capture(.invalidPhotoCodec))
+          return
        }
        if photoOutput.availablePhotoCodecTypes.contains(photoCodec) {
          photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: photoCodec])
        } else {
-          return promise.reject(error: .parameter(.invalid(unionName: "PhotoCodec", receivedValue: photoCodecString)))
+          promise.reject(error: .parameter(.invalid(unionName: "PhotoCodec", receivedValue: photoCodecString)))
+          return
        }
      }
 
      if videoDeviceInput.device.isFlashAvailable, let flash = options["flash"] as? String {
        guard let flashMode = AVCaptureDevice.FlashMode(withString: flash) else {
-          return promise.reject(error: .parameter(.invalid(unionName: "FlashMode", receivedValue: flash)))
+          promise.reject(error: .parameter(.invalid(unionName: "FlashMode", receivedValue: flash)))
+          return
        }
        photoSettings.flashMode = flashMode
      }
@@ -63,7 +68,8 @@
      }
      if #available(iOS 13.0, *), let qualityPrioritization = options["qualityPrioritization"] as? String {
        guard let photoQualityPrioritization = AVCapturePhotoOutput.QualityPrioritization(withString: qualityPrioritization) else {
-          return promise.reject(error: .parameter(.invalid(unionName: "QualityPrioritization", receivedValue: qualityPrioritization)))
+          promise.reject(error: .parameter(.invalid(unionName: "QualityPrioritization", receivedValue: qualityPrioritization)))
+          return
        }
        photoSettings.photoQualityPrioritization = photoQualityPrioritization
      }
diff --git a/ios/CameraView.swift b/ios/CameraView.swift
index 1229a33..ac280c3 100644
--- a/ios/CameraView.swift
+++ b/ios/CameraView.swift
@@ -227,10 +227,12 @@ public final class CameraView: UIView {
 
   internal final func setTorchMode(_ torchMode: String) {
     guard let device = videoDeviceInput?.device else {
-      return invokeOnError(.session(.cameraNotReady))
+      invokeOnError(.session(.cameraNotReady))
+      return
     }
     guard var torchMode = AVCaptureDevice.TorchMode(withString: torchMode) else {
-      return invokeOnError(.parameter(.invalid(unionName: "TorchMode", receivedValue: torch)))
+      invokeOnError(.parameter(.invalid(unionName: "TorchMode", receivedValue: torch)))
+      return
     }
     if !captureSession.isRunning {
       torchMode = .off
@@ -245,7 +247,8 @@
        return
      } else {
        // torch mode is .auto or .on, but no torch is available.
-        return invokeOnError(.device(.torchUnavailable))
+        invokeOnError(.device(.torchUnavailable))
+        return
      }
    }
    do {
@@ -256,7 +259,8 @@
      }
      device.unlockForConfiguration()
    } catch let error as NSError {
-      return invokeOnError(.device(.configureError), cause: error)
+      invokeOnError(.device(.configureError), cause: error)
+      return
    }
  }
 
diff --git a/ios/CameraViewManager.swift b/ios/CameraViewManager.swift
index 5d2ff41..3637dc9 100644
--- a/ios/CameraViewManager.swift
+++ b/ios/CameraViewManager.swift
@@ -70,7 +70,8 @@ final class CameraViewManager: RCTViewManager {
   final func focus(_ node: NSNumber, point: NSDictionary, resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
     let promise = Promise(resolver: resolve, rejecter: reject)
     guard let x = point["x"] as? NSNumber, let y = point["y"] as? NSNumber else {
-      return promise.reject(error: .parameter(.invalid(unionName: "point", receivedValue: point.description)))
+      promise.reject(error: .parameter(.invalid(unionName: "point", receivedValue: point.description)))
+      return
     }
     let component = getCameraView(withTag: node)
     component.focus(point: CGPoint(x: x.doubleValue, y: y.doubleValue), promise: promise)
diff --git a/ios/Frame Processor/FrameProcessorRuntimeManager.mm b/ios/Frame Processor/FrameProcessorRuntimeManager.mm
index 7f7f7aa..fc086b3 100644
--- a/ios/Frame Processor/FrameProcessorRuntimeManager.mm
+++ b/ios/Frame Processor/FrameProcessorRuntimeManager.mm
@@ -63,11 +63,11 @@ __attribute__((objc_runtime_name("_TtC12VisionCamera10CameraView")))
 #ifdef ENABLE_FRAME_PROCESSORS
   NSLog(@"FrameProcessorBindings: Creating Runtime Manager...");
   weakBridge = bridge;
-  
+
   auto runtime = vision::makeJSIRuntime();
   reanimated::RuntimeDecorator::decorateRuntime(*runtime, "FRAME_PROCESSOR");
   runtime->global().setProperty(*runtime, "_FRAME_PROCESSOR", jsi::Value(true));
-  
+
   auto callInvoker = bridge.jsCallInvoker;
   auto scheduler = std::make_shared<vision::VisionCameraScheduler>(callInvoker);
   runtimeManager = std::make_unique<reanimated::RuntimeManager>(std::move(runtime),
@@ -78,7 +78,7 @@
   NSLog(@"FrameProcessorBindings: Installing Frame Processor plugins...");
   auto& visionRuntime = *runtimeManager->runtime;
   auto visionGlobal = visionRuntime.global();
-  
+
   for (NSString* pluginKey in [FrameProcessorPluginRegistry frameProcessorPlugins]) {
     auto pluginName = [pluginKey UTF8String];
 
@@ -89,18 +89,16 @@
                                             const jsi::Value& thisValue,
                                             const jsi::Value* arguments,
                                             size_t count) -> jsi::Value {
-
      auto frameHostObject = arguments[0].asObject(runtime).asHostObject(runtime);
      auto frame = static_cast<FrameHostObject*>(frameHostObject.get());
-      
+
      auto args = convertJSICStyleArrayToNSArray(runtime,
                                                 arguments + 1, // start at index 1 since first arg = Frame
                                                 count - 1, // use smaller count
                                                 callInvoker);
      id result = callback(frame->frame, args);
-      
+
      return convertObjCObjectToJSIValue(runtime, result);
-
    };
    visionGlobal.setProperty(visionRuntime, pluginName,
                             jsi::Function::createFromHostFunction(visionRuntime,
@@ -108,9 +106,9 @@
                                                                   1, // frame
                                                                   function));
  }
-  
+
  [FrameProcessorPluginRegistry markInvalid];
-  
+
  NSLog(@"FrameProcessorBindings: Frame Processor plugins installed!");
 #else
  NSLog(@"Reanimated not found, Frame Processors are disabled.");
@@ -125,13 +123,13 @@
    NSLog(@"FrameProcessorBindings: Failed to install Frame Processor Bindings - bridge was null!");
    return;
  }
-  
+
  NSLog(@"FrameProcessorBindings: Installing Frame Processor Bindings for Bridge...");
  RCTCxxBridge *cxxBridge = (RCTCxxBridge *)weakBridge;
  if (!cxxBridge.runtime) {
    return;
  }
-  
+
  jsi::Runtime& jsiRuntime = *(jsi::Runtime*)cxxBridge.runtime;
 
  NSLog(@"FrameProcessorBindings: Installing global functions...");
@@ -150,16 +148,16 @@
    auto worklet = reanimated::ShareableValue::adapt(runtime, arguments[1], runtimeManager.get());
    NSLog(@"FrameProcessorBindings: Successfully created worklet!");
 
-    RCTExecuteOnMainQueue([worklet, viewTag, self]() -> void {
+    RCTExecuteOnMainQueue([worklet, viewTag, self]() {
      auto currentBridge = [RCTBridge currentBridge];
      auto anonymousView = [currentBridge.uiManager viewForReactTag:[NSNumber numberWithDouble:viewTag]];
      auto view = static_cast<CameraView*>(anonymousView);
 
-      dispatch_async(CameraQueues.videoQueue, [worklet, view, self]() -> void {
+      dispatch_async(CameraQueues.videoQueue, [worklet, view, self]() {
        NSLog(@"FrameProcessorBindings: Converting worklet to Objective-C callback...");
        auto& rt = *runtimeManager->runtime;
        auto function = worklet->getValue(rt).asObject(rt).asFunction(rt);
-        
+
        view.frameProcessorCallback = convertJSIFunctionToFrameProcessorCallback(rt, function);
        NSLog(@"FrameProcessorBindings: Frame processor set!");
      });
@@ -184,10 +182,10 @@
    RCTExecuteOnMainQueue(^{
      auto currentBridge = [RCTBridge currentBridge];
      if (!currentBridge) return;
-      
+
      auto anonymousView = [currentBridge.uiManager viewForReactTag:[NSNumber numberWithDouble:viewTag]];
      if (!anonymousView) return;
-      
+
      auto view = static_cast<CameraView*>(anonymousView);
      view.frameProcessorCallback = nil;
      NSLog(@"FrameProcessorBindings: Frame processor removed!");
diff --git a/ios/PhotoCaptureDelegate.swift b/ios/PhotoCaptureDelegate.swift
index 0c3db90..55fa85a 100644
--- a/ios/PhotoCaptureDelegate.swift
+++ b/ios/PhotoCaptureDelegate.swift
@@ -13,6 +13,8 @@ private var delegatesReferences: [NSObject] = []
 // MARK: - PhotoCaptureDelegate
 
 class PhotoCaptureDelegate: NSObject, AVCapturePhotoCaptureDelegate {
+  private let promise: Promise
+
   required init(promise: Promise) {
     self.promise = promise
     super.init()
@@ -24,19 +26,21 @@ class PhotoCaptureDelegate: NSObject, AVCapturePhotoCaptureDelegate {
      delegatesReferences.removeAll(where: { $0 == self })
    }
    if let error = error as NSError? {
-      return promise.reject(error: .capture(.unknown(message: error.description)), cause: error)
+      promise.reject(error: .capture(.unknown(message: error.description)), cause: error)
+      return
    }
 
    let error = ErrorPointer(nilLiteral: ())
    guard let tempFilePath = RCTTempFilePath("jpeg", error) else {
-      return promise.reject(error: .capture(.createTempFileError), cause: error?.pointee)
+      promise.reject(error: .capture(.createTempFileError), cause: error?.pointee)
+      return
    }
 
    let url = URL(string: "file://\(tempFilePath)")!
 
-    guard let data = photo.fileDataRepresentation()
-    else {
-      return promise.reject(error: .capture(.fileError))
+    guard let data = photo.fileDataRepresentation() else {
+      promise.reject(error: .capture(.fileError))
+      return
    }
 
    do {
@@ -45,7 +49,7 @@
      let width = exif?["PixelXDimension"]
      let height = exif?["PixelYDimension"]
 
-      return promise.resolve([
+      promise.resolve([
        "path": tempFilePath,
        "width": width as Any,
        "height": height as Any,
@@ -54,7 +58,7 @@
        "thumbnail": photo.embeddedThumbnailPhotoFormat as Any,
      ])
    } catch {
-      return promise.reject(error: .capture(.fileError), cause: error as NSError)
+      promise.reject(error: .capture(.fileError), cause: error as NSError)
    }
  }
 
@@ -63,11 +67,8 @@ class PhotoCaptureDelegate: NSObject, AVCapturePhotoCaptureDelegate {
      delegatesReferences.removeAll(where: { $0 == self })
    }
    if let error = error as NSError? {
-      return promise.reject(error: .capture(.unknown(message: error.description)), cause: error)
+      promise.reject(error: .capture(.unknown(message: error.description)), cause: error)
+      return
    }
  }
-
-  // MARK: Private
-
-  private let promise: Promise
 }
diff --git a/ios/React Utils/JSIUtils.mm b/ios/React Utils/JSIUtils.mm
index 21d643f..ccd0476 100644
--- a/ios/React Utils/JSIUtils.mm
+++ b/ios/React Utils/JSIUtils.mm
@@ -81,7 +81,7 @@ NSString *convertJSIStringToNSString(jsi::Runtime &runtime, const jsi::String &v
 }
 
 NSArray* convertJSICStyleArrayToNSArray(jsi::Runtime &runtime, const jsi::Value* array, size_t length, std::shared_ptr<CallInvoker> jsInvoker) {
-  if (length == 0) return @[];
+  if (length < 1) return @[];
   NSMutableArray *result = [NSMutableArray new];
   for (size_t i = 0; i < length; i++) {
     // Insert kCFNull when it's `undefined` value to preserve the indices.
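
Note on the pattern: every Swift hunk above is the same mechanical refactor. `return invokeOnError(...)` / `return promise.reject(...)` / `return callback.reject(...)` becomes the call followed by a bare `return` on its own line. Both forms compile, because these error handlers return `Void` and Swift allows returning a `Void` expression from a `Void` function; the split form just makes the early exit explicit instead of suggesting a value is produced. A minimal sketch of the before/after shape — the `fail`/`validate` names are illustrative only, not part of this codebase:

```swift
func fail(_ message: String) {
  print("error: \(message)")
}

// Before: the Void result of fail(...) is returned implicitly.
// This compiles, but reads as if fail(...) produced a value.
func validateBefore(_ value: Int?) {
  guard let value = value else {
    return fail("no value")
  }
  print("got \(value)")
}

// After (the style this diff adopts): call first, then exit explicitly.
func validateAfter(_ value: Int?) {
  guard let value = value else {
    fail("no value")
    return
  }
  print("got \(value)")
}
```

The Objective-C++ hunks are orthogonal cleanups: trailing whitespace is stripped, and redundant `-> void` return-type annotations are dropped from lambdas whose return type is inferred.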