//
//  CameraView+AVCaptureSession.swift
//  VisionCamera
//
//  Created by Marc Rousavy on 26.03.21.
//  Copyright © 2021 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

/**
 Extension for CameraView that sets up the AVCaptureSession, Device and Format.
 */
extension CameraView {
  // pragma MARK: Configure Capture Session

  /**
   Configures the Capture Session.
   */
  final func configureCaptureSession() {
    ReactLogger.log(level: .info, message: "Configuring Session...")
    isReady = false

    #if targetEnvironment(simulator)
      invokeOnError(.device(.notAvailableOnSimulator))
      return
    #endif

    guard cameraId != nil else {
      invokeOnError(.device(.noDevice))
      return
    }
    let cameraId = self.cameraId! as String

    ReactLogger.log(level: .info, message: "Initializing Camera with device \(cameraId)...")
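    // Batch all of the following changes into a single atomic session
    // configuration; the deferred commitConfiguration() applies them all at once.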
    captureSession.beginConfiguration()
    defer {
      captureSession.commitConfiguration()
    }

    // pragma MARK: Capture Session Inputs

    // Video Input
    do {
      if let videoDeviceInput = videoDeviceInput {
        captureSession.removeInput(videoDeviceInput)
        self.videoDeviceInput = nil
      }
      ReactLogger.log(level: .info, message: "Adding Video input...")
      guard let videoDevice = AVCaptureDevice(uniqueID: cameraId) else {
        invokeOnError(.device(.invalid))
        return
      }
      videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
      guard captureSession.canAddInput(videoDeviceInput!) else {
        invokeOnError(.parameter(.unsupportedInput(inputDescriptor: "video-input")))
        return
      }
      captureSession.addInput(videoDeviceInput!)
    } catch {
      invokeOnError(.device(.invalid))
      return
    }

    // pragma MARK: Capture Session Outputs

    // Photo Output
    if let photoOutput = photoOutput {
      captureSession.removeOutput(photoOutput)
      self.photoOutput = nil
    }
    if photo?.boolValue == true {
      ReactLogger.log(level: .info, message: "Adding Photo output...")
      photoOutput = AVCapturePhotoOutput()

      if enableHighQualityPhotos?.boolValue == true {
        photoOutput!.isHighResolutionCaptureEnabled = true
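        // On iOS 13+, deliver photos from all constituent cameras of a virtual
        // device (e.g. Dual/Triple Camera) and prioritize quality over speed.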
        if #available(iOS 13.0, *) {
          photoOutput!.isVirtualDeviceConstituentPhotoDeliveryEnabled = photoOutput!.isVirtualDeviceConstituentPhotoDeliverySupported
          photoOutput!.maxPhotoQualityPrioritization = .quality
        } else {
          photoOutput!.isDualCameraDualPhotoDeliveryEnabled = photoOutput!.isDualCameraDualPhotoDeliverySupported
        }
      }
      if enableDepthData {
        photoOutput!.isDepthDataDeliveryEnabled = photoOutput!.isDepthDataDeliverySupported
      }
      if #available(iOS 12.0, *), enablePortraitEffectsMatteDelivery {
        photoOutput!.isPortraitEffectsMatteDeliveryEnabled = photoOutput!.isPortraitEffectsMatteDeliverySupported
      }
      guard captureSession.canAddOutput(photoOutput!) else {
        invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "photo-output")))
        return
      }
      captureSession.addOutput(photoOutput!)
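      // Mirror photos coming from the front camera so captures match the mirrored preview.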
      if videoDeviceInput!.device.position == .front {
        photoOutput!.mirror()
      }
    }

    // Video Output + Frame Processor
    if let videoOutput = videoOutput {
      captureSession.removeOutput(videoOutput)
      self.videoOutput = nil
    }
    if video?.boolValue == true || enableFrameProcessor {
      ReactLogger.log(level: .info, message: "Adding Video Data output...")
      videoOutput = AVCaptureVideoDataOutput()
      guard captureSession.canAddOutput(videoOutput!) else {
        invokeOnError(.parameter(.unsupportedOutput(outputDescriptor: "video-output")))
        return
      }
      videoOutput!.setSampleBufferDelegate(self, queue: CameraQueues.videoQueue)
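      // Keep late frames queued instead of discarding them right away, so Frame
      // Processors receive as many frames as possible.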
      videoOutput!.alwaysDiscardsLateVideoFrames = false

      if previewType == "skia" {
        // If the PreviewView is a Skia view, we need to use the RGB format since Skia works in the RGB colorspace instead of YUV.
        // This does introduce a performance overhead, but it's inevitable since Skia would internally convert
        // YUV frames to RGB anyways since all Shaders and draw operations operate in the RGB space.
        videoOutput!.videoSettings = [
          String(kCVPixelBufferPixelFormatTypeKey): kCVPixelFormatType_32BGRA, // default: kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange
        ]
      }
      captureSession.addOutput(videoOutput!)
    }

    onOrientationChanged()

    invokeOnInitialized()
    isReady = true
    ReactLogger.log(level: .info, message: "Session successfully configured!")
  }

  // pragma MARK: Configure Device

  /**
   Configures the Video Device with the given FPS, HDR and ColorSpace.
   */
  final func configureDevice() {
    ReactLogger.log(level: .info, message: "Configuring Device...")
    guard let device = videoDeviceInput?.device else {
      invokeOnError(.session(.cameraNotReady))
      return
    }

    do {
      try device.lockForConfiguration()

      if let fps = fps?.int32Value {
        let supportsGivenFps = device.activeFormat.videoSupportedFrameRateRanges.contains { range in
          return range.includes(fps: Double(fps))
        }
        if !supportsGivenFps {
          invokeOnError(.format(.invalidFps(fps: Int(fps))))
          return
        }
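        // Pin the device to exactly this frame rate: a frame duration of 1/fps
        // for both the min and max duration locks the FPS.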
        let duration = CMTimeMake(value: 1, timescale: fps)
        device.activeVideoMinFrameDuration = duration
        device.activeVideoMaxFrameDuration = duration
      } else {
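        // No fps given — clear the custom frame durations so the device falls
        // back to the active format's default frame rate.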
        device.activeVideoMinFrameDuration = CMTime.invalid
        device.activeVideoMaxFrameDuration = CMTime.invalid
      }

      if hdr != nil {
        if hdr == true && !device.activeFormat.isVideoHDRSupported {
          invokeOnError(.format(.invalidHdr))
          return
        }
        if !device.automaticallyAdjustsVideoHDREnabled {
          if device.isVideoHDREnabled != hdr!.boolValue {
            device.isVideoHDREnabled = hdr!.boolValue
          }
        }
      }

      if lowLightBoost != nil {
        if lowLightBoost == true && !device.isLowLightBoostSupported {
          invokeOnError(.device(.lowLightBoostNotSupported))
          return
        }
        if device.automaticallyEnablesLowLightBoostWhenAvailable != lowLightBoost!.boolValue {
          device.automaticallyEnablesLowLightBoostWhenAvailable = lowLightBoost!.boolValue
        }
      }

      if let colorSpace = colorSpace as String? {
        guard let avColorSpace = try? AVCaptureColorSpace(string: colorSpace),
              device.activeFormat.supportedColorSpaces.contains(avColorSpace) else {
          invokeOnError(.format(.invalidColorSpace(colorSpace: colorSpace)))
          return
        }
        device.activeColorSpace = avColorSpace
      }

      device.unlockForConfiguration()
      ReactLogger.log(level: .info, message: "Device successfully configured!")
    } catch let error as NSError {
      invokeOnError(.device(.configureError), cause: error)
      return
    }
  }

  // pragma MARK: Configure Format

  /**
   Configures the Video Device to find the best matching Format.
   */
  final func configureFormat() {
    ReactLogger.log(level: .info, message: "Configuring Format...")
    guard let filter = format else {
      // Format Filter was null. Ignore it.
      return
    }
    guard let device = videoDeviceInput?.device else {
      invokeOnError(.session(.cameraNotReady))
      return
    }

    if device.activeFormat.matchesFilter(filter) {
      ReactLogger.log(level: .info, message: "Active format already matches filter.")
      return
    }

    // get all matching formats and pick the best one (sorted best-first)
    let matchingFormats = device.formats.filter { $0.matchesFilter(filter) }.sorted { $0.isBetterThan($1) }
    guard let format = matchingFormats.first else {
      invokeOnError(.format(.invalidFormat))
      return
    }

    do {
      try device.lockForConfiguration()
      device.activeFormat = format
      device.unlockForConfiguration()
      ReactLogger.log(level: .info, message: "Format successfully configured!")
    } catch let error as NSError {
      invokeOnError(.device(.configureError), cause: error)
      return
    }
  }

  // pragma MARK: Notifications/Interruptions

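  // Called when the AVCaptureSession posts a runtime-error notification (the
  // observer is presumably registered elsewhere in CameraView via NotificationCenter).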
  @objc
  func sessionRuntimeError(notification: Notification) {
    ReactLogger.log(level: .error, message: "Unexpected Camera Runtime Error occurred!")
    guard let error = notification.userInfo?[AVCaptureSessionErrorKey] as? AVError else {
      return
    }
    invokeOnError(.unknown(message: error._nsError.description), cause: error._nsError)

    if isActive {
      // restart capture session after an error occurred
      CameraQueues.cameraQueue.async {
        self.captureSession.startRunning()
      }
    }
  }
}
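
// A minimal sketch of how these methods are typically driven (hypothetical —
// the real orchestration lives elsewhere in CameraView, e.g. when props change).
// The format is configured before the device, since the FPS/HDR/ColorSpace
// checks in configureDevice() run against the device's activeFormat:
//
//   CameraQueues.cameraQueue.async {
//     self.configureCaptureSession() // set up inputs & outputs
//     self.configureFormat()         // pick the best matching AVCaptureDevice.Format
//     self.configureDevice()         // apply fps / HDR / color space
//     if self.isActive {
//       self.captureSession.startRunning()
//     }
//   }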