//
// CameraSession+Configuration.swift
// VisionCamera
//
// Created by Marc Rousavy on 12.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

extension CameraSession {
  // pragma MARK: Input Device

  /**
   Configures the Input Device (`cameraId`)
   */
  func configureDevice(configuration: CameraConfiguration) throws {
    ReactLogger.log(level: .info, message: "Configuring Input Device...")

    // Remove all inputs
    captureSession.inputs.forEach { input in
      captureSession.removeInput(input)
    }
    videoDeviceInput = nil

    #if targetEnvironment(simulator)
      // iOS Simulators don't have Cameras
      throw CameraError.device(.notAvailableOnSimulator)
    #endif

    guard let cameraId = configuration.cameraId else {
      throw CameraError.device(.noDevice)
    }

    ReactLogger.log(level: .info, message: "Configuring Camera \(cameraId)...")
    // Video Input (Camera Device/Sensor)
    guard let videoDevice = AVCaptureDevice(uniqueID: cameraId) else {
      throw CameraError.device(.invalid)
    }
    let input = try AVCaptureDeviceInput(device: videoDevice)
    guard captureSession.canAddInput(input) else {
      throw CameraError.parameter(.unsupportedInput(inputDescriptor: "video-input"))
    }
    captureSession.addInput(input)
    videoDeviceInput = input

    ReactLogger.log(level: .info, message: "Successfully configured Input Device!")
  }
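
  // A minimal call-site sketch (hypothetical; the actual ordering is owned by the surrounding
  // configure call, which wraps all of this in begin-/commitConfiguration()):
  //   captureSession.beginConfiguration()
  //   try configureDevice(configuration: configuration)
  //   try configureOutputs(configuration: configuration)
  //   captureSession.commitConfiguration()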

  // pragma MARK: Outputs

  /**
   Configures all outputs (`photo` + `video` + `codeScanner`)
   */
  func configureOutputs(configuration: CameraConfiguration) throws {
    ReactLogger.log(level: .info, message: "Configuring Outputs...")

    // Remove all outputs
    captureSession.outputs.forEach { output in
      captureSession.removeOutput(output)
    }
    photoOutput = nil
    videoOutput = nil
    codeScannerOutput = nil

    // Photo Output
    if case let .enabled(photo) = configuration.photo {
      ReactLogger.log(level: .info, message: "Adding Photo output...")

      // 1. Add
      let photoOutput = AVCapturePhotoOutput()
      guard captureSession.canAddOutput(photoOutput) else {
        throw CameraError.parameter(.unsupportedOutput(outputDescriptor: "photo-output"))
      }
      captureSession.addOutput(photoOutput)

      // 2. Configure
      if photo.enableHighQualityPhotos {
        // TODO: In iOS 16 this will be removed in favor of maxPhotoDimensions.
        photoOutput.isHighResolutionCaptureEnabled = true
        if #available(iOS 13.0, *) {
          // TODO: Test if this actually does any fusion or if this just calls the captureOutput twice. If the latter, remove it.
          photoOutput.isVirtualDeviceConstituentPhotoDeliveryEnabled = photoOutput.isVirtualDeviceConstituentPhotoDeliverySupported
          photoOutput.maxPhotoQualityPrioritization = .quality
        } else {
          photoOutput.isDualCameraDualPhotoDeliveryEnabled = photoOutput.isDualCameraDualPhotoDeliverySupported
        }
      }
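
      // Hedged sketch of the iOS 16+ replacement the TODO above refers to (an assumption, not
      // the shipped implementation; `device` stands for the active AVCaptureDevice, which is
      // not in scope in this function):
      //   if #available(iOS 16.0, *),
      //      let dimensions = device.activeFormat.supportedMaxPhotoDimensions.last {
      //     photoOutput.maxPhotoDimensions = dimensions
      //   }
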
      // TODO: Enable isResponsiveCaptureEnabled? (iOS 17+)
      // TODO: Enable isFastCapturePrioritizationEnabled? (iOS 17+)
      if photo.enableDepthData {
        photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
      }
      if #available(iOS 12.0, *), photo.enablePortraitEffectsMatte {
        photoOutput.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliverySupported
      }

      self.photoOutput = photoOutput
    }

    // Video Output + Frame Processor
    if case let .enabled(video) = configuration.video {
      ReactLogger.log(level: .info, message: "Adding Video Data output...")

      // 1. Add
      let videoOutput = AVCaptureVideoDataOutput()
      guard captureSession.canAddOutput(videoOutput) else {
        throw CameraError.parameter(.unsupportedOutput(outputDescriptor: "video-output"))
      }
      captureSession.addOutput(videoOutput)

      // 2. Configure
      videoOutput.setSampleBufferDelegate(self, queue: CameraQueues.videoQueue)
      videoOutput.alwaysDiscardsLateVideoFrames = true
      let pixelFormatType = try video.getPixelFormat(for: videoOutput)
      videoOutput.videoSettings = [
        String(kCVPixelBufferPixelFormatTypeKey): pixelFormatType,
      ]

      self.videoOutput = videoOutput
    }
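
    // Note: setting only kCVPixelBufferPixelFormatTypeKey asks AVFoundation to deliver frames
    // in that single pixel format (e.g. a bi-planar 4:2:0 YpCbCr format for YUV); the formats
    // that are valid for this output can be queried via videoOutput.availableVideoPixelFormatTypes.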

    // Code Scanner
    if case let .enabled(codeScanner) = configuration.codeScanner {
      ReactLogger.log(level: .info, message: "Adding Code Scanner output...")
      let codeScannerOutput = AVCaptureMetadataOutput()

      // 1. Add
      guard captureSession.canAddOutput(codeScannerOutput) else {
        throw CameraError.codeScanner(.notCompatibleWithOutputs)
      }
      captureSession.addOutput(codeScannerOutput)

      // 2. Configure
      let options = codeScanner.options
      codeScannerOutput.setMetadataObjectsDelegate(self, queue: CameraQueues.codeScannerQueue)
      try options.codeTypes.forEach { type in
        // availableMetadataObjectTypes depends on the connection to the AVCaptureSession,
        // so this list is only available after we add the output to the session.
        if !codeScannerOutput.availableMetadataObjectTypes.contains(type) {
          throw CameraError.codeScanner(.codeTypeNotSupported(codeType: type.descriptor))
        }
      }
      codeScannerOutput.metadataObjectTypes = options.codeTypes
      if let rectOfInterest = options.regionOfInterest {
        codeScannerOutput.rectOfInterest = rectOfInterest
      }
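
      // Note: rectOfInterest is expressed in AVFoundation's normalized output coordinate space
      // (origin top-left, values 0...1), not in view or pixel coordinates; UI-space rects are
      // usually converted with AVCaptureVideoPreviewLayer.metadataOutputRectConverted(fromLayerRect:).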

      self.codeScannerOutput = codeScannerOutput
    }

    // Done!
    ReactLogger.log(level: .info, message: "Successfully configured all outputs!")
  }

  // pragma MARK: Video Stabilization

  func configureVideoStabilization(configuration: CameraConfiguration) {
    captureSession.outputs.forEach { output in
      output.connections.forEach { connection in
        if connection.isVideoStabilizationSupported {
          connection.preferredVideoStabilizationMode = configuration.videoStabilizationMode.toAVCaptureVideoStabilizationMode()
        }
      }
    }
  }
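
  // Note: preferredVideoStabilizationMode is a request, not a guarantee; the mode AVFoundation
  // actually applies can be read back from connection.activeVideoStabilizationMode.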

  // pragma MARK: Orientation

  func configureOrientation(configuration: CameraConfiguration) {
    // Set up orientation and mirroring for all outputs.
    // Note: Photos are only rotated through EXIF tags, and Preview through view transforms.
    let isMirrored = videoDeviceInput?.device.position == .front
    captureSession.outputs.forEach { output in
      if isMirrored {
        output.mirror()
      }
      output.setOrientation(configuration.orientation)
    }
  }

  // pragma MARK: Format

  /**
   Configures the active format (`format`)
   */
  func configureFormat(configuration: CameraConfiguration, device: AVCaptureDevice) throws {
    guard let targetFormat = configuration.format else {
      // No format was set, just use the default.
      return
    }

    ReactLogger.log(level: .info, message: "Configuring Format (\(targetFormat))...")

    let currentFormat = CameraDeviceFormat(fromFormat: device.activeFormat)
    if currentFormat == targetFormat {
      ReactLogger.log(level: .info, message: "Already selected active format, no need to configure.")
      return
    }

    // Find matching format (JS Dictionary -> strongly typed Swift class)
    let format = device.formats.first { targetFormat.isEqualTo(format: $0) }
    guard let format else {
      throw CameraError.format(.invalidFormat)
    }

    // Set new device Format
    device.activeFormat = format

    ReactLogger.log(level: .info, message: "Successfully configured Format!")
  }
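
  // Note: mutating device state (activeFormat above, and the fps/torch/zoom props below)
  // requires the caller to hold device.lockForConfiguration() / unlockForConfiguration();
  // that lock is assumed to be taken by the surrounding configure call rather than here.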

  // pragma MARK: Side-Props

  /**
   Configures format-dependent "side-props" (`fps`, `lowLightBoost`)
   */
  func configureSideProps(configuration: CameraConfiguration, device: AVCaptureDevice) throws {
    // Configure FPS
    if let fps = configuration.fps {
      let supportsGivenFps = device.activeFormat.videoSupportedFrameRateRanges.contains { range in
        return range.includes(fps: Double(fps))
      }
      if !supportsGivenFps {
        throw CameraError.format(.invalidFps(fps: Int(fps)))
      }

      let duration = CMTimeMake(value: 1, timescale: fps)
      device.activeVideoMinFrameDuration = duration
      device.activeVideoMaxFrameDuration = duration
    } else {
      device.activeVideoMinFrameDuration = CMTime.invalid
      device.activeVideoMaxFrameDuration = CMTime.invalid
    }
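
    // Example: fps = 30 pins both bounds to CMTimeMake(value: 1, timescale: 30), i.e. a fixed
    // 1/30 s frame duration; passing no fps resets both to CMTime.invalid, which lets the
    // device fall back to its automatic frame-rate range.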

    // Configure Low-Light-Boost
    if device.automaticallyEnablesLowLightBoostWhenAvailable != configuration.enableLowLightBoost {
      guard device.isLowLightBoostSupported else {
        throw CameraError.device(.lowLightBoostNotSupported)
      }
      device.automaticallyEnablesLowLightBoostWhenAvailable = configuration.enableLowLightBoost
    }
  }

  /**
   Configures the torch.
   The CaptureSession has to be running for the Torch to work.
   */
  func configureTorch(configuration: CameraConfiguration, device: AVCaptureDevice) throws {
    // Configure Torch
    let torchMode = configuration.torch.toTorchMode()
    if device.torchMode != torchMode {
      guard device.hasTorch else {
        throw CameraError.device(.flashUnavailable)
      }

      device.torchMode = torchMode
      if torchMode == .on {
        try device.setTorchModeOn(level: 1.0)
      }
    }
  }
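
  // Note: setTorchModeOn(level: 1.0) drives the torch at full brightness; AVFoundation also
  // exposes AVCaptureDevice.maxAvailableTorchLevel as the named constant for the same maximum.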

  // pragma MARK: Zoom

  /**
   Configures zoom (`zoom`)
   */
  func configureZoom(configuration: CameraConfiguration, device: AVCaptureDevice) {
    guard let zoom = configuration.zoom else {
      return
    }

    let clamped = max(min(zoom, device.activeFormat.videoMaxZoomFactor), device.minAvailableVideoZoomFactor)
    device.videoZoomFactor = clamped
  }
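
  // Example: with minAvailableVideoZoomFactor == 1.0 and videoMaxZoomFactor == 16.0, a requested
  // zoom of 20.0 clamps to 16.0 and a requested 0.5 clamps to 1.0 (illustrative values only).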

  // pragma MARK: Audio

  /**
   Configures the Audio Capture Session with an audio input and audio data output.
   */
  func configureAudioSession(configuration: CameraConfiguration) throws {
    ReactLogger.log(level: .info, message: "Configuring Audio Session...")

    // Prevent iOS from automatically configuring the Audio Session for us
    audioCaptureSession.automaticallyConfiguresApplicationAudioSession = false
    let enableAudio = configuration.audio != .disabled

    // Check microphone permission
    if enableAudio {
      let audioPermissionStatus = AVCaptureDevice.authorizationStatus(for: .audio)
      if audioPermissionStatus != .authorized {
        throw CameraError.permission(.microphone)
      }
    }
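
    // (Permission is only checked here, never requested; the caller is expected to have asked
    // via AVCaptureDevice.requestAccess(for: .audio) beforehand.)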

    // Remove all current inputs
    audioCaptureSession.inputs.forEach { input in
      audioCaptureSession.removeInput(input)
    }
    audioDeviceInput = nil

    // Audio Input (Microphone)
    if enableAudio {
      ReactLogger.log(level: .info, message: "Adding Audio input...")
      guard let microphone = AVCaptureDevice.default(for: .audio) else {
        throw CameraError.device(.microphoneUnavailable)
      }
      let input = try AVCaptureDeviceInput(device: microphone)
      guard audioCaptureSession.canAddInput(input) else {
        throw CameraError.parameter(.unsupportedInput(inputDescriptor: "audio-input"))
      }
      audioCaptureSession.addInput(input)
      audioDeviceInput = input
    }

    // Remove all current outputs
    audioCaptureSession.outputs.forEach { output in
      audioCaptureSession.removeOutput(output)
    }
    audioOutput = nil

    // Audio Output
    if enableAudio {
      ReactLogger.log(level: .info, message: "Adding Audio Data output...")
      let output = AVCaptureAudioDataOutput()
      guard audioCaptureSession.canAddOutput(output) else {
        throw CameraError.parameter(.unsupportedOutput(outputDescriptor: "audio-output"))
      }
      output.setSampleBufferDelegate(self, queue: CameraQueues.audioQueue)
      audioCaptureSession.addOutput(output)
      audioOutput = output
    }
  }
}