feat: New Core/ library (#1975)
Moves everything Camera-related into `core/` / `Core/` so that it is better encapsulated from React Native. Benefits:

1. Code is much better organized. It should be easier for collaborators now, and a cleaner codebase for me.
2. Locking is fully atomic, as you can now only configure the session through a lock/Mutex which is batch-overridable.
   * On iOS, this makes Camera startup time **MUCH** faster; I measured speedups from **1.5 seconds** down to only **240 milliseconds**, since we only lock/commit once! 🚀
   * On Android, this fixes a few out-of-sync/concurrency issues like "Capture Request contains unconfigured Input/Output Surface!", since it is now a single lock operation! 💪
3. It is easier to integrate VisionCamera outside of React Native (e.g. native iOS apps, NativeScript, Flutter, etc.)

With this PR, VisionCamera V3 is up to **7x** faster than V2.
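As a hedged sketch of what the batched configuration flow looks like (based on the `CameraSession.configure(_:)` API and `CameraConfiguration` props added in this PR; the standalone device lookup is an assumption for a self-contained snippet, since in the React Native library the `cameraId` comes from JS via `useCameraDevice(..)`):

```swift
import AVFoundation

// Sketch: every prop change goes through ONE configure { } call, so the
// AVCaptureSession is locked (beginConfiguration/commitConfiguration) once
// instead of once per prop.
let session = CameraSession()
// Assumption: picking the default back wide-angle camera for illustration.
guard let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) else {
  fatalError("No camera available")
}

session.configure { config in
  // `config` is a mutable copy of the currently active CameraConfiguration.
  config.cameraId = device.uniqueID
  config.photo = .enabled(config: CameraConfiguration.Photo())
  config.zoom = 2.0
  config.isActive = true
  // On return, CameraSession diffs this copy against the old configuration
  // and only re-configures the parts that actually changed.
}
```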
package/ios/Core/CameraConfiguration.swift (new file, 231 lines)
@@ -0,0 +1,231 @@
//
// CameraConfiguration.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

// MARK: - CameraConfiguration

class CameraConfiguration {
  // pragma MARK: Configuration Props

  // Input
  var cameraId: String?

  // Outputs
  var photo: OutputConfiguration<Photo> = .disabled
  var video: OutputConfiguration<Video> = .disabled
  var codeScanner: OutputConfiguration<CodeScanner> = .disabled

  // Orientation
  var orientation: Orientation = .portrait

  // Format
  var format: CameraDeviceFormat?

  // Side-Props
  var fps: Int32?
  var enableLowLightBoost = false
  var torch: Torch = .off

  // Zoom
  var zoom: CGFloat?

  // isActive (Start/Stop)
  var isActive = false

  // Audio Session
  var audio: OutputConfiguration<Audio> = .disabled

  init(copyOf other: CameraConfiguration?) {
    if let other {
      // copy over all values
      cameraId = other.cameraId
      photo = other.photo
      video = other.video
      codeScanner = other.codeScanner
      orientation = other.orientation
      format = other.format
      fps = other.fps
      enableLowLightBoost = other.enableLowLightBoost
      torch = other.torch
      zoom = other.zoom
      isActive = other.isActive
      audio = other.audio
    } else {
      // self will just be initialized with the default values.
    }
  }

  // pragma MARK: Types

  struct Difference {
    let inputChanged: Bool
    let outputsChanged: Bool
    let orientationChanged: Bool
    let formatChanged: Bool
    let sidePropsChanged: Bool
    let zoomChanged: Bool

    let audioSessionChanged: Bool

    /**
     Returns `true` when props that affect the AVCaptureSession configuration (i.e. props that require beginConfiguration()) have changed.
     [`inputChanged`, `outputsChanged`, `orientationChanged`]
     */
    var isSessionConfigurationDirty: Bool {
      return inputChanged || outputsChanged || orientationChanged
    }

    /**
     Returns `true` when props that affect the AVCaptureDevice configuration (i.e. props that require lockForConfiguration()) have changed.
     [`formatChanged`, `sidePropsChanged`, `zoomChanged`]
     */
    var isDeviceConfigurationDirty: Bool {
      return isSessionConfigurationDirty || formatChanged || sidePropsChanged || zoomChanged
    }

    init(between left: CameraConfiguration?, and right: CameraConfiguration) {
      // cameraId
      inputChanged = left?.cameraId != right.cameraId
      // photo, video, codeScanner
      outputsChanged = inputChanged || left?.photo != right.photo || left?.video != right.video || left?.codeScanner != right.codeScanner
      // orientation
      orientationChanged = outputsChanged || left?.orientation != right.orientation
      // format (depends on cameraId)
      formatChanged = inputChanged || left?.format != right.format
      // side-props (depends on format)
      sidePropsChanged = formatChanged || left?.fps != right.fps || left?.enableLowLightBoost != right.enableLowLightBoost || left?.torch != right.torch
      // zoom (depends on format)
      zoomChanged = formatChanged || left?.zoom != right.zoom

      // audio session
      audioSessionChanged = left?.audio != right.audio
    }
  }

  enum OutputConfiguration<T: Equatable>: Equatable {
    case disabled
    case enabled(config: T)

    public static func == (lhs: OutputConfiguration, rhs: OutputConfiguration) -> Bool {
      switch (lhs, rhs) {
      case (.disabled, .disabled):
        return true
      case let (.enabled(a), .enabled(b)):
        return a == b
      default:
        return false
      }
    }
  }

  /**
   A Photo Output configuration
   */
  struct Photo: Equatable {
    var enableHighQualityPhotos = false
    var enableDepthData = false
    var enablePortraitEffectsMatte = false
  }

  /**
   A Video Output configuration
   */
  struct Video: Equatable {
    var pixelFormat: PixelFormat = .native
    var enableBufferCompression = false
    var enableHdr = false
    var enableFrameProcessor = false
  }

  /**
   An Audio Output configuration
   */
  struct Audio: Equatable {
    // no props for audio at the moment
  }
}

extension CameraConfiguration.Video {
  /**
   Returns the pixel format that should be used for the given AVCaptureVideoDataOutput.
   If HDR is enabled, this will return YUV 4:2:0 10-bit.
   If HDR is disabled, this will return whatever the user specified as a pixelFormat, or the most efficient format as a fallback.
   */
  func getPixelFormat(for videoOutput: AVCaptureVideoDataOutput) throws -> OSType {
    // as per documentation, the first value is always the most efficient format
    var defaultFormat = videoOutput.availableVideoPixelFormatTypes.first!
    if enableBufferCompression {
      // use compressed format instead if we enabled buffer compression
      if defaultFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange &&
        videoOutput.availableVideoPixelFormatTypes.contains(kCVPixelFormatType_Lossless_420YpCbCr8BiPlanarVideoRange) {
        // YUV 4:2:0 8-bit (limited video colors; compressed)
        defaultFormat = kCVPixelFormatType_Lossless_420YpCbCr8BiPlanarVideoRange
      }
      if defaultFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange &&
        videoOutput.availableVideoPixelFormatTypes.contains(kCVPixelFormatType_Lossless_420YpCbCr8BiPlanarFullRange) {
        // YUV 4:2:0 8-bit (full video colors; compressed)
        defaultFormat = kCVPixelFormatType_Lossless_420YpCbCr8BiPlanarFullRange
      }
    }

    // If the user enabled HDR, we can only use the YUV 4:2:0 10-bit pixel format.
    if enableHdr == true {
      guard pixelFormat == .native || pixelFormat == .yuv else {
        throw CameraError.format(.incompatiblePixelFormatWithHDR)
      }

      var targetFormats = [kCVPixelFormatType_420YpCbCr10BiPlanarFullRange,
                           kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange]
      if enableBufferCompression {
        // If we enable buffer compression, try to use a lossless compressed YUV format first, otherwise fall back to the others.
        targetFormats.insert(kCVPixelFormatType_Lossless_420YpCbCr10PackedBiPlanarVideoRange, at: 0)
      }

      // Find the best matching format
      guard let format = videoOutput.findPixelFormat(firstOf: targetFormats) else {
        throw CameraError.format(.invalidHdr)
      }
      // YUV 4:2:0 10-bit (compressed/uncompressed)
      return format
    }

    // If we don't use HDR, we can use any other custom pixel format.
    switch pixelFormat {
    case .yuv:
      // YUV 4:2:0 8-bit (full/limited video colors; uncompressed)
      var targetFormats = [kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
                           kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange]
      if enableBufferCompression {
        // YUV 4:2:0 8-bit (full/limited video colors; compressed)
        targetFormats.insert(kCVPixelFormatType_Lossless_420YpCbCr8BiPlanarVideoRange, at: 0)
        targetFormats.insert(kCVPixelFormatType_Lossless_420YpCbCr8BiPlanarFullRange, at: 0)
      }
      guard let format = videoOutput.findPixelFormat(firstOf: targetFormats) else {
        throw CameraError.device(.pixelFormatNotSupported)
      }
      return format
    case .rgb:
      // RGBA 8-bit (uncompressed)
      var targetFormats = [kCVPixelFormatType_32BGRA]
      if enableBufferCompression {
        // RGBA 8-bit (compressed)
        targetFormats.insert(kCVPixelFormatType_Lossless_32BGRA, at: 0)
      }
      guard let format = videoOutput.findPixelFormat(firstOf: targetFormats) else {
        throw CameraError.device(.pixelFormatNotSupported)
      }
      return format
    case .native:
      return defaultFormat
    case .unknown:
      throw CameraError.parameter(.invalid(unionName: "pixelFormat", receivedValue: "unknown"))
    }
  }
}
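For illustration, a minimal sketch of how the cascading dirty-flags in `CameraConfiguration.Difference` behave, using only the types defined above:

```swift
let oldConfig = CameraConfiguration(copyOf: nil)
let newConfig = CameraConfiguration(copyOf: oldConfig)
newConfig.zoom = 2.0 // only zoom differs

let diff = CameraConfiguration.Difference(between: oldConfig, and: newConfig)
// A zoom-only change requires a device lock, but not a full session re-configuration:
assert(diff.isDeviceConfigurationDirty)   // true  -> lockForConfiguration()
assert(!diff.isSessionConfigurationDirty) // false -> no beginConfiguration() needed
```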
package/ios/Core/CameraError.swift (new file, 319 lines)
@@ -0,0 +1,319 @@
//
// CameraError.swift
// mrousavy
//
// Created by Marc Rousavy on 14.01.21.
// Copyright © 2021 mrousavy. All rights reserved.
//

import Foundation

// MARK: - PermissionError

enum PermissionError: String {
  case microphone = "microphone-permission-denied"
  case camera = "camera-permission-denied"

  var code: String {
    return rawValue
  }

  var message: String {
    switch self {
    case .microphone:
      return "The Microphone permission was denied! If you want to record Videos without sound, pass `audio={false}`."
    case .camera:
      return "The Camera permission was denied!"
    }
  }
}

// MARK: - ParameterError

enum ParameterError {
  case invalid(unionName: String, receivedValue: String)
  case unsupportedOutput(outputDescriptor: String)
  case unsupportedInput(inputDescriptor: String)
  case invalidCombination(provided: String, missing: String)

  var code: String {
    switch self {
    case .invalid:
      return "invalid-parameter"
    case .unsupportedOutput:
      return "unsupported-output"
    case .unsupportedInput:
      return "unsupported-input"
    case .invalidCombination:
      return "invalid-combination"
    }
  }

  var message: String {
    switch self {
    case let .invalid(unionName: unionName, receivedValue: receivedValue):
      return "The value \"\(receivedValue)\" could not be parsed to type \(unionName)!"
    case let .unsupportedOutput(outputDescriptor: output):
      return "The output \"\(output)\" is not supported!"
    case let .unsupportedInput(inputDescriptor: input):
      return "The input \"\(input)\" is not supported!"
    case let .invalidCombination(provided: provided, missing: missing):
      return "Invalid combination! If \"\(provided)\" is provided, \"\(missing)\" also has to be set!"
    }
  }
}

// MARK: - DeviceError

enum DeviceError: String {
  case configureError = "configuration-error"
  case noDevice = "no-device"
  case invalid = "invalid-device"
  case flashUnavailable = "flash-unavailable"
  case microphoneUnavailable = "microphone-unavailable"
  case lowLightBoostNotSupported = "low-light-boost-not-supported"
  case focusNotSupported = "focus-not-supported"
  case notAvailableOnSimulator = "camera-not-available-on-simulator"
  case pixelFormatNotSupported = "pixel-format-not-supported"

  var code: String {
    return rawValue
  }

  var message: String {
    switch self {
    case .configureError:
      return "Failed to lock the device for configuration."
    case .noDevice:
      return "No device was set! Use `useCameraDevice(..)` or `Camera.getAvailableCameraDevices()` to select a suitable Camera device."
    case .invalid:
      return "The given Camera device was invalid. Use `useCameraDevice(..)` or `Camera.getAvailableCameraDevices()` to select a suitable Camera device."
    case .flashUnavailable:
      return "The Camera Device does not have a flash unit! Make sure you select a device where `hasFlash`/`hasTorch` is true!"
    case .lowLightBoostNotSupported:
      return "The currently selected camera device does not support low-light boost! Make sure you select a device where `supportsLowLightBoost` is true!"
    case .focusNotSupported:
      return "The currently selected camera device does not support focusing!"
    case .microphoneUnavailable:
      return "The microphone was unavailable."
    case .notAvailableOnSimulator:
      return "The Camera is not available on the iOS Simulator!"
    case .pixelFormatNotSupported:
      return "The given pixelFormat is not supported on the given Camera Device!"
    }
  }
}

// MARK: - FormatError

enum FormatError {
  case invalidFps(fps: Int)
  case invalidHdr
  case invalidFormat
  case incompatiblePixelFormatWithHDR

  var code: String {
    switch self {
    case .invalidFormat:
      return "invalid-format"
    case .invalidFps:
      return "invalid-fps"
    case .invalidHdr:
      return "invalid-hdr"
    case .incompatiblePixelFormatWithHDR:
      return "incompatible-pixel-format-with-hdr-setting"
    }
  }

  var message: String {
    switch self {
    case .invalidFormat:
      return "The given format was invalid. Did you check if the current device supports the given format in `device.formats`?"
    case let .invalidFps(fps):
      return "The given format cannot run at \(fps) FPS! Make sure your FPS is lower than `format.maxFps` but higher than `format.minFps`."
    case .invalidHdr:
      return "The currently selected format does not support HDR capture! Make sure you select a format which includes `supportsPhotoHDR`/`supportsVideoHDR`!"
    case .incompatiblePixelFormatWithHDR:
      return "The currently selected pixelFormat is not compatible with HDR! HDR only works with the `yuv` pixelFormat."
    }
  }
}

// MARK: - SessionError

enum SessionError {
  case cameraNotReady
  case audioSessionFailedToActivate
  case audioInUseByOtherApp

  var code: String {
    switch self {
    case .cameraNotReady:
      return "camera-not-ready"
    case .audioInUseByOtherApp:
      return "audio-in-use-by-other-app"
    case .audioSessionFailedToActivate:
      return "audio-session-failed-to-activate"
    }
  }

  var message: String {
    switch self {
    case .cameraNotReady:
      return "The Camera is not ready yet! Wait for the onInitialized() callback!"
    case .audioInUseByOtherApp:
      return "The audio session is already in use by another app with higher priority!"
    case .audioSessionFailedToActivate:
      return "Failed to activate Audio Session!"
    }
  }
}

// MARK: - CaptureError

enum CaptureError {
  case recordingInProgress
  case noRecordingInProgress
  case fileError
  case createTempFileError(message: String? = nil)
  case createRecorderError(message: String? = nil)
  case videoNotEnabled
  case photoNotEnabled
  case aborted
  case unknown(message: String? = nil)

  var code: String {
    switch self {
    case .recordingInProgress:
      return "recording-in-progress"
    case .noRecordingInProgress:
      return "no-recording-in-progress"
    case .fileError:
      return "file-io-error"
    case .createTempFileError:
      return "create-temp-file-error"
    case .createRecorderError:
      return "create-recorder-error"
    case .videoNotEnabled:
      return "video-not-enabled"
    case .photoNotEnabled:
      return "photo-not-enabled"
    case .aborted:
      return "aborted"
    case .unknown:
      return "unknown"
    }
  }

  var message: String {
    switch self {
    case .recordingInProgress:
      return "There is already an active video recording in progress! Did you call startRecording() twice?"
    case .noRecordingInProgress:
      return "There was no active video recording in progress! Did you call stopRecording() twice?"
    case .fileError:
      return "An unexpected File IO error occurred!"
    case let .createTempFileError(message: message):
      return "Failed to create a temporary file! \(message ?? "(no additional message)")"
    case let .createRecorderError(message: message):
      return "Failed to create the AVAssetWriter (Recorder)! \(message ?? "(no additional message)")"
    case .videoNotEnabled:
      return "Video capture is disabled! Pass `video={true}` to enable video recordings."
    case .photoNotEnabled:
      return "Photo capture is disabled! Pass `photo={true}` to enable photo capture."
    case .aborted:
      return "The capture has been stopped before any input data arrived."
    case let .unknown(message: message):
      return message ?? "An unknown error occurred while capturing a video/photo."
    }
  }
}

// MARK: - CodeScannerError

enum CodeScannerError {
  case notCompatibleWithOutputs
  case codeTypeNotSupported(codeType: String)

  var code: String {
    switch self {
    case .notCompatibleWithOutputs:
      return "not-compatible-with-outputs"
    case .codeTypeNotSupported:
      return "code-type-not-supported"
    }
  }

  var message: String {
    switch self {
    case .notCompatibleWithOutputs:
      return "The Code Scanner is not supported in combination with the current outputs! Either disable video or photo outputs."
    case let .codeTypeNotSupported(codeType: codeType):
      return "The codeType \"\(codeType)\" is not supported by the Code Scanner!"
    }
  }
}

// MARK: - CameraError

enum CameraError: Error {
  case permission(_ id: PermissionError)
  case parameter(_ id: ParameterError)
  case device(_ id: DeviceError)
  case format(_ id: FormatError)
  case session(_ id: SessionError)
  case capture(_ id: CaptureError)
  case codeScanner(_ id: CodeScannerError)
  case unknown(message: String? = nil, cause: NSError? = nil)

  var code: String {
    switch self {
    case let .permission(id: id):
      return "permission/\(id.code)"
    case let .parameter(id: id):
      return "parameter/\(id.code)"
    case let .device(id: id):
      return "device/\(id.code)"
    case let .format(id: id):
      return "format/\(id.code)"
    case let .session(id: id):
      return "session/\(id.code)"
    case let .capture(id: id):
      return "capture/\(id.code)"
    case let .codeScanner(id: id):
      return "code-scanner/\(id.code)"
    case .unknown:
      return "unknown/unknown"
    }
  }

  var message: String {
    switch self {
    case let .permission(id: id):
      return id.message
    case let .parameter(id: id):
      return id.message
    case let .device(id: id):
      return id.message
    case let .format(id: id):
      return id.message
    case let .session(id: id):
      return id.message
    case let .capture(id: id):
      return id.message
    case let .codeScanner(id: id):
      return id.message
    case let .unknown(message: message, cause: cause):
      return message ?? cause?.description ?? "An unexpected error occurred."
    }
  }

  var cause: NSError? {
    switch self {
    case let .unknown(message: _, cause: cause):
      return cause
    default:
      return nil
    }
  }
}
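A quick sketch of how these nested enums flatten into the `code`/`message` pair that gets surfaced (e.g. to JS), directly verifiable from the file above:

```swift
let error = CameraError.device(.noDevice)
print(error.code)    // "device/no-device"
print(error.message) // "No device was set! Use `useCameraDevice(..)` or ..."
```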
package/ios/Core/CameraQueues.swift (new file, 40 lines)
@@ -0,0 +1,40 @@
//
// CameraQueues.swift
// VisionCamera
//
// Created by Marc Rousavy on 22.03.21.
// Copyright © 2021 mrousavy. All rights reserved.
//

import Foundation

@objc
public class CameraQueues: NSObject {
  /// The serial execution queue for camera configuration and setup.
  @objc public static let cameraQueue = DispatchQueue(label: "mrousavy/VisionCamera.main",
                                                      qos: .userInteractive,
                                                      attributes: [],
                                                      autoreleaseFrequency: .inherit,
                                                      target: nil)

  /// The serial execution queue for output processing of videos for recording or synchronous frame processing.
  @objc public static let videoQueue = DispatchQueue(label: "mrousavy/VisionCamera.video",
                                                     qos: .userInteractive,
                                                     attributes: [],
                                                     autoreleaseFrequency: .inherit,
                                                     target: nil)

  /// The serial execution queue for output processing of QR/barcodes.
  @objc public static let codeScannerQueue = DispatchQueue(label: "mrousavy/VisionCamera.codeScanner",
                                                           qos: .userInteractive,
                                                           attributes: [],
                                                           autoreleaseFrequency: .inherit,
                                                           target: nil)

  /// The serial execution queue for output processing of audio buffers.
  @objc public static let audioQueue = DispatchQueue(label: "mrousavy/VisionCamera.audio",
                                                     qos: .userInteractive,
                                                     attributes: [],
                                                     autoreleaseFrequency: .inherit,
                                                     target: nil)
}
package/ios/Core/CameraSession+Audio.swift (new file, 93 lines)
@@ -0,0 +1,93 @@
//
// CameraSession+Audio.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

extension CameraSession {
  /**
   Configures the Audio session and activates it. If the session was active it will shortly be deactivated before configuration.

   The Audio Session will be configured to allow background music, haptics (vibrations) and system sound playback while recording.
   Background audio is allowed to play on speakers or bluetooth speakers.
   */
  final func activateAudioSession() throws {
    ReactLogger.log(level: .info, message: "Activating Audio Session...")

    do {
      let audioSession = AVAudioSession.sharedInstance()

      try audioSession.updateCategory(AVAudioSession.Category.playAndRecord,
                                      options: [.mixWithOthers,
                                                .allowBluetoothA2DP,
                                                .defaultToSpeaker,
                                                .allowAirPlay])

      if #available(iOS 14.5, *) {
        // prevents the audio session from being interrupted by a phone call
        try audioSession.setPrefersNoInterruptionsFromSystemAlerts(true)
      }

      if #available(iOS 13.0, *) {
        // allow system sounds (notifications, calls, music) to play while recording
        try audioSession.setAllowHapticsAndSystemSoundsDuringRecording(true)
      }

      audioCaptureSession.startRunning()
    } catch let error as NSError {
      ReactLogger.log(level: .error, message: "Failed to activate audio session! Error \(error.code): \(error.description)")
      switch error.code {
      case 561_017_449:
        throw CameraError.session(.audioInUseByOtherApp)
      default:
        throw CameraError.session(.audioSessionFailedToActivate)
      }
    }
  }

  final func deactivateAudioSession() {
    ReactLogger.log(level: .info, message: "Deactivating Audio Session...")

    audioCaptureSession.stopRunning()
  }

  @objc
  func audioSessionInterrupted(notification: Notification) {
    ReactLogger.log(level: .error, message: "Audio Session Interruption Notification!")
    guard let userInfo = notification.userInfo,
          let typeValue = userInfo[AVAudioSessionInterruptionTypeKey] as? UInt,
          let type = AVAudioSession.InterruptionType(rawValue: typeValue) else {
      return
    }

    // TODO: Add JS-Event for Audio Session interruptions?
    switch type {
    case .began:
      // Something interrupted our Audio Session, stop recording audio.
      ReactLogger.log(level: .error, message: "The Audio Session was interrupted!")
    case .ended:
      ReactLogger.log(level: .info, message: "The Audio Session interruption has ended.")
      guard let optionsValue = userInfo[AVAudioSessionInterruptionOptionKey] as? UInt else { return }
      let options = AVAudioSession.InterruptionOptions(rawValue: optionsValue)
      if options.contains(.shouldResume) {
        // Try resuming if possible
        if isRecording {
          CameraQueues.audioQueue.async {
            ReactLogger.log(level: .info, message: "Resuming interrupted Audio Session...")
            // restart audio session because interruption is over
            try? self.activateAudioSession()
          }
        }
      } else {
        ReactLogger.log(level: .error, message: "Cannot resume interrupted Audio Session!")
      }
    @unknown default:
      ()
    }
  }
}
package/ios/Core/CameraSession+CodeScanner.swift (new file, 80 lines)
@@ -0,0 +1,80 @@
//
// CameraSession+CodeScanner.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

extension CameraSession: AVCaptureMetadataOutputObjectsDelegate {
  public func metadataOutput(_: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from _: AVCaptureConnection) {
    guard let onCodeScanned = delegate?.onCodeScanned else {
      // No delegate callback
      return
    }
    guard !metadataObjects.isEmpty else {
      // No codes detected
      return
    }
    guard let device = videoDeviceInput?.device else {
      // No cameraId set
      return
    }
    let size = device.activeFormat.videoDimensions

    // Map codes to JS values
    let codes = metadataObjects.map { object in
      var value: String?
      if let code = object as? AVMetadataMachineReadableCodeObject {
        value = code.stringValue
      }
      let x = object.bounds.origin.x * Double(size.width)
      let y = object.bounds.origin.y * Double(size.height)
      let w = object.bounds.width * Double(size.width)
      let h = object.bounds.height * Double(size.height)
      let frame = CGRect(x: x, y: y, width: w, height: h)

      return Code(type: object.type, value: value, frame: frame)
    }

    // Call delegate (JS) event
    onCodeScanned(codes)
  }

  /**
   A scanned QR/Barcode.
   */
  struct Code {
    /**
     Type of the scanned Code
     */
    let type: AVMetadataObject.ObjectType
    /**
     Decoded value of the code
     */
    let value: String?
    /**
     Location of the code on-screen, relative to the video output layer
     */
    let frame: CGRect

    /**
     Converts this Code to a JS Object (Dictionary)
     */
    func toJSValue() -> [String: AnyHashable] {
      return [
        "type": type.descriptor,
        "value": value,
        "frame": [
          "x": frame.origin.x,
          "y": frame.origin.y,
          "width": frame.size.width,
          "height": frame.size.height,
        ],
      ]
    }
  }
}
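To illustrate the normalized-bounds-to-pixels mapping performed in the delegate above, a small sketch with made-up numbers (the 1920x1080 format dimensions are a hypothetical example):

```swift
// AVFoundation reports code bounds normalized to {0...1}; the delegate
// scales them by the active format's dimensions to get pixel coordinates.
let size = CGSize(width: 1920, height: 1080) // hypothetical format dimensions
let bounds = CGRect(x: 0.1, y: 0.2, width: 0.5, height: 0.25) // normalized
let frame = CGRect(x: bounds.origin.x * size.width,
                   y: bounds.origin.y * size.height,
                   width: bounds.width * size.width,
                   height: bounds.height * size.height)
// frame == (192.0, 216.0, 960.0, 270.0)
```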
package/ios/Core/CameraSession+Configuration.swift (new file, 323 lines)
@@ -0,0 +1,323 @@
//
// CameraSession+Configuration.swift
// VisionCamera
//
// Created by Marc Rousavy on 12.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

extension CameraSession {
  // pragma MARK: Input Device

  /**
   Configures the Input Device (`cameraId`)
   */
  func configureDevice(configuration: CameraConfiguration) throws {
    ReactLogger.log(level: .info, message: "Configuring Input Device...")

    // Remove all inputs
    captureSession.inputs.forEach { input in
      captureSession.removeInput(input)
    }
    videoDeviceInput = nil

    #if targetEnvironment(simulator)
      // iOS Simulators don't have Cameras
      throw CameraError.device(.notAvailableOnSimulator)
    #endif

    guard let cameraId = configuration.cameraId else {
      throw CameraError.device(.noDevice)
    }

    ReactLogger.log(level: .info, message: "Configuring Camera \(cameraId)...")
    // Video Input (Camera Device/Sensor)
    guard let videoDevice = AVCaptureDevice(uniqueID: cameraId) else {
      throw CameraError.device(.invalid)
    }
    let input = try AVCaptureDeviceInput(device: videoDevice)
    guard captureSession.canAddInput(input) else {
      throw CameraError.parameter(.unsupportedInput(inputDescriptor: "video-input"))
    }
    captureSession.addInput(input)
    videoDeviceInput = input

    ReactLogger.log(level: .info, message: "Successfully configured Input Device!")
  }

  // pragma MARK: Outputs

  /**
   Configures all outputs (`photo` + `video` + `codeScanner`)
   */
  func configureOutputs(configuration: CameraConfiguration) throws {
    ReactLogger.log(level: .info, message: "Configuring Outputs...")

    // Remove all outputs
    captureSession.outputs.forEach { output in
      captureSession.removeOutput(output)
    }
    photoOutput = nil
    videoOutput = nil
    audioOutput = nil
    codeScannerOutput = nil

    // Photo Output
    if case let .enabled(photo) = configuration.photo {
      ReactLogger.log(level: .info, message: "Adding Photo output...")
      let photoOutput = AVCapturePhotoOutput()

      // 1. Configure
      if photo.enableHighQualityPhotos {
        // TODO: In iOS 16 this will be removed in favor of maxPhotoDimensions.
        photoOutput.isHighResolutionCaptureEnabled = true
        if #available(iOS 13.0, *) {
          // TODO: Test if this actually does any fusion or if this just calls the captureOutput twice. If the latter, remove it.
          photoOutput.isVirtualDeviceConstituentPhotoDeliveryEnabled = photoOutput.isVirtualDeviceConstituentPhotoDeliverySupported
          photoOutput.maxPhotoQualityPrioritization = .quality
        } else {
          photoOutput.isDualCameraDualPhotoDeliveryEnabled = photoOutput.isDualCameraDualPhotoDeliverySupported
        }
      }
      // TODO: Enable isResponsiveCaptureEnabled? (iOS 17+)
      // TODO: Enable isFastCapturePrioritizationEnabled? (iOS 17+)
      if photo.enableDepthData {
        photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
      }
      if #available(iOS 12.0, *), photo.enablePortraitEffectsMatte {
        photoOutput.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliverySupported
      }

      // 2. Add
      guard captureSession.canAddOutput(photoOutput) else {
        throw CameraError.parameter(.unsupportedOutput(outputDescriptor: "photo-output"))
      }
      captureSession.addOutput(photoOutput)
      self.photoOutput = photoOutput
    }

    // Video Output + Frame Processor
    if case let .enabled(video) = configuration.video {
      ReactLogger.log(level: .info, message: "Adding Video Data output...")
      let videoOutput = AVCaptureVideoDataOutput()

      // 1. Configure
      videoOutput.setSampleBufferDelegate(self, queue: CameraQueues.videoQueue)
      videoOutput.alwaysDiscardsLateVideoFrames = true
      let pixelFormatType = try video.getPixelFormat(for: videoOutput)
      videoOutput.videoSettings = [
        String(kCVPixelBufferPixelFormatTypeKey): pixelFormatType,
      ]

      // 2. Add
      guard captureSession.canAddOutput(videoOutput) else {
        throw CameraError.parameter(.unsupportedOutput(outputDescriptor: "video-output"))
      }
      captureSession.addOutput(videoOutput)
      self.videoOutput = videoOutput
    }

    // Code Scanner
    if case let .enabled(codeScanner) = configuration.codeScanner {
      ReactLogger.log(level: .info, message: "Adding Code Scanner output...")
      let codeScannerOutput = AVCaptureMetadataOutput()

      // 1. Configure
      try codeScanner.codeTypes.forEach { type in
        if !codeScannerOutput.availableMetadataObjectTypes.contains(type) {
          throw CameraError.codeScanner(.codeTypeNotSupported(codeType: type.descriptor))
        }
      }
      codeScannerOutput.setMetadataObjectsDelegate(self, queue: CameraQueues.codeScannerQueue)
      codeScannerOutput.metadataObjectTypes = codeScanner.codeTypes
      if let rectOfInterest = codeScanner.regionOfInterest {
        codeScannerOutput.rectOfInterest = rectOfInterest
      }

      // 2. Add
      guard captureSession.canAddOutput(codeScannerOutput) else {
        throw CameraError.codeScanner(.notCompatibleWithOutputs)
      }
      captureSession.addOutput(codeScannerOutput)
      self.codeScannerOutput = codeScannerOutput
    }

    // Done!
    ReactLogger.log(level: .info, message: "Successfully configured all outputs!")
    delegate?.onSessionInitialized()
  }

  // pragma MARK: Orientation

  func configureOrientation(configuration: CameraConfiguration) {
    // Set up orientation and mirroring for all outputs.
    // Note: Photos are only rotated through EXIF tags, and Preview through view transforms
    let isMirrored = videoDeviceInput?.device.position == .front
    captureSession.outputs.forEach { output in
      if isMirrored {
        output.mirror()
      }
      output.setOrientation(configuration.orientation)
    }
  }

  // pragma MARK: Format

  /**
   Configures the active format (`format`)
   */
  func configureFormat(configuration: CameraConfiguration) throws {
    guard let targetFormat = configuration.format else {
      // No format was set, just use the default.
      return
    }

    ReactLogger.log(level: .info, message: "Configuring Format (\(targetFormat))...")
    guard let device = videoDeviceInput?.device else {
      throw CameraError.session(.cameraNotReady)
    }

    let currentFormat = CameraDeviceFormat(fromFormat: device.activeFormat)
    if currentFormat == targetFormat {
      ReactLogger.log(level: .info, message: "Already selected active format, no need to configure.")
      return
    }

    // Find matching format (JS Dictionary -> strongly typed Swift class)
    let format = device.formats.first { targetFormat.isEqualTo(format: $0) }
    guard let format else {
      throw CameraError.format(.invalidFormat)
    }

    // Set new device Format
    device.activeFormat = format

    ReactLogger.log(level: .info, message: "Successfully configured Format!")
  }

  // pragma MARK: Side-Props

  /**
   Configures format-dependent "side-props" (`fps`, `lowLightBoost`, `torch`)
   */
  func configureSideProps(configuration: CameraConfiguration) throws {
    guard let device = videoDeviceInput?.device else {
      throw CameraError.session(.cameraNotReady)
    }

    // Configure FPS
    if let fps = configuration.fps {
      let supportsGivenFps = device.activeFormat.videoSupportedFrameRateRanges.contains { range in
        return range.includes(fps: Double(fps))
      }
      if !supportsGivenFps {
        throw CameraError.format(.invalidFps(fps: Int(fps)))
      }

      let duration = CMTimeMake(value: 1, timescale: fps)
      device.activeVideoMinFrameDuration = duration
      device.activeVideoMaxFrameDuration = duration
    } else {
      device.activeVideoMinFrameDuration = CMTime.invalid
      device.activeVideoMaxFrameDuration = CMTime.invalid
    }

    // Configure Low-Light-Boost
    if configuration.enableLowLightBoost {
      let isDifferent = configuration.enableLowLightBoost != device.automaticallyEnablesLowLightBoostWhenAvailable
      if isDifferent && !device.isLowLightBoostSupported {
        throw CameraError.device(.lowLightBoostNotSupported)
      }
      device.automaticallyEnablesLowLightBoostWhenAvailable = configuration.enableLowLightBoost
    }

    // Configure Torch
    if configuration.torch != .off {
      guard device.hasTorch else {
        throw CameraError.device(.flashUnavailable)
      }

      device.torchMode = configuration.torch.toTorchMode()
      try device.setTorchModeOn(level: 1.0)
    }
  }

  // pragma MARK: Zoom

  /**
   Configures zoom (`zoom`)
   */
  func configureZoom(configuration: CameraConfiguration) throws {
    guard let device = videoDeviceInput?.device else {
      throw CameraError.session(.cameraNotReady)
    }
    guard let zoom = configuration.zoom else {
      return
    }

    let clamped = max(min(zoom, device.activeFormat.videoMaxZoomFactor), device.minAvailableVideoZoomFactor)
    device.videoZoomFactor = clamped
  }

  // pragma MARK: Audio

  /**
   Configures the Audio Capture Session with an audio input and audio data output.
   */
  func configureAudioSession(configuration: CameraConfiguration) throws {
    ReactLogger.log(level: .info, message: "Configuring Audio Session...")

    // Prevent iOS from automatically configuring the Audio Session for us
    audioCaptureSession.automaticallyConfiguresApplicationAudioSession = false
    let enableAudio = configuration.audio != .disabled

    // Check microphone permission
    if enableAudio {
      let audioPermissionStatus = AVCaptureDevice.authorizationStatus(for: .audio)
      if audioPermissionStatus != .authorized {
        throw CameraError.permission(.microphone)
      }
    }

    // Remove all current inputs
    audioCaptureSession.inputs.forEach { input in
      audioCaptureSession.removeInput(input)
    }
    audioDeviceInput = nil

    // Audio Input (Microphone)
    if enableAudio {
      ReactLogger.log(level: .info, message: "Adding Audio input...")
      guard let microphone = AVCaptureDevice.default(for: .audio) else {
        throw CameraError.device(.microphoneUnavailable)
      }
      let input = try AVCaptureDeviceInput(device: microphone)
      guard audioCaptureSession.canAddInput(input) else {
        throw CameraError.parameter(.unsupportedInput(inputDescriptor: "audio-input"))
      }
      audioCaptureSession.addInput(input)
      audioDeviceInput = input
    }

    // Remove all current outputs
    audioCaptureSession.outputs.forEach { output in
      audioCaptureSession.removeOutput(output)
    }
    audioOutput = nil

    // Audio Output
    if enableAudio {
      ReactLogger.log(level: .info, message: "Adding Audio Data output...")
      let output = AVCaptureAudioDataOutput()
      guard audioCaptureSession.canAddOutput(output) else {
        throw CameraError.parameter(.unsupportedOutput(outputDescriptor: "audio-output"))
      }
      output.setSampleBufferDelegate(self, queue: CameraQueues.audioQueue)
      audioCaptureSession.addOutput(output)
      audioOutput = output
    }
  }
}
package/ios/Core/CameraSession+Focus.swift (new file, 82 lines)
@@ -0,0 +1,82 @@
//
// CameraSession+Focus.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

extension CameraSession {
  /**
   Focuses the Camera to the specified point. The point must be in the Camera coordinate system, so {0...1} on both axes.
   */
  func focus(point: CGPoint) throws {
    guard let device = videoDeviceInput?.device else {
      throw CameraError.session(SessionError.cameraNotReady)
    }
    if !device.isFocusPointOfInterestSupported {
      throw CameraError.device(DeviceError.focusNotSupported)
    }

    do {
      try device.lockForConfiguration()
      defer {
        device.unlockForConfiguration()
      }

      // Set Focus
      if device.isFocusPointOfInterestSupported {
        device.focusPointOfInterest = point
        device.focusMode = .autoFocus
      }

      // Set Exposure
      if device.isExposurePointOfInterestSupported {
        device.exposurePointOfInterest = point
        device.exposureMode = .autoExpose
      }

      // Remove any existing listeners
      NotificationCenter.default.removeObserver(self,
                                                name: NSNotification.Name.AVCaptureDeviceSubjectAreaDidChange,
                                                object: nil)

      // Listen for focus completion
      device.isSubjectAreaChangeMonitoringEnabled = true
      NotificationCenter.default.addObserver(self,
                                             selector: #selector(subjectAreaDidChange),
                                             name: NSNotification.Name.AVCaptureDeviceSubjectAreaDidChange,
                                             object: nil)
    } catch {
      throw CameraError.device(DeviceError.configureError)
    }
  }

  @objc
  func subjectAreaDidChange(notification _: NSNotification) {
    guard let device = videoDeviceInput?.device else {
      return
    }

    try? device.lockForConfiguration()
    defer {
      device.unlockForConfiguration()
    }

    // Reset Focus to continuous/auto
    if device.isFocusPointOfInterestSupported {
      device.focusMode = .continuousAutoFocus
    }

    // Reset Exposure to continuous/auto
    if device.isExposurePointOfInterestSupported {
      device.exposureMode = .continuousAutoExposure
    }

    // Disable listeners
    device.isSubjectAreaChangeMonitoringEnabled = false
  }
}
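A hedged usage sketch of the tap-to-focus API above (`session` is assumed to be a configured `CameraSession`; coordinates are normalized to {0...1}):

```swift
do {
  // Tap-to-focus at the center of the frame; throws if the device
  // doesn't support focusPointOfInterest.
  try session.focus(point: CGPoint(x: 0.5, y: 0.5))
} catch {
  print("Focus failed: \(error)")
}
```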
package/ios/Core/CameraSession+Photo.swift (new file, 107 lines)
@@ -0,0 +1,107 @@
//
// CameraSession+Photo.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

extension CameraSession {
  /**
   Takes a photo.
   `takePhoto` is only available if `photo={true}`.
   */
  func takePhoto(options: NSDictionary, promise: Promise) {
    // Run on Camera Queue
    CameraQueues.cameraQueue.async {
      // Get Photo Output configuration
      guard let configuration = self.configuration else {
        promise.reject(error: .session(.cameraNotReady))
        return
      }
      guard case let .enabled(config: photo) = configuration.photo else {
        // User needs to enable photo={true}
        promise.reject(error: .capture(.photoNotEnabled))
        return
      }

      // Check if Photo Output is available
      guard let photoOutput = self.photoOutput,
            let videoDeviceInput = self.videoDeviceInput else {
        // Camera is not yet ready
        promise.reject(error: .session(.cameraNotReady))
        return
      }

      ReactLogger.log(level: .info, message: "Capturing photo...")

      // Create photo settings
      let photoSettings = AVCapturePhotoSettings()

      // default, overridable settings if high quality capture was enabled
      if photo.enableHighQualityPhotos {
        // TODO: On iOS 16+ this will be removed in favor of maxPhotoDimensions.
        photoSettings.isHighResolutionPhotoEnabled = true
        if #available(iOS 13.0, *) {
          photoSettings.photoQualityPrioritization = .quality
        }
      }

      // flash
      if videoDeviceInput.device.isFlashAvailable, let flash = options["flash"] as? String {
        guard let flashMode = AVCaptureDevice.FlashMode(withString: flash) else {
          promise.reject(error: .parameter(.invalid(unionName: "FlashMode", receivedValue: flash)))
          return
        }
        photoSettings.flashMode = flashMode
      }

      // shutter sound
      let enableShutterSound = options["enableShutterSound"] as? Bool ?? true

      // depth data
      photoSettings.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliveryEnabled
      if #available(iOS 12.0, *) {
        photoSettings.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliveryEnabled
      }

      // quality prioritization
      if #available(iOS 13.0, *), let qualityPrioritization = options["qualityPrioritization"] as? String {
        guard let photoQualityPrioritization = AVCapturePhotoOutput.QualityPrioritization(withString: qualityPrioritization) else {
          promise.reject(error: .parameter(.invalid(unionName: "QualityPrioritization", receivedValue: qualityPrioritization)))
          return
        }
        photoSettings.photoQualityPrioritization = photoQualityPrioritization
      }

      // photo size is always the one selected in the format
      if #available(iOS 16.0, *) {
        photoSettings.maxPhotoDimensions = photoOutput.maxPhotoDimensions
      }

      // red-eye reduction
      if #available(iOS 12.0, *), let autoRedEyeReduction = options["enableAutoRedEyeReduction"] as? Bool {
        photoSettings.isAutoRedEyeReductionEnabled = autoRedEyeReduction
      }

      // stabilization
      if let enableAutoStabilization = options["enableAutoStabilization"] as? Bool {
        photoSettings.isAutoStillImageStabilizationEnabled = enableAutoStabilization
      }

      // distortion correction
      if #available(iOS 14.1, *), let enableAutoDistortionCorrection = options["enableAutoDistortionCorrection"] as? Bool {
        photoSettings.isAutoContentAwareDistortionCorrectionEnabled = enableAutoDistortionCorrection
      }

      // Actually do the capture!
      photoOutput.capturePhoto(with: photoSettings, delegate: PhotoCaptureDelegate(promise: promise, enableShutterSound: enableShutterSound))

      // Assume that `takePhoto` is always called with the same parameters, so prepare the next call too.
      photoOutput.setPreparedPhotoSettingsArray([photoSettings], completionHandler: nil)
    }
  }
}
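For reference, a sketch of the options dictionary `takePhoto` parses. The keys are taken from the code above; the string values are assumptions (the exact accepted strings depend on the `withString:` initializers defined elsewhere in the library), and `promise` would come from the React Native bridge:

```swift
// Hypothetical options as they would arrive from JS:
let options: NSDictionary = [
  "flash": "auto",                        // parsed into AVCaptureDevice.FlashMode
  "enableShutterSound": false,
  "qualityPrioritization": "quality",     // iOS 13+, assumed value
  "enableAutoRedEyeReduction": true,      // iOS 12+
  "enableAutoStabilization": true,
  "enableAutoDistortionCorrection": true, // iOS 14.1+
]
// session.takePhoto(options: options, promise: promise)
```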
package/ios/Core/CameraSession+Video.swift (new file, 221 lines)
@@ -0,0 +1,221 @@
//
// CameraSession+Video.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation
import UIKit

extension CameraSession {
  /**
   Starts a video + audio recording with a custom Asset Writer.
   */
  func startRecording(options: RecordVideoOptions,
                      onVideoRecorded: @escaping (_ video: Video) -> Void,
                      onError: @escaping (_ error: CameraError) -> Void) {
    // Run on Camera Queue
    CameraQueues.cameraQueue.async {
      ReactLogger.log(level: .info, message: "Starting Video recording...")

      if options.flash != .off {
        // use the torch as the video's flash
        self.configure { config in
          config.torch = options.flash
        }
      }

      // Get Video Output
      guard let videoOutput = self.videoOutput else {
        if self.configuration?.video == .disabled {
          onError(.capture(.videoNotEnabled))
        } else {
          onError(.session(.cameraNotReady))
        }
        return
      }

      let enableAudio = self.configuration?.audio != .disabled

      // Callback for when the recording ends
      let onFinish = { (recordingSession: RecordingSession, status: AVAssetWriter.Status, error: Error?) in
        defer {
          // Disable Audio Session again
          if enableAudio {
            CameraQueues.audioQueue.async {
              self.deactivateAudioSession()
            }
          }
          // Reset flash
          if options.flash != .off {
            // Set torch mode back to what it was before if we used it for the video flash.
            self.configure { config in
              let torch = self.configuration?.torch ?? .off
              config.torch = torch
            }
          }
        }

        self.recordingSession = nil
        self.isRecording = false
        ReactLogger.log(level: .info, message: "RecordingSession finished with status \(status.descriptor).")

        if let error = error as NSError? {
          ReactLogger.log(level: .error, message: "RecordingSession Error \(error.code): \(error.description)")
          // Something went wrong, we have an error
          if error.domain == "capture/aborted" {
            onError(.capture(.aborted))
          } else {
            onError(.capture(.unknown(message: "An unknown recording error occurred! \(error.code) \(error.description)")))
          }
        } else {
          if status == .completed {
            // Recording was successfully saved
            let video = Video(path: recordingSession.url.absoluteString,
                              duration: recordingSession.duration)
            onVideoRecorded(video)
          } else {
            // Recording wasn't saved and we don't have an error either.
            onError(.unknown(message: "AVAssetWriter completed with status: \(status.descriptor)"))
          }
        }
      }

      // Create temporary file
      let errorPointer = ErrorPointer(nilLiteral: ())
      let fileExtension = options.fileType.descriptor ?? "mov"
      guard let tempFilePath = RCTTempFilePath(fileExtension, errorPointer) else {
        let message = errorPointer?.pointee?.description
        onError(.capture(.createTempFileError(message: message)))
        return
      }

      ReactLogger.log(level: .info, message: "File path: \(tempFilePath)")
      let tempURL = URL(string: "file://\(tempFilePath)")!

      let recordingSession: RecordingSession
      do {
        recordingSession = try RecordingSession(url: tempURL,
                                                fileType: options.fileType,
                                                completion: onFinish)
      } catch let error as NSError {
        onError(.capture(.createRecorderError(message: error.description)))
        return
      }
      self.recordingSession = recordingSession

      // Init Video
      guard var videoSettings = self.recommendedVideoSettings(videoOutput: videoOutput,
                                                              fileType: options.fileType,
                                                              videoCodec: options.codec),
            !videoSettings.isEmpty else {
        onError(.capture(.createRecorderError(message: "Failed to get video settings!")))
        return
      }
      ReactLogger.log(level: .trace, message: "Recommended Video Settings: \(videoSettings.description)")

      // Custom Video Bit Rate
      if let videoBitRate = options.bitRate {
        // Convert from Mbps -> bps
        let bitsPerSecond = videoBitRate * 1_000_000
        videoSettings[AVVideoCompressionPropertiesKey] = [
          AVVideoAverageBitRateKey: NSNumber(value: bitsPerSecond),
        ]
      }

      // get pixel format (420f, 420v, x420)
      let pixelFormat = videoOutput.pixelFormat
      recordingSession.initializeVideoWriter(withSettings: videoSettings,
                                             pixelFormat: pixelFormat)

      // Enable/Activate Audio Session (optional)
      if enableAudio {
        if let audioOutput = self.audioOutput {
          // Activate Audio Session asynchronously
          CameraQueues.audioQueue.async {
            do {
              try self.activateAudioSession()
            } catch {
              self.onConfigureError(error)
            }
          }

          // Initialize audio asset writer
          let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriter(writingTo: options.fileType)
          recordingSession.initializeAudioWriter(withSettings: audioSettings)
        }
      }

      // start recording session with or without audio.
      do {
        try recordingSession.startAssetWriter()
        self.isRecording = true
      } catch let error as NSError {
        onError(.capture(.createRecorderError(message: "RecordingSession failed to start asset writer. \(error.description)")))
        return
      }
    }
  }

  /**
   Stops an active recording.
   */
  func stopRecording(promise: Promise) {
    CameraQueues.cameraQueue.async {
      self.isRecording = false

      withPromise(promise) {
        guard let recordingSession = self.recordingSession else {
          throw CameraError.capture(.noRecordingInProgress)
        }
        recordingSession.finish()
        return nil
      }
    }
  }

  /**
   Pauses an active recording.
   */
  func pauseRecording(promise: Promise) {
    CameraQueues.cameraQueue.async {
      withPromise(promise) {
        guard self.recordingSession != nil else {
          // there's no active recording!
          throw CameraError.capture(.noRecordingInProgress)
        }
        self.isRecording = false
        return nil
      }
    }
  }

  /**
   Resumes an active, but paused recording.
   */
  func resumeRecording(promise: Promise) {
    CameraQueues.cameraQueue.async {
      withPromise(promise) {
        guard self.recordingSession != nil else {
          // there's no active recording!
          throw CameraError.capture(.noRecordingInProgress)
        }
        self.isRecording = true
        return nil
      }
    }
  }

  private func recommendedVideoSettings(videoOutput: AVCaptureVideoDataOutput,
                                        fileType: AVFileType,
                                        videoCodec: AVVideoCodecType?) -> [String: Any]? {
    if videoCodec != nil {
      return videoOutput.recommendedVideoSettings(forVideoCodecType: videoCodec!, assetWriterOutputFileType: fileType)
    } else {
      return videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: fileType)
    }
  }
}
package/ios/Core/CameraSession.swift (new file, 260 lines)
@@ -0,0 +1,260 @@
//
// CameraSession.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

/**
 A fully-featured Camera Session supporting preview, video, photo, frame processing, and code scanning outputs.
 All changes to the session have to be controlled via the `configure` function.
 */
class CameraSession: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
  // Configuration
  var configuration: CameraConfiguration?
  // Capture Session
  let captureSession = AVCaptureSession()
  let audioCaptureSession = AVCaptureSession()
  // Inputs & Outputs
  var videoDeviceInput: AVCaptureDeviceInput?
  var audioDeviceInput: AVCaptureDeviceInput?
  var photoOutput: AVCapturePhotoOutput?
  var videoOutput: AVCaptureVideoDataOutput?
  var audioOutput: AVCaptureAudioDataOutput?
  var codeScannerOutput: AVCaptureMetadataOutput?
  // State
  var recordingSession: RecordingSession?
  var isRecording = false

  // Callbacks
  weak var delegate: CameraSessionDelegate?

  // Public accessors
  var maxZoom: Double {
    if let device = videoDeviceInput?.device {
      return device.maxAvailableVideoZoomFactor
    }
    return 1.0
  }

  /**
   Create a new instance of the `CameraSession`.
   Runtime errors are reported through the `delegate`'s `onError` callback.
   */
  override init() {
    super.init()

    NotificationCenter.default.addObserver(self,
                                           selector: #selector(sessionRuntimeError),
                                           name: .AVCaptureSessionRuntimeError,
                                           object: captureSession)
    NotificationCenter.default.addObserver(self,
                                           selector: #selector(sessionRuntimeError),
                                           name: .AVCaptureSessionRuntimeError,
                                           object: audioCaptureSession)
    NotificationCenter.default.addObserver(self,
                                           selector: #selector(audioSessionInterrupted),
                                           name: AVAudioSession.interruptionNotification,
                                           object: AVAudioSession.sharedInstance())
  }

  deinit {
    NotificationCenter.default.removeObserver(self,
                                              name: .AVCaptureSessionRuntimeError,
                                              object: captureSession)
    NotificationCenter.default.removeObserver(self,
                                              name: .AVCaptureSessionRuntimeError,
                                              object: audioCaptureSession)
    NotificationCenter.default.removeObserver(self,
                                              name: AVAudioSession.interruptionNotification,
                                              object: AVAudioSession.sharedInstance())
  }

  /**
   Creates a PreviewView for the current Capture Session
   */
  func createPreviewView(frame: CGRect) -> PreviewView {
    return PreviewView(frame: frame, session: captureSession)
  }

  func onConfigureError(_ error: Error) {
    if let error = error as? CameraError {
      // It's a typed Error
      delegate?.onError(error)
    } else {
      // It's any kind of unknown error
      let cameraError = CameraError.unknown(message: error.localizedDescription)
      delegate?.onError(cameraError)
    }
  }

  /**
   Update the session configuration.
   Any changes in here will be re-configured only if required, and under a lock.
   The `configuration` object is a copy of the currently active configuration that can be modified by the caller in the lambda.
   */
  func configure(_ lambda: (_ configuration: CameraConfiguration) throws -> Void) {
    ReactLogger.log(level: .info, message: "Updating Session Configuration...")

    // Let caller configure a new configuration for the Camera.
    let config = CameraConfiguration(copyOf: configuration)
    do {
      try lambda(config)
    } catch {
      // The caller's lambda threw - report the error and abort instead of applying a half-built configuration.
      onConfigureError(error)
      return
    }
    let difference = CameraConfiguration.Difference(between: configuration, and: config)

    // Set up Camera (Video) Capture Session (on camera queue)
    CameraQueues.cameraQueue.async {
      do {
        // If needed, configure the AVCaptureSession (inputs, outputs)
        if difference.isSessionConfigurationDirty {
          // Lock Capture Session for configuration
          ReactLogger.log(level: .info, message: "Beginning CameraSession configuration...")
          self.captureSession.beginConfiguration()

          // 1. Update input device
          if difference.inputChanged {
            try self.configureDevice(configuration: config)
          }
          // 2. Update outputs
          if difference.outputsChanged {
            try self.configureOutputs(configuration: config)
          }
          // 3. Update output orientation
          if difference.orientationChanged {
            self.configureOrientation(configuration: config)
          }

          // Unlock Capture Session again and submit configuration to Hardware
          self.captureSession.commitConfiguration()
          ReactLogger.log(level: .info, message: "Committed CameraSession configuration!")
        }

        // If needed, configure the AVCaptureDevice (format, zoom, low-light-boost, ..)
        if difference.isDeviceConfigurationDirty {
          guard let device = self.videoDeviceInput?.device else {
            throw CameraError.session(.cameraNotReady)
          }
          ReactLogger.log(level: .info, message: "Beginning CaptureDevice configuration...")
          try device.lockForConfiguration()

          // 4. Configure format
          if difference.formatChanged {
            try self.configureFormat(configuration: config)
          }
          // 5. Configure side-props (fps, lowLightBoost)
          if difference.sidePropsChanged {
            try self.configureSideProps(configuration: config)
          }
          // 6. Configure zoom
          if difference.zoomChanged {
            try self.configureZoom(configuration: config)
          }

          device.unlockForConfiguration()
          ReactLogger.log(level: .info, message: "Committed CaptureDevice configuration!")
        }

        // 7. Start or stop the session if needed
        self.checkIsActive(configuration: config)

        // Update successful, set the new configuration!
        self.configuration = config
      } catch {
        self.onConfigureError(error)
      }
    }

    // Set up Audio Capture Session (on audio queue)
    if difference.audioSessionChanged {
      CameraQueues.audioQueue.async {
        do {
          // Lock Capture Session for configuration
          ReactLogger.log(level: .info, message: "Beginning AudioSession configuration...")
          self.audioCaptureSession.beginConfiguration()

          try self.configureAudioSession(configuration: config)

          // Unlock Capture Session again and submit configuration to Hardware
          self.audioCaptureSession.commitConfiguration()
          ReactLogger.log(level: .info, message: "Committed AudioSession configuration!")
        } catch {
          self.onConfigureError(error)
        }
      }
    }
  }
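
  // Editor's note - a sketch of the batched configuration pattern (not part of this
  // diff): all mutations happen on a draft copy of the active configuration, and the
  // session locks/commits at most once per configure() call. The property values below
  // are illustrative assumptions; only props that actually changed get re-applied.
  //
  //   let session = CameraSession()
  //   session.configure { config in
  //     config.cameraId = frontCameraId   // assumed to come from a device query
  //     config.fps = 30
  //     config.zoom = 2.0
  //     config.isActive = true
  //   }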

  /**
   Starts or stops the CaptureSession if needed (`isActive`)
   */
  private func checkIsActive(configuration: CameraConfiguration) {
    if configuration.isActive == captureSession.isRunning {
      return
    }

    // Start/Stop session
    if configuration.isActive {
      captureSession.startRunning()
    } else {
      captureSession.stopRunning()
    }
  }

  /**
   Called for every new Frame in the Video output
   */
  public final func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from _: AVCaptureConnection) {
    // Call Frame Processor (delegate) for every Video Frame
    if captureOutput is AVCaptureVideoDataOutput {
      delegate?.onFrame(sampleBuffer: sampleBuffer)
    }

    // Record Video Frame/Audio Sample to File in custom `RecordingSession` (AVAssetWriter)
    if isRecording {
      guard let recordingSession = recordingSession else {
        delegate?.onError(.capture(.unknown(message: "isRecording was true but the RecordingSession was nil!")))
        return
      }

      switch captureOutput {
      case is AVCaptureVideoDataOutput:
        recordingSession.appendBuffer(sampleBuffer, type: .video, timestamp: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
      case is AVCaptureAudioDataOutput:
        // Audio runs on a separate capture session with its own clock, so convert the
        // buffer's presentation timestamp onto the video session's clock before writing.
        let timestamp = CMSyncConvertTime(CMSampleBufferGetPresentationTimeStamp(sampleBuffer),
                                          from: audioCaptureSession.masterClock ?? CMClockGetHostTimeClock(),
                                          to: captureSession.masterClock ?? CMClockGetHostTimeClock())
        recordingSession.appendBuffer(sampleBuffer, type: .audio, timestamp: timestamp)
      default:
        break
      }
    }
  }
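
  // Editor's note - a self-contained restatement (not part of this diff) of the clock
  // conversion used above: audio buffers are timestamped on the audio session's clock,
  // so their PTS must be re-expressed on the video session's clock before both streams
  // are written into the shared AVAssetWriter.
  private func convertToVideoClock(_ pts: CMTime) -> CMTime {
    return CMSyncConvertTime(pts,
                             from: audioCaptureSession.masterClock ?? CMClockGetHostTimeClock(),
                             to: captureSession.masterClock ?? CMClockGetHostTimeClock())
  }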

  // pragma MARK: Notifications

  @objc
  func sessionRuntimeError(notification: Notification) {
    ReactLogger.log(level: .error, message: "Unexpected Camera Runtime Error occurred!")
    guard let error = notification.userInfo?[AVCaptureSessionErrorKey] as? AVError else {
      return
    }

    // Notify consumer about runtime error
    delegate?.onError(.unknown(message: error._nsError.description, cause: error._nsError))

    let shouldRestart = configuration?.isActive == true
    if shouldRestart {
      // restart the capture session after an error occurred
      CameraQueues.cameraQueue.async {
        self.captureSession.startRunning()
      }
    }
  }
}
32
package/ios/Core/CameraSessionDelegate.swift
Normal file
@@ -0,0 +1,32 @@
//
// CameraSessionDelegate.swift
// VisionCamera
//
// Created by Marc Rousavy on 11.10.23.
// Copyright © 2023 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

/**
 A listener for [CameraSession] events
 */
protocol CameraSessionDelegate: AnyObject {
  /**
   Called when there is a Runtime Error in the [CameraSession]
   */
  func onError(_ error: CameraError)
  /**
   Called when the [CameraSession] successfully initializes
   */
  func onSessionInitialized()
  /**
   Called for every frame (if video or frameProcessor is enabled)
   */
  func onFrame(sampleBuffer: CMSampleBuffer)
  /**
   Called whenever a QR/Barcode has been scanned (only if the CodeScanner output is enabled)
   */
  func onCodeScanned(codes: [CameraSession.Code])
}
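
// Editor's note - a minimal conforming listener (not part of this diff), e.g. for
// embedding the Core library outside of React Native; the print bodies are placeholders.
final class LoggingCameraSessionDelegate: CameraSessionDelegate {
  func onError(_ error: CameraError) {
    print("Camera error: \(error)")
  }

  func onSessionInitialized() {
    print("CameraSession initialized")
  }

  func onFrame(sampleBuffer _: CMSampleBuffer) {
    // per-frame processing would go here
  }

  func onCodeScanned(codes: [CameraSession.Code]) {
    print("Scanned \(codes.count) code(s)")
  }
}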
115
package/ios/Core/PhotoCaptureDelegate.swift
Normal file
@@ -0,0 +1,115 @@
//
// PhotoCaptureDelegate.swift
// mrousavy
//
// Created by Marc Rousavy on 15.12.20.
// Copyright © 2020 mrousavy. All rights reserved.
//

import AVFoundation

// Keeps a strong reference on delegates, as the AVCapturePhotoOutput only holds a weak reference.
private var delegatesReferences: [NSObject] = []

// MARK: - PhotoCaptureDelegate

class PhotoCaptureDelegate: NSObject, AVCapturePhotoCaptureDelegate {
  private let promise: Promise
  private let enableShutterSound: Bool

  required init(promise: Promise, enableShutterSound: Bool) {
    self.promise = promise
    self.enableShutterSound = enableShutterSound
    super.init()
    delegatesReferences.append(self)
  }

  func photoOutput(_: AVCapturePhotoOutput, willCapturePhotoFor _: AVCaptureResolvedPhotoSettings) {
    if !enableShutterSound {
      // disable system shutter sound (see https://stackoverflow.com/a/55235949/5281431)
      AudioServicesDisposeSystemSoundID(1108)
    }
  }

  func photoOutput(_: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    defer {
      delegatesReferences.removeAll(where: { $0 == self })
    }
    if let error = error as NSError? {
      promise.reject(error: .capture(.unknown(message: error.description)), cause: error)
      return
    }

    let error = ErrorPointer(nilLiteral: ())
    guard let tempFilePath = RCTTempFilePath("jpeg", error)
    else {
      let message = error?.pointee?.description
      promise.reject(error: .capture(.createTempFileError(message: message)), cause: error?.pointee)
      return
    }
    let url = URL(fileURLWithPath: tempFilePath)

    guard let data = photo.fileDataRepresentation() else {
      promise.reject(error: .capture(.fileError))
      return
    }

    do {
      try data.write(to: url)
      let exif = photo.metadata["{Exif}"] as? [String: Any]
      let width = exif?["PixelXDimension"]
      let height = exif?["PixelYDimension"]
      let exifOrientation = photo.metadata[String(kCGImagePropertyOrientation)] as? UInt32 ?? CGImagePropertyOrientation.up.rawValue
      let cgOrientation = CGImagePropertyOrientation(rawValue: exifOrientation) ?? CGImagePropertyOrientation.up
      let orientation = getOrientation(forExifOrientation: cgOrientation)
      let isMirrored = getIsMirrored(forExifOrientation: cgOrientation)

      promise.resolve([
        "path": tempFilePath,
        "width": width as Any,
        "height": height as Any,
        "orientation": orientation,
        "isMirrored": isMirrored,
        "isRawPhoto": photo.isRawPhoto,
        "metadata": photo.metadata,
        "thumbnail": photo.embeddedThumbnailPhotoFormat as Any,
      ])
    } catch {
      promise.reject(error: .capture(.fileError), cause: error as NSError)
    }
  }

  func photoOutput(_: AVCapturePhotoOutput, didFinishCaptureFor _: AVCaptureResolvedPhotoSettings, error: Error?) {
    defer {
      delegatesReferences.removeAll(where: { $0 == self })
    }
    if let error = error as NSError? {
      promise.reject(error: .capture(.unknown(message: error.description)), cause: error)
      return
    }
  }

  private func getOrientation(forExifOrientation exifOrientation: CGImagePropertyOrientation) -> String {
    switch exifOrientation {
    case .up, .upMirrored:
      return "portrait"
    case .down, .downMirrored:
      return "portrait-upside-down"
    case .left, .leftMirrored:
      return "landscape-left"
    case .right, .rightMirrored:
      return "landscape-right"
    default:
      return "portrait"
    }
  }

  private func getIsMirrored(forExifOrientation exifOrientation: CGImagePropertyOrientation) -> Bool {
    switch exifOrientation {
    case .upMirrored, .rightMirrored, .downMirrored, .leftMirrored:
      return true
    default:
      return false
    }
  }
}
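
// Editor's note - a sketch (not part of this diff) of how the delegate above is driven;
// `photoOutput` and `promise` are assumed to exist in the calling code. Note that
// AVCapturePhotoOutput only holds the delegate weakly, which is why the class registers
// itself in `delegatesReferences` on init.
//
//   let settings = AVCapturePhotoSettings()
//   let delegate = PhotoCaptureDelegate(promise: promise, enableShutterSound: false)
//   photoOutput.capturePhoto(with: settings, delegate: delegate)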
59
package/ios/Core/PreviewView.swift
Normal file
@@ -0,0 +1,59 @@
//
// PreviewView.swift
// VisionCamera
//
// Created by Marc Rousavy on 30.11.22.
// Copyright © 2022 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation
import UIKit

class PreviewView: UIView {
  /**
   Convenience wrapper to get layer as its statically known type.
   */
  var videoPreviewLayer: AVCaptureVideoPreviewLayer {
    // swiftlint:disable force_cast
    return layer as! AVCaptureVideoPreviewLayer
    // swiftlint:enable force_cast
  }

  /**
   Gets or sets the resize mode of the PreviewView.
   */
  var resizeMode: ResizeMode = .cover {
    didSet {
      switch resizeMode {
      case .cover:
        videoPreviewLayer.videoGravity = .resizeAspectFill
      case .contain:
        videoPreviewLayer.videoGravity = .resizeAspect
      }
    }
  }

  override public class var layerClass: AnyClass {
    return AVCaptureVideoPreviewLayer.self
  }

  func layerRectConverted(fromMetadataOutputRect rect: CGRect) -> CGRect {
    return videoPreviewLayer.layerRectConverted(fromMetadataOutputRect: rect)
  }

  func captureDevicePointConverted(fromLayerPoint point: CGPoint) -> CGPoint {
    return videoPreviewLayer.captureDevicePointConverted(fromLayerPoint: point)
  }

  init(frame: CGRect, session: AVCaptureSession) {
    super.init(frame: frame)
    videoPreviewLayer.session = session
    videoPreviewLayer.videoGravity = .resizeAspectFill
  }

  @available(*, unavailable)
  required init?(coder _: NSCoder) {
    fatalError("init(coder:) is not implemented!")
  }
}
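
// Editor's note - a usage sketch (not part of this diff); `session` is an assumed
// CameraSession instance, mirroring createPreviewView(frame:) from CameraSession.swift.
//
//   let preview = session.createPreviewView(frame: view.bounds)
//   preview.resizeMode = .contain  // letterbox instead of crop
//   view.addSubview(preview)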
213
package/ios/Core/RecordingSession.swift
Normal file
@@ -0,0 +1,213 @@
//
// RecordingSession.swift
// VisionCamera
//
// Created by Marc Rousavy on 01.05.21.
// Copyright © 2021 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation

// MARK: - BufferType

enum BufferType {
  case audio
  case video
}

// MARK: - RecordingSessionError

enum RecordingSessionError: Error {
  case failedToStartSession
}

// MARK: - RecordingSession

class RecordingSession {
  private let assetWriter: AVAssetWriter
  private var audioWriter: AVAssetWriterInput?
  private var bufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
  private let completionHandler: (RecordingSession, AVAssetWriter.Status, Error?) -> Void

  private var initialTimestamp: CMTime?
  private var latestTimestamp: CMTime?
  private var hasStartedWritingSession = false
  private var hasWrittenFirstVideoFrame = false
  private var isFinishing = false

  var url: URL {
    return assetWriter.outputURL
  }

  var duration: Double {
    guard let latestTimestamp = latestTimestamp,
          let initialTimestamp = initialTimestamp else {
      return 0.0
    }
    return (latestTimestamp - initialTimestamp).seconds
  }

  init(url: URL,
       fileType: AVFileType,
       completion: @escaping (RecordingSession, AVAssetWriter.Status, Error?) -> Void) throws {
    completionHandler = completion

    do {
      assetWriter = try AVAssetWriter(outputURL: url, fileType: fileType)
    } catch let error as NSError {
      throw CameraError.capture(.createRecorderError(message: error.description))
    }
  }

  deinit {
    if assetWriter.status == .writing {
      ReactLogger.log(level: .info, message: "Cancelling AssetWriter...")
      assetWriter.cancelWriting()
    }
  }

  /**
   Initializes an AssetWriter for video frames (CMSampleBuffers).
   */
  func initializeVideoWriter(withSettings settings: [String: Any], pixelFormat: OSType) {
    guard !settings.isEmpty else {
      ReactLogger.log(level: .error, message: "Tried to initialize Video Writer with empty settings!")
      return
    }
    guard bufferAdaptor == nil else {
      ReactLogger.log(level: .error, message: "Tried to add Video Writer twice!")
      return
    }

    let videoWriter = AVAssetWriterInput(mediaType: .video, outputSettings: settings)
    videoWriter.expectsMediaDataInRealTime = true

    assetWriter.add(videoWriter)
    bufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriter,
                                                         withVideoSettings: settings,
                                                         pixelFormat: pixelFormat)
    ReactLogger.log(level: .info, message: "Initialized Video AssetWriter.")
  }

  /**
   Initializes an AssetWriter for audio frames (CMSampleBuffers).
   */
  func initializeAudioWriter(withSettings settings: [String: Any]?) {
    guard audioWriter == nil else {
      ReactLogger.log(level: .error, message: "Tried to add Audio Writer twice!")
      return
    }

    let audioWriter = AVAssetWriterInput(mediaType: .audio, outputSettings: settings)
    audioWriter.expectsMediaDataInRealTime = true
    assetWriter.add(audioWriter)
    self.audioWriter = audioWriter
    ReactLogger.log(level: .info, message: "Initialized Audio AssetWriter.")
  }

  /**
   Start the Asset Writer(s). If the AssetWriter fails to start, an error is thrown.
   */
  func startAssetWriter() throws {
    ReactLogger.log(level: .info, message: "Starting Asset Writer(s)...")

    let success = assetWriter.startWriting()
    if !success {
      ReactLogger.log(level: .error, message: "Failed to start Asset Writer(s)!")
      throw RecordingSessionError.failedToStartSession
    }
  }
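
  // Editor's note - the expected call order (not part of this diff), assembled from the
  // methods above; `outputURL`, `videoSettings`, `audioSettings` and `pixelFormat` are
  // assumed inputs (see startRecording earlier in this PR for how they are derived).
  //
  //   let recording = try RecordingSession(url: outputURL, fileType: .mov) { _, status, error in
  //     print("Recording finished with status \(status.rawValue), error: \(String(describing: error))")
  //   }
  //   recording.initializeVideoWriter(withSettings: videoSettings, pixelFormat: pixelFormat)
  //   recording.initializeAudioWriter(withSettings: audioSettings)
  //   try recording.startAssetWriter()
  //   // ...appendBuffer(_:type:timestamp:) per frame, then finish()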

  /**
   Appends a new CMSampleBuffer to the Asset Writer. Use bufferType to specify if this is a video or audio frame.
   The timestamp parameter represents the presentation timestamp of the buffer, which should be synchronized across video and audio frames.
   */
  func appendBuffer(_ buffer: CMSampleBuffer, type bufferType: BufferType, timestamp: CMTime) {
    guard assetWriter.status == .writing else {
      ReactLogger.log(level: .error, message: "Frame arrived, but AssetWriter status is \(assetWriter.status.descriptor)!")
      return
    }
    if !CMSampleBufferDataIsReady(buffer) {
      ReactLogger.log(level: .error, message: "Frame arrived, but sample buffer is not ready!")
      return
    }

    latestTimestamp = timestamp

    switch bufferType {
    case .video:
      guard let bufferAdaptor = bufferAdaptor else {
        ReactLogger.log(level: .error, message: "Video Frame arrived but VideoWriter was nil!")
        return
      }
      if !bufferAdaptor.assetWriterInput.isReadyForMoreMediaData {
        ReactLogger.log(level: .warning,
                        message: "The Video AVAssetWriterInput was not ready for more data! Is your frame rate too high?")
        return
      }
      guard let imageBuffer = CMSampleBufferGetImageBuffer(buffer) else {
        ReactLogger.log(level: .error, message: "Failed to get the CVImageBuffer!")
        return
      }
      // Start the writing session before we write the first video frame
      if !hasStartedWritingSession {
        initialTimestamp = timestamp
        assetWriter.startSession(atSourceTime: timestamp)
        ReactLogger.log(level: .info, message: "Started RecordingSession at \(timestamp.seconds) seconds.")
        hasStartedWritingSession = true
      }
      bufferAdaptor.append(imageBuffer, withPresentationTime: timestamp)
      hasWrittenFirstVideoFrame = true
    case .audio:
      guard let audioWriter = audioWriter else {
        ReactLogger.log(level: .error, message: "Audio Frame arrived but AudioWriter was nil!")
        return
      }
      if !audioWriter.isReadyForMoreMediaData {
        return
      }
      if !hasWrittenFirstVideoFrame || !hasStartedWritingSession {
        // first video frame has not been written yet, so skip this audio frame.
        return
      }
      audioWriter.append(buffer)
    }

    if assetWriter.status == .failed {
      ReactLogger.log(level: .error,
                      message: "AssetWriter failed to write buffer! Error: \(assetWriter.error?.localizedDescription ?? "none")")
      finish()
    }
  }

  /**
   Marks the AssetWriters as finished and stops writing frames. The callback will be invoked either with an error or the status "success".
   */
  func finish() {
    ReactLogger.log(level: .info, message: "Finishing Recording with AssetWriter status \"\(assetWriter.status.descriptor)\"...")

    if isFinishing {
      ReactLogger.log(level: .warning, message: "Tried calling finish() twice while AssetWriter is still writing!")
      return
    }

    if !hasWrittenFirstVideoFrame {
      let error = NSError(domain: "capture/aborted",
                          code: 1,
                          userInfo: [NSLocalizedDescriptionKey: "Stopped Recording Session too early, no frames have been recorded!"])
      completionHandler(self, .failed, error)
    } else if assetWriter.status == .writing {
      isFinishing = true
      bufferAdaptor?.assetWriterInput.markAsFinished()
      audioWriter?.markAsFinished()
      assetWriter.finishWriting {
        self.isFinishing = false
        self.completionHandler(self, self.assetWriter.status, self.assetWriter.error)
      }
    } else {
      completionHandler(self, assetWriter.status, assetWriter.error)
    }
  }
}