react-native-vision-camera/ios/CameraView+Focus.swift

//
// CameraView+Focus.swift
// mrousavy
//
// Created by Marc Rousavy on 19.02.21.
// Copyright © 2021 mrousavy. All rights reserved.
//

import AVFoundation
import Foundation
import UIKit

extension CameraView {
  private func rotateFrameSize(frameSize: CGSize, orientation: UIInterfaceOrientation) -> CGSize {
    switch orientation {
    case .portrait, .portraitUpsideDown, .unknown:
      // swap width and height since the input orientation is rotated
      return CGSize(width: frameSize.height, height: frameSize.width)
    case .landscapeLeft, .landscapeRight:
      // is same as camera sensor orientation
      return frameSize
    @unknown default:
      return frameSize
    }
  }

  /// Converts a Point in the UI View Layer to a Point in the Camera Frame coordinate system
  private func convertLayerPointToFramePoint(layerPoint point: CGPoint) -> CGPoint {
    guard let previewView = previewView else {
      invokeOnError(.session(.cameraNotReady))
      return .zero
    }
    guard let videoDeviceInput = videoDeviceInput else {
      invokeOnError(.session(.cameraNotReady))
      return .zero
    }
    guard let viewScale = window?.screen.scale else {
      invokeOnError(.unknown(message: "View has no parent Window!"))
      return .zero
    }

    let frameSize = rotateFrameSize(frameSize: videoDeviceInput.device.activeFormat.videoDimensions,
                                    orientation: outputOrientation)
    let viewSize = CGSize(width: previewView.bounds.width * viewScale,
                          height: previewView.bounds.height * viewScale)
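    // assumption: the preview aspect-fills the view, so the view shows a centered crop of the
    // frame and the smaller of the two ratios maps view pixels to frame pixels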
    let scale = min(frameSize.width / viewSize.width, frameSize.height / viewSize.height)
    let scaledViewSize = CGSize(width: viewSize.width * scale, height: viewSize.height * scale)

    let overlapX = scaledViewSize.width - frameSize.width
    let overlapY = scaledViewSize.height - frameSize.height
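    // the overlap is <= 0 in both dimensions because the scaled view fits inside the frame;
    // subtracting half of it re-centers the point inside the full (uncropped) frame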
    let scaledPoint = CGPoint(x: point.x * scale, y: point.y * scale)
    return CGPoint(x: scaledPoint.x - (overlapX / 2), y: scaledPoint.y - (overlapY / 2))
  }

  /// Converts a Point in the UI View Layer to a Point in the Camera Device Sensor coordinate system (x: [0..1], y: [0..1])
  private func captureDevicePointConverted(fromLayerPoint pointInLayer: CGPoint) -> CGPoint {
    guard let videoDeviceInput = videoDeviceInput else {
      invokeOnError(.session(.cameraNotReady))
      return .zero
    }
    let frameSize = rotateFrameSize(frameSize: videoDeviceInput.device.activeFormat.videoDimensions,
                                    orientation: outputOrientation)
    let pointInFrame = convertLayerPointToFramePoint(layerPoint: pointInLayer)
    return CGPoint(x: pointInFrame.x / frameSize.width, y: pointInFrame.y / frameSize.height)
  }

  func focus(point: CGPoint, promise: Promise) {
    withPromise(promise) {
      guard let device = self.videoDeviceInput?.device else {
        throw CameraError.session(SessionError.cameraNotReady)
      }
      if !device.isFocusPointOfInterestSupported {
        throw CameraError.device(DeviceError.focusNotSupported)
      }

      // in {0..1} system
      let normalizedPoint = captureDevicePointConverted(fromLayerPoint: point)
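      // changing focus/exposure points requires an exclusive configuration lock on the device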
      do {
        try device.lockForConfiguration()

        device.focusPointOfInterest = normalizedPoint
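        // setting the point of interest alone does not refocus; the focus mode must be set
        // afterwards for the new point to take effect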
        device.focusMode = .continuousAutoFocus

        if device.isExposurePointOfInterestSupported {
          device.exposurePointOfInterest = normalizedPoint
          device.exposureMode = .continuousAutoExposure
        }

        device.unlockForConfiguration()
        return nil
      } catch {
        throw CameraError.device(DeviceError.configureError)
      }
    }
  }
}