chore: Move everything into package/ (#1745)

* Move everything into package

* Remove .DS_Store

* Move scripts and eslintrc to package

* Create CODE_OF_CONDUCT.md

* fix some links

* Update all links (I think)

* Update generated docs

* Update notice-yarn-changes.yml

* Update validate-android.yml

* Update validate-cpp.yml

* Delete notice-yarn-changes.yml

* Update validate-cpp.yml

* Update validate-cpp.yml

* Update validate-js.yml

* Update validate-cpp.yml

* Update validate-cpp.yml

* wrong c++ style

* Revert "wrong c++ style"

This reverts commit 55a3575589c6f13f8b05134d83384f55e0601ab2.
Author: Marc Rousavy
Date: 2023-09-01 18:15:28 +02:00 (committed via GitHub)
Parent: 2a5c33323b
Commit: 036856aed5
347 changed files with 3088 additions and 154 deletions

package/src/Camera.tsx (new file)

@@ -0,0 +1,426 @@
import React from 'react';
import { requireNativeComponent, NativeSyntheticEvent, findNodeHandle, NativeMethods } from 'react-native';
import type { CameraDevice } from './CameraDevice';
import type { ErrorWithCause } from './CameraError';
import { CameraCaptureError, CameraRuntimeError, tryParseNativeCameraError, isErrorWithCause } from './CameraError';
import type { CameraProps, FrameProcessor } from './CameraProps';
import { CameraModule } from './NativeCameraModule';
import type { PhotoFile, TakePhotoOptions } from './PhotoFile';
import type { Point } from './Point';
import type { RecordVideoOptions, VideoFile } from './VideoFile';
import { VisionCameraProxy } from './FrameProcessorPlugins';
//#region Types
export type CameraPermissionStatus = 'granted' | 'not-determined' | 'denied' | 'restricted';
export type CameraPermissionRequestResult = 'granted' | 'denied';
interface OnErrorEvent {
code: string;
message: string;
cause?: ErrorWithCause;
}
type NativeCameraViewProps = Omit<CameraProps, 'device' | 'onInitialized' | 'onError' | 'frameProcessor'> & {
cameraId: string;
enableFrameProcessor: boolean;
onInitialized?: (event: NativeSyntheticEvent<void>) => void;
onError?: (event: NativeSyntheticEvent<OnErrorEvent>) => void;
onViewReady: () => void;
};
type RefType = React.Component<NativeCameraViewProps> & Readonly<NativeMethods>;
//#endregion
//#region Camera Component
/**
* ### A powerful `<Camera>` component.
*
* Read the [VisionCamera documentation](https://react-native-vision-camera.com/) for more information.
*
* The `<Camera>` component's most important (and therefore _required_) properties are:
*
* * {@linkcode CameraProps.device | device}: Specifies the {@linkcode CameraDevice} to use. Get a {@linkcode CameraDevice} by using the {@linkcode useCameraDevices | useCameraDevices()} hook, or manually by using the {@linkcode Camera.getAvailableCameraDevices Camera.getAvailableCameraDevices()} function.
* * {@linkcode CameraProps.isActive | isActive}: A boolean value that specifies whether the Camera should actively stream video frames or not. This can be compared to a Video component, where `isActive` specifies whether the video is paused or not. If you fully unmount the `<Camera>` component instead of using `isActive={false}`, the Camera will take a bit longer to start again.
*
* @example
* ```tsx
* function App() {
* const devices = useCameraDevices('wide-angle-camera')
* const device = devices.back
*
* if (device == null) return <LoadingView />
* return (
* <Camera
* style={StyleSheet.absoluteFill}
* device={device}
* isActive={true}
* />
* )
* }
* ```
*
* @component
*/
export class Camera extends React.PureComponent<CameraProps> {
/** @internal */
static displayName = 'Camera';
/** @internal */
displayName = Camera.displayName;
private lastFrameProcessor: FrameProcessor | undefined;
private isNativeViewMounted = false;
private readonly ref: React.RefObject<RefType>;
/** @internal */
constructor(props: CameraProps) {
super(props);
this.onViewReady = this.onViewReady.bind(this);
this.onInitialized = this.onInitialized.bind(this);
this.onError = this.onError.bind(this);
this.ref = React.createRef<RefType>();
this.lastFrameProcessor = undefined;
}
private get handle(): number {
const nodeHandle = findNodeHandle(this.ref.current);
if (nodeHandle == null || nodeHandle === -1) {
throw new CameraRuntimeError(
'system/view-not-found',
"Could not get the Camera's native view tag! Does the Camera View exist in the native view-tree?",
);
}
return nodeHandle;
}
//#region View-specific functions (UIViewManager)
/**
* Take a single photo and write its contents to a temporary file.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while capturing the photo. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
* @example
* ```ts
* const photo = await camera.current.takePhoto({
* qualityPrioritization: 'quality',
* flash: 'on',
* enableAutoRedEyeReduction: true
* })
* ```
*/
public async takePhoto(options?: TakePhotoOptions): Promise<PhotoFile> {
try {
return await CameraModule.takePhoto(this.handle, options ?? {});
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Start a new video recording.
*
* Records in the following formats:
* * **iOS**: QuickTime (`.mov`)
* * **Android**: MPEG4 (`.mp4`)
*
* @blocking This function is synchronized/blocking.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while starting the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* camera.current.startRecording({
* onRecordingFinished: (video) => console.log(video),
* onRecordingError: (error) => console.error(error),
* })
* setTimeout(() => {
* camera.current.stopRecording()
* }, 5000)
* ```
*/
public startRecording(options: RecordVideoOptions): void {
const { onRecordingError, onRecordingFinished, ...passThroughOptions } = options;
if (typeof onRecordingError !== 'function' || typeof onRecordingFinished !== 'function')
throw new CameraRuntimeError('parameter/invalid-parameter', 'The onRecordingError or onRecordingFinished functions were not set!');
const onRecordCallback = (video?: VideoFile, error?: CameraCaptureError): void => {
if (error != null) return onRecordingError(error);
if (video != null) return onRecordingFinished(video);
};
// TODO: Use TurboModules to either make this a sync invocation, or make it async.
try {
CameraModule.startRecording(this.handle, passThroughOptions, onRecordCallback);
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Pauses the current video recording.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while pausing the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* // Start
* camera.current.startRecording({
*   onRecordingFinished: (video) => console.log(video),
*   onRecordingError: (error) => console.error(error),
* })
* await timeout(1000)
* // Pause
* await camera.current.pauseRecording()
* await timeout(500)
* // Resume
* await camera.current.resumeRecording()
* await timeout(2000)
* // Stop
* await camera.current.stopRecording()
* ```
*/
public async pauseRecording(): Promise<void> {
try {
return await CameraModule.pauseRecording(this.handle);
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Resumes a currently paused video recording.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while resuming the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* // Start
* camera.current.startRecording({
*   onRecordingFinished: (video) => console.log(video),
*   onRecordingError: (error) => console.error(error),
* })
* await timeout(1000)
* // Pause
* await camera.current.pauseRecording()
* await timeout(500)
* // Resume
* await camera.current.resumeRecording()
* await timeout(2000)
* // Stop
* await camera.current.stopRecording()
* ```
*/
public async resumeRecording(): Promise<void> {
try {
return await CameraModule.resumeRecording(this.handle);
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Stop the current video recording.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while stopping the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* camera.current.startRecording({
*   onRecordingFinished: (video) => console.log(video),
*   onRecordingError: (error) => console.error(error),
* })
* setTimeout(async () => {
* await camera.current.stopRecording()
* }, 5000)
* ```
*/
public async stopRecording(): Promise<void> {
try {
return await CameraModule.stopRecording(this.handle);
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Focus the camera to a specific point in the coordinate system.
* @param {Point} point The point to focus to. This should be relative to the Camera view's coordinate system,
* and expressed in Pixel on iOS and Points on Android.
* * `(0, 0)` means **top left**.
* * `(CameraView.width, CameraView.height)` means **bottom right**.
*
* Make sure the value doesn't exceed the CameraView's dimensions.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while focusing. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
* @example
* ```ts
* await camera.current.focus({
* x: tapEvent.x,
* y: tapEvent.y
* })
* ```
*/
public async focus(point: Point): Promise<void> {
try {
return await CameraModule.focus(this.handle, point);
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
//#endregion
//#region Static Functions (NativeModule)
/**
* Get a list of all available camera devices on the current phone.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while getting all available camera devices. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
* @example
* ```ts
* const devices = await Camera.getAvailableCameraDevices()
* const filtered = devices.filter((d) => matchesMyExpectations(d))
* const sorted = devices.sort(sortDevicesByAmountOfCameras)
* return {
* back: sorted.find((d) => d.position === "back"),
* front: sorted.find((d) => d.position === "front")
* }
* ```
*/
public static async getAvailableCameraDevices(): Promise<CameraDevice[]> {
try {
return await CameraModule.getAvailableCameraDevices();
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Gets the current Camera Permission Status. Check this before mounting the Camera to ensure
* the user has permitted the app to use the camera.
*
* To actually prompt the user for camera permission, use {@linkcode Camera.requestCameraPermission | requestCameraPermission()}.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while getting the current permission status. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
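*
* A minimal usage sketch combining this with {@linkcode Camera.requestCameraPermission | requestCameraPermission()} (`showPermissionDeniedScreen` is a hypothetical helper):
* @example
* ```ts
* const status = await Camera.getCameraPermissionStatus()
* if (status !== 'granted') {
*   const result = await Camera.requestCameraPermission()
*   if (result === 'denied') showPermissionDeniedScreen()
* }
* ```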
*/
public static async getCameraPermissionStatus(): Promise<CameraPermissionStatus> {
try {
return await CameraModule.getCameraPermissionStatus();
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Gets the current Microphone-Recording Permission Status. Check this before mounting the Camera to ensure
* the user has permitted the app to use the microphone.
*
* To actually prompt the user for microphone permission, use {@linkcode Camera.requestMicrophonePermission | requestMicrophonePermission()}.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while getting the current permission status. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
*/
public static async getMicrophonePermissionStatus(): Promise<CameraPermissionStatus> {
try {
return await CameraModule.getMicrophonePermissionStatus();
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Shows a "request permission" alert to the user, and resolves with the new camera permission status.
*
* If the user has previously blocked the app from using the camera, the alert will not be shown
* and `"denied"` will be returned.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while requesting permission. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
*/
public static async requestCameraPermission(): Promise<CameraPermissionRequestResult> {
try {
return await CameraModule.requestCameraPermission();
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
/**
* Shows a "request permission" alert to the user, and resolves with the new microphone permission status.
*
* If the user has previously blocked the app from using the microphone, the alert will not be shown
* and `"denied"` will be returned.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while requesting permission. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
*/
public static async requestMicrophonePermission(): Promise<CameraPermissionRequestResult> {
try {
return await CameraModule.requestMicrophonePermission();
} catch (e) {
throw tryParseNativeCameraError(e);
}
}
//#endregion
//#region Events (Wrapped to maintain reference equality)
private onError(event: NativeSyntheticEvent<OnErrorEvent>): void {
if (this.props.onError != null) {
const error = event.nativeEvent;
const cause = isErrorWithCause(error.cause) ? error.cause : undefined;
this.props.onError(
// @ts-expect-error We're casting from unknown bridge types to TS unions, I expect it to hopefully work
new CameraRuntimeError(error.code, error.message, cause),
);
}
}
private onInitialized(): void {
this.props.onInitialized?.();
}
//#endregion
//#region Lifecycle
private setFrameProcessor(frameProcessor: FrameProcessor): void {
VisionCameraProxy.setFrameProcessor(this.handle, frameProcessor);
}
private unsetFrameProcessor(): void {
VisionCameraProxy.removeFrameProcessor(this.handle);
}
private onViewReady(): void {
this.isNativeViewMounted = true;
if (this.props.frameProcessor != null) {
// user passed a `frameProcessor` but we didn't set it yet because the native view was not mounted yet. set it now.
this.setFrameProcessor(this.props.frameProcessor);
this.lastFrameProcessor = this.props.frameProcessor;
}
}
/** @internal */
componentDidUpdate(): void {
if (!this.isNativeViewMounted) return;
const frameProcessor = this.props.frameProcessor;
if (frameProcessor !== this.lastFrameProcessor) {
// frameProcessor argument identity changed. Update native to reflect the change.
if (frameProcessor != null) this.setFrameProcessor(frameProcessor);
else this.unsetFrameProcessor();
this.lastFrameProcessor = frameProcessor;
}
}
//#endregion
/** @internal */
public render(): React.ReactNode {
// We remove the big `device` object from the props because we only need to pass `cameraId` to native.
const { device, frameProcessor, ...props } = this.props;
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (device == null) {
throw new Error(
'Camera: `device` is null! Select a valid Camera device. See: https://mrousavy.com/react-native-vision-camera/docs/guides/devices',
);
}
return (
<NativeCameraView
{...props}
cameraId={device.id}
ref={this.ref}
onViewReady={this.onViewReady}
onInitialized={this.onInitialized}
onError={this.onError}
enableFrameProcessor={frameProcessor != null}
/>
);
}
}
//#endregion
// requireNativeComponent automatically resolves 'CameraView' to 'CameraViewManager'
const NativeCameraView = requireNativeComponent<NativeCameraViewProps>(
'CameraView',
// @ts-expect-error because the type declarations are kinda wrong, no?
Camera,
);

package/src/CameraDevice.ts (new file)

@@ -0,0 +1,235 @@
import type { CameraPosition } from './CameraPosition';
import { Orientation } from './Orientation';
import type { PixelFormat } from './PixelFormat';
/**
* Identifiers for a physical camera (one that actually exists on the back/front of the device)
*
* * `"ultra-wide-angle-camera"`: A built-in camera with a shorter focal length than that of a wide-angle camera. (focal length below 24mm)
* * `"wide-angle-camera"`: A built-in wide-angle camera. (focal length between 24mm and 35mm)
* * `"telephoto-camera"`: A built-in camera device with a longer focal length than a wide-angle camera. (focal length above 85mm)
*/
export type PhysicalCameraDeviceType = 'ultra-wide-angle-camera' | 'wide-angle-camera' | 'telephoto-camera';
/**
* Identifiers for a logical camera (Combinations of multiple physical cameras to create a single logical camera).
*
* * `"dual-camera"`: A combination of wide-angle and telephoto cameras that creates a capture device.
* * `"dual-wide-camera"`: A device that consists of two cameras of fixed focal length, one ultrawide angle and one wide angle.
* * `"triple-camera"`: A device that consists of three cameras of fixed focal length, one ultrawide angle, one wide angle, and one telephoto.
*/
export type LogicalCameraDeviceType = 'dual-camera' | 'dual-wide-camera' | 'triple-camera';
/**
* Parses an array of physical device types into a single {@linkcode PhysicalCameraDeviceType} or {@linkcode LogicalCameraDeviceType}, depending on what matches.
* @method
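* For example (the return values follow the combination checks in the implementation below):
* @example
* ```ts
* parsePhysicalDeviceTypes(['wide-angle-camera']) // 'wide-angle-camera'
* parsePhysicalDeviceTypes(['ultra-wide-angle-camera', 'wide-angle-camera']) // 'dual-wide-camera'
* parsePhysicalDeviceTypes(['ultra-wide-angle-camera', 'wide-angle-camera', 'telephoto-camera']) // 'triple-camera'
* ```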
*/
export const parsePhysicalDeviceTypes = (
physicalDeviceTypes: PhysicalCameraDeviceType[],
): PhysicalCameraDeviceType | LogicalCameraDeviceType => {
if (physicalDeviceTypes.length === 1) {
// @ts-expect-error for very obvious reasons
return physicalDeviceTypes[0];
}
const hasWide = physicalDeviceTypes.includes('wide-angle-camera');
const hasUltra = physicalDeviceTypes.includes('ultra-wide-angle-camera');
const hasTele = physicalDeviceTypes.includes('telephoto-camera');
if (hasTele && hasWide && hasUltra) return 'triple-camera';
if (hasWide && hasUltra) return 'dual-wide-camera';
if (hasWide && hasTele) return 'dual-camera';
throw new Error(`Invalid physical device type combination! ${physicalDeviceTypes.join(' + ')}`);
};
/**
* Indicates a format's autofocus system.
*
* * `"none"`: Indicates that autofocus is not available
* * `"contrast-detection"`: Indicates that autofocus is achieved by contrast detection. Contrast detection performs a focus scan to find the optimal position
* * `"phase-detection"`: Indicates that autofocus is achieved by phase detection. Phase detection has the ability to achieve focus in many cases without a focus scan. Phase detection autofocus is typically less visually intrusive than contrast detection autofocus
*/
export type AutoFocusSystem = 'contrast-detection' | 'phase-detection' | 'none';
/**
* Indicates a format's supported video stabilization mode. Enabling video stabilization may introduce additional latency into the video capture pipeline.
*
* * `"off"`: No video stabilization. Indicates that video should not be stabilized
* * `"standard"`: Standard software-based video stabilization. Standard video stabilization reduces the field of view by about 10%.
* * `"cinematic"`: Advanced software-based video stabilization. This applies more aggressive cropping or transformations than standard.
* * `"cinematic-extended"`: Extended software- and hardware-based stabilization that aggressively crops and transforms the video to apply a smooth cinematic stabilization.
* * `"auto"`: Indicates that the most appropriate video stabilization mode for the device and format should be chosen automatically
*/
export type VideoStabilizationMode = 'off' | 'standard' | 'cinematic' | 'cinematic-extended' | 'auto';
/**
* A Camera Device's video format. Do not create instances of this type yourself, only use {@linkcode Camera.getAvailableCameraDevices | Camera.getAvailableCameraDevices()}.
*/
export interface CameraDeviceFormat {
/**
* The height of the highest resolution a still image (photo) can be produced in
*/
photoHeight: number;
/**
* The width of the highest resolution a still image (photo) can be produced in
*/
photoWidth: number;
/**
* The video resolution's height
*/
videoHeight: number;
/**
* The video resolution's width
*/
videoWidth: number;
/**
* Maximum supported ISO value
*/
maxISO: number;
/**
* Minimum supported ISO value
*/
minISO: number;
/**
* The video field of view in degrees
*/
fieldOfView: number;
/**
* The maximum zoom factor (e.g. `128`)
*/
maxZoom: number;
/**
* Specifies whether this format supports HDR mode for video capture
*/
supportsVideoHDR: boolean;
/**
* Specifies whether this format supports HDR mode for photo capture
*/
supportsPhotoHDR: boolean;
/**
* The minimum frame rate this Format needs to run at. High resolution formats often run at lower frame rates.
*/
minFps: number;
/**
* The maximum frame rate this Format is able to run at. High resolution formats often run at lower frame rates.
*/
maxFps: number;
/**
* Specifies this format's auto focus system.
*/
autoFocusSystem: AutoFocusSystem;
/**
* All supported video stabilization modes
*/
videoStabilizationModes: VideoStabilizationMode[];
/**
* Specifies this format's supported pixel-formats.
* In most cases, this is `['native', 'yuv']`.
*/
pixelFormats: PixelFormat[];
}
/**
* Represents a camera device discovered by the {@linkcode Camera.getAvailableCameraDevices | Camera.getAvailableCameraDevices()} function
*/
export interface CameraDevice {
/**
* The native ID of the camera device instance.
*/
id: string;
/**
* The physical devices this `CameraDevice` contains.
*
* * If this camera device is a **logical camera** (combination of multiple physical cameras), there are multiple cameras in this array.
* * If this camera device is a **physical camera**, there is only a single element in this array.
*
* You can check if the camera is a logical multi-camera by using the `isMultiCam` property.
*/
devices: PhysicalCameraDeviceType[];
/**
* Specifies the physical position of this camera. (back or front)
*/
position: CameraPosition;
/**
* A friendly localized name describing the camera.
*/
name: string;
/**
* Specifies whether this camera supports enabling flash for photo capture.
*/
hasFlash: boolean;
/**
* Specifies whether this camera supports continuously enabling the flash to act like a torch (flash with video capture)
*/
hasTorch: boolean;
/**
* A property indicating whether the device is a virtual multi-camera consisting of multiple combined physical cameras.
*
* Examples:
* * The Dual Camera, which supports seamlessly switching between a wide and telephoto camera while zooming and generating depth data from the disparities between the different points of view of the physical cameras.
* * The TrueDepth Camera, which generates depth data from disparities between a YUV camera and an Infrared camera pointed in the same direction.
*/
isMultiCam: boolean;
/**
* Minimum available zoom factor (e.g. `1`)
*/
minZoom: number;
/**
* Maximum available zoom factor (e.g. `128`)
*/
maxZoom: number;
/**
* The zoom factor where the camera is "neutral".
*
* * For single-physical cameras this property is always `1.0`.
* * For multi cameras this property is a value between `minZoom` and `maxZoom`, where the camera is in _wide-angle_ mode and hasn't switched to the _ultra-wide-angle_ ("fish-eye") or telephoto camera yet.
*
* Use this value as an initial value for the zoom property if you implement custom zoom. (e.g. a Reanimated shared value should initially be set to this value)
* @example
* const device = ...
*
* const zoom = useSharedValue(device.neutralZoom) // <-- initial value so it doesn't start at ultra-wide
* const cameraProps = useAnimatedProps(() => ({
* zoom: zoom.value
* }))
*/
neutralZoom: number;
/**
* All available formats for this camera device. Use this to find the best format for your use case and set it to the {@linkcode CameraProps.format | Camera's .format} property.
*
* See [the Camera Formats documentation](https://react-native-vision-camera.com/docs/guides/formats) for more information about Camera Formats.
*/
formats: CameraDeviceFormat[];
/**
* Whether this camera device supports low light boost.
*/
supportsLowLightBoost: boolean;
/**
* Whether this camera supports taking photos with depth data.
*
* **! Work in Progress !**
*/
supportsDepthCapture: boolean;
/**
* Whether this camera supports taking photos in RAW format
*
* **! Work in Progress !**
*/
supportsRawCapture: boolean;
/**
* Specifies whether this device supports focusing ({@linkcode Camera.focus | Camera.focus(...)})
*/
supportsFocus: boolean;
/**
* The hardware level of the Camera.
* - On Android, some older devices are running at a `legacy` or `limited` level which means they are running in a backwards compatible mode.
* - On iOS, all devices are `full`.
*/
hardwareLevel: 'legacy' | 'limited' | 'full';
/**
* Represents the sensor's orientation relative to the phone.
* For most phones this will be landscape, as Camera sensors are usually rotated by 90 degrees (i.e. width and height are flipped).
*/
sensorOrientation: Orientation;
}

package/src/CameraError.ts (new file)

@@ -0,0 +1,217 @@
export type PermissionError = 'permission/microphone-permission-denied' | 'permission/camera-permission-denied';
export type ParameterError =
| 'parameter/invalid-parameter'
| 'parameter/unsupported-os'
| 'parameter/unsupported-output'
| 'parameter/unsupported-input'
| 'parameter/invalid-combination';
export type DeviceError =
| 'device/configuration-error'
| 'device/no-device'
| 'device/invalid-device'
| 'device/torch-unavailable'
| 'device/microphone-unavailable'
| 'device/pixel-format-not-supported'
| 'device/low-light-boost-not-supported'
| 'device/focus-not-supported'
| 'device/camera-not-available-on-simulator';
export type FormatError =
| 'format/invalid-fps'
| 'format/invalid-hdr'
| 'format/invalid-low-light-boost'
| 'format/invalid-format'
| 'format/invalid-color-space';
export type SessionError =
| 'session/camera-not-ready'
| 'session/camera-cannot-be-opened'
| 'session/camera-has-been-disconnected'
| 'session/audio-session-setup-failed'
| 'session/audio-in-use-by-other-app'
| 'session/audio-session-failed-to-activate';
export type CaptureError =
| 'capture/invalid-photo-format'
| 'capture/encoder-error'
| 'capture/muxer-error'
| 'capture/recording-in-progress'
| 'capture/no-recording-in-progress'
| 'capture/file-io-error'
| 'capture/create-temp-file-error'
| 'capture/invalid-video-options'
| 'capture/create-recorder-error'
| 'capture/recorder-error'
| 'capture/no-valid-data'
| 'capture/inactive-source'
| 'capture/insufficient-storage'
| 'capture/file-size-limit-reached'
| 'capture/invalid-photo-codec'
| 'capture/not-bound-error'
| 'capture/capture-type-not-supported'
| 'capture/video-not-enabled'
| 'capture/photo-not-enabled'
| 'capture/aborted'
| 'capture/unknown';
export type SystemError =
| 'system/camera-module-not-found'
| 'system/no-camera-manager'
| 'system/frame-processors-unavailable'
| 'system/view-not-found';
export type UnknownError = 'unknown/unknown';
/**
* Represents a JSON-style error cause. This contains native `NSError`/`Throwable` information, and can have recursive {@linkcode ErrorWithCause.cause | .cause} properties until the ultimate cause has been found.
*/
export interface ErrorWithCause {
/**
* The native error's code.
*
* * iOS: `NSError.code`
* * Android: N/A
*/
code?: number;
/**
* The native error's domain.
*
* * iOS: `NSError.domain`
* * Android: N/A
*/
domain?: string;
/**
* The native error description
*
* * iOS: `NSError.message`
* * Android: `Throwable.message`
*/
message: string;
/**
* Optional additional details
*
* * iOS: `NSError.userInfo`
* * Android: N/A
*/
details?: Record<string, unknown>;
/**
* Optional Java stacktrace
*
* * iOS: N/A
* * Android: `Throwable.stacktrace.toString()`
*/
stacktrace?: string;
/**
* Optional additional cause for nested errors
*
* * iOS: N/A
* * Android: `Throwable.cause`
*/
cause?: ErrorWithCause;
}
type CameraErrorCode =
| PermissionError
| ParameterError
| DeviceError
| FormatError
| SessionError
| CaptureError
| SystemError
| UnknownError;
/**
* Represents any kind of error that occurred in the {@linkcode Camera} View Module.
*/
class CameraError<TCode extends CameraErrorCode> extends Error {
private readonly _code: TCode;
private readonly _message: string;
private readonly _cause?: ErrorWithCause;
public get code(): TCode {
return this._code;
}
public get message(): string {
return this._message;
}
public get cause(): Error | undefined {
const c = this._cause;
if (c == null) return undefined;
return new Error(`[${c.code}]: ${c.message}`);
}
/**
* @internal
*/
constructor(code: TCode, message: string, cause?: ErrorWithCause) {
super(`[${code}]: ${message}${cause != null ? ` (Cause: ${cause.message})` : ''}`);
super.name = code;
super.message = message;
this._code = code;
this._message = message;
this._cause = cause;
}
public toString(): string {
return `[${this.code}]: ${this.message}`;
}
}
/**
* Represents any kind of error that occurred while trying to capture a video or photo.
*
* See the ["Camera Errors" documentation](https://react-native-vision-camera.com/docs/guides/errors) for more information about Camera Errors.
*/
export class CameraCaptureError extends CameraError<CaptureError> {}
/**
* Represents any kind of error that occurred in the Camera View Module.
*
* See the ["Camera Errors" documentation](https://react-native-vision-camera.com/docs/guides/errors) for more information about Camera Errors.
*/
export class CameraRuntimeError extends CameraError<
PermissionError | ParameterError | DeviceError | FormatError | SessionError | SystemError | UnknownError
> {}
/**
* Checks if the given `error` is of type {@linkcode ErrorWithCause}
* @param {unknown} error Any unknown object to validate
* @returns `true` if the given `error` is of type {@linkcode ErrorWithCause}
*/
export const isErrorWithCause = (error: unknown): error is ErrorWithCause =>
typeof error === 'object' &&
error != null &&
// @ts-expect-error error is still unknown
typeof error.message === 'string' &&
// @ts-expect-error error is still unknown
(typeof error.stacktrace === 'string' || error.stacktrace == null) &&
// @ts-expect-error error is still unknown
(isErrorWithCause(error.cause) || error.cause == null);
const isCameraErrorJson = (error: unknown): error is { code: string; message: string; cause?: ErrorWithCause } =>
typeof error === 'object' &&
error != null &&
// @ts-expect-error error is still unknown
typeof error.code === 'string' &&
// @ts-expect-error error is still unknown
typeof error.message === 'string' &&
// @ts-expect-error error is still unknown
(typeof error.cause === 'object' || error.cause == null);
/**
* Tries to parse an error coming from native to a typed JS camera error.
* @param {CameraError} nativeError The native error instance. This is a plain JSON object when using the legacy native module architecture.
* @returns A {@linkcode CameraRuntimeError} or {@linkcode CameraCaptureError}, or the `nativeError` itself if it's not parsable
* @method
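* This mirrors how the Camera view wraps its native calls, e.g.:
* @example
* ```ts
* try {
*   return await CameraModule.takePhoto(this.handle, options ?? {})
* } catch (e) {
*   throw tryParseNativeCameraError(e)
* }
* ```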
*/
export const tryParseNativeCameraError = <T>(nativeError: T): (CameraRuntimeError | CameraCaptureError) | T => {
if (isCameraErrorJson(nativeError)) {
if (nativeError.code.startsWith('capture')) {
return new CameraCaptureError(nativeError.code as CaptureError, nativeError.message, nativeError.cause);
} else {
return new CameraRuntimeError(
// @ts-expect-error the code is string, we narrow it down to TS union.
nativeError.code,
nativeError.message,
nativeError.cause,
);
}
} else {
return nativeError;
}
};

package/src/CameraPosition.ts (new file)

@@ -0,0 +1,13 @@
/**
* Represents the camera device position.
*
* * `"back"`: Indicates that the device is physically located on the back of the system hardware
* * `"front"`: Indicates that the device is physically located on the front of the system hardware
*
* #### iOS only
* * `"unspecified"`: Indicates that the device's position relative to the system hardware is unspecified
*
* #### Android only
* * `"external"`: The camera device is an external camera, and has no fixed facing relative to the device's screen. (Android only)
*/
export type CameraPosition = 'front' | 'back' | 'unspecified' | 'external';

package/src/CameraProps.ts (new file)

@@ -0,0 +1,206 @@
import type { ViewProps } from 'react-native';
import type { CameraDevice, CameraDeviceFormat, VideoStabilizationMode } from './CameraDevice';
import type { CameraRuntimeError } from './CameraError';
import type { Frame } from './Frame';
import type { Orientation } from './Orientation';
export type FrameProcessor = {
frameProcessor: (frame: Frame) => void;
type: 'frame-processor';
};
// TODO: Replace `enableHighQualityPhotos: boolean` with `prioritization: 'photo' | 'video'`
// TODO: Use RCT_ENUM_PARSER for stuff like torch, videoStabilizationMode, and orientation
// TODO: Use Photo HostObject for stuff like depthData, portraitEffects, etc.
// TODO: Add RAW capture support
export interface CameraProps extends ViewProps {
/**
* The Camera Device to use.
*
* See the [Camera Devices](https://react-native-vision-camera.com/docs/guides/devices) section in the documentation for more information about Camera Devices.
*
* @example
* ```tsx
* const devices = useCameraDevices('wide-angle-camera')
* const device = devices.back
*
* return (
* <Camera
* device={device}
* isActive={true}
* style={StyleSheet.absoluteFill}
* />
* )
* ```
*/
device: CameraDevice;
/**
* Whether the Camera should actively stream video frames, or not. See the [documentation about the `isActive` prop](https://react-native-vision-camera.com/docs/guides/lifecycle#the-isactive-prop) for more information.
*
* This can be compared to a Video component, where `isActive` specifies whether the video is paused or not.
*
* > Note: If you fully unmount the `<Camera>` component instead of using `isActive={false}`, the Camera will take a bit longer to start again. In return, it will use less resources since the Camera will be completely destroyed when unmounted.
*/
isActive: boolean;
//#region Use-cases
/**
* Enables **photo capture** with the `takePhoto` function (see ["Taking Photos"](https://react-native-vision-camera.com/docs/guides/capturing#taking-photos))
*/
photo?: boolean;
/**
* Enables **video capture** with the `startRecording` function (see ["Recording Videos"](https://react-native-vision-camera.com/docs/guides/capturing/#recording-videos))
*
* Note: If both the `photo` and `video` properties are enabled at the same time and the device is running at a `hardwareLevel` of `'legacy'` or `'limited'`, VisionCamera _might_ use a lower resolution for video capture due to hardware constraints.
*/
video?: boolean;
/**
* Enables **audio capture** for video recordings (see ["Recording Videos"](https://react-native-vision-camera.com/docs/guides/capturing/#recording-videos))
*/
audio?: boolean;
/**
* Specifies the pixel format for the video pipeline.
*
* Frames from a [Frame Processor](https://mrousavy.github.io/react-native-vision-camera/docs/guides/frame-processors) will be streamed in the pixel format specified here.
*
* While `native` and `yuv` are the most efficient formats, some ML models (such as MLKit Barcode detection) require input Frames to be in RGB colorspace, otherwise they just output nonsense.
*
* - `native`: The hardware native GPU buffer format. This is the most efficient format. (`PRIVATE` on Android, sometimes YUV on iOS)
* - `yuv`: The YUV (Y'CbCr 4:2:0 or NV21, 8-bit) format, either video- or full-range, depending on hardware capabilities. This is the second most efficient format.
* - `rgb`: The RGB (RGB, RGBA or ABGRA, 8-bit) format. This is least efficient and requires explicit conversion.
*
* @default `native`
*/
pixelFormat?: 'native' | 'yuv' | 'rgb';
//#endregion
//#region Common Props (torch, zoom)
/**
* Set the current torch mode.
*
* Note: The torch is only available on `"back"` cameras, and isn't supported by every phone.
*
* @default "off"
*/
torch?: 'off' | 'on';
/**
* Specifies the zoom factor of the current camera, as a scale factor.
*
* This value ranges from `minZoom` (e.g. `1`) to `maxZoom` (e.g. `128`). It is recommended to set this value
* to the CameraDevice's `neutralZoom` by default and let the user zoom out to the fish-eye (ultra-wide) camera
* on demand (if available).
*
* **Note:** Linearly increasing this value always appears logarithmic to the user.
*
* @default 1.0
*/
zoom?: number;
/**
* Enables or disables the native pinch to zoom gesture.
*
* If you want to implement a custom zoom gesture, see [the Zooming with Reanimated documentation](https://react-native-vision-camera.com/docs/guides/animated).
*
* @default false
*/
enableZoomGesture?: boolean;
//#endregion
//#region Format/Preset selection
/**
* Selects a given format. By default, the best matching format is chosen.
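*
* A selection sketch using fields from {@linkcode CameraDeviceFormat} (the filter criteria here are illustrative, not a recommendation):
* @example
* ```tsx
* const format = device.formats
*   .filter((f) => f.videoHeight >= 1080 && f.maxFps >= 60)
*   .sort((a, b) => b.photoHeight - a.photoHeight)[0]
*
* return <Camera {...props} device={device} format={format} fps={60} />
* ```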
*/
format?: CameraDeviceFormat;
/**
* Specify the frames per second this camera should use. Make sure the given `format` includes a frame rate range with the given `fps`.
*
* Requires `format` to be set.
*/
fps?: number;
/**
* Enables or disables HDR on this camera device. Make sure the given `format` supports HDR mode.
*
* Requires `format` to be set.
*/
hdr?: boolean;
/**
* Enables or disables low-light boost on this camera device. Make sure the given `format` supports low-light boost.
*
* Requires `format` to be set.
*/
lowLightBoost?: boolean;
/**
* Specifies the video stabilization mode to use.
*
* Requires a `format` to be set that contains the given `videoStabilizationMode`.
*/
videoStabilizationMode?: VideoStabilizationMode;
//#endregion
/**
* Also captures data from depth-perception sensors. (e.g. disparity maps)
*
* @default false
*/
enableDepthData?: boolean;
/**
* A boolean specifying whether the photo render pipeline is prepared for portrait effects matte delivery.
*
* When enabling this, you must also set `enableDepthData` to `true`.
*
* @platform iOS 12.0+
* @default false
*/
enablePortraitEffectsMatteDelivery?: boolean;
/**
* Indicates whether the Camera should prepare the photo pipeline to provide maximum quality photos.
*
* This enables:
* * High Resolution Capture ([`isHighResolutionCaptureEnabled`](https://developer.apple.com/documentation/avfoundation/avcapturephotooutput/1648721-ishighresolutioncaptureenabled))
* * Virtual Device fusion for greater detail ([`isVirtualDeviceConstituentPhotoDeliveryEnabled`](https://developer.apple.com/documentation/avfoundation/avcapturephotooutput/3192189-isvirtualdeviceconstituentphotod))
* * Dual Device fusion for greater detail ([`isDualCameraDualPhotoDeliveryEnabled`](https://developer.apple.com/documentation/avfoundation/avcapturephotosettings/2873917-isdualcameradualphotodeliveryena))
* * Sets the maximum quality prioritization to `.quality` ([`maxPhotoQualityPrioritization`](https://developer.apple.com/documentation/avfoundation/avcapturephotooutput/3182995-maxphotoqualityprioritization))
*
* @default false
*/
enableHighQualityPhotos?: boolean;
/**
* If `true`, show a debug view to display the FPS of the Camera session.
* This is useful for debugging your Frame Processor's speed.
*
* @default false
*/
enableFpsGraph?: boolean;
/**
* Represents the orientation of all Camera Outputs (Photo, Video, and Frame Processor). If this value is not set, the device orientation is used.
*/
orientation?: Orientation;
//#region Events
/**
* Called when any kind of runtime error occurred.
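*
* A minimal handler sketch:
* @example
* ```tsx
* <Camera
*   {...cameraProps}
*   onError={(error) => console.error(`${error.code}: ${error.message}`)}
* />
* ```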
*/
onError?: (error: CameraRuntimeError) => void;
/**
* Called when the camera was successfully initialized.
*/
onInitialized?: () => void;
/**
* A worklet which will be called for every frame the Camera "sees".
*
* > See [the Frame Processors documentation](https://mrousavy.github.io/react-native-vision-camera/docs/guides/frame-processors) for more information
*
* @example
* ```tsx
* const frameProcessor = useFrameProcessor((frame) => {
* 'worklet'
* const qrCodes = scanQRCodes(frame)
* console.log(`Detected QR Codes: ${qrCodes}`)
* }, [])
*
* return <Camera {...cameraProps} frameProcessor={frameProcessor} />
* ```
*/
frameProcessor?: FrameProcessor;
//#endregion
}

package/src/Frame.ts (new file)

@@ -0,0 +1,77 @@
import type { Orientation } from './Orientation';
import { PixelFormat } from './PixelFormat';
/**
* A single frame, as seen by the camera.
*/
export interface Frame {
/**
* Whether the underlying buffer is still valid or not. The buffer will be released after the frame processor returns, or `close()` is called.
*/
isValid: boolean;
/**
* Returns the width of the frame, in pixels.
*/
width: number;
/**
* Returns the height of the frame, in pixels.
*/
height: number;
/**
* Returns the number of bytes per row.
*/
bytesPerRow: number;
/**
* Returns the number of planes this frame contains.
*/
planesCount: number;
/**
* Returns whether the Frame is mirrored (selfie camera) or not.
*/
isMirrored: boolean;
/**
* Returns the timestamp of the Frame relative to the host system's clock.
*/
timestamp: number;
/**
* Represents the orientation of the Frame.
*
* Some ML Models are trained for specific orientations, so the Frame's orientation needs to be taken into
* consideration when running a frame processor. See also: `isMirrored`
*/
orientation: Orientation;
/**
* Represents the pixel-format of the Frame.
*/
pixelFormat: PixelFormat;
/**
* Get the underlying data of the Frame as a uint8 array buffer.
*
* Note that Frames are allocated on the GPU, so calling `toArrayBuffer()` will copy from the GPU to the CPU.
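*
* A usage sketch inside a frame processor (`useFrameProcessor` is the hook shown in the `frameProcessor` prop docs):
* @example
* ```ts
* const frameProcessor = useFrameProcessor((frame) => {
*   'worklet'
*   const data = frame.toArrayBuffer()
*   console.log(`First byte: ${data[0]}`)
* }, [])
* ```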
*/
toArrayBuffer(): Uint8Array;
/**
* Returns a string representation of the frame.
* @example
* ```ts
* console.log(frame.toString()) // -> "3840 x 2160 Frame"
* ```
*/
toString(): string;
}
export interface FrameInternal extends Frame {
/**
* Increment the Frame Buffer ref-count by one.
*
* This is a private API, do not use this.
*/
incrementRefCount(): void;
/**
* Decrement the Frame Buffer ref-count by one.
*
* This is a private API, do not use this.
*/
decrementRefCount(): void;
}

package/src/FrameProcessorPlugins.ts (new file)

@@ -0,0 +1,193 @@
import type { Frame, FrameInternal } from './Frame';
import type { FrameProcessor } from './CameraProps';
import { CameraRuntimeError } from './CameraError';
// only import typescript types
import type TWorklets from 'react-native-worklets-core';
import { CameraModule } from './NativeCameraModule';
import { assertJSIAvailable } from './JSIHelper';
type BasicParameterType = string | number | boolean | undefined;
type ParameterType = BasicParameterType | BasicParameterType[] | Record<string, BasicParameterType | undefined>;
interface FrameProcessorPlugin {
/**
* Call the native Frame Processor Plugin with the given Frame and options.
* @param frame The Frame from the Frame Processor.
* @param options (optional) Additional options. Options will be converted to a native dictionary
* @returns (optional) A value returned from the native Frame Processor Plugin (or undefined)
*/
call: (frame: Frame, options?: Record<string, ParameterType>) => ParameterType;
}
interface TVisionCameraProxy {
setFrameProcessor: (viewTag: number, frameProcessor: FrameProcessor) => void;
removeFrameProcessor: (viewTag: number) => void;
/**
* Creates a new instance of a Frame Processor Plugin.
* The Plugin has to be registered on the native side, otherwise this returns `undefined`
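*
* A hedged usage sketch (`detect_faces` and its `minConfidence` option are hypothetical and must be provided by a registered native plugin):
* @example
* ```ts
* const plugin = VisionCameraProxy.getFrameProcessorPlugin('detect_faces')
* if (plugin == null) throw new Error('Failed to load detect_faces plugin!')
* const result = plugin.call(frame, { minConfidence: 0.5 })
* ```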
*/
getFrameProcessorPlugin: (name: string) => FrameProcessorPlugin | undefined;
}
let hasWorklets = false;
let isAsyncContextBusy = { value: false };
let runOnAsyncContext = (_frame: Frame, _func: () => void): void => {
throw new CameraRuntimeError(
'system/frame-processors-unavailable',
'Frame Processors are not available, react-native-worklets-core is not installed!',
);
};
try {
assertJSIAvailable();
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { Worklets } = require('react-native-worklets-core') as typeof TWorklets;
isAsyncContextBusy = Worklets.createSharedValue(false);
const asyncContext = Worklets.createContext('VisionCamera.async');
runOnAsyncContext = Worklets.createRunInContextFn((frame: Frame, func: () => void) => {
'worklet';
try {
// Call long-running function
func();
} finally {
// Potentially delete Frame if we were the last ref
(frame as FrameInternal).decrementRefCount();
isAsyncContextBusy.value = false;
}
}, asyncContext);
hasWorklets = true;
} catch (e) {
// Worklets are not installed, so Frame Processors are disabled.
}
let proxy: TVisionCameraProxy = {
getFrameProcessorPlugin: () => {
throw new CameraRuntimeError('system/frame-processors-unavailable', 'Frame Processors are not enabled!');
},
removeFrameProcessor: () => {
throw new CameraRuntimeError('system/frame-processors-unavailable', 'Frame Processors are not enabled!');
},
setFrameProcessor: () => {
throw new CameraRuntimeError('system/frame-processors-unavailable', 'Frame Processors are not enabled!');
},
};
if (hasWorklets) {
// Install native Frame Processor Runtime Manager
const result = CameraModule.installFrameProcessorBindings() as unknown;
if (result !== true)
throw new CameraRuntimeError('system/frame-processors-unavailable', 'Failed to install Frame Processor JSI bindings!');
// @ts-expect-error global is untyped, it's a C++ host-object
proxy = global.VisionCameraProxy as TVisionCameraProxy;
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (proxy == null) {
throw new CameraRuntimeError(
'system/frame-processors-unavailable',
'Failed to install VisionCameraProxy. Are Frame Processors properly enabled?',
);
}
}
export const VisionCameraProxy = proxy;
declare global {
// eslint-disable-next-line no-var
var __frameProcessorRunAtTargetFpsMap: Record<string, number | undefined> | undefined;
}
function getLastFrameProcessorCall(frameProcessorFuncId: string): number {
'worklet';
return global.__frameProcessorRunAtTargetFpsMap?.[frameProcessorFuncId] ?? 0;
}
function setLastFrameProcessorCall(frameProcessorFuncId: string, value: number): void {
'worklet';
if (global.__frameProcessorRunAtTargetFpsMap == null) global.__frameProcessorRunAtTargetFpsMap = {};
global.__frameProcessorRunAtTargetFpsMap[frameProcessorFuncId] = value;
}
/**
* Runs the given function at the given target FPS rate.
*
* For example, if you want to run a heavy face detection algorithm
* only once per second, you can use `runAtTargetFps(1, ...)` to
* throttle it to 1 FPS.
*
* @param fps The target FPS rate at which the given function should be executed
* @param func The function to execute.
* @returns The result of the function if it was executed, or `undefined` otherwise.
* @example
*
* ```ts
* const frameProcessor = useFrameProcessor((frame) => {
* 'worklet'
* console.log('New Frame')
* runAtTargetFps(5, () => {
* 'worklet'
* const faces = detectFaces(frame)
* console.log(`Detected a new face: ${faces[0]}`)
* })
* })
* ```
*/
export function runAtTargetFps<T>(fps: number, func: () => T): T | undefined {
'worklet';
// @ts-expect-error
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const funcId = func.__workletHash ?? '1';
const targetIntervalMs = 1000 / fps; // <-- 60 FPS => 16.6667ms interval
const now = performance.now();
const diffToLastCall = now - getLastFrameProcessorCall(funcId);
if (diffToLastCall >= targetIntervalMs) {
setLastFrameProcessorCall(funcId, now);
// Last Frame Processor call is already so long ago that we want to make a new call
return func();
}
return undefined;
}
/**
* Runs the given function asynchronously, while keeping a strong reference to the Frame.
*
* For example, if you want to run a heavy face detection algorithm
* while still drawing to the screen at 60 FPS, you can use `runAsync(...)`
* to offload the face detection algorithm to a separate thread.
*
* @param frame The current Frame of the Frame Processor.
* @param func The function to execute.
* @example
*
* ```ts
* const frameProcessor = useFrameProcessor((frame) => {
* 'worklet'
* console.log('New Frame')
* runAsync(frame, () => {
* 'worklet'
* const faces = detectFaces(frame)
* const face = faces[0]
* console.log(`Detected a new face: ${face}`)
* })
* })
* ```
*/
export function runAsync(frame: Frame, func: () => void): void {
'worklet';
if (isAsyncContextBusy.value) {
// async context is currently busy, we cannot schedule new work in time.
// drop this frame/runAsync call.
return;
}
// Increment ref count by one
(frame as FrameInternal).incrementRefCount();
isAsyncContextBusy.value = true;
// Call in separate background context
runOnAsyncContext(frame, func);
}

package/src/JSIHelper.ts (new file)

@@ -0,0 +1,12 @@
import { CameraRuntimeError } from './CameraError';
export function assertJSIAvailable(): void {
// Check if we are running on-device (JSI)
// @ts-expect-error JSI functions aren't typed
if (global.nativeCallSyncHook == null) {
throw new CameraRuntimeError(
'system/frame-processors-unavailable',
'Failed to initialize VisionCamera Frame Processors: React Native is not running on-device. Frame Processors can only be used when synchronous method invocations (JSI) are possible. If you are using a remote debugger (e.g. Chrome), switch to an on-device debugger (e.g. Flipper) instead.',
);
}
}

package/src/NativeCameraModule.ts (new file)

@@ -0,0 +1,42 @@
import { NativeModules, Platform } from 'react-native';
import { CameraRuntimeError } from './CameraError';
const supportedPlatforms = ['ios', 'android', 'macos'];
// NativeModules automatically resolves 'CameraView' to 'CameraViewModule'
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
export const CameraModule = NativeModules.CameraView;
if (CameraModule == null) {
if (!supportedPlatforms.includes(Platform.OS)) {
throw new CameraRuntimeError(
'system/camera-module-not-found',
`Failed to initialize VisionCamera: VisionCamera currently does not work on ${Platform.OS}.`,
);
}
let message = 'Failed to initialize VisionCamera: The native Camera Module (`NativeModules.CameraView`) could not be found.';
message += '\n* Make sure react-native-vision-camera is correctly autolinked (run `npx react-native config` to verify)';
if (Platform.OS === 'ios' || Platform.OS === 'macos') message += '\n* Make sure you ran `pod install` in the ios/ directory.';
if (Platform.OS === 'android') message += '\n* Make sure gradle is synced.';
// check if Expo
// @ts-expect-error expo global JSI modules are not typed
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const ExpoConstants = global.expo?.modules?.ExponentConstants;
if (ExpoConstants != null) {
if (ExpoConstants.appOwnership === 'expo') {
// We're running Expo Go
throw new CameraRuntimeError(
'system/camera-module-not-found',
`react-native-vision-camera is not supported in Expo Go! Use EAS/expo prebuild instead (\`expo run:${Platform.OS}\`). For more info, see https://docs.expo.dev/workflow/prebuild/.`,
);
} else {
// We're running Expo bare / standalone
message += '\n* Make sure you ran `expo prebuild`.';
}
}
message += '\n* Make sure you rebuilt the app.';
throw new CameraRuntimeError('system/camera-module-not-found', message);
}

package/src/Orientation.ts (new file)

@@ -0,0 +1 @@
export type Orientation = 'portrait' | 'portrait-upside-down' | 'landscape-left' | 'landscape-right';

package/src/PhotoFile.ts (new file)

@@ -0,0 +1,160 @@
import { Orientation } from './Orientation';
import type { TemporaryFile } from './TemporaryFile';
export interface TakePhotoOptions {
/**
* Indicates how photo quality should be prioritized against speed.
*
* * `"quality"` Indicates that photo quality is paramount, even at the expense of shot-to-shot time
* * `"balanced"` Indicates that photo quality and speed of delivery are balanced in priority
* * `"speed"` Indicates that speed of photo delivery is most important, even at the expense of quality
*
* @default "balanced"
*/
qualityPrioritization?: 'quality' | 'balanced' | 'speed';
/**
* Whether the Flash should be enabled or disabled. Use `"auto"` to let the Camera decide based on the scene's light conditions.
*
* @default "auto"
*/
flash?: 'on' | 'off' | 'auto';
/**
* Specifies whether red-eye reduction should be applied automatically on flash captures.
*
* @default false
*/
enableAutoRedEyeReduction?: boolean;
/**
* Indicates whether still image stabilization will be employed when capturing the photo
*
* @default false
*/
enableAutoStabilization?: boolean;
/**
* Specifies whether the photo output should use content-aware distortion correction on this photo request.
* For example, the algorithm may not apply correction to faces in the center of a photo, but may apply it to faces near the photo's edges.
*
* @platform iOS
* @default false
*/
enableAutoDistortionCorrection?: boolean;
/**
* Whether to play the default shutter "click" sound when taking a picture or not.
*
* @default true
*/
enableShutterSound?: boolean;
}
/**
* Represents a Photo taken by the Camera written to the local filesystem.
*
* See {@linkcode Camera.takePhoto | Camera.takePhoto()}
*/
export interface PhotoFile extends TemporaryFile {
/**
* The width of the photo, in pixels.
*/
width: number;
/**
* The height of the photo, in pixels.
*/
height: number;
/**
* Whether this photo is in RAW format or not.
*/
isRawPhoto: boolean;
/**
* Display orientation of the photo, relative to the Camera's sensor orientation.
*
* Note that Camera sensors are landscape, so e.g. "portrait" photos will have a value of "landscape-left", etc.
*/
orientation: Orientation;
/**
* Whether this photo is mirrored (selfies) or not.
*/
isMirrored: boolean;
thumbnail?: Record<string, unknown>;
/**
* Metadata information describing the captured image. (iOS only)
*
* @see [AVCapturePhoto.metadata](https://developer.apple.com/documentation/avfoundation/avcapturephoto/2873982-metadata)
*
* @platform iOS
*/
metadata?: {
/**
* Orientation of the EXIF Image.
*
* * 1 = 0 degrees: the correct orientation, no adjustment is required.
* * 2 = 0 degrees, mirrored: image has been flipped back-to-front.
* * 3 = 180 degrees: image is upside down.
* * 4 = 180 degrees, mirrored: image has been flipped back-to-front and is upside down.
* * 5 = 90 degrees: image has been flipped back-to-front and is on its side.
* * 6 = 90 degrees, mirrored: image is on its side.
* * 7 = 270 degrees: image has been flipped back-to-front and is on its far side.
* * 8 = 270 degrees, mirrored: image is on its far side.
*/
Orientation: number;
/**
* @platform iOS
*/
DPIHeight: number;
/**
* @platform iOS
*/
DPIWidth: number;
/**
* Represents any data Apple cameras write to the metadata
*
* @platform iOS
*/
'{MakerApple}'?: Record<string, unknown>;
'{TIFF}': {
ResolutionUnit: number;
Software: string;
Make: string;
DateTime: string;
XResolution: number;
/**
* @platform iOS
*/
HostComputer?: string;
Model: string;
YResolution: number;
};
'{Exif}': {
DateTimeOriginal: string;
ExposureTime: number;
FNumber: number;
LensSpecification: number[];
ExposureBiasValue: number;
ColorSpace: number;
FocalLenIn35mmFilm: number;
BrightnessValue: number;
ExposureMode: number;
LensModel: string;
SceneType: number;
PixelXDimension: number;
ShutterSpeedValue: number;
SensingMethod: number;
SubjectArea: number[];
ApertureValue: number;
SubsecTimeDigitized: string;
FocalLength: number;
LensMake: string;
SubsecTimeOriginal: string;
OffsetTimeDigitized: string;
PixelYDimension: number;
ISOSpeedRatings: number[];
WhiteBalance: number;
DateTimeDigitized: string;
OffsetTimeOriginal: string;
ExifVersion: string;
OffsetTime: string;
Flash: number;
ExposureProgram: number;
MeteringMode: number;
};
};
}

package/src/PixelFormat.ts (new file)

@@ -0,0 +1,15 @@
/**
* Represents the pixel format of a `Frame`.
*
* If you intend to read Pixels from this Frame or use an ML model for processing, make sure that you are
* using the expected `PixelFormat`, otherwise the plugin might not be able to properly understand the Frame's content.
*
* Most ML models operate in either `yuv` (recommended) or `rgb`.
*
* - `yuv`: Frame is in YUV pixel-format (Y'CbCr 4:2:0 or NV21, 8-bit)
* - `rgb`: Frame is in RGB pixel-format (RGB or RGBA, 8-bit)
* - `dng`: Frame is in a depth-data pixel format (DNG)
* - `native`: Frame is in the Camera's native Hardware Buffer format (PRIVATE). This is the most efficient Format.
* - `unknown`: Frame has unknown/unsupported pixel-format.
*/
export type PixelFormat = 'yuv' | 'rgb' | 'dng' | 'native' | 'unknown';

package/src/Point.ts (new file)

@@ -0,0 +1,13 @@
/**
* Represents a Point in a 2 dimensional coordinate system.
*/
export interface Point {
/**
* The X coordinate of this Point. (double)
*/
x: number;
/**
* The Y coordinate of this Point. (double)
*/
y: number;
}

package/src/TemporaryFile.ts (new file)

@@ -0,0 +1,13 @@
/**
* Represents a temporary file in the local filesystem.
*/
export interface TemporaryFile {
/**
* The path of the file.
*
* * **Note:** If you want to consume this file (e.g. for displaying it in an `<Image>` component), you might have to add the `file://` prefix.
*
* * **Note:** This file might get deleted once the app closes because it lives in the temp directory.
*/
path: string;
}

package/src/VideoFile.ts (new file)

@@ -0,0 +1,39 @@
import type { CameraCaptureError } from './CameraError';
import type { TemporaryFile } from './TemporaryFile';
export interface RecordVideoOptions {
/**
* Set the video flash mode. Natively, this just enables the torch while recording.
*/
flash?: 'on' | 'off' | 'auto';
/**
* Specifies the output file type to record videos into.
*/
fileType?: 'mov' | 'mp4';
/**
* Called when there was an unexpected runtime error while recording the video.
*/
onRecordingError: (error: CameraCaptureError) => void;
/**
* Called when the recording has been successfully saved to file.
*/
onRecordingFinished: (video: VideoFile) => void;
/**
* The Video Codec to record in.
   * - `h264`: Widely supported, but less efficient, especially at larger resolutions or frame rates.
   * - `h265`: HEVC (High Efficiency Video Coding), for more efficient video recordings.
   */
  videoCodec?: 'h264' | 'h265';
}
/**
* Represents a Video taken by the Camera written to the local filesystem.
*
* Related: {@linkcode Camera.startRecording | Camera.startRecording()}, {@linkcode Camera.stopRecording | Camera.stopRecording()}
*/
export interface VideoFile extends TemporaryFile {
/**
* Represents the duration of the video, in seconds.
*/
duration: number;
}
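
A minimal sketch of these options in use, assuming `camera` is a mounted `<Camera>` with video capture enabled:

```tsx
import type { Camera } from 'react-native-vision-camera';

function startVideo(camera: Camera): void {
  camera.startRecording({
    flash: 'off',
    fileType: 'mp4',
    videoCodec: 'h265',
    onRecordingFinished: (video) => console.log(`saved ${video.path} (${video.duration}s)`),
    onRecordingError: (error) => console.error(error),
  });
}

async function stopVideo(camera: Camera): Promise<void> {
  // Stops the recording; the result is delivered via the callbacks above.
  await camera.stopRecording();
}
```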

23
package/src/expo-plugin/withDisableFrameProcessorsAndroid.ts Normal file
View File

@@ -0,0 +1,23 @@
import { ConfigPlugin, withGradleProperties } from '@expo/config-plugins';
/**
* Set the `VisionCamera_disableFrameProcessors` value in the static `gradle.properties` file.
 * This is used to disable Frame Processors if you don't need them on Android.
*/
export const withDisableFrameProcessorsAndroid: ConfigPlugin = (c) => {
const disableFrameProcessorsKey = 'VisionCamera_disableFrameProcessors';
return withGradleProperties(c, (config) => {
config.modResults = config.modResults.filter((item) => {
if (item.type === 'property' && item.key === disableFrameProcessorsKey) return false;
return true;
});
config.modResults.push({
type: 'property',
key: disableFrameProcessorsKey,
value: 'true',
});
return config;
});
};

13
package/src/expo-plugin/withDisableFrameProcessorsIOS.ts Normal file
View File

@@ -0,0 +1,13 @@
import { ConfigPlugin, withPodfileProperties } from '@expo/config-plugins';
/**
 * Sets the `disableFrameProcessors` flag inside the Xcode project.
 * This is used to disable Frame Processors if you don't need them on iOS (saves CPU and memory).
*/
export const withDisableFrameProcessorsIOS: ConfigPlugin = (c) => {
return withPodfileProperties(c, (config) => {
// TODO: Implement Podfile writing
config.ios = config.ios;
return config;
});
};

37
package/src/expo-plugin/withVisionCamera.ts Normal file
View File

@@ -0,0 +1,37 @@
import { withPlugins, AndroidConfig, ConfigPlugin, createRunOncePlugin } from '@expo/config-plugins';
import { withDisableFrameProcessorsAndroid } from './withDisableFrameProcessorsAndroid';
import { withDisableFrameProcessorsIOS } from './withDisableFrameProcessorsIOS';
// eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-unsafe-assignment
const pkg = require('../../../package.json');
const CAMERA_USAGE = 'Allow $(PRODUCT_NAME) to access your camera';
const MICROPHONE_USAGE = 'Allow $(PRODUCT_NAME) to access your microphone';
type Props = {
cameraPermissionText?: string;
enableMicrophonePermission?: boolean;
microphonePermissionText?: string;
disableFrameProcessors?: boolean;
};
const withCamera: ConfigPlugin<Props> = (config, props = {}) => {
if (config.ios == null) config.ios = {};
if (config.ios.infoPlist == null) config.ios.infoPlist = {};
config.ios.infoPlist.NSCameraUsageDescription =
props.cameraPermissionText ?? (config.ios.infoPlist.NSCameraUsageDescription as string | undefined) ?? CAMERA_USAGE;
if (props.enableMicrophonePermission) {
config.ios.infoPlist.NSMicrophoneUsageDescription =
props.microphonePermissionText ?? (config.ios.infoPlist.NSMicrophoneUsageDescription as string | undefined) ?? MICROPHONE_USAGE;
}
const androidPermissions = ['android.permission.CAMERA'];
if (props.enableMicrophonePermission) androidPermissions.push('android.permission.RECORD_AUDIO');
if (props.disableFrameProcessors) {
config = withDisableFrameProcessorsAndroid(config);
config = withDisableFrameProcessorsIOS(config);
}
return withPlugins(config, [[AndroidConfig.Permissions.withPermissions, androidPermissions]]);
};
export default createRunOncePlugin(withCamera, pkg.name, pkg.version);
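
For context, a sketch of consuming this plugin from an Expo `app.config.ts`; the plugin name assumes the package is installed as `react-native-vision-camera`, and each option maps to the `Props` type above:

```ts
// app.config.ts
import type { ExpoConfig } from '@expo/config';

const config: ExpoConfig = {
  name: 'my-app',
  slug: 'my-app',
  plugins: [
    [
      'react-native-vision-camera',
      {
        cameraPermissionText: 'Allow $(PRODUCT_NAME) to access your camera',
        enableMicrophonePermission: true,
        microphonePermissionText: 'Allow $(PRODUCT_NAME) to access your microphone',
        disableFrameProcessors: false,
      },
    ],
  ],
};

export default config;
```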

78
package/src/hooks/useCameraDevices.ts Normal file
View File

@@ -0,0 +1,78 @@
import { useEffect, useState } from 'react';
import type { CameraPosition } from '../CameraPosition';
import { sortDevices } from '../utils/FormatFilter';
import { Camera } from '../Camera';
import { CameraDevice, LogicalCameraDeviceType, parsePhysicalDeviceTypes, PhysicalCameraDeviceType } from '../CameraDevice';
export type CameraDevices = {
[key in CameraPosition]: CameraDevice | undefined;
};
const DefaultCameraDevices: CameraDevices = {
back: undefined,
external: undefined,
front: undefined,
unspecified: undefined,
};
/**
 * Gets the best available {@linkcode CameraDevice} for each camera position. Devices with more physical cameras are preferred.
 *
 * @returns A {@linkcode CameraDevices} object containing the best matching device per {@linkcode CameraPosition}, with `undefined` for positions that have no device.
 * @example
 * ```tsx
 * const devices = useCameraDevices()
 * const device = devices.back
 * // ...
 * return <Camera device={device} />
 * ```
*/
export function useCameraDevices(): CameraDevices;
/**
 * Gets the {@linkcode CameraDevice}s matching the requested device type, one per camera position.
 *
 * @param {PhysicalCameraDeviceType | LogicalCameraDeviceType} deviceType Specifies a device type which will be used as a device filter.
 * @returns A {@linkcode CameraDevices} object containing the matching device per {@linkcode CameraPosition}, with `undefined` for positions that have no match.
 * @example
 * ```tsx
 * const devices = useCameraDevices('wide-angle-camera')
 * const device = devices.back
 * // ...
 * return <Camera device={device} />
* ```
*/
export function useCameraDevices(deviceType: PhysicalCameraDeviceType | LogicalCameraDeviceType): CameraDevices;
export function useCameraDevices(deviceType?: PhysicalCameraDeviceType | LogicalCameraDeviceType): CameraDevices {
const [cameraDevices, setCameraDevices] = useState<CameraDevices>(DefaultCameraDevices);
useEffect(() => {
let isMounted = true;
const loadDevice = async (): Promise<void> => {
let devices = await Camera.getAvailableCameraDevices();
if (!isMounted) return;
devices = devices.sort(sortDevices);
if (deviceType != null) {
devices = devices.filter((d) => {
const parsedType = parsePhysicalDeviceTypes(d.devices);
return parsedType === deviceType;
});
}
setCameraDevices({
back: devices.find((d) => d.position === 'back'),
external: devices.find((d) => d.position === 'external'),
front: devices.find((d) => d.position === 'front'),
unspecified: devices.find((d) => d.position === 'unspecified'),
});
};
loadDevice();
return () => {
isMounted = false;
};
}, [deviceType]);
return cameraDevices;
}

16
package/src/hooks/useCameraFormat.ts Normal file
View File

@@ -0,0 +1,16 @@
import { useMemo } from 'react';
import type { CameraDevice, CameraDeviceFormat } from '../CameraDevice';
import { sortFormats } from '../utils/FormatFilter';
/**
* Returns the best format for the given camera device.
*
* This function tries to choose a format with the highest possible photo-capture resolution and best matching aspect ratio.
*
* @param {CameraDevice} device The Camera Device
*
* @returns The best matching format for the given camera device, or `undefined` if the camera device is `undefined`.
*/
export function useCameraFormat(device?: CameraDevice): CameraDeviceFormat | undefined {
return useMemo(() => device?.formats.sort(sortFormats)[0], [device?.formats]);
}
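
Putting the two hooks together — a sketch assuming `CameraProps` accepts a `format` prop alongside `device`:

```tsx
import React from 'react';
import { StyleSheet } from 'react-native';
import { Camera, useCameraDevices, useCameraFormat } from 'react-native-vision-camera';

function App(): React.ReactElement | null {
  const devices = useCameraDevices();
  const device = devices.back;
  const format = useCameraFormat(device);

  if (device == null) return null;
  return <Camera style={StyleSheet.absoluteFill} device={device} format={format} isActive={true} />;
}
```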

52
package/src/hooks/useFrameProcessor.ts Normal file
View File

@@ -0,0 +1,52 @@
import { DependencyList, useMemo } from 'react';
import type { Frame, FrameInternal } from '../Frame';
import { FrameProcessor } from '../CameraProps';
/**
* Create a new Frame Processor function which you can pass to the `<Camera>`.
* (See ["Frame Processors"](https://mrousavy.github.io/react-native-vision-camera/docs/guides/frame-processors))
*
* Make sure to add the `'worklet'` directive to the top of the Frame Processor function, otherwise it will not get compiled into a worklet.
*
* Also make sure to memoize the returned object, so that the Camera doesn't reset the Frame Processor Context each time.
*/
export function createFrameProcessor(frameProcessor: FrameProcessor['frameProcessor'], type: FrameProcessor['type']): FrameProcessor {
return {
frameProcessor: (frame: Frame) => {
'worklet';
// Increment ref-count by one
(frame as FrameInternal).incrementRefCount();
try {
// Call sync frame processor
frameProcessor(frame);
} finally {
// Potentially delete Frame if we were the last ref (no runAsync)
(frame as FrameInternal).decrementRefCount();
}
},
type: type,
};
}
/**
 * Returns a memoized Frame Processor function which you can pass to the `<Camera>`.
* (See ["Frame Processors"](https://mrousavy.github.io/react-native-vision-camera/docs/guides/frame-processors))
*
* Make sure to add the `'worklet'` directive to the top of the Frame Processor function, otherwise it will not get compiled into a worklet.
*
* @param frameProcessor The Frame Processor
* @param dependencies The React dependencies which will be copied into the VisionCamera JS-Runtime.
* @returns The memoized Frame Processor.
* @example
* ```ts
* const frameProcessor = useFrameProcessor((frame) => {
* 'worklet'
* const qrCodes = scanQRCodes(frame)
* console.log(`QR Codes: ${qrCodes}`)
* }, [])
* ```
*/
export function useFrameProcessor(frameProcessor: (frame: Frame) => void, dependencies: DependencyList): FrameProcessor {
// eslint-disable-next-line react-hooks/exhaustive-deps
return useMemo(() => createFrameProcessor(frameProcessor, 'frame-processor'), dependencies);
}

18
package/src/index.ts Normal file
View File

@@ -0,0 +1,18 @@
export * from './Camera';
export * from './CameraDevice';
export * from './CameraError';
export * from './CameraPosition';
export * from './CameraProps';
export { Frame } from './Frame';
export * from './FrameProcessorPlugins';
export * from './PhotoFile';
export * from './Point';
export * from './TemporaryFile';
export * from './VideoFile';
export * from './hooks/useCameraDevices';
export * from './hooks/useCameraFormat';
export * from './hooks/useFrameProcessor';
export * from './utils/FormatFilter';

93
package/src/utils/FormatFilter.ts Normal file
View File

@@ -0,0 +1,93 @@
import { Dimensions } from 'react-native';
import type { CameraDevice, CameraDeviceFormat } from '../CameraDevice';
/**
* Compares two devices by the following criteria:
* * `wide-angle-camera`s are ranked higher than others
 * Devices with more physical cameras are ranked higher than ones with fewer. (e.g. "Triple Camera" > "Wide-Angle Camera")
*
* > Note that this makes the `sort()` function descending, so the first element (`[0]`) is the "best" device.
*
* @example
* ```ts
 * const devices = (await Camera.getAvailableCameraDevices()).sort(sortDevices)
* const bestDevice = devices[0]
* ```
* @method
*/
export const sortDevices = (left: CameraDevice, right: CameraDevice): number => {
let leftPoints = 0;
let rightPoints = 0;
const leftHasWideAngle = left.devices.includes('wide-angle-camera');
const rightHasWideAngle = right.devices.includes('wide-angle-camera');
if (leftHasWideAngle) leftPoints += 2;
if (rightHasWideAngle) rightPoints += 2;
if (left.isMultiCam) leftPoints += 2;
if (right.isMultiCam) rightPoints += 2;
if (left.hardwareLevel === 'full') leftPoints += 3;
if (right.hardwareLevel === 'full') rightPoints += 3;
if (left.hardwareLevel === 'limited') leftPoints += 1;
if (right.hardwareLevel === 'limited') rightPoints += 1;
if (left.hasFlash) leftPoints += 1;
if (right.hasFlash) rightPoints += 1;
const leftMaxResolution = left.formats.reduce(
(prev, curr) => Math.max(prev, curr.videoHeight * curr.videoWidth + curr.photoHeight * curr.photoWidth),
0,
);
const rightMaxResolution = right.formats.reduce(
(prev, curr) => Math.max(prev, curr.videoHeight * curr.videoWidth + curr.photoHeight * curr.photoWidth),
0,
);
if (leftMaxResolution > rightMaxResolution) leftPoints += 3;
if (rightMaxResolution > leftMaxResolution) rightPoints += 3;
// telephoto cameras often have very poor quality.
const leftHasTelephoto = left.devices.includes('telephoto-camera');
const rightHasTelephoto = right.devices.includes('telephoto-camera');
if (leftHasTelephoto) leftPoints -= 2;
if (rightHasTelephoto) rightPoints -= 2;
if (left.devices.length > right.devices.length) leftPoints += 1;
if (right.devices.length > left.devices.length) rightPoints += 1;
return rightPoints - leftPoints;
};
const SCREEN_SIZE = {
width: Dimensions.get('window').width,
height: Dimensions.get('window').height,
};
const SCREEN_ASPECT_RATIO = SCREEN_SIZE.width / SCREEN_SIZE.height;
/**
* Sort formats by resolution and aspect ratio difference (to the Screen size).
*
 * > Note that this makes the `sort()` function descending, so the first element (`[0]`) is the "best" format.
*/
export const sortFormats = (left: CameraDeviceFormat, right: CameraDeviceFormat): number => {
let leftPoints = 0,
rightPoints = 0;
  // we downscale the resolution values so the points stay in a small range for the calculations below.
  // e.g. for 4k (4096-pixel width), this adds 8 points.
leftPoints += Math.round(left.photoWidth / 500);
rightPoints += Math.round(right.photoWidth / 500);
// e.g. for 4k (4096), this adds 8 points.
leftPoints += Math.round(left.videoWidth / 500);
rightPoints += Math.round(right.videoWidth / 500);
// we downscale the points here as well, so if left has 16:9 and right has 21:9, this roughly
// adds 5 points. If the difference is smaller, e.g. 16:9 vs 17:9, this roughly adds a little
// bit over 1 point, just enough to overrule the FPS below.
const leftAspectRatioDiff = left.photoHeight / left.photoWidth - SCREEN_ASPECT_RATIO;
const rightAspectRatioDiff = right.photoHeight / right.photoWidth - SCREEN_ASPECT_RATIO;
leftPoints -= Math.abs(leftAspectRatioDiff) * 10;
rightPoints -= Math.abs(rightAspectRatioDiff) * 10;
return rightPoints - leftPoints;
};
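
One caveat worth a sketch: `Array.prototype.sort` mutates in place, so copying before sorting avoids reordering a `device.formats` array that other code may rely on (`useCameraFormat` above sorts in place). The imports assume a helper placed alongside FormatFilter in `utils/`:

```ts
import type { CameraDevice, CameraDeviceFormat } from '../CameraDevice';
import { sortFormats } from './FormatFilter';

// Returns the best-ranked format without mutating device.formats.
function bestFormatFor(device: CameraDevice): CameraDeviceFormat | undefined {
  return [...device.formats].sort(sortFormats)[0];
}
```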