Compare commits

...

3 Commits

24 changed files with 655 additions and 1737 deletions

package/.gitignore vendored
View File

@@ -67,3 +67,6 @@ package-lock.json
.cxx/
example/ios/vendor
#.direnv
.direnv

View File

@@ -11,6 +11,7 @@ import com.mrousavy.camera.core.CodeScannerFrame
import com.mrousavy.camera.core.UnknownCameraError
import com.mrousavy.camera.core.code
import com.mrousavy.camera.types.CodeType
import java.io.File
fun CameraView.invokeOnInitialized() {
Log.i(CameraView.TAG, "invokeOnInitialized()")
@@ -33,6 +34,15 @@ fun CameraView.invokeOnStopped() {
reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "cameraStopped", null)
}
fun CameraView.invokeOnChunkReady(filepath: File, index: Int) {
Log.e(CameraView.TAG, "invokeOnError(...):")
val event = Arguments.createMap()
event.putInt("index", index)
event.putString("filepath", filepath.toString())
val reactContext = context as ReactContext
reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "onVideoChunkReady", event)
}
fun CameraView.invokeOnError(error: Throwable) {
Log.e(CameraView.TAG, "invokeOnError(...):")
error.printStackTrace()

View File

@@ -25,6 +25,7 @@ import com.mrousavy.camera.types.Torch
import com.mrousavy.camera.types.VideoStabilizationMode
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.launch
import java.io.File
//
// TODOs for the CameraView which are currently too hard to implement either because of CameraX' limitations, or my brain capacity.
@@ -265,6 +266,10 @@ class CameraView(context: Context) :
invokeOnStopped()
}
override fun onVideoChunkReady(filepath: File, index: Int) {
invokeOnChunkReady(filepath, index)
}
override fun onCodeScanned(codes: List<Barcode>, scannerFrame: CodeScannerFrame) {
invokeOnCodeScanned(codes, scannerFrame)
}

View File

@@ -29,6 +29,7 @@ class CameraViewManager : ViewGroupManager<CameraView>() {
.put("cameraStopped", MapBuilder.of("registrationName", "onStopped"))
.put("cameraError", MapBuilder.of("registrationName", "onError"))
.put("cameraCodeScanned", MapBuilder.of("registrationName", "onCodeScanned"))
.put("onVideoChunkReady", MapBuilder.of("registrationName", "onVideoChunkReady"))
.build()
override fun getName(): String = TAG
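
For context on how this registration surfaces in JS: the `registrationName` is the React prop that receives the native event, and the payload shape comes from `invokeOnChunkReady` above (`putInt`/`putString`). A minimal handler sketch, assuming only what this diff shows:

```tsx
import type { NativeSyntheticEvent } from 'react-native'

// Payload as built in invokeOnChunkReady: index via putInt, filepath via putString.
interface OnVideoChunkReadyEvent {
  filepath: string
  index: number
}

// Passed as the `onVideoChunkReady` prop on the native camera view,
// per the registrationName mapping registered above.
const onVideoChunkReady = (event: NativeSyntheticEvent<OnVideoChunkReadyEvent>): void => {
  const { filepath, index } = event.nativeEvent
  console.log(`chunk ${index} ready at ${filepath}`)
}
```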

View File

@@ -54,6 +54,7 @@ import kotlinx.coroutines.launch
import kotlinx.coroutines.runBlocking
import kotlinx.coroutines.sync.Mutex
import kotlinx.coroutines.sync.withLock
import java.io.File
class CameraSession(private val context: Context, private val cameraManager: CameraManager, private val callback: Callback) :
CameraManager.AvailabilityCallback(),
@@ -640,7 +641,8 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
orientation,
options,
callback,
onError
onError,
this.callback,
)
recording.start()
this.recording = recording
@@ -724,6 +726,7 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
fun onInitialized()
fun onStarted()
fun onStopped()
fun onVideoChunkReady(filepath: File, index: Int)
fun onCodeScanned(codes: List<Barcode>, scannerFrame: CodeScannerFrame)
}
}

View File

@@ -13,12 +13,13 @@ import com.mrousavy.camera.types.RecordVideoOptions
import java.io.File
import java.nio.ByteBuffer
class ChunkedRecordingManager(private val encoder: MediaCodec, private val outputDirectory: File, private val orientationHint: Int, private val iFrameInterval: Int) :
class ChunkedRecordingManager(private val encoder: MediaCodec, private val outputDirectory: File, private val orientationHint: Int, private val iFrameInterval: Int, private val callbacks: CameraSession.Callback) :
MediaCodec.Callback() {
companion object {
private const val TAG = "ChunkedRecorder"
fun fromParams(
callbacks: CameraSession.Callback,
size: Size,
enableAudio: Boolean,
fps: Int? = null,
@@ -57,7 +58,7 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
// Create a MediaCodec encoder, and configure it with our format. Get a Surface
// we can use for input and wrap it with a class that handles the EGL work.
codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE)
return ChunkedRecordingManager(codec, outputDirectory, 0, iFrameInterval)
return ChunkedRecordingManager(codec, outputDirectory, 0, iFrameInterval, callbacks)
}
}
@@ -79,7 +80,7 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
}
// Muxer specific
private class MuxerContext(val muxer: MediaMuxer, startTimeUs: Long, encodedFormat: MediaFormat) {
private class MuxerContext(val muxer: MediaMuxer, val filepath: File, val chunkIndex: Int, startTimeUs: Long, encodedFormat: MediaFormat) {
val videoTrack: Int = muxer.addTrack(encodedFormat)
val startTimeUs: Long = startTimeUs
@@ -97,7 +98,10 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
private var muxerContext: MuxerContext? = null
private fun createNextMuxer(bufferInfo: BufferInfo) {
muxerContext?.finish()
muxerContext?.let {
it.finish()
this.callbacks.onVideoChunkReady(it.filepath, it.chunkIndex)
}
chunkIndex++
val newFileName = "$chunkIndex.mp4"
@@ -109,7 +113,7 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
)
muxer.setOrientationHint(orientationHint)
muxerContext = MuxerContext(
muxer, bufferInfo.presentationTimeUs, this.encodedFormat!!
muxer, newOutputFile, chunkIndex, bufferInfo.presentationTimeUs, this.encodedFormat!!
)
}
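
Net effect of this hunk: each time the recorder rotates to a new muxer, the previous chunk file (`<index>.mp4` in the output directory) is finalized and reported through `onVideoChunkReady`. A hypothetical consumer, sketched under the assumption that chunks arrive once and in order; `uploadChunk` is an invented placeholder, not part of this diff:

```ts
const seen = new Set<number>()

// Placeholder only: a real implementation might PUT the file to storage.
async function uploadChunk(filepath: string, index: number): Promise<void> {
  // e.g. await fetch(uploadUrl, { method: 'PUT', body: ... })
}

function handleChunkReady(filepath: string, index: number): void {
  if (seen.has(index)) return // each chunk is reported once; guard anyway
  seen.add(index)
  uploadChunk(filepath, index).catch((e) => console.error(`chunk ${index} upload failed`, e))
}
```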

View File

@@ -24,7 +24,8 @@ class RecordingSession(
private val cameraOrientation: Orientation,
private val options: RecordVideoOptions,
private val callback: (video: Video) -> Unit,
private val onError: (error: CameraError) -> Unit
private val onError: (error: CameraError) -> Unit,
private val allCallbacks: CameraSession.Callback,
) {
companion object {
private const val TAG = "RecordingSession"
@@ -45,6 +46,7 @@ class RecordingSession(
private val bitRate = getBitRate()
private val recorder = ChunkedRecordingManager.fromParams(
allCallbacks,
size,
enableAudio,
fps,

View File

@@ -1,4 +1,4 @@
{
"name": "VisionCameraExample",
"displayName": "VisionCamera Example"
"displayName": "Railbird VisionCamera"
}

View File

@@ -1,55 +1,6 @@
import { NavigationContainer } from '@react-navigation/native'
import React from 'react'
import { createNativeStackNavigator } from '@react-navigation/native-stack'
import { PermissionsPage } from './PermissionsPage'
import { MediaPage } from './MediaPage'
import { CameraPage } from './CameraPage'
import { CodeScannerPage } from './CodeScannerPage'
import type { Routes } from './Routes'
import { Camera } from 'react-native-vision-camera'
import { GestureHandlerRootView } from 'react-native-gesture-handler'
import { StyleSheet } from 'react-native'
import { DevicesPage } from './DevicesPage'
const Stack = createNativeStackNavigator<Routes>()
import CameraScreen from './camera'
export function App(): React.ReactElement | null {
const cameraPermission = Camera.getCameraPermissionStatus()
const microphonePermission = Camera.getMicrophonePermissionStatus()
console.log(`Re-rendering Navigator. Camera: ${cameraPermission} | Microphone: ${microphonePermission}`)
const showPermissionsPage = cameraPermission !== 'granted' || microphonePermission === 'not-determined'
return (
<NavigationContainer>
<GestureHandlerRootView style={styles.root}>
<Stack.Navigator
screenOptions={{
headerShown: false,
statusBarStyle: 'dark',
animationTypeForReplace: 'push',
}}
initialRouteName={showPermissionsPage ? 'PermissionsPage' : 'CameraPage'}>
<Stack.Screen name="PermissionsPage" component={PermissionsPage} />
<Stack.Screen name="CameraPage" component={CameraPage} />
<Stack.Screen name="CodeScannerPage" component={CodeScannerPage} />
<Stack.Screen
name="MediaPage"
component={MediaPage}
options={{
animation: 'none',
presentation: 'transparentModal',
}}
/>
<Stack.Screen name="Devices" component={DevicesPage} />
</Stack.Navigator>
</GestureHandlerRootView>
</NavigationContainer>
)
return <CameraScreen />
}
const styles = StyleSheet.create({
root: {
flex: 1,
},
})

View File

@@ -1,280 +0,0 @@
import * as React from 'react'
import { useRef, useState, useCallback, useMemo } from 'react'
import { StyleSheet, Text, View } from 'react-native'
import { PinchGestureHandler, PinchGestureHandlerGestureEvent, TapGestureHandler } from 'react-native-gesture-handler'
import { CameraRuntimeError, PhotoFile, useCameraDevice, useCameraFormat, useFrameProcessor, VideoFile } from 'react-native-vision-camera'
import { Camera } from 'react-native-vision-camera'
import { CONTENT_SPACING, CONTROL_BUTTON_SIZE, MAX_ZOOM_FACTOR, SAFE_AREA_PADDING, SCREEN_HEIGHT, SCREEN_WIDTH } from './Constants'
import Reanimated, { Extrapolate, interpolate, useAnimatedGestureHandler, useAnimatedProps, useSharedValue } from 'react-native-reanimated'
import { useEffect } from 'react'
import { useIsForeground } from './hooks/useIsForeground'
import { StatusBarBlurBackground } from './views/StatusBarBlurBackground'
import { CaptureButton } from './views/CaptureButton'
import { PressableOpacity } from 'react-native-pressable-opacity'
import MaterialIcon from 'react-native-vector-icons/MaterialCommunityIcons'
import IonIcon from 'react-native-vector-icons/Ionicons'
import type { Routes } from './Routes'
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import { useIsFocused } from '@react-navigation/core'
import { examplePlugin } from './frame-processors/ExamplePlugin'
import { exampleKotlinSwiftPlugin } from './frame-processors/ExampleKotlinSwiftPlugin'
import { usePreferredCameraDevice } from './hooks/usePreferredCameraDevice'
const ReanimatedCamera = Reanimated.createAnimatedComponent(Camera)
Reanimated.addWhitelistedNativeProps({
zoom: true,
})
const SCALE_FULL_ZOOM = 3
type Props = NativeStackScreenProps<Routes, 'CameraPage'>
export function CameraPage({ navigation }: Props): React.ReactElement {
const camera = useRef<Camera>(null)
const [isCameraInitialized, setIsCameraInitialized] = useState(false)
const hasMicrophonePermission = useMemo(() => Camera.getMicrophonePermissionStatus() === 'granted', [])
const zoom = useSharedValue(0)
const isPressingButton = useSharedValue(false)
// check if camera page is active
const isFocussed = useIsFocused()
const isForeground = useIsForeground()
const isActive = isFocussed && isForeground
const [cameraPosition, setCameraPosition] = useState<'front' | 'back'>('back')
const [enableHdr, setEnableHdr] = useState(false)
const [flash, setFlash] = useState<'off' | 'on'>('off')
const [enableNightMode, setEnableNightMode] = useState(false)
// camera device settings
const [preferredDevice] = usePreferredCameraDevice()
let device = useCameraDevice(cameraPosition)
if (preferredDevice != null && preferredDevice.position === cameraPosition) {
// override default device with the one selected by the user in settings
device = preferredDevice
}
const [targetFps, setTargetFps] = useState(60)
const screenAspectRatio = SCREEN_HEIGHT / SCREEN_WIDTH
const format = useCameraFormat(device, [
{ fps: targetFps },
{ videoAspectRatio: screenAspectRatio },
{ videoResolution: 'max' },
{ photoAspectRatio: screenAspectRatio },
{ photoResolution: 'max' },
])
const fps = Math.min(format?.maxFps ?? 1, targetFps)
const supportsFlash = device?.hasFlash ?? false
const supportsHdr = format?.supportsPhotoHdr
const supports60Fps = useMemo(() => device?.formats.some((f) => f.maxFps >= 60), [device?.formats])
const canToggleNightMode = device?.supportsLowLightBoost ?? false
//#region Animated Zoom
// This just maps the zoom factor to a percentage value.
// so e.g. for [min, neutr., max] values [1, 2, 128] this would result in [0, 0.0081, 1]
const minZoom = device?.minZoom ?? 1
const maxZoom = Math.min(device?.maxZoom ?? 1, MAX_ZOOM_FACTOR)
const cameraAnimatedProps = useAnimatedProps(() => {
const z = Math.max(Math.min(zoom.value, maxZoom), minZoom)
return {
zoom: z,
}
}, [maxZoom, minZoom, zoom])
//#endregion
//#region Callbacks
const setIsPressingButton = useCallback(
(_isPressingButton: boolean) => {
isPressingButton.value = _isPressingButton
},
[isPressingButton],
)
// Camera callbacks
const onError = useCallback((error: CameraRuntimeError) => {
console.error(error)
}, [])
const onInitialized = useCallback(() => {
console.log('Camera initialized!')
setIsCameraInitialized(true)
}, [])
const onMediaCaptured = useCallback(
(media: PhotoFile | VideoFile, type: 'photo' | 'video') => {
console.log(`Media captured! ${JSON.stringify(media)}`)
navigation.navigate('MediaPage', {
path: media.path,
type: type,
})
},
[navigation],
)
const onFlipCameraPressed = useCallback(() => {
setCameraPosition((p) => (p === 'back' ? 'front' : 'back'))
}, [])
const onFlashPressed = useCallback(() => {
setFlash((f) => (f === 'off' ? 'on' : 'off'))
}, [])
//#endregion
//#region Tap Gesture
const onDoubleTap = useCallback(() => {
onFlipCameraPressed()
}, [onFlipCameraPressed])
//#endregion
//#region Effects
const neutralZoom = device?.neutralZoom ?? 1
useEffect(() => {
// Run every time the neutralZoomScaled value changes. (reset zoom when device changes)
zoom.value = neutralZoom
}, [neutralZoom, zoom])
//#endregion
//#region Pinch to Zoom Gesture
// The gesture handler maps the linear pinch gesture (0 - 1) to an exponential curve since a camera's zoom
// function does not appear linear to the user. (aka zoom 0.1 -> 0.2 does not look equal in difference as 0.8 -> 0.9)
const onPinchGesture = useAnimatedGestureHandler<PinchGestureHandlerGestureEvent, { startZoom?: number }>({
onStart: (_, context) => {
context.startZoom = zoom.value
},
onActive: (event, context) => {
// we're trying to map the scale gesture to a linear zoom here
const startZoom = context.startZoom ?? 0
const scale = interpolate(event.scale, [1 - 1 / SCALE_FULL_ZOOM, 1, SCALE_FULL_ZOOM], [-1, 0, 1], Extrapolate.CLAMP)
zoom.value = interpolate(scale, [-1, 0, 1], [minZoom, startZoom, maxZoom], Extrapolate.CLAMP)
},
})
//#endregion
useEffect(() => {
const f =
format != null
? `(${format.photoWidth}x${format.photoHeight} photo / ${format.videoWidth}x${format.videoHeight}@${format.maxFps} video @ ${fps}fps)`
: undefined
console.log(`Camera: ${device?.name} | Format: ${f}`)
}, [device?.name, format, fps])
const frameProcessor = useFrameProcessor((frame) => {
'worklet'
// console.log(`${frame.timestamp}: ${frame.width}x${frame.height} ${frame.pixelFormat} Frame (${frame.orientation})`)
// examplePlugin(frame)
// exampleKotlinSwiftPlugin(frame)
}, [])
return (
<View style={styles.container}>
{device != null && (
<PinchGestureHandler onGestureEvent={onPinchGesture} enabled={isActive}>
<Reanimated.View style={StyleSheet.absoluteFill}>
<TapGestureHandler onEnded={onDoubleTap} numberOfTaps={2}>
<ReanimatedCamera
ref={camera}
style={StyleSheet.absoluteFill}
device={device}
format={format}
fps={fps}
photoHdr={enableHdr}
videoHdr={enableHdr}
lowLightBoost={device.supportsLowLightBoost && enableNightMode}
isActive={isActive}
onInitialized={onInitialized}
onError={onError}
enableZoomGesture={false}
animatedProps={cameraAnimatedProps}
exposure={0}
enableFpsGraph={true}
orientation="portrait"
photo={true}
video={true}
audio={hasMicrophonePermission}
frameProcessor={frameProcessor}
/>
</TapGestureHandler>
</Reanimated.View>
</PinchGestureHandler>
)}
<CaptureButton
style={styles.captureButton}
camera={camera}
onMediaCaptured={onMediaCaptured}
cameraZoom={zoom}
minZoom={minZoom}
maxZoom={maxZoom}
flash={supportsFlash ? flash : 'off'}
enabled={isCameraInitialized && isActive}
setIsPressingButton={setIsPressingButton}
/>
<StatusBarBlurBackground />
<View style={styles.rightButtonRow}>
<PressableOpacity style={styles.button} onPress={onFlipCameraPressed} disabledOpacity={0.4}>
<IonIcon name="camera-reverse" color="white" size={24} />
</PressableOpacity>
{supportsFlash && (
<PressableOpacity style={styles.button} onPress={onFlashPressed} disabledOpacity={0.4}>
<IonIcon name={flash === 'on' ? 'flash' : 'flash-off'} color="white" size={24} />
</PressableOpacity>
)}
{supports60Fps && (
<PressableOpacity style={styles.button} onPress={() => setTargetFps((t) => (t === 30 ? 60 : 30))}>
<Text style={styles.text}>{`${targetFps}\nFPS`}</Text>
</PressableOpacity>
)}
{supportsHdr && (
<PressableOpacity style={styles.button} onPress={() => setEnableHdr((h) => !h)}>
<MaterialIcon name={enableHdr ? 'hdr' : 'hdr-off'} color="white" size={24} />
</PressableOpacity>
)}
{canToggleNightMode && (
<PressableOpacity style={styles.button} onPress={() => setEnableNightMode(!enableNightMode)} disabledOpacity={0.4}>
<IonIcon name={enableNightMode ? 'moon' : 'moon-outline'} color="white" size={24} />
</PressableOpacity>
)}
<PressableOpacity style={styles.button} onPress={() => navigation.navigate('Devices')}>
<IonIcon name="settings-outline" color="white" size={24} />
</PressableOpacity>
<PressableOpacity style={styles.button} onPress={() => navigation.navigate('CodeScannerPage')}>
<IonIcon name="qr-code-outline" color="white" size={24} />
</PressableOpacity>
</View>
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'black',
},
captureButton: {
position: 'absolute',
alignSelf: 'center',
bottom: SAFE_AREA_PADDING.paddingBottom,
},
button: {
marginBottom: CONTENT_SPACING,
width: CONTROL_BUTTON_SIZE,
height: CONTROL_BUTTON_SIZE,
borderRadius: CONTROL_BUTTON_SIZE / 2,
backgroundColor: 'rgba(140, 140, 140, 0.3)',
justifyContent: 'center',
alignItems: 'center',
},
rightButtonRow: {
position: 'absolute',
right: SAFE_AREA_PADDING.paddingRight,
top: SAFE_AREA_PADDING.paddingTop,
},
text: {
color: 'white',
fontSize: 11,
fontWeight: 'bold',
textAlign: 'center',
},
})

View File

@@ -1,120 +0,0 @@
import * as React from 'react'
import { useCallback, useRef, useState } from 'react'
import { Alert, AlertButton, Linking, StyleSheet, View } from 'react-native'
import { Code, useCameraDevice, useCodeScanner } from 'react-native-vision-camera'
import { Camera } from 'react-native-vision-camera'
import { CONTENT_SPACING, CONTROL_BUTTON_SIZE, SAFE_AREA_PADDING } from './Constants'
import { useIsForeground } from './hooks/useIsForeground'
import { StatusBarBlurBackground } from './views/StatusBarBlurBackground'
import { PressableOpacity } from 'react-native-pressable-opacity'
import IonIcon from 'react-native-vector-icons/Ionicons'
import type { Routes } from './Routes'
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import { useIsFocused } from '@react-navigation/core'
const showCodeAlert = (value: string, onDismissed: () => void): void => {
const buttons: AlertButton[] = [
{
text: 'Close',
style: 'cancel',
onPress: onDismissed,
},
]
if (value.startsWith('http')) {
buttons.push({
text: 'Open URL',
onPress: () => {
Linking.openURL(value)
onDismissed()
},
})
}
Alert.alert('Scanned Code', value, buttons)
}
type Props = NativeStackScreenProps<Routes, 'CodeScannerPage'>
export function CodeScannerPage({ navigation }: Props): React.ReactElement {
// 1. Use a simple default back camera
const device = useCameraDevice('back')
// 2. Only activate Camera when the app is focused and this screen is currently opened
const isFocused = useIsFocused()
const isForeground = useIsForeground()
const isActive = isFocused && isForeground
// 3. (Optional) enable a torch setting
const [torch, setTorch] = useState(false)
// 4. On code scanned, we show an alert to the user
const isShowingAlert = useRef(false)
const onCodeScanned = useCallback((codes: Code[]) => {
console.log(`Scanned ${codes.length} codes:`, codes)
const value = codes[0]?.value
if (value == null) return
if (isShowingAlert.current) return
showCodeAlert(value, () => {
isShowingAlert.current = false
})
isShowingAlert.current = true
}, [])
// 5. Initialize the Code Scanner to scan QR codes and Barcodes
const codeScanner = useCodeScanner({
codeTypes: ['qr', 'ean-13'],
onCodeScanned: onCodeScanned,
})
return (
<View style={styles.container}>
{device != null && (
<Camera
style={StyleSheet.absoluteFill}
device={device}
isActive={isActive}
codeScanner={codeScanner}
torch={torch ? 'on' : 'off'}
enableZoomGesture={true}
/>
)}
<StatusBarBlurBackground />
<View style={styles.rightButtonRow}>
<PressableOpacity style={styles.button} onPress={() => setTorch(!torch)} disabledOpacity={0.4}>
<IonIcon name={torch ? 'flash' : 'flash-off'} color="white" size={24} />
</PressableOpacity>
</View>
{/* Back Button */}
<PressableOpacity style={styles.backButton} onPress={navigation.goBack}>
<IonIcon name="chevron-back" color="white" size={35} />
</PressableOpacity>
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'black',
},
button: {
marginBottom: CONTENT_SPACING,
width: CONTROL_BUTTON_SIZE,
height: CONTROL_BUTTON_SIZE,
borderRadius: CONTROL_BUTTON_SIZE / 2,
backgroundColor: 'rgba(140, 140, 140, 0.3)',
justifyContent: 'center',
alignItems: 'center',
},
rightButtonRow: {
position: 'absolute',
right: SAFE_AREA_PADDING.paddingRight,
top: SAFE_AREA_PADDING.paddingTop,
},
backButton: {
position: 'absolute',
left: SAFE_AREA_PADDING.paddingLeft,
top: SAFE_AREA_PADDING.paddingTop,
},
})

View File

@@ -1,215 +0,0 @@
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import React, { useCallback, useMemo } from 'react'
import IonIcon from 'react-native-vector-icons/Ionicons'
import { StyleSheet, View, Text, ListRenderItemInfo, SectionList, SectionListData } from 'react-native'
import { CameraDevice, useCameraDevices } from 'react-native-vision-camera'
import { CONTENT_SPACING, SAFE_AREA_PADDING } from './Constants'
import type { Routes } from './Routes'
import { PressableOpacity } from 'react-native-pressable-opacity'
import { usePreferredCameraDevice } from './hooks/usePreferredCameraDevice'
const keyExtractor = (item: CameraDevice): string => item.id
interface SectionType {
position: CameraDevice['position'] | 'preferred'
}
type SectionData = SectionListData<CameraDevice, SectionType>
interface DeviceProps {
device: CameraDevice
onPress: () => void
}
function Device({ device, onPress }: DeviceProps): React.ReactElement {
const maxPhotoRes = useMemo(
() =>
device.formats.reduce((prev, curr) => {
if (curr.photoWidth * curr.photoHeight > prev.photoWidth * prev.photoHeight) return curr
return prev
}),
[device.formats],
)
const maxVideoRes = useMemo(
() =>
device.formats.reduce((prev, curr) => {
if (curr.videoWidth * curr.videoHeight > prev.videoWidth * prev.videoHeight) return curr
return prev
}),
[device.formats],
)
const deviceTypes = useMemo(() => device.physicalDevices.map((t) => t.replace('-camera', '')).join(' + '), [device.physicalDevices])
return (
<PressableOpacity style={styles.itemContainer} onPress={onPress}>
<View style={styles.horizontal}>
<IonIcon name="camera" size={18} color="black" />
<Text style={styles.deviceName} numberOfLines={3}>
{device.name} <Text style={styles.devicePosition}>({device.position})</Text>
</Text>
</View>
<Text style={styles.deviceTypes}>{deviceTypes}</Text>
<View style={styles.horizontal}>
<IonIcon name="camera" size={12} color="black" />
<Text style={styles.resolutionText}>
{maxPhotoRes.photoWidth}x{maxPhotoRes.photoHeight}
</Text>
</View>
<View style={styles.horizontal}>
<IonIcon name="videocam" size={12} color="black" />
<Text style={styles.resolutionText}>
{maxVideoRes.videoWidth}x{maxVideoRes.videoHeight} @ {maxVideoRes.maxFps} FPS
</Text>
</View>
<Text style={styles.deviceId} numberOfLines={2} ellipsizeMode="middle">
{device.id}
</Text>
</PressableOpacity>
)
}
type Props = NativeStackScreenProps<Routes, 'Devices'>
export function DevicesPage({ navigation }: Props): React.ReactElement {
const devices = useCameraDevices()
const [preferredDevice, setPreferredDevice] = usePreferredCameraDevice()
const sections = useMemo((): SectionData[] => {
return [
{
position: 'preferred',
data: preferredDevice != null ? [preferredDevice] : [],
},
{
position: 'back',
data: devices.filter((d) => d.position === 'back'),
},
{
position: 'front',
data: devices.filter((d) => d.position === 'front'),
},
{
position: 'external',
data: devices.filter((d) => d.position === 'external'),
},
]
}, [devices, preferredDevice])
const onDevicePressed = useCallback(
(device: CameraDevice) => {
setPreferredDevice(device)
navigation.navigate('CameraPage')
},
[navigation, setPreferredDevice],
)
const renderItem = useCallback(
({ item }: ListRenderItemInfo<CameraDevice>) => {
return <Device device={item} onPress={() => onDevicePressed(item)} />
},
[onDevicePressed],
)
const renderSectionHeader = useCallback(({ section }: { section: SectionData }) => {
if (section.data.length === 0) return null
return (
<View style={styles.sectionHeader}>
<Text style={styles.sectionHeaderText}>{section.position.toUpperCase()}</Text>
</View>
)
}, [])
return (
<View style={styles.container}>
<View style={styles.headerContainer}>
<View style={styles.horizontal}>
<PressableOpacity style={styles.backButton} onPress={navigation.goBack}>
<IonIcon name="chevron-back" size={35} color="black" />
</PressableOpacity>
<Text style={styles.header}>Camera Devices</Text>
</View>
<Text style={styles.subHeader}>
These are all detected Camera devices on your phone. This list will automatically update as you plug devices in or out.
</Text>
</View>
<SectionList
style={styles.list}
contentContainerStyle={styles.listContent}
sections={sections}
keyExtractor={keyExtractor}
renderItem={renderItem}
renderSectionHeader={renderSectionHeader}
stickySectionHeadersEnabled={false}
/>
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'white',
},
headerContainer: {
paddingTop: SAFE_AREA_PADDING.paddingTop,
paddingLeft: SAFE_AREA_PADDING.paddingLeft,
paddingRight: SAFE_AREA_PADDING.paddingRight,
},
header: {
fontSize: 38,
fontWeight: 'bold',
maxWidth: '80%',
},
subHeader: {
marginTop: 10,
fontSize: 18,
maxWidth: '80%',
},
list: {
marginTop: CONTENT_SPACING,
},
listContent: {
paddingBottom: SAFE_AREA_PADDING.paddingBottom,
},
sectionHeader: {
paddingHorizontal: CONTENT_SPACING / 2,
paddingVertical: 5,
},
sectionHeaderText: {
opacity: 0.4,
fontSize: 16,
},
itemContainer: {
paddingHorizontal: CONTENT_SPACING,
paddingVertical: 7,
},
deviceName: {
fontSize: 17,
marginLeft: 5,
flexShrink: 1,
fontWeight: 'bold',
},
devicePosition: {
opacity: 0.4,
},
deviceId: {
fontSize: 12,
opacity: 0.4,
},
deviceTypes: {
fontSize: 12,
opacity: 0.4,
},
horizontal: {
flexDirection: 'row',
alignItems: 'center',
},
backButton: {
width: 40,
height: 40,
marginTop: 7,
},
resolutionText: {
marginLeft: 5,
fontSize: 12,
},
})

View File

@@ -1,151 +0,0 @@
import React, { useCallback, useMemo, useState } from 'react'
import { StyleSheet, View, ActivityIndicator, PermissionsAndroid, Platform } from 'react-native'
import Video, { LoadError, OnLoadData } from 'react-native-video'
import { SAFE_AREA_PADDING } from './Constants'
import { useIsForeground } from './hooks/useIsForeground'
import { PressableOpacity } from 'react-native-pressable-opacity'
import IonIcon from 'react-native-vector-icons/Ionicons'
import { Alert } from 'react-native'
import { CameraRoll } from '@react-native-camera-roll/camera-roll'
import { StatusBarBlurBackground } from './views/StatusBarBlurBackground'
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import type { Routes } from './Routes'
import { useIsFocused } from '@react-navigation/core'
import FastImage, { OnLoadEvent } from 'react-native-fast-image'
const requestSavePermission = async (): Promise<boolean> => {
if (Platform.OS !== 'android') return true
const permission = PermissionsAndroid.PERMISSIONS.WRITE_EXTERNAL_STORAGE
if (permission == null) return false
let hasPermission = await PermissionsAndroid.check(permission)
if (!hasPermission) {
const permissionRequestResult = await PermissionsAndroid.request(permission)
hasPermission = permissionRequestResult === 'granted'
}
return hasPermission
}
const isVideoOnLoadEvent = (event: OnLoadData | OnLoadEvent): event is OnLoadData => 'duration' in event && 'naturalSize' in event
type Props = NativeStackScreenProps<Routes, 'MediaPage'>
export function MediaPage({ navigation, route }: Props): React.ReactElement {
const { path, type } = route.params
const [hasMediaLoaded, setHasMediaLoaded] = useState(false)
const isForeground = useIsForeground()
const isScreenFocused = useIsFocused()
const isVideoPaused = !isForeground || !isScreenFocused
const [savingState, setSavingState] = useState<'none' | 'saving' | 'saved'>('none')
const onMediaLoad = useCallback((event: OnLoadData | OnLoadEvent) => {
if (isVideoOnLoadEvent(event)) {
console.log(
`Video loaded. Size: ${event.naturalSize.width}x${event.naturalSize.height} (${event.naturalSize.orientation}, ${event.duration} seconds)`,
)
} else {
console.log(`Image loaded. Size: ${event.nativeEvent.width}x${event.nativeEvent.height}`)
}
}, [])
const onMediaLoadEnd = useCallback(() => {
console.log('media has loaded.')
setHasMediaLoaded(true)
}, [])
const onMediaLoadError = useCallback((error: LoadError) => {
console.log(`failed to load media: ${JSON.stringify(error)}`)
}, [])
const onSavePressed = useCallback(async () => {
try {
setSavingState('saving')
const hasPermission = await requestSavePermission()
if (!hasPermission) {
Alert.alert('Permission denied!', 'Vision Camera does not have permission to save the media to your camera roll.')
return
}
await CameraRoll.save(`file://${path}`, {
type: type,
})
setSavingState('saved')
} catch (e) {
const message = e instanceof Error ? e.message : JSON.stringify(e)
setSavingState('none')
Alert.alert('Failed to save!', `An unexpected error occurred while trying to save your ${type}. ${message}`)
}
}, [path, type])
const source = useMemo(() => ({ uri: `file://${path}/1.mp4` }), [path])
const screenStyle = useMemo(() => ({ opacity: hasMediaLoaded ? 1 : 0 }), [hasMediaLoaded])
return (
<View style={[styles.container, screenStyle]}>
{type === 'photo' && (
<FastImage source={source} style={StyleSheet.absoluteFill} resizeMode="cover" onLoadEnd={onMediaLoadEnd} onLoad={onMediaLoad} />
)}
{type === 'video' && (
<Video
source={source}
style={StyleSheet.absoluteFill}
paused={isVideoPaused}
resizeMode="cover"
posterResizeMode="cover"
allowsExternalPlayback={false}
automaticallyWaitsToMinimizeStalling={false}
disableFocus={true}
repeat={true}
useTextureView={false}
controls={false}
playWhenInactive={true}
ignoreSilentSwitch="ignore"
onReadyForDisplay={onMediaLoadEnd}
onLoad={onMediaLoad}
onError={onMediaLoadError}
/>
)}
<PressableOpacity style={styles.closeButton} onPress={navigation.goBack}>
<IonIcon name="close" size={35} color="white" style={styles.icon} />
</PressableOpacity>
<PressableOpacity style={styles.saveButton} onPress={onSavePressed} disabled={savingState !== 'none'}>
{savingState === 'none' && <IonIcon name="download" size={35} color="white" style={styles.icon} />}
{savingState === 'saved' && <IonIcon name="checkmark" size={35} color="white" style={styles.icon} />}
{savingState === 'saving' && <ActivityIndicator color="white" />}
</PressableOpacity>
<StatusBarBlurBackground />
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
backgroundColor: 'white',
},
closeButton: {
position: 'absolute',
top: SAFE_AREA_PADDING.paddingTop,
left: SAFE_AREA_PADDING.paddingLeft,
width: 40,
height: 40,
},
saveButton: {
position: 'absolute',
bottom: SAFE_AREA_PADDING.paddingBottom,
left: SAFE_AREA_PADDING.paddingLeft,
width: 40,
height: 40,
},
icon: {
textShadowColor: 'black',
textShadowOffset: {
height: 0,
width: 0,
},
textShadowRadius: 1,
},
})

View File

@@ -1,96 +0,0 @@
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import React, { useCallback, useEffect, useState } from 'react'
import { ImageRequireSource, Linking } from 'react-native'
import { StyleSheet, View, Text, Image } from 'react-native'
import { Camera, CameraPermissionStatus } from 'react-native-vision-camera'
import { CONTENT_SPACING, SAFE_AREA_PADDING } from './Constants'
import type { Routes } from './Routes'
// eslint-disable-next-line @typescript-eslint/no-var-requires
const BANNER_IMAGE = require('./img/11.png') as ImageRequireSource
type Props = NativeStackScreenProps<Routes, 'PermissionsPage'>
export function PermissionsPage({ navigation }: Props): React.ReactElement {
const [cameraPermissionStatus, setCameraPermissionStatus] = useState<CameraPermissionStatus>('not-determined')
const [microphonePermissionStatus, setMicrophonePermissionStatus] = useState<CameraPermissionStatus>('not-determined')
const requestMicrophonePermission = useCallback(async () => {
console.log('Requesting microphone permission...')
const permission = await Camera.requestMicrophonePermission()
console.log(`Microphone permission status: ${permission}`)
if (permission === 'denied') await Linking.openSettings()
setMicrophonePermissionStatus(permission)
}, [])
const requestCameraPermission = useCallback(async () => {
console.log('Requesting camera permission...')
const permission = await Camera.requestCameraPermission()
console.log(`Camera permission status: ${permission}`)
if (permission === 'denied') await Linking.openSettings()
setCameraPermissionStatus(permission)
}, [])
useEffect(() => {
if (cameraPermissionStatus === 'granted' && microphonePermissionStatus === 'granted') navigation.replace('CameraPage')
}, [cameraPermissionStatus, microphonePermissionStatus, navigation])
return (
<View style={styles.container}>
<Image source={BANNER_IMAGE} style={styles.banner} />
<Text style={styles.welcome}>Welcome to{'\n'}Vision Camera.</Text>
<View style={styles.permissionsContainer}>
{cameraPermissionStatus !== 'granted' && (
<Text style={styles.permissionText}>
Vision Camera needs <Text style={styles.bold}>Camera permission</Text>.{' '}
<Text style={styles.hyperlink} onPress={requestCameraPermission}>
Grant
</Text>
</Text>
)}
{microphonePermissionStatus !== 'granted' && (
<Text style={styles.permissionText}>
Vision Camera needs <Text style={styles.bold}>Microphone permission</Text>.{' '}
<Text style={styles.hyperlink} onPress={requestMicrophonePermission}>
Grant
</Text>
</Text>
)}
</View>
</View>
)
}
const styles = StyleSheet.create({
welcome: {
fontSize: 38,
fontWeight: 'bold',
maxWidth: '80%',
},
banner: {
position: 'absolute',
opacity: 0.4,
bottom: 0,
left: 0,
},
container: {
flex: 1,
backgroundColor: 'white',
...SAFE_AREA_PADDING,
},
permissionsContainer: {
marginTop: CONTENT_SPACING * 2,
},
permissionText: {
fontSize: 17,
},
hyperlink: {
color: '#007aff',
fontWeight: 'bold',
},
bold: {
fontWeight: 'bold',
},
})

View File

@@ -1,10 +0,0 @@
export type Routes = {
PermissionsPage: undefined
CameraPage: undefined
CodeScannerPage: undefined
MediaPage: {
path: string
type: 'video' | 'photo'
}
Devices: undefined
}

View File

@@ -0,0 +1,112 @@
import React, { useCallback, useRef, useState } from 'react'
import { Button, StyleSheet, Text, View } from 'react-native'
import {
Camera,
useCameraPermission,
useCameraDevice,
useCameraFormat,
PhotoFile,
VideoFile,
CameraRuntimeError,
Orientation,
CameraDevice,
} from 'react-native-vision-camera'
import { RecordingButton } from './capture-button'
import { useIsForeground } from './is-foreground'
export default function CameraScreen() {
const camera = useRef<Camera>(null)
const { hasPermission, requestPermission } = useCameraPermission()
const [isCameraInitialized, setIsCameraInitialized] = useState<boolean>(false)
const isForeground: boolean = useIsForeground()
const isActive: boolean = isForeground // Should be combined with isFocused hook
const onError = useCallback((error: CameraRuntimeError) => {
console.error(error)
}, [])
const onInitialized = useCallback(() => {
console.log('Camera initialized!')
setIsCameraInitialized(true)
}, [])
const onMediaCaptured = useCallback((media: PhotoFile | VideoFile) => {
console.log(`Media captured! ${JSON.stringify(media)}`)
}, [])
if (!hasPermission) requestPermission()
// TODO: handle the case where the user refuses to grant permission
const device = useCameraDevice('back')
const format = useCameraFormat(device, [{ videoResolution: { width: 3048, height: 2160 } }, { fps: 60 }]) // this sets a target, not a guarantee
//Orientation detection
const [orientation, setOrientation] = useState<Orientation>('portrait')
const toggleOrientation = () => {
setOrientation(
(currentOrientation) => (currentOrientation === 'landscape-left' ? 'portrait' : 'landscape-left'), // Can adjust this and the type to match what we want
)
}
if (device == null) return <Text>Camera not available. Does the user have permission? {String(hasPermission)}</Text>
return (
hasPermission && (
<View style={styles.container}>
<Camera
ref={camera}
style={StyleSheet.absoluteFill}
device={device as CameraDevice}
format={format}
onInitialized={onInitialized}
onError={onError}
video={true}
orientation={orientation} // TODO: #60
isActive={isActive}
/>
<RecordingButton
style={[styles.captureButton, orientation === 'portrait' ? styles.portrait : styles.landscape]}
camera={camera}
onMediaCaptured={onMediaCaptured}
enabled={isCameraInitialized}
/>
<View style={[styles.button, orientation === 'portrait' ? styles.togglePortrait : styles.toggleLandscape]}>
<Button title="Toggle Orientation" onPress={toggleOrientation} color="#841584" accessibilityLabel="Toggle camera orientation" />
</View>
</View>
)
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'black',
},
captureButton: {
position: 'absolute',
alignSelf: 'center',
},
button: {
position: 'absolute',
alignSelf: 'center',
},
togglePortrait: {
bottom: 110, // needs refinement
},
toggleLandscape: {
transform: [{ rotate: '90deg' }],
bottom: '43%', // Should come from SafeAreaProvider, hardcoded right now, should roughly appear above the button
left: 50, // needs refinement
},
portrait: {
bottom: 20, // needs refinement
},
landscape: {
bottom: '40%', // Should come from SafeAreaProvider
left: 20, // needs refinement
},
})
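
The `isActive` comment above suggests combining foreground state with navigation focus. A sketch of that combination, assuming the screen is rendered inside a react-navigation navigator (the deleted CameraPage did exactly this):

```tsx
import { useIsFocused } from '@react-navigation/core'
import { useIsForeground } from './is-foreground'

// Active only when the app is foregrounded AND this screen is focused.
export function useIsActive(): boolean {
  const isFocused = useIsFocused()
  const isForeground = useIsForeground()
  return isFocused && isForeground
}
```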

View File

@@ -0,0 +1,96 @@
import React, { useCallback, useRef, useState } from 'react'
import { TouchableOpacity, StyleSheet, View, StyleProp, ViewStyle } from 'react-native'
import { Camera, VideoFile } from 'react-native-vision-camera'
interface RecordingButtonProps {
style: StyleProp<ViewStyle>
camera: React.RefObject<Camera>
onMediaCaptured: (media: VideoFile, mediaType: string) => void
enabled: boolean
}
export const RecordingButton: React.FC<RecordingButtonProps> = ({ style, camera, onMediaCaptured, enabled }) => {
const isRecording = useRef(false)
// useRef updates don't trigger a re-render, so mirror the value in state to refresh the UI
const [, setRecordingState] = useState(false)
const onStoppedRecording = useCallback(() => {
isRecording.current = false
setRecordingState(false)
console.log('stopped recording video!')
}, [])
const stopRecording = useCallback(async () => {
try {
if (camera.current === null) throw new Error('Camera ref is null!') // Error handling could be more graceful
console.log('calling stopRecording()...')
await camera.current.stopRecording()
console.log('called stopRecording()!')
} catch (e) {
console.error('failed to stop recording!', e)
}
}, [camera])
const startRecording = useCallback(() => {
console.log('press')
try {
if (camera.current === null) throw new Error('Camera ref is null!') // Error handling could be more graceful
console.log('calling startRecording()...')
camera.current.startRecording({
onRecordingError: (error) => {
console.error('Recording failed!', error)
onStoppedRecording()
},
onRecordingFinished: (video) => {
onMediaCaptured(video, 'video')
onStoppedRecording()
},
})
console.log('called startRecording()!')
isRecording.current = true
setRecordingState(true)
} catch (e) {
console.error('failed to start recording!', e, 'camera')
}
}, [camera, onMediaCaptured, onStoppedRecording])
const handlePress = () => {
if (isRecording.current) stopRecording()
else startRecording()
}
return (
<TouchableOpacity style={[styles.captureButton, style]} onPress={handlePress} disabled={!enabled}>
<View style={isRecording.current ? styles.recordingSquare : styles.innerCircle} />
</TouchableOpacity>
)
}
const styles = StyleSheet.create({
captureButton: {
height: 80,
width: 80,
borderRadius: 40,
borderWidth: 3,
borderColor: 'white',
backgroundColor: 'transparent',
justifyContent: 'center',
alignItems: 'center',
},
innerCircle: {
height: 70,
width: 70,
borderRadius: 35,
backgroundColor: '#FF3B30',
},
recordingSquare: {
height: 40,
width: 40,
borderRadius: 10,
backgroundColor: '#FF3B30',
},
})
export default RecordingButton

View File

@@ -1,17 +0,0 @@
import { VisionCameraProxy, Frame } from 'react-native-vision-camera'
const plugin = VisionCameraProxy.initFrameProcessorPlugin('example_kotlin_swift_plugin', { foo: 'bar' })
export function exampleKotlinSwiftPlugin(frame: Frame): string[] {
'worklet'
if (plugin == null) throw new Error('Failed to load Frame Processor Plugin "example_kotlin_swift_plugin"!')
return plugin.call(frame, {
someString: 'hello!',
someBoolean: true,
someNumber: 42,
someObject: { test: 0, second: 'test' },
someArray: ['another test', 5],
}) as string[]
}

View File

@@ -1,25 +0,0 @@
import { VisionCameraProxy, Frame } from 'react-native-vision-camera'
const plugin = VisionCameraProxy.initFrameProcessorPlugin('example_plugin')
interface Result {
example_array: (string | number | boolean)[]
example_array_buffer: ArrayBuffer
example_str: string
example_bool: boolean
example_double: number
}
export function examplePlugin(frame: Frame): Result {
'worklet'
if (plugin == null) throw new Error('Failed to load Frame Processor Plugin "example_plugin"!')
return plugin.call(frame, {
someString: 'hello!',
someBoolean: true,
someNumber: 42,
someObject: { test: 0, second: 'test' },
someArray: ['another test', 5],
}) as unknown as Result
}

View File

@@ -1,20 +0,0 @@
import { useMMKVString } from 'react-native-mmkv'
import { CameraDevice } from '../../../src/CameraDevice'
import { useCallback, useMemo } from 'react'
import { useCameraDevices } from '../../../src/hooks/useCameraDevices'
export function usePreferredCameraDevice(): [CameraDevice | undefined, (device: CameraDevice) => void] {
const [preferredDeviceId, setPreferredDeviceId] = useMMKVString('camera.preferredDeviceId')
const set = useCallback(
(device: CameraDevice) => {
setPreferredDeviceId(device.id)
},
[setPreferredDeviceId],
)
const devices = useCameraDevices()
const device = useMemo(() => devices.find((d) => d.id === preferredDeviceId), [devices, preferredDeviceId])
return [device, set]
}

View File

@@ -1,5 +1,4 @@
import { useState } from 'react'
import { useEffect } from 'react'
import { useState, useEffect } from 'react'
import { AppState, AppStateStatus } from 'react-native'
export const useIsForeground = (): boolean => {
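
The hook body is unchanged by this hunk and not shown; for reference, the usual shape of such a hook is roughly the following sketch (assumed, not the file's exact body):

```ts
import { useState, useEffect } from 'react'
import { AppState, AppStateStatus } from 'react-native'

export const useIsForeground = (): boolean => {
  const [isForeground, setIsForeground] = useState(true)
  useEffect(() => {
    const onChange = (state: AppStateStatus): void => setIsForeground(state === 'active')
    const subscription = AppState.addEventListener('change', onChange)
    return () => subscription.remove()
  }, [])
  return isForeground
}
```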

View File

@@ -1,307 +0,0 @@
import React, { useCallback, useMemo, useRef } from 'react'
import { StyleSheet, View, ViewProps } from 'react-native'
import {
PanGestureHandler,
PanGestureHandlerGestureEvent,
State,
TapGestureHandler,
TapGestureHandlerStateChangeEvent,
} from 'react-native-gesture-handler'
import Reanimated, {
cancelAnimation,
Easing,
Extrapolate,
interpolate,
useAnimatedStyle,
withSpring,
withTiming,
useAnimatedGestureHandler,
useSharedValue,
withRepeat,
} from 'react-native-reanimated'
import type { Camera, PhotoFile, TakePhotoOptions, VideoFile } from 'react-native-vision-camera'
import { CAPTURE_BUTTON_SIZE, SCREEN_HEIGHT, SCREEN_WIDTH } from './../Constants'
const PAN_GESTURE_HANDLER_FAIL_X = [-SCREEN_WIDTH, SCREEN_WIDTH]
const PAN_GESTURE_HANDLER_ACTIVE_Y = [-2, 2]
const START_RECORDING_DELAY = 200
const BORDER_WIDTH = CAPTURE_BUTTON_SIZE * 0.1
interface Props extends ViewProps {
camera: React.RefObject<Camera>
onMediaCaptured: (media: PhotoFile | VideoFile, type: 'photo' | 'video') => void
minZoom: number
maxZoom: number
cameraZoom: Reanimated.SharedValue<number>
flash: 'off' | 'on'
enabled: boolean
setIsPressingButton: (isPressingButton: boolean) => void
}
const _CaptureButton: React.FC<Props> = ({
camera,
onMediaCaptured,
minZoom,
maxZoom,
cameraZoom,
flash,
enabled,
setIsPressingButton,
style,
...props
}): React.ReactElement => {
const pressDownDate = useRef<Date | undefined>(undefined)
const isRecording = useRef(false)
const recordingProgress = useSharedValue(0)
const takePhotoOptions = useMemo<TakePhotoOptions>(
() => ({
qualityPrioritization: 'speed',
flash: flash,
quality: 90,
enableShutterSound: false,
}),
[flash],
)
const isPressingButton = useSharedValue(false)
//#region Camera Capture
const takePhoto = useCallback(async () => {
try {
if (camera.current == null) throw new Error('Camera ref is null!')
console.log('Taking photo...')
const photo = await camera.current.takePhoto(takePhotoOptions)
onMediaCaptured(photo, 'photo')
} catch (e) {
console.error('Failed to take photo!', e)
}
}, [camera, onMediaCaptured, takePhotoOptions])
const onStoppedRecording = useCallback(() => {
isRecording.current = false
cancelAnimation(recordingProgress)
console.log('stopped recording video!')
}, [recordingProgress])
const stopRecording = useCallback(async () => {
try {
if (camera.current == null) throw new Error('Camera ref is null!')
console.log('calling stopRecording()...')
await camera.current.stopRecording()
console.log('called stopRecording()!')
} catch (e) {
console.error('failed to stop recording!', e)
}
}, [camera])
const startRecording = useCallback(() => {
try {
if (camera.current == null) throw new Error('Camera ref is null!')
console.log('calling startRecording()...')
camera.current.startRecording({
flash: flash,
onRecordingError: (error) => {
console.error('Recording failed!', error)
onStoppedRecording()
},
onRecordingFinished: (video) => {
console.log(`Recording successfully finished! ${video.path}`)
onMediaCaptured(video, 'video')
onStoppedRecording()
},
})
// TODO: wait until startRecording returns to actually find out if the recording has successfully started
console.log('called startRecording()!')
isRecording.current = true
} catch (e) {
console.error('failed to start recording!', e, 'camera')
}
}, [camera, flash, onMediaCaptured, onStoppedRecording])
//#endregion
//#region Tap handler
const tapHandler = useRef<TapGestureHandler>()
const onHandlerStateChanged = useCallback(
async ({ nativeEvent: event }: TapGestureHandlerStateChangeEvent) => {
// This is the gesture handler for the circular "shutter" button.
// Once the finger touches the button (State.BEGAN), a photo is being taken and "capture mode" is entered. (disabled tab bar)
// Also, we set `pressDownDate` to the time of the press down event, and start a 200ms timeout. If the `pressDownDate` hasn't changed
// after the 200ms, the user is still holding down the "shutter" button. In that case, we start recording.
//
// Once the finger releases the button (State.END/FAILED/CANCELLED), we leave "capture mode" (enable tab bar) and check the `pressDownDate`,
// if `pressDownDate` was less than 200ms ago, we know that the intention of the user is to take a photo. We check the `takePhotoPromise` if
// there already is an ongoing (or already resolved) takePhoto() call (remember that we called takePhoto() when the user pressed down), and
// if yes, use that. If no, we just try calling takePhoto() again
console.debug(`state: ${Object.keys(State)[event.state]}`)
switch (event.state) {
case State.BEGAN: {
// enter "recording mode"
recordingProgress.value = 0
isPressingButton.value = true
const now = new Date()
pressDownDate.current = now
setTimeout(() => {
if (pressDownDate.current === now) {
// user is still pressing down after 200ms, so his intention is to create a video
startRecording()
}
}, START_RECORDING_DELAY)
setIsPressingButton(true)
return
}
case State.END:
case State.FAILED:
case State.CANCELLED: {
// exit "recording mode"
try {
if (pressDownDate.current == null) throw new Error('PressDownDate ref .current was null!')
const now = new Date()
const diff = now.getTime() - pressDownDate.current.getTime()
pressDownDate.current = undefined
if (diff < START_RECORDING_DELAY) {
// user has released the button within 200ms, so his intention is to take a single picture.
await takePhoto()
} else {
// user has held the button for more than 200ms, so he has been recording this entire time.
await stopRecording()
}
} finally {
setTimeout(() => {
isPressingButton.value = false
setIsPressingButton(false)
}, 500)
}
return
}
default:
break
}
},
[isPressingButton, recordingProgress, setIsPressingButton, startRecording, stopRecording, takePhoto],
)
//#endregion
//#region Pan handler
const panHandler = useRef<PanGestureHandler>()
const onPanGestureEvent = useAnimatedGestureHandler<PanGestureHandlerGestureEvent, { offsetY?: number; startY?: number }>({
onStart: (event, context) => {
context.startY = event.absoluteY
const yForFullZoom = context.startY * 0.7
const offsetYForFullZoom = context.startY - yForFullZoom
// extrapolate [0 ... 1] zoom -> [0 ... Y_FOR_FULL_ZOOM] finger position
context.offsetY = interpolate(cameraZoom.value, [minZoom, maxZoom], [0, offsetYForFullZoom], Extrapolate.CLAMP)
},
onActive: (event, context) => {
const offset = context.offsetY ?? 0
const startY = context.startY ?? SCREEN_HEIGHT
const yForFullZoom = startY * 0.7
cameraZoom.value = interpolate(event.absoluteY - offset, [yForFullZoom, startY], [maxZoom, minZoom], Extrapolate.CLAMP)
},
})
//#endregion
const shadowStyle = useAnimatedStyle(
() => ({
transform: [
{
scale: withSpring(isPressingButton.value ? 1 : 0, {
mass: 1,
damping: 35,
stiffness: 300,
}),
},
],
}),
[isPressingButton],
)
const buttonStyle = useAnimatedStyle(() => {
let scale: number
if (enabled) {
if (isPressingButton.value) {
scale = withRepeat(
withSpring(1, {
stiffness: 100,
damping: 1000,
}),
-1,
true,
)
} else {
scale = withSpring(0.9, {
stiffness: 500,
damping: 300,
})
}
} else {
scale = withSpring(0.6, {
stiffness: 500,
damping: 300,
})
}
return {
opacity: withTiming(enabled ? 1 : 0.3, {
duration: 100,
easing: Easing.linear,
}),
transform: [
{
scale: scale,
},
],
}
}, [enabled, isPressingButton])
return (
<TapGestureHandler
enabled={enabled}
ref={tapHandler}
onHandlerStateChange={onHandlerStateChanged}
shouldCancelWhenOutside={false}
maxDurationMs={99999999} // <-- this prevents the TapGestureHandler from going to State.FAILED when the user moves his finger outside of the child view (to zoom)
simultaneousHandlers={panHandler}>
<Reanimated.View {...props} style={[buttonStyle, style]}>
<PanGestureHandler
enabled={enabled}
ref={panHandler}
failOffsetX={PAN_GESTURE_HANDLER_FAIL_X}
activeOffsetY={PAN_GESTURE_HANDLER_ACTIVE_Y}
onGestureEvent={onPanGestureEvent}
simultaneousHandlers={tapHandler}>
<Reanimated.View style={styles.flex}>
<Reanimated.View style={[styles.shadow, shadowStyle]} />
<View style={styles.button} />
</Reanimated.View>
</PanGestureHandler>
</Reanimated.View>
</TapGestureHandler>
)
}
export const CaptureButton = React.memo(_CaptureButton)
const styles = StyleSheet.create({
flex: {
flex: 1,
},
shadow: {
position: 'absolute',
width: CAPTURE_BUTTON_SIZE,
height: CAPTURE_BUTTON_SIZE,
borderRadius: CAPTURE_BUTTON_SIZE / 2,
backgroundColor: '#e34077',
},
button: {
width: CAPTURE_BUTTON_SIZE,
height: CAPTURE_BUTTON_SIZE,
borderRadius: CAPTURE_BUTTON_SIZE / 2,
borderWidth: BORDER_WIDTH,
borderColor: 'white',
},
})

View File

@ -1,32 +0,0 @@
import { BlurView, BlurViewProps } from '@react-native-community/blur'
import React from 'react'
import { Platform, StyleSheet } from 'react-native'
import StaticSafeAreaInsets from 'react-native-static-safe-area-insets'
const FALLBACK_COLOR = 'rgba(140, 140, 140, 0.3)'
const StatusBarBlurBackgroundImpl = ({ style, ...props }: BlurViewProps): React.ReactElement | null => {
if (Platform.OS !== 'ios') return null
return (
<BlurView
style={[styles.statusBarBackground, style]}
blurAmount={25}
blurType="light"
reducedTransparencyFallbackColor={FALLBACK_COLOR}
{...props}
/>
)
}
export const StatusBarBlurBackground = React.memo(StatusBarBlurBackgroundImpl)
const styles = StyleSheet.create({
statusBarBackground: {
position: 'absolute',
top: 0,
left: 0,
right: 0,
height: StaticSafeAreaInsets.safeAreaInsetsTop,
},
})

View File

@@ -18,32 +18,37 @@ export type CameraPermissionStatus = 'granted' | 'not-determined' | 'denied' | '
export type CameraPermissionRequestResult = 'granted' | 'denied'
interface OnCodeScannedEvent {
codes: Code[]
frame: CodeScannerFrame
}
interface OnErrorEvent {
code: string
message: string
cause?: ErrorWithCause
}
interface OnVideoChunkReadyEvent {
filepath: string
index: number
}
type NativeCameraViewProps = Omit<CameraProps, 'device' | 'onInitialized' | 'onError' | 'frameProcessor' | 'codeScanner'> & {
cameraId: string
enableFrameProcessor: boolean
codeScannerOptions?: Omit<CodeScanner, 'onCodeScanned'>
onInitialized?: (event: NativeSyntheticEvent<void>) => void
onError?: (event: NativeSyntheticEvent<OnErrorEvent>) => void
onCodeScanned?: (event: NativeSyntheticEvent<OnCodeScannedEvent>) => void
onStarted?: (event: NativeSyntheticEvent<void>) => void
onStopped?: (event: NativeSyntheticEvent<void>) => void
onVideoChunkReady?: (event: NativeSyntheticEvent<OnVideoChunkReadyEvent>) => void
onViewReady: () => void
}
type NativeRecordVideoOptions = Omit<RecordVideoOptions, 'onRecordingError' | 'onRecordingFinished' | 'videoBitRate'> & {
videoBitRateOverride?: number
videoBitRateMultiplier?: number
}
type RefType = React.Component<NativeCameraViewProps> & Readonly<NativeMethods>
interface CameraState {
isRecordingWithFlash: boolean
}
//#endregion
@@ -77,427 +82,427 @@ interface CameraState {
* @component
*/
export class Camera extends React.PureComponent<CameraProps, CameraState> {
/** @internal */
static displayName = 'Camera'
/** @internal */
displayName = Camera.displayName
private lastFrameProcessor: FrameProcessor | undefined
private isNativeViewMounted = false
/** @internal */
static displayName = 'Camera'
/** @internal */
displayName = Camera.displayName
private lastFrameProcessor: FrameProcessor | undefined
private isNativeViewMounted = false
private readonly ref: React.RefObject<RefType>
private readonly ref: React.RefObject<RefType>
/** @internal */
constructor(props: CameraProps) {
super(props)
this.onViewReady = this.onViewReady.bind(this)
this.onInitialized = this.onInitialized.bind(this)
this.onStarted = this.onStarted.bind(this)
this.onStopped = this.onStopped.bind(this)
this.onError = this.onError.bind(this)
this.onCodeScanned = this.onCodeScanned.bind(this)
this.ref = React.createRef<RefType>()
this.lastFrameProcessor = undefined
this.state = {
isRecordingWithFlash: false,
}
}
/** @internal */
constructor(props: CameraProps) {
super(props)
this.onViewReady = this.onViewReady.bind(this)
this.onInitialized = this.onInitialized.bind(this)
this.onStarted = this.onStarted.bind(this)
this.onStopped = this.onStopped.bind(this)
this.onError = this.onError.bind(this)
this.onCodeScanned = this.onCodeScanned.bind(this)
this.ref = React.createRef<RefType>()
this.lastFrameProcessor = undefined
this.state = {
isRecordingWithFlash: false,
}
}
private get handle(): number {
const nodeHandle = findNodeHandle(this.ref.current)
if (nodeHandle == null || nodeHandle === -1) {
throw new CameraRuntimeError(
'system/view-not-found',
"Could not get the Camera's native view tag! Does the Camera View exist in the native view-tree?",
)
}
private get handle(): number {
const nodeHandle = findNodeHandle(this.ref.current)
if (nodeHandle == null || nodeHandle === -1) {
throw new CameraRuntimeError(
'system/view-not-found',
"Could not get the Camera's native view tag! Does the Camera View exist in the native view-tree?",
)
}
return nodeHandle
}
return nodeHandle
}
//#region View-specific functions (UIViewManager)
/**
* Take a single photo and write it's content to a temporary file.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occured while capturing the photo. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
* @example
* ```ts
* const photo = await camera.current.takePhoto({
* qualityPrioritization: 'quality',
* flash: 'on',
* enableAutoRedEyeReduction: true
* })
* ```
*/
public async takePhoto(options?: TakePhotoOptions): Promise<PhotoFile> {
try {
return await CameraModule.takePhoto(this.handle, options ?? {})
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
private getBitRateMultiplier(bitRate: RecordVideoOptions['videoBitRate']): number {
if (typeof bitRate === 'number' || bitRate == null) return 1
switch (bitRate) {
case 'extra-low':
return 0.6
case 'low':
return 0.8
case 'normal':
return 1
case 'high':
return 1.2
case 'extra-high':
return 1.4
}
}
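// For example, with a native base bit-rate of 10 Mbps (illustrative value),
// 'extra-low' yields 6 Mbps, 'normal' 10 Mbps, and 'extra-high' 14 Mbps.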
/**
* Start a new video recording.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while starting the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* camera.current.startRecording({
* onRecordingFinished: (video) => console.log(video),
* onRecordingError: (error) => console.error(error),
* })
* setTimeout(() => {
* camera.current.stopRecording()
* }, 5000)
* ```
*/
public startRecording(options: RecordVideoOptions): void {
const { onRecordingError, onRecordingFinished, videoBitRate, ...passThruOptions } = options
if (typeof onRecordingError !== 'function' || typeof onRecordingFinished !== 'function')
throw new CameraRuntimeError('parameter/invalid-parameter', 'The onRecordingError or onRecordingFinished functions were not set!')
if (options.flash === 'on') {
// Enable torch for video recording
this.setState({
isRecordingWithFlash: true,
})
}
const nativeOptions: NativeRecordVideoOptions = passThruOptions
if (typeof videoBitRate === 'number') {
// If the user passed an absolute number as a bit-rate, we just use this as a full override.
nativeOptions.videoBitRateOverride = videoBitRate
} else if (typeof videoBitRate === 'string' && videoBitRate !== 'normal') {
// If the user passed a quality preset ('extra-low', 'low', 'high' or 'extra-high'), we apply it as a multiplier to the native bitrate instead of setting it absolutely
nativeOptions.videoBitRateMultiplier = this.getBitRateMultiplier(videoBitRate)
}
const onRecordCallback = (video?: VideoFile, error?: CameraCaptureError): void => {
if (this.state.isRecordingWithFlash) {
// disable torch again if it was enabled
this.setState({
isRecordingWithFlash: false,
})
}
if (error != null) return onRecordingError(error)
if (video != null) return onRecordingFinished(video)
}
try {
// TODO: Use TurboModules to make this awaitable.
CameraModule.startRecording(this.handle, nativeOptions, onRecordCallback)
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
/**
* Pauses the current video recording.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while pausing the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* // Start
* await camera.current.startRecording()
* await timeout(1000)
* // Pause
* await camera.current.pauseRecording()
* await timeout(500)
* // Resume
* await camera.current.resumeRecording()
* await timeout(2000)
* // Stop
* const video = await camera.current.stopRecording()
* ```
*/
public async pauseRecording(): Promise<void> {
try {
return await CameraModule.pauseRecording(this.handle)
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
/**
* Resumes a currently paused video recording.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while resuming the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* // Start
* await camera.current.startRecording()
* await timeout(1000)
* // Pause
* await camera.current.pauseRecording()
* await timeout(500)
* // Resume
* await camera.current.resumeRecording()
* await timeout(2000)
* // Stop
* const video = await camera.current.stopRecording()
* ```
*/
public async resumeRecording(): Promise<void> {
try {
return await CameraModule.resumeRecording(this.handle)
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
/**
* Stop the current video recording.
*
* @throws {@linkcode CameraCaptureError} When any kind of error occurred while stopping the video recording. Use the {@linkcode CameraCaptureError.code | code} property to get the actual error
*
* @example
* ```ts
* await camera.current.startRecording()
* setTimeout(async () => {
* const video = await camera.current.stopRecording()
* }, 5000)
* ```
*/
public async stopRecording(): Promise<void> {
try {
return await CameraModule.stopRecording(this.handle)
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
/**
* Focus the camera to a specific point in the coordinate system.
* @param {Point} point The point to focus on. This should be relative
* to the Camera view's coordinate system and is expressed in points.
* * `(0, 0)` means **top left**.
* * `(CameraView.width, CameraView.height)` means **bottom right**.
*
* Make sure the value doesn't exceed the CameraView's dimensions.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while focusing. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
* @example
* ```ts
* await camera.current.focus({
* x: tapEvent.x,
* y: tapEvent.y
* })
* ```
*/
public async focus(point: Point): Promise<void> {
try {
return await CameraModule.focus(this.handle, point)
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
//#endregion
//#region Static Functions (NativeModule)
/**
* Get a list of all available camera devices on the current phone.
*
* If you use Hooks, use the `useCameraDevices(..)` hook instead.
*
* * For Camera Devices attached to the phone, it is safe to assume that this will never change.
* * For external Camera Devices (USB cameras, Mac continuity cameras, etc.) the available Camera Devices could change over time when the external Camera device gets plugged in or plugged out, so use {@link addCameraDevicesChangedListener | addCameraDevicesChangedListener(...)} to listen for such changes.
*
* @example
* ```ts
* const devices = Camera.getAvailableCameraDevices()
* const backCameras = devices.filter((d) => d.position === "back")
* const frontCameras = devices.filter((d) => d.position === "front")
* ```
*/
public static getAvailableCameraDevices(): CameraDevice[] {
return CameraDevices.getAvailableCameraDevices()
}
/**
* Adds a listener that gets called every time the Camera Devices change, for example
* when an external Camera Device (USB or continuity Camera) gets plugged in or plugged out.
*
* If you use Hooks, use the `useCameraDevices()` hook instead.
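*
* @example
* ```ts
* // A minimal sketch: `remove()` is the standard way to tear down an
* // EmitterSubscription returned by a listener API like this one.
* const subscription = Camera.addCameraDevicesChangedListener((devices) => {
*   console.log(`Camera devices changed: ${devices.length} device(s) available`)
* })
* // later, e.g. on unmount:
* subscription.remove()
* ```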
*/
public static addCameraDevicesChangedListener(listener: (newDevices: CameraDevice[]) => void): EmitterSubscription {
return CameraDevices.addCameraDevicesChangedListener(listener)
}
/**
* Gets the current Camera Permission Status. Check this before mounting the Camera to ensure
* the user has permitted the app to use the camera.
*
* To actually prompt the user for camera permission, use {@linkcode Camera.requestCameraPermission | requestCameraPermission()}.
*/
public static getCameraPermissionStatus(): CameraPermissionStatus {
return CameraModule.getCameraPermissionStatus()
}
/**
* Gets the current Microphone-Recording Permission Status. Check this before mounting the Camera to ensure
* the user has permitted the app to use the microphone.
*
* To actually prompt the user for microphone permission, use {@linkcode Camera.requestMicrophonePermission | requestMicrophonePermission()}.
*/
public static getMicrophonePermissionStatus(): CameraPermissionStatus {
return CameraModule.getMicrophonePermissionStatus()
}
/**
* Shows a "request permission" alert to the user, and resolves with the new camera permission status.
*
* If the user has previously blocked the app from using the camera, the alert will not be shown
* and `"denied"` will be returned.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while requesting permission. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
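*
* @example
* ```ts
* // A minimal flow sketch, assuming `CameraPermissionRequestResult` is
* // either 'granted' or 'denied':
* const status = Camera.getCameraPermissionStatus()
* if (status !== 'granted') {
*   const result = await Camera.requestCameraPermission()
*   if (result === 'denied') console.warn('Camera permission was denied')
* }
* ```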
*/
public static async requestCameraPermission(): Promise<CameraPermissionRequestResult> {
try {
return await CameraModule.requestCameraPermission()
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
/**
* Shows a "request permission" alert to the user, and resolves with the new microphone permission status.
*
* If the user has previously blocked the app from using the microphone, the alert will not be shown
* and `"denied"` will be returned.
*
* @throws {@linkcode CameraRuntimeError} When any kind of error occurred while requesting permission. Use the {@linkcode CameraRuntimeError.code | code} property to get the actual error
*/
public static async requestMicrophonePermission(): Promise<CameraPermissionRequestResult> {
try {
return await CameraModule.requestMicrophonePermission()
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
//#endregion
//#region Events (Wrapped to maintain reference equality)
private onError(event: NativeSyntheticEvent<OnErrorEvent>): void {
const error = event.nativeEvent
const cause = isErrorWithCause(error.cause) ? error.cause : undefined
// @ts-expect-error We're casting from unknown bridge types to TS unions; this should be safe here
const cameraError = new CameraRuntimeError(error.code, error.message, cause)
if (this.props.onError != null) {
this.props.onError(cameraError)
} else {
// User didn't pass an `onError` handler, so just log it to console
console.error(`Camera.onError(${cameraError.code}): ${cameraError.message}`, cameraError)
}
}
private onInitialized(): void {
this.props.onInitialized?.()
}
private onStarted(): void {
this.props.onStarted?.()
}
private onStopped(): void {
this.props.onStopped?.()
}
//#endregion
private onCodeScanned(event: NativeSyntheticEvent<OnCodeScannedEvent>): void {
const codeScanner = this.props.codeScanner
if (codeScanner == null) return
codeScanner.onCodeScanned(event.nativeEvent.codes, event.nativeEvent.frame)
}
//#region Lifecycle
private setFrameProcessor(frameProcessor: FrameProcessor): void {
VisionCameraProxy.setFrameProcessor(this.handle, frameProcessor)
}
private unsetFrameProcessor(): void {
VisionCameraProxy.removeFrameProcessor(this.handle)
}
private onViewReady(): void {
this.isNativeViewMounted = true
if (this.props.frameProcessor != null) {
// user passed a `frameProcessor` but we didn't set it yet because the native view was not mounted yet. set it now.
this.setFrameProcessor(this.props.frameProcessor)
this.lastFrameProcessor = this.props.frameProcessor
}
}
/** @internal */
componentDidUpdate(): void {
if (!this.isNativeViewMounted) return
const frameProcessor = this.props.frameProcessor
if (frameProcessor !== this.lastFrameProcessor) {
// frameProcessor argument identity changed. Update native to reflect the change.
if (frameProcessor != null) this.setFrameProcessor(frameProcessor)
else this.unsetFrameProcessor()
this.lastFrameProcessor = frameProcessor
}
}
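// Note: since only reference identity is compared here, consumers should
// memoize their frame processor (e.g. via the useFrameProcessor() hook) so it
// isn't re-attached on every render.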
//#endregion
/** @internal */
public render(): React.ReactNode {
// We remove the big `device` object from the props because we only need to pass `cameraId` to native.
const { device, frameProcessor, codeScanner, ...props } = this.props
// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
if (device == null) {
throw new Error(
'Camera: `device` is null! Select a valid Camera device. See: https://mrousavy.com/react-native-vision-camera/docs/guides/devices',
)
}
const shouldEnableBufferCompression = props.video === true && frameProcessor == null
const pixelFormat = props.pixelFormat ?? (frameProcessor != null ? 'yuv' : 'native')
const torch = this.state.isRecordingWithFlash ? 'on' : props.torch
return (
<NativeCameraView
{...props}
cameraId={device.id}
ref={this.ref}
torch={torch}
onViewReady={this.onViewReady}
onInitialized={this.onInitialized}
onCodeScanned={this.onCodeScanned}
onStarted={this.onStarted}
onStopped={this.onStopped}
onError={this.onError}
codeScannerOptions={codeScanner}
enableFrameProcessor={frameProcessor != null}
enableBufferCompression={props.enableBufferCompression ?? shouldEnableBufferCompression}
pixelFormat={pixelFormat}
/>
)
}
}
//#endregion
// requireNativeComponent automatically resolves 'CameraView' to 'CameraViewManager'
const NativeCameraView = requireNativeComponent<NativeCameraViewProps>(
'CameraView',
// @ts-expect-error because the type declarations are kinda wrong, no?
Camera,
)
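// Usage sketch for the new chunked-recording event (illustrative, not part of
// this file): assuming `onVideoChunkReady` is forwarded to the native view via
// the props spread above, a consumer could collect chunk files like this:
//
//   <Camera
//     device={device}
//     isActive={true}
//     video={true}
//     onVideoChunkReady={(event) => {
//       const { filepath, index } = event.nativeEvent
//       console.log(`video chunk #${index} written to ${filepath}`)
//     }}
//   />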