11 Commits

Author SHA1 Message Date
Loewy
1f035ce557 replace example code with a minified camera screen/record button 2024-01-31 20:48:31 -08:00
fb42545890 some camera orientation stuff, maybe toss? 2024-01-31 16:50:36 -07:00
e82f068b21 Add isLandscape to orientation 2024-01-31 16:50:36 -07:00
08f37070a4 Simplify ChunkedRecorder 2024-01-31 16:50:36 -07:00
d95057fa47 Initial chunked recording implementation 2024-01-31 16:50:34 -07:00
999e789eee Get flake working 2024-01-31 16:44:01 -07:00
Marc Rousavy
5f339c60c6 chore: Update Podfile.lock 2024-01-31 21:09:42 +01:00
Marc Rousavy
e399df5d1b chore: Fix CI cache directory (#2492) 2024-01-31 20:48:10 +01:00
Marc Rousavy
ea568855a7 chore: Fix GitHub Action CocoaPods cache misses (#2491)
* chore: Fix GitHub Action CocoaPods cache misses

* Only use Pods directory
2024-01-31 20:48:05 +01:00
Marc Rousavy
ae75e22fc0 fix: Fix crash in toArrayBuffer() by properly acquiring a reference on AHardwareBuffer* (#2490)
* fix: Fix crash in `toArrayBuffer()` by properly acquiring a reference on `AHardwareBuffer*`

* Format

* Update Podfile.lock
2024-01-31 20:32:02 +01:00
Marc Rousavy
f896831d4a fix: Properly check HardwareBuffer usage flags before setting them (#2488)
* fix: Properly check `HardwareBuffer` usage flags before setting them

* fix: Use GPU flag if pixel format is NATIVE

* Update VideoPipeline.kt

* Add some logs

* fix: Properly convert ImageFormat to HardwareBufferFormat

* Update Podfile.lock

* fix: Add a safe `getHardwareBufferFormat` method

* Format
2024-01-31 20:31:56 +01:00
34 changed files with 924 additions and 1486 deletions

View File

@@ -39,7 +39,7 @@ jobs:
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Restore node_modules from cache
uses: actions/cache@v4
id: yarn-cache
@@ -83,7 +83,7 @@ jobs:
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Restore node_modules from cache
uses: actions/cache@v4
id: yarn-cache

View File

@@ -30,7 +30,7 @@ jobs:
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Restore node_modules from cache
uses: actions/cache@v4
id: yarn-cache
@@ -54,12 +54,9 @@ jobs:
working-directory: package/example/ios
- name: Restore Pods cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: |
package/example/ios/Pods
~/Library/Caches/CocoaPods
~/.cocoapods
path: package/example/ios/Pods
key: ${{ runner.os }}-pods-${{ hashFiles('**/Podfile.lock') }}
restore-keys: |
${{ runner.os }}-pods-
@@ -90,7 +87,7 @@ jobs:
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Restore node_modules from cache
uses: actions/cache@v4
id: yarn-cache
@@ -116,12 +113,9 @@ jobs:
working-directory: package/example/ios
- name: Restore Pods cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: |
package/example/ios/Pods
~/Library/Caches/CocoaPods
~/.cocoapods
path: package/example/ios/Pods
key: ${{ runner.os }}-pods-${{ hashFiles('**/Podfile.lock') }}
restore-keys: |
${{ runner.os }}-pods-

View File

@@ -36,39 +36,39 @@ jobs:
run:
working-directory: ./package
steps:
- uses: actions/checkout@v4
- name: Install reviewdog
uses: reviewdog/action-setup@v1
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Restore node_modules from cache
uses: actions/cache@v4
id: yarn-cache
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Install node_modules
run: yarn install --frozen-lockfile
- name: Install node_modules (example/)
run: yarn install --frozen-lockfile --cwd example
- name: Run TypeScript # Reviewdog tsc errorformat: %f:%l:%c - error TS%n: %m
run: |
yarn typescript | reviewdog -name="tsc" -efm="%f(%l,%c): error TS%n: %m" -reporter="github-pr-review" -filter-mode="nofilter" -fail-on-error -tee
env:
REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Run TypeScript in example/ # Reviewdog tsc errorformat: %f:%l:%c - error TS%n: %m
run: |
cd example && yarn typescript | reviewdog -name="tsc" -efm="%f(%l,%c): error TS%n: %m" -reporter="github-pr-review" -filter-mode="nofilter" -fail-on-error -tee && cd ..
env:
REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
lint:
name: Lint JS (eslint, prettier)
@@ -77,36 +77,36 @@ jobs:
run:
working-directory: ./package
steps:
- uses: actions/checkout@v4
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "::set-output name=dir::$(yarn cache dir)"
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Restore node_modules from cache
uses: actions/cache@v4
id: yarn-cache
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Install node_modules
run: yarn install --frozen-lockfile
- name: Install node_modules (example/)
run: yarn install --frozen-lockfile --cwd example
- name: Run ESLint
run: yarn lint -f @jamesacarr/github-actions
- name: Run ESLint with auto-fix
run: yarn lint --fix
- name: Run ESLint in example/
run: cd example && yarn lint -f @jamesacarr/github-actions && cd ..
- name: Run ESLint in example/ with auto-fix
run: cd example && yarn lint --fix && cd ..
- name: Verify no files have changed after auto-fix
run: git diff --exit-code HEAD

package/.envrc Normal file
View File

@@ -0,0 +1,5 @@
use flake . --impure
if [ -f .envrc.local ]; then
source .envrc.local
fi

package/.gitignore vendored
View File

@@ -67,3 +67,6 @@ package-lock.json
.cxx/
example/ios/vendor
#.direnv
.direnv

View File

@@ -15,6 +15,8 @@
#include <android/hardware_buffer.h>
#include <android/hardware_buffer_jni.h>
#include "FinalAction.h"
namespace vision {
using namespace facebook;
@@ -92,11 +94,13 @@ jsi::Value FrameHostObject::get(jsi::Runtime& runtime, const jsi::PropNameID& pr
jsi::HostFunctionType toArrayBuffer = JSI_FUNC {
#if __ANDROID_API__ >= 26
AHardwareBuffer* hardwareBuffer = this->frame->getHardwareBuffer();
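// Acquire a reference so the AHardwareBuffer stays alive while we read from it;
// the matching release is deferred to the end of this call via a scope guard.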
AHardwareBuffer_acquire(hardwareBuffer);
finally([&]() { AHardwareBuffer_release(hardwareBuffer); });
AHardwareBuffer_Desc bufferDescription;
AHardwareBuffer_describe(hardwareBuffer, &bufferDescription);
__android_log_print(ANDROID_LOG_INFO, "Frame", "Buffer %i x %i @ %i", bufferDescription.width, bufferDescription.height,
bufferDescription.stride);
__android_log_print(ANDROID_LOG_INFO, "Frame", "Converting %i x %i @ %i HardwareBuffer...", bufferDescription.width,
bufferDescription.height, bufferDescription.stride);
size_t size = bufferDescription.height * bufferDescription.stride;
static constexpr auto ARRAYBUFFER_CACHE_PROP_NAME = "__frameArrayBufferCache";
@@ -118,16 +122,21 @@ jsi::Value FrameHostObject::get(jsi::Runtime& runtime, const jsi::PropNameID& pr
// Get CPU access to the HardwareBuffer (&buffer is a virtual temporary address)
void* buffer;
AHardwareBuffer_lock(hardwareBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_MASK, -1, nullptr, &buffer);
int result = AHardwareBuffer_lock(hardwareBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_MASK, -1, nullptr, &buffer);
if (result != 0) {
throw jsi::JSError(runtime, "Failed to lock HardwareBuffer for reading!");
}
finally([&]() {
int result = AHardwareBuffer_unlock(hardwareBuffer, nullptr);
if (result != 0) {
throw jsi::JSError(runtime, "Failed to lock HardwareBuffer for reading!");
}
});
// directly write to C++ JSI ArrayBuffer
auto destinationBuffer = arrayBuffer.data(runtime);
memcpy(destinationBuffer, buffer, sizeof(uint8_t) * size);
// Release HardwareBuffer again
AHardwareBuffer_unlock(hardwareBuffer, nullptr);
AHardwareBuffer_release(hardwareBuffer);
return arrayBuffer;
#else
throw jsi::JSError(runtime, "Frame.toArrayBuffer() is only available if minSdkVersion is set to 26 or higher!");

View File

@@ -0,0 +1,166 @@
package com.mrousavy.camera.core
import android.media.MediaCodec
import android.media.MediaCodec.BufferInfo
import android.media.MediaCodecInfo
import android.media.MediaFormat
import android.media.MediaMuxer
import android.util.Log
import android.util.Size
import android.view.Surface
import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.RecordVideoOptions
import java.io.File
import java.nio.ByteBuffer
class ChunkedRecordingManager(private val encoder: MediaCodec, private val outputDirectory: File, private val orientationHint: Int, private val iFrameInterval: Int) :
MediaCodec.Callback() {
companion object {
private const val TAG = "ChunkedRecorder"
fun fromParams(
size: Size,
enableAudio: Boolean,
fps: Int? = null,
cameraOrientation: Orientation,
bitRate: Int,
options: RecordVideoOptions,
outputDirectory: File,
iFrameInterval: Int = 3
): ChunkedRecordingManager {
val mimeType = options.videoCodec.toMimeType()
val orientationDegrees = cameraOrientation.toDegrees()
val (width, height) = if (cameraOrientation.isLandscape()) {
size.height to size.width
} else {
size.width to size.height
}
val format = MediaFormat.createVideoFormat(mimeType, width, height)
val codec = MediaCodec.createEncoderByType(mimeType)
// Set some properties. Failing to specify some of these can cause the MediaCodec
// configure() call to throw an unhelpful exception.
format.setInteger(
MediaFormat.KEY_COLOR_FORMAT,
MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface
)
fps?.apply {
format.setInteger(MediaFormat.KEY_FRAME_RATE, this)
}
// TODO: Pull this out into configuration
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, iFrameInterval)
format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate)
Log.i(TAG, "Video Format: $format, orientation $cameraOrientation")
// Create a MediaCodec encoder, and configure it with our format. Get a Surface
// we can use for input and wrap it with a class that handles the EGL work.
codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE)
return ChunkedRecordingManager(codec, outputDirectory, 0, iFrameInterval)
}
}
// In flight details
private var currentFrameNumber: Int = 0
private var chunkIndex = -1
private var encodedFormat: MediaFormat? = null
private var recording = false
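// Target chunk length in microseconds. Chunks can only be cut at keyframes, so
// the encoder's I-frame interval (in seconds) effectively sets chunk granularity.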
private val targetDurationUs = iFrameInterval * 1000000
val surface: Surface = encoder.createInputSurface()
init {
if (!this.outputDirectory.exists()) {
this.outputDirectory.mkdirs()
}
encoder.setCallback(this)
}
// Muxer specific
private class MuxerContext(val muxer: MediaMuxer, startTimeUs: Long, encodedFormat: MediaFormat) {
val videoTrack: Int = muxer.addTrack(encodedFormat)
val startTimeUs: Long = startTimeUs
init {
muxer.start()
}
fun finish() {
muxer.stop()
muxer.release()
}
}
private var muxerContext: MuxerContext? = null
private fun createNextMuxer(bufferInfo: BufferInfo) {
muxerContext?.finish()
chunkIndex++
val newFileName = "$chunkIndex.mp4"
val newOutputFile = File(this.outputDirectory, newFileName)
Log.i(TAG, "Creating new muxer for file: $newFileName")
val muxer = MediaMuxer(
newOutputFile.absolutePath,
MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4
)
muxer.setOrientationHint(orientationHint)
muxerContext = MuxerContext(
muxer, bufferInfo.presentationTimeUs, this.encodedFormat!!
)
}
private fun atKeyframe(bufferInfo: BufferInfo): Boolean {
return (bufferInfo.flags and MediaCodec.BUFFER_FLAG_KEY_FRAME) != 0
}
private fun chunkLengthUs(bufferInfo: BufferInfo): Long {
return bufferInfo.presentationTimeUs - muxerContext!!.startTimeUs
}
fun start() {
encoder.start()
recording = true
}
fun finish() {
synchronized(this) {
muxerContext?.finish()
recording = false
muxerContext = null
encoder.stop()
}
}
// MediaCodec.Callback methods
override fun onInputBufferAvailable(codec: MediaCodec, index: Int) {
}
override fun onOutputBufferAvailable(codec: MediaCodec, index: Int, bufferInfo: MediaCodec.BufferInfo) {
synchronized(this) {
if (!recording) {
return
}
val encodedData: ByteBuffer = encoder.getOutputBuffer(index)
?: throw RuntimeException("getOutputBuffer was null")
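// Start a new chunk when no muxer exists yet, or once the current chunk has
// reached its target length and this buffer begins a keyframe, since each
// chunk must start on a keyframe to be independently decodable.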
if (muxerContext == null || (atKeyframe(bufferInfo) && chunkLengthUs(bufferInfo) >= targetDurationUs)) {
this.createNextMuxer(bufferInfo)
}
muxerContext!!.muxer.writeSampleData(muxerContext!!.videoTrack, encodedData, bufferInfo)
encoder.releaseOutputBuffer(index, false)
}
}
override fun onError(codec: MediaCodec, e: MediaCodec.CodecException) {
// Implement error handling
Log.e(TAG, "Codec error: ${e.message}")
}
override fun onOutputFormatChanged(codec: MediaCodec, format: MediaFormat) {
encodedFormat = format
}
}
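For orientation, a minimal sketch of how this manager is driven; the Size, Orientation, and bit-rate values and the options/context variables are illustrative assumptions, and the real wiring lives in RecordingSession below:

val recorder = ChunkedRecordingManager.fromParams(
size = Size(1920, 1080),
enableAudio = false,
fps = 30,
cameraOrientation = Orientation.PORTRAIT,
bitRate = 8_000_000,
options = options, // RecordVideoOptions passed down from the JS layer (assumed in scope)
outputDirectory = File(context.filesDir, "chunks")
)
// Route camera output into recorder.surface, then:
recorder.start()
// ...MediaCodec delivers encoded buffers asynchronously; chunks appear as 0.mp4, 1.mp4, ...
recorder.finish()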

View File

@@ -1,9 +1,6 @@
package com.mrousavy.camera.core
import android.content.Context
import android.media.MediaCodec
import android.media.MediaRecorder
import android.os.Build
import android.util.Log
import android.util.Size
import android.view.Surface
@@ -13,7 +10,10 @@ import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.RecordVideoOptions
import com.mrousavy.camera.utils.FileUtils
import java.io.File
import android.os.Environment
import java.text.SimpleDateFormat
import java.util.Locale
import java.util.Date
class RecordingSession(
context: Context,
val cameraId: String,
@@ -21,7 +21,7 @@ class RecordingSession(
private val enableAudio: Boolean,
private val fps: Int? = null,
private val hdr: Boolean = false,
private val orientation: Orientation,
private val cameraOrientation: Orientation,
private val options: RecordVideoOptions,
private val callback: (video: Video) -> Unit,
private val onError: (error: CameraError) -> Unit
@@ -36,69 +36,34 @@ class RecordingSession(
data class Video(val path: String, val durationMs: Long, val size: Size)
private val bitRate = getBitRate()
private val recorder: MediaRecorder
private val outputFile: File
private var startTime: Long? = null
val surface: Surface = MediaCodec.createPersistentInputSurface()
// TODO: Implement HDR
init {
outputFile = FileUtils.createTempFile(context, options.fileType.toExtension())
Log.i(TAG, "Creating RecordingSession for ${outputFile.absolutePath}")
recorder = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) MediaRecorder(context) else MediaRecorder()
if (enableAudio) recorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER)
recorder.setVideoSource(MediaRecorder.VideoSource.SURFACE)
recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4)
recorder.setOutputFile(outputFile.absolutePath)
recorder.setVideoEncodingBitRate(bitRate)
recorder.setVideoSize(size.height, size.width)
recorder.setMaxFileSize(getMaxFileSize())
if (fps != null) recorder.setVideoFrameRate(fps)
Log.i(TAG, "Using ${options.videoCodec} Video Codec at ${bitRate / 1_000_000.0} Mbps..")
recorder.setVideoEncoder(options.videoCodec.toVideoEncoder())
if (enableAudio) {
Log.i(TAG, "Adding Audio Channel..")
recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC)
recorder.setAudioEncodingBitRate(AUDIO_BIT_RATE)
recorder.setAudioSamplingRate(AUDIO_SAMPLING_RATE)
recorder.setAudioChannels(AUDIO_CHANNELS)
}
recorder.setInputSurface(surface)
// recorder.setOrientationHint(orientation.toDegrees())
recorder.setOnErrorListener { _, what, extra ->
Log.e(TAG, "MediaRecorder Error: $what ($extra)")
stop()
val name = when (what) {
MediaRecorder.MEDIA_RECORDER_ERROR_UNKNOWN -> "unknown"
MediaRecorder.MEDIA_ERROR_SERVER_DIED -> "server-died"
else -> "unknown"
}
onError(RecorderError(name, extra))
}
recorder.setOnInfoListener { _, what, extra ->
Log.i(TAG, "MediaRecorder Info: $what ($extra)")
if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED) {
onError(InsufficientStorageError())
}
}
Log.i(TAG, "Created $this!")
private val outputPath = run {
val videoDir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_MOVIES)
val sdf = SimpleDateFormat("yyyy_MM_dd_HH_mm_ss_SSS", Locale.US)
val videoFileName = "VID_${sdf.format(Date())}"
File(videoDir!!, videoFileName)
}
private val bitRate = getBitRate()
private val recorder = ChunkedRecordingManager.fromParams(
size,
enableAudio,
fps,
cameraOrientation,
bitRate,
options,
outputPath
)
private var startTime: Long? = null
val surface: Surface
get() {
return recorder.surface
}
fun start() {
synchronized(this) {
Log.i(TAG, "Starting RecordingSession..")
recorder.prepare()
recorder.start()
startTime = System.currentTimeMillis()
recorder.start()
}
}
@@ -106,29 +71,29 @@ class RecordingSession(
synchronized(this) {
Log.i(TAG, "Stopping RecordingSession..")
try {
recorder.stop()
recorder.release()
recorder.finish()
} catch (e: Error) {
Log.e(TAG, "Failed to stop MediaRecorder!", e)
}
val stopTime = System.currentTimeMillis()
val durationMs = stopTime - (startTime ?: stopTime)
callback(Video(outputFile.absolutePath, durationMs, size))
Log.i(TAG, "Finished recording video at $outputPath")
callback(Video(outputPath.absolutePath, durationMs, size))
}
}
fun pause() {
synchronized(this) {
Log.i(TAG, "Pausing Recording Session..")
recorder.pause()
// TODO: Implement pausing
}
}
fun resume() {
synchronized(this) {
Log.i(TAG, "Resuming Recording Session..")
recorder.resume()
// TODO: Implement resuming
}
}
@@ -159,6 +124,9 @@ class RecordingSession(
override fun toString(): String {
val audio = if (enableAudio) "with audio" else "without audio"
return "${size.width} x ${size.height} @ $fps FPS ${options.videoCodec} ${options.fileType} " +
"$orientation ${bitRate / 1_000_000.0} Mbps RecordingSession ($audio)"
"$cameraOrientation ${bitRate / 1_000_000.0} Mbps RecordingSession ($audio)"
}
fun onFrame() {
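// Invoked by VideoPipeline once per rendered frame (step 6 in VideoPipeline);
// currently a no-op hook for frame-accurate bookkeeping.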
}
}

View File

@@ -9,6 +9,7 @@ import android.os.Build
import android.util.Log
import android.view.Surface
import androidx.annotation.Keep
import androidx.annotation.RequiresApi
import com.facebook.jni.HybridData
import com.facebook.proguard.annotations.DoNotStrip
import com.mrousavy.camera.frameprocessor.Frame
@@ -31,7 +32,7 @@ class VideoPipeline(
val height: Int,
val format: PixelFormat = PixelFormat.NATIVE,
private val isMirrored: Boolean = false,
enableFrameProcessor: Boolean = false,
private val enableFrameProcessor: Boolean = false,
private val callback: CameraSession.Callback
) : SurfaceTexture.OnFrameAvailableListener,
Closeable {
@@ -80,10 +81,9 @@ class VideoPipeline(
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
Log.i(TAG, "Using API 29 for GPU ImageReader...")
// If we are in PRIVATE, we just pass it to the GPU as efficiently as possible - so use GPU flag.
// If we are in YUV/RGB/..., we probably want to access Frame data - so use CPU flag.
val usage = if (format == ImageFormat.PRIVATE) HardwareBuffer.USAGE_GPU_SAMPLED_IMAGE else HardwareBuffer.USAGE_CPU_READ_OFTEN
imageReader = ImageReader.newInstance(width, height, format, MAX_IMAGES, usage)
val usageFlags = getRecommendedHardwareBufferFlags()
Log.i(TAG, "Using ImageReader flags: $usageFlags")
imageReader = ImageReader.newInstance(width, height, format, MAX_IMAGES, usageFlags)
imageWriter = ImageWriter.newInstance(glSurface, MAX_IMAGES, format)
} else {
Log.i(TAG, "Using legacy API for CPU ImageReader...")
@@ -91,7 +91,7 @@ class VideoPipeline(
imageWriter = ImageWriter.newInstance(glSurface, MAX_IMAGES)
}
imageReader!!.setOnImageAvailableListener({ reader ->
Log.i(TAG, "ImageReader::onImageAvailable!")
// Log.i(TAG, "ImageReader::onImageAvailable!")
val image = reader.acquireNextImage() ?: return@setOnImageAvailableListener
// TODO: Get correct orientation and isMirrored
@@ -103,7 +103,7 @@ class VideoPipeline(
if (hasOutputs) {
// If we have outputs (e.g. a RecordingSession), pass the frame along to the OpenGL pipeline
imageWriter!!.queueInputImage(image)
imageWriter?.queueInputImage(image)
}
} catch (e: Throwable) {
Log.e(TAG, "FrameProcessor/ImageReader pipeline threw an error!", e)
@@ -152,6 +152,9 @@ class VideoPipeline(
// 5. Draw it with applied rotation/mirroring
onFrame(transformMatrix)
// 6. Notify the recording session.
recordingSession?.onFrame()
}
}
@@ -182,6 +185,52 @@ class VideoPipeline(
}
}
/**
* Get the recommended HardwareBuffer flags for creating ImageReader instances with.
*
* Tries to use [HardwareBuffer.USAGE_GPU_SAMPLED_IMAGE] if possible, [HardwareBuffer.USAGE_CPU_READ_OFTEN]
* or a combination of both flags if CPU access is needed ([enableFrameProcessor]), and 0 otherwise.
*/
@RequiresApi(Build.VERSION_CODES.Q)
@Suppress("LiftReturnOrAssignment")
private fun getRecommendedHardwareBufferFlags(): Long {
val cpuFlag = HardwareBuffer.USAGE_CPU_READ_OFTEN
val gpuFlag = HardwareBuffer.USAGE_GPU_SAMPLED_IMAGE
val bothFlags = gpuFlag or cpuFlag
if (format == PixelFormat.NATIVE) {
// We don't need CPU access, so we can use GPU optimized buffers
if (supportsHardwareBufferFlags(gpuFlag)) {
// We support GPU Buffers directly - so use the GPU flag.
Log.i(TAG, "GPU HardwareBuffers are supported!")
return gpuFlag
} else {
// no flags are supported - fall back to default
return 0
}
} else {
// We are using YUV or RGB formats, so we need CPU access on the Frame
if (supportsHardwareBufferFlags(bothFlags)) {
// We support both CPU and GPU flags!
Log.i(TAG, "GPU + CPU HardwareBuffers are supported!")
return bothFlags
} else if (supportsHardwareBufferFlags(cpuFlag)) {
// We only support a CPU read flag, that's fine
Log.i(TAG, "CPU HardwareBuffers are supported!")
return cpuFlag
} else {
// no flags are supported - fall back to default
return 0
}
}
}
@RequiresApi(Build.VERSION_CODES.Q)
private fun supportsHardwareBufferFlags(flags: Long): Boolean {
val hardwareBufferFormat = format.toHardwareBufferFormat()
return HardwareBuffer.isSupported(width, height, hardwareBufferFormat, 1, flags)
}
private external fun getInputTextureId(): Int
private external fun onBeforeFrame()
private external fun onFrame(transformMatrix: FloatArray)

View File

@@ -8,6 +8,14 @@ enum class Orientation(override val unionValue: String) : JSUnionValue {
PORTRAIT_UPSIDE_DOWN("portrait-upside-down"),
LANDSCAPE_LEFT("landscape-left");
fun isLandscape(): Boolean =
when (this) {
PORTRAIT -> false
PORTRAIT_UPSIDE_DOWN -> false
LANDSCAPE_LEFT -> true
LANDSCAPE_RIGHT -> true
}
fun toDegrees(): Int =
when (this) {
PORTRAIT -> 0

View File

@@ -4,6 +4,7 @@ import android.graphics.ImageFormat
import android.util.Log
import com.mrousavy.camera.core.InvalidTypeScriptUnionError
import com.mrousavy.camera.core.PixelFormatNotSupportedError
import com.mrousavy.camera.utils.HardwareBufferUtils
import com.mrousavy.camera.utils.ImageFormatUtils
enum class PixelFormat(override val unionValue: String) : JSUnionValue {
@@ -19,6 +20,11 @@ enum class PixelFormat(override val unionValue: String) : JSUnionValue {
else -> throw PixelFormatNotSupportedError(this.unionValue)
}
fun toHardwareBufferFormat(): Int {
val imageFormat = toImageFormat()
return HardwareBufferUtils.getHardwareBufferFormat(imageFormat)
}
companion object : JSUnionValue.Companion<PixelFormat> {
private const val TAG = "PixelFormat"
fun fromImageFormat(imageFormat: Int): PixelFormat =

View File

@@ -1,5 +1,6 @@
package com.mrousavy.camera.types
import android.media.MediaFormat
import android.media.MediaRecorder
enum class VideoCodec(override val unionValue: String) : JSUnionValue {
@@ -12,6 +13,12 @@ enum class VideoCodec(override val unionValue: String) : JSUnionValue {
H265 -> MediaRecorder.VideoEncoder.HEVC
}
fun toMimeType(): String =
when (this) {
H264 -> MediaFormat.MIMETYPE_VIDEO_AVC
H265 -> MediaFormat.MIMETYPE_VIDEO_HEVC
}
companion object : JSUnionValue.Companion<VideoCodec> {
override fun fromUnionValue(unionValue: String?): VideoCodec =
when (unionValue) {

View File

@@ -0,0 +1,36 @@
package com.mrousavy.camera.utils
import android.graphics.ImageFormat
import android.hardware.HardwareBuffer
import android.media.ImageReader
import android.os.Build
class HardwareBufferUtils {
companion object {
fun getHardwareBufferFormat(imageFormat: Int): Int {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
// Dynamically create an ImageReader with the target ImageFormat, and then
// get its HardwareBuffer format to see what it uses underneath.
val imageReader = ImageReader.newInstance(1, 1, imageFormat, 1, HardwareBuffer.USAGE_GPU_SAMPLED_IMAGE)
val format = imageReader.hardwareBufferFormat
imageReader.close()
return format
}
if (imageFormat == ImageFormat.PRIVATE) {
// PRIVATE formats are opaque, their actual equivalent HardwareBuffer format is unknown.
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
// We can assume that YUV 4:2:0 or RGB is used.
return HardwareBuffer.YCBCR_420_888
} else {
// Maybe assume we are on RGB if we're not on API R or above...
return HardwareBuffer.RGB_888
}
}
// According to PublicFormat.cpp in Android's codebase, the formats map 1:1 anyways..
// https://cs.android.com/android/platform/superproject/main/+/main:frameworks/native/libs/ui/PublicFormat.cpp
return imageFormat
}
}
}

package/cpp/FinalAction.h Normal file
View File

@@ -0,0 +1,34 @@
//
// FinalAction.h
// VisionCamera
//
// Created by Marc Rousavy on 17.01.24.
// Copyright © 2024 mrousavy. All rights reserved.
//
#pragma once
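// Scope guard: runs the supplied cleanup when it goes out of scope, unless
// disable() was called first. Bind the result of finally() to a local variable;
// an unnamed temporary would run its cleanup at the end of the statement, e.g.
//   auto release = finally([&] { AHardwareBuffer_release(buffer); });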
namespace vision {
template <typename F> struct FinalAction {
public:
FinalAction(F f) : clean_{f} {}
~FinalAction() {
if (enabled_)
clean_();
}
void disable() {
enabled_ = false;
};
private:
F clean_;
bool enabled_ = true;
};
} // namespace vision
template <typename F> vision::FinalAction<F> finally(F f) {
return vision::FinalAction<F>(std::move(f));
}

View File

@@ -10,7 +10,7 @@
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
# Default value: -Xmx10248m -XX:MaxPermSize=256m
org.gradle.jvmargs=-Xms512M -Xmx4g -XX:MaxPermSize=1024m -XX:MaxMetaspaceSize=1g -Dkotlin.daemon.jvm.options="-Xmx1g"
org.gradle.jvmargs=-Xms512M -Xmx4g -XX:MaxMetaspaceSize=1g -Dkotlin.daemon.jvm.options="-Xmx1g"
org.gradle.parallel=true
org.gradle.daemon=true
org.gradle.configureondemand=true
@@ -43,3 +43,5 @@ hermesEnabled=true
#VisionCamera_disableFrameProcessors=true
# Can be set to true to include the full 2.4 MB MLKit dependency
VisionCamera_enableCodeScanner=true
android.aapt2FromMavenOverride=/nix/store/6nrdbhdcmrig3vr80sc7qf9lna5cs1gb-android-sdk-env/share/android-sdk/build-tools/33.0.0/aapt2

View File

@@ -1,4 +1,4 @@
{
"name": "VisionCameraExample",
"displayName": "VisionCamera Example"
"displayName": "Railbird VisionCamera"
}

View File

@@ -27,9 +27,9 @@ PODS:
- libwebp/sharpyuv (1.3.2)
- libwebp/webp (1.3.2):
- libwebp/sharpyuv
- MMKV (1.3.2):
- MMKVCore (~> 1.3.2)
- MMKVCore (1.3.2)
- MMKV (1.3.3):
- MMKVCore (~> 1.3.3)
- MMKVCore (1.3.3)
- RCT-Folly (2021.07.22.00):
- boost
- DoubleConversion
@@ -337,7 +337,7 @@ PODS:
- react-native-mmkv (2.11.0):
- MMKV (>= 1.2.13)
- React-Core
- react-native-safe-area-context (4.8.0):
- react-native-safe-area-context (4.8.2):
- React-Core
- react-native-video (5.2.1):
- React-Core
@@ -675,8 +675,8 @@ SPEC CHECKSUMS:
hermes-engine: 9180d43df05c1ed658a87cc733dc3044cf90c00a
libevent: 4049cae6c81cdb3654a443be001fb9bdceff7913
libwebp: 1786c9f4ff8a279e4dac1e8f385004d5fc253009
MMKV: f21593c0af4b3f2a0ceb8f820f28bb639ea22bb7
MMKVCore: 31b4cb83f8266467eef20a35b6d78e409a11060d
MMKV: f902fb6719da13c2ab0965233d8963a59416f911
MMKVCore: d26e4d3edd5cb8588c2569222cbd8be4231374e9
RCT-Folly: 424b8c9a7a0b9ab2886ffe9c3b041ef628fd4fb1
RCTRequired: 83bca1c184feb4d2e51c72c8369b83d641443f95
RCTTypeSafety: 13c4a87a16d7db6cd66006ce9759f073402ef85b
@@ -695,7 +695,7 @@ SPEC CHECKSUMS:
react-native-blur: cfdad7b3c01d725ab62a8a729f42ea463998afa2
react-native-cameraroll: 4701ae7c3dbcd3f5e9e150ca17f250a276154b35
react-native-mmkv: e97c0c79403fb94577e5d902ab1ebd42b0715b43
react-native-safe-area-context: d1c8161a1e9560f7066e8926a7d825eb57c5dab5
react-native-safe-area-context: 0ee144a6170530ccc37a0fd9388e28d06f516a89
react-native-video: c26780b224543c62d5e1b2a7244a5cd1b50e8253
react-native-worklets-core: a894d572639fcf37c6d284cc799882d25d00c93d
React-NativeModulesApple: b6868ee904013a7923128892ee4a032498a1024a
@@ -729,4 +729,4 @@ SPEC CHECKSUMS:
PODFILE CHECKSUM: 27f53791141a3303d814e09b55770336416ff4eb
COCOAPODS: 1.14.3
COCOAPODS: 1.11.3

View File

@@ -1,55 +1,6 @@
import { NavigationContainer } from '@react-navigation/native'
import React from 'react'
import { createNativeStackNavigator } from '@react-navigation/native-stack'
import { PermissionsPage } from './PermissionsPage'
import { MediaPage } from './MediaPage'
import { CameraPage } from './CameraPage'
import { CodeScannerPage } from './CodeScannerPage'
import type { Routes } from './Routes'
import { Camera } from 'react-native-vision-camera'
import { GestureHandlerRootView } from 'react-native-gesture-handler'
import { StyleSheet } from 'react-native'
import { DevicesPage } from './DevicesPage'
const Stack = createNativeStackNavigator<Routes>()
import CameraScreen from './camera'
export function App(): React.ReactElement | null {
const cameraPermission = Camera.getCameraPermissionStatus()
const microphonePermission = Camera.getMicrophonePermissionStatus()
console.log(`Re-rendering Navigator. Camera: ${cameraPermission} | Microphone: ${microphonePermission}`)
const showPermissionsPage = cameraPermission !== 'granted' || microphonePermission === 'not-determined'
return (
<NavigationContainer>
<GestureHandlerRootView style={styles.root}>
<Stack.Navigator
screenOptions={{
headerShown: false,
statusBarStyle: 'dark',
animationTypeForReplace: 'push',
}}
initialRouteName={showPermissionsPage ? 'PermissionsPage' : 'CameraPage'}>
<Stack.Screen name="PermissionsPage" component={PermissionsPage} />
<Stack.Screen name="CameraPage" component={CameraPage} />
<Stack.Screen name="CodeScannerPage" component={CodeScannerPage} />
<Stack.Screen
name="MediaPage"
component={MediaPage}
options={{
animation: 'none',
presentation: 'transparentModal',
}}
/>
<Stack.Screen name="Devices" component={DevicesPage} />
</Stack.Navigator>
</GestureHandlerRootView>
</NavigationContainer>
)
return <CameraScreen />
}
const styles = StyleSheet.create({
root: {
flex: 1,
},
})

View File

@@ -1,280 +0,0 @@
import * as React from 'react'
import { useRef, useState, useCallback, useMemo } from 'react'
import { StyleSheet, Text, View } from 'react-native'
import { PinchGestureHandler, PinchGestureHandlerGestureEvent, TapGestureHandler } from 'react-native-gesture-handler'
import { CameraRuntimeError, PhotoFile, useCameraDevice, useCameraFormat, useFrameProcessor, VideoFile } from 'react-native-vision-camera'
import { Camera } from 'react-native-vision-camera'
import { CONTENT_SPACING, CONTROL_BUTTON_SIZE, MAX_ZOOM_FACTOR, SAFE_AREA_PADDING, SCREEN_HEIGHT, SCREEN_WIDTH } from './Constants'
import Reanimated, { Extrapolate, interpolate, useAnimatedGestureHandler, useAnimatedProps, useSharedValue } from 'react-native-reanimated'
import { useEffect } from 'react'
import { useIsForeground } from './hooks/useIsForeground'
import { StatusBarBlurBackground } from './views/StatusBarBlurBackground'
import { CaptureButton } from './views/CaptureButton'
import { PressableOpacity } from 'react-native-pressable-opacity'
import MaterialIcon from 'react-native-vector-icons/MaterialCommunityIcons'
import IonIcon from 'react-native-vector-icons/Ionicons'
import type { Routes } from './Routes'
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import { useIsFocused } from '@react-navigation/core'
import { examplePlugin } from './frame-processors/ExamplePlugin'
import { exampleKotlinSwiftPlugin } from './frame-processors/ExampleKotlinSwiftPlugin'
import { usePreferredCameraDevice } from './hooks/usePreferredCameraDevice'
const ReanimatedCamera = Reanimated.createAnimatedComponent(Camera)
Reanimated.addWhitelistedNativeProps({
zoom: true,
})
const SCALE_FULL_ZOOM = 3
type Props = NativeStackScreenProps<Routes, 'CameraPage'>
export function CameraPage({ navigation }: Props): React.ReactElement {
const camera = useRef<Camera>(null)
const [isCameraInitialized, setIsCameraInitialized] = useState(false)
const hasMicrophonePermission = useMemo(() => Camera.getMicrophonePermissionStatus() === 'granted', [])
const zoom = useSharedValue(0)
const isPressingButton = useSharedValue(false)
// check if camera page is active
const isFocussed = useIsFocused()
const isForeground = useIsForeground()
const isActive = isFocussed && isForeground
const [cameraPosition, setCameraPosition] = useState<'front' | 'back'>('back')
const [enableHdr, setEnableHdr] = useState(false)
const [flash, setFlash] = useState<'off' | 'on'>('off')
const [enableNightMode, setEnableNightMode] = useState(false)
// camera device settings
const [preferredDevice] = usePreferredCameraDevice()
let device = useCameraDevice(cameraPosition)
if (preferredDevice != null && preferredDevice.position === cameraPosition) {
// override default device with the one selected by the user in settings
device = preferredDevice
}
const [targetFps, setTargetFps] = useState(60)
const screenAspectRatio = SCREEN_HEIGHT / SCREEN_WIDTH
const format = useCameraFormat(device, [
{ fps: targetFps },
{ videoAspectRatio: screenAspectRatio },
{ videoResolution: 'max' },
{ photoAspectRatio: screenAspectRatio },
{ photoResolution: 'max' },
])
const fps = Math.min(format?.maxFps ?? 1, targetFps)
const supportsFlash = device?.hasFlash ?? false
const supportsHdr = format?.supportsPhotoHdr
const supports60Fps = useMemo(() => device?.formats.some((f) => f.maxFps >= 60), [device?.formats])
const canToggleNightMode = device?.supportsLowLightBoost ?? false
//#region Animated Zoom
// This just maps the zoom factor to a percentage value.
// so e.g. for [min, neutr., max] values [1, 2, 128] this would result in [0, 0.0081, 1]
const minZoom = device?.minZoom ?? 1
const maxZoom = Math.min(device?.maxZoom ?? 1, MAX_ZOOM_FACTOR)
const cameraAnimatedProps = useAnimatedProps(() => {
const z = Math.max(Math.min(zoom.value, maxZoom), minZoom)
return {
zoom: z,
}
}, [maxZoom, minZoom, zoom])
//#endregion
//#region Callbacks
const setIsPressingButton = useCallback(
(_isPressingButton: boolean) => {
isPressingButton.value = _isPressingButton
},
[isPressingButton],
)
// Camera callbacks
const onError = useCallback((error: CameraRuntimeError) => {
console.error(error)
}, [])
const onInitialized = useCallback(() => {
console.log('Camera initialized!')
setIsCameraInitialized(true)
}, [])
const onMediaCaptured = useCallback(
(media: PhotoFile | VideoFile, type: 'photo' | 'video') => {
console.log(`Media captured! ${JSON.stringify(media)}`)
navigation.navigate('MediaPage', {
path: media.path,
type: type,
})
},
[navigation],
)
const onFlipCameraPressed = useCallback(() => {
setCameraPosition((p) => (p === 'back' ? 'front' : 'back'))
}, [])
const onFlashPressed = useCallback(() => {
setFlash((f) => (f === 'off' ? 'on' : 'off'))
}, [])
//#endregion
//#region Tap Gesture
const onDoubleTap = useCallback(() => {
onFlipCameraPressed()
}, [onFlipCameraPressed])
//#endregion
//#region Effects
const neutralZoom = device?.neutralZoom ?? 1
useEffect(() => {
// Run everytime the neutralZoomScaled value changes. (reset zoom when device changes)
zoom.value = neutralZoom
}, [neutralZoom, zoom])
//#endregion
//#region Pinch to Zoom Gesture
// The gesture handler maps the linear pinch gesture (0 - 1) to an exponential curve since a camera's zoom
// function does not appear linear to the user. (aka zoom 0.1 -> 0.2 does not look equal in difference as 0.8 -> 0.9)
const onPinchGesture = useAnimatedGestureHandler<PinchGestureHandlerGestureEvent, { startZoom?: number }>({
onStart: (_, context) => {
context.startZoom = zoom.value
},
onActive: (event, context) => {
// we're trying to map the scale gesture to a linear zoom here
const startZoom = context.startZoom ?? 0
const scale = interpolate(event.scale, [1 - 1 / SCALE_FULL_ZOOM, 1, SCALE_FULL_ZOOM], [-1, 0, 1], Extrapolate.CLAMP)
zoom.value = interpolate(scale, [-1, 0, 1], [minZoom, startZoom, maxZoom], Extrapolate.CLAMP)
},
})
//#endregion
useEffect(() => {
const f =
format != null
? `(${format.photoWidth}x${format.photoHeight} photo / ${format.videoWidth}x${format.videoHeight}@${format.maxFps} video @ ${fps}fps)`
: undefined
console.log(`Camera: ${device?.name} | Format: ${f}`)
}, [device?.name, format, fps])
const frameProcessor = useFrameProcessor((frame) => {
'worklet'
console.log(`${frame.timestamp}: ${frame.width}x${frame.height} ${frame.pixelFormat} Frame (${frame.orientation})`)
examplePlugin(frame)
exampleKotlinSwiftPlugin(frame)
}, [])
return (
<View style={styles.container}>
{device != null && (
<PinchGestureHandler onGestureEvent={onPinchGesture} enabled={isActive}>
<Reanimated.View style={StyleSheet.absoluteFill}>
<TapGestureHandler onEnded={onDoubleTap} numberOfTaps={2}>
<ReanimatedCamera
ref={camera}
style={StyleSheet.absoluteFill}
device={device}
format={format}
fps={fps}
photoHdr={enableHdr}
videoHdr={enableHdr}
lowLightBoost={device.supportsLowLightBoost && enableNightMode}
isActive={isActive}
onInitialized={onInitialized}
onError={onError}
enableZoomGesture={false}
animatedProps={cameraAnimatedProps}
exposure={0}
enableFpsGraph={true}
orientation="portrait"
photo={true}
video={true}
audio={hasMicrophonePermission}
frameProcessor={frameProcessor}
/>
</TapGestureHandler>
</Reanimated.View>
</PinchGestureHandler>
)}
<CaptureButton
style={styles.captureButton}
camera={camera}
onMediaCaptured={onMediaCaptured}
cameraZoom={zoom}
minZoom={minZoom}
maxZoom={maxZoom}
flash={supportsFlash ? flash : 'off'}
enabled={isCameraInitialized && isActive}
setIsPressingButton={setIsPressingButton}
/>
<StatusBarBlurBackground />
<View style={styles.rightButtonRow}>
<PressableOpacity style={styles.button} onPress={onFlipCameraPressed} disabledOpacity={0.4}>
<IonIcon name="camera-reverse" color="white" size={24} />
</PressableOpacity>
{supportsFlash && (
<PressableOpacity style={styles.button} onPress={onFlashPressed} disabledOpacity={0.4}>
<IonIcon name={flash === 'on' ? 'flash' : 'flash-off'} color="white" size={24} />
</PressableOpacity>
)}
{supports60Fps && (
<PressableOpacity style={styles.button} onPress={() => setTargetFps((t) => (t === 30 ? 60 : 30))}>
<Text style={styles.text}>{`${targetFps}\nFPS`}</Text>
</PressableOpacity>
)}
{supportsHdr && (
<PressableOpacity style={styles.button} onPress={() => setEnableHdr((h) => !h)}>
<MaterialIcon name={enableHdr ? 'hdr' : 'hdr-off'} color="white" size={24} />
</PressableOpacity>
)}
{canToggleNightMode && (
<PressableOpacity style={styles.button} onPress={() => setEnableNightMode(!enableNightMode)} disabledOpacity={0.4}>
<IonIcon name={enableNightMode ? 'moon' : 'moon-outline'} color="white" size={24} />
</PressableOpacity>
)}
<PressableOpacity style={styles.button} onPress={() => navigation.navigate('Devices')}>
<IonIcon name="settings-outline" color="white" size={24} />
</PressableOpacity>
<PressableOpacity style={styles.button} onPress={() => navigation.navigate('CodeScannerPage')}>
<IonIcon name="qr-code-outline" color="white" size={24} />
</PressableOpacity>
</View>
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'black',
},
captureButton: {
position: 'absolute',
alignSelf: 'center',
bottom: SAFE_AREA_PADDING.paddingBottom,
},
button: {
marginBottom: CONTENT_SPACING,
width: CONTROL_BUTTON_SIZE,
height: CONTROL_BUTTON_SIZE,
borderRadius: CONTROL_BUTTON_SIZE / 2,
backgroundColor: 'rgba(140, 140, 140, 0.3)',
justifyContent: 'center',
alignItems: 'center',
},
rightButtonRow: {
position: 'absolute',
right: SAFE_AREA_PADDING.paddingRight,
top: SAFE_AREA_PADDING.paddingTop,
},
text: {
color: 'white',
fontSize: 11,
fontWeight: 'bold',
textAlign: 'center',
},
})

View File

@@ -1,120 +0,0 @@
import * as React from 'react'
import { useCallback, useRef, useState } from 'react'
import { Alert, AlertButton, Linking, StyleSheet, View } from 'react-native'
import { Code, useCameraDevice, useCodeScanner } from 'react-native-vision-camera'
import { Camera } from 'react-native-vision-camera'
import { CONTENT_SPACING, CONTROL_BUTTON_SIZE, SAFE_AREA_PADDING } from './Constants'
import { useIsForeground } from './hooks/useIsForeground'
import { StatusBarBlurBackground } from './views/StatusBarBlurBackground'
import { PressableOpacity } from 'react-native-pressable-opacity'
import IonIcon from 'react-native-vector-icons/Ionicons'
import type { Routes } from './Routes'
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import { useIsFocused } from '@react-navigation/core'
const showCodeAlert = (value: string, onDismissed: () => void): void => {
const buttons: AlertButton[] = [
{
text: 'Close',
style: 'cancel',
onPress: onDismissed,
},
]
if (value.startsWith('http')) {
buttons.push({
text: 'Open URL',
onPress: () => {
Linking.openURL(value)
onDismissed()
},
})
}
Alert.alert('Scanned Code', value, buttons)
}
type Props = NativeStackScreenProps<Routes, 'CodeScannerPage'>
export function CodeScannerPage({ navigation }: Props): React.ReactElement {
// 1. Use a simple default back camera
const device = useCameraDevice('back')
// 2. Only activate Camera when the app is focused and this screen is currently opened
const isFocused = useIsFocused()
const isForeground = useIsForeground()
const isActive = isFocused && isForeground
// 3. (Optional) enable a torch setting
const [torch, setTorch] = useState(false)
// 4. On code scanned, we show an alert to the user
const isShowingAlert = useRef(false)
const onCodeScanned = useCallback((codes: Code[]) => {
console.log(`Scanned ${codes.length} codes:`, codes)
const value = codes[0]?.value
if (value == null) return
if (isShowingAlert.current) return
showCodeAlert(value, () => {
isShowingAlert.current = false
})
isShowingAlert.current = true
}, [])
// 5. Initialize the Code Scanner to scan QR codes and Barcodes
const codeScanner = useCodeScanner({
codeTypes: ['qr', 'ean-13'],
onCodeScanned: onCodeScanned,
})
return (
<View style={styles.container}>
{device != null && (
<Camera
style={StyleSheet.absoluteFill}
device={device}
isActive={isActive}
codeScanner={codeScanner}
torch={torch ? 'on' : 'off'}
enableZoomGesture={true}
/>
)}
<StatusBarBlurBackground />
<View style={styles.rightButtonRow}>
<PressableOpacity style={styles.button} onPress={() => setTorch(!torch)} disabledOpacity={0.4}>
<IonIcon name={torch ? 'flash' : 'flash-off'} color="white" size={24} />
</PressableOpacity>
</View>
{/* Back Button */}
<PressableOpacity style={styles.backButton} onPress={navigation.goBack}>
<IonIcon name="chevron-back" color="white" size={35} />
</PressableOpacity>
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'black',
},
button: {
marginBottom: CONTENT_SPACING,
width: CONTROL_BUTTON_SIZE,
height: CONTROL_BUTTON_SIZE,
borderRadius: CONTROL_BUTTON_SIZE / 2,
backgroundColor: 'rgba(140, 140, 140, 0.3)',
justifyContent: 'center',
alignItems: 'center',
},
rightButtonRow: {
position: 'absolute',
right: SAFE_AREA_PADDING.paddingRight,
top: SAFE_AREA_PADDING.paddingTop,
},
backButton: {
position: 'absolute',
left: SAFE_AREA_PADDING.paddingLeft,
top: SAFE_AREA_PADDING.paddingTop,
},
})

View File

@@ -1,215 +0,0 @@
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import React, { useCallback, useMemo } from 'react'
import IonIcon from 'react-native-vector-icons/Ionicons'
import { StyleSheet, View, Text, ListRenderItemInfo, SectionList, SectionListData } from 'react-native'
import { CameraDevice, useCameraDevices } from 'react-native-vision-camera'
import { CONTENT_SPACING, SAFE_AREA_PADDING } from './Constants'
import type { Routes } from './Routes'
import { PressableOpacity } from 'react-native-pressable-opacity'
import { usePreferredCameraDevice } from './hooks/usePreferredCameraDevice'
const keyExtractor = (item: CameraDevice): string => item.id
interface SectionType {
position: CameraDevice['position'] | 'preferred'
}
type SectionData = SectionListData<CameraDevice, SectionType>
interface DeviceProps {
device: CameraDevice
onPress: () => void
}
function Device({ device, onPress }: DeviceProps): React.ReactElement {
const maxPhotoRes = useMemo(
() =>
device.formats.reduce((prev, curr) => {
if (curr.photoWidth * curr.photoHeight > prev.photoWidth * prev.photoHeight) return curr
return prev
}),
[device.formats],
)
const maxVideoRes = useMemo(
() =>
device.formats.reduce((prev, curr) => {
if (curr.videoWidth * curr.videoHeight > prev.videoWidth * prev.videoHeight) return curr
return prev
}),
[device.formats],
)
const deviceTypes = useMemo(() => device.physicalDevices.map((t) => t.replace('-camera', '')).join(' + '), [device.physicalDevices])
return (
<PressableOpacity style={styles.itemContainer} onPress={onPress}>
<View style={styles.horizontal}>
<IonIcon name="camera" size={18} color="black" />
<Text style={styles.deviceName} numberOfLines={3}>
{device.name} <Text style={styles.devicePosition}>({device.position})</Text>
</Text>
</View>
<Text style={styles.deviceTypes}>{deviceTypes}</Text>
<View style={styles.horizontal}>
<IonIcon name="camera" size={12} color="black" />
<Text style={styles.resolutionText}>
{maxPhotoRes.photoWidth}x{maxPhotoRes.photoHeight}
</Text>
</View>
<View style={styles.horizontal}>
<IonIcon name="videocam" size={12} color="black" />
<Text style={styles.resolutionText}>
{maxVideoRes.videoWidth}x{maxVideoRes.videoHeight} @ {maxVideoRes.maxFps} FPS
</Text>
</View>
<Text style={styles.deviceId} numberOfLines={2} ellipsizeMode="middle">
{device.id}
</Text>
</PressableOpacity>
)
}
type Props = NativeStackScreenProps<Routes, 'Devices'>
export function DevicesPage({ navigation }: Props): React.ReactElement {
const devices = useCameraDevices()
const [preferredDevice, setPreferredDevice] = usePreferredCameraDevice()
const sections = useMemo((): SectionData[] => {
return [
{
position: 'preferred',
data: preferredDevice != null ? [preferredDevice] : [],
},
{
position: 'back',
data: devices.filter((d) => d.position === 'back'),
},
{
position: 'front',
data: devices.filter((d) => d.position === 'front'),
},
{
position: 'external',
data: devices.filter((d) => d.position === 'external'),
},
]
}, [devices, preferredDevice])
const onDevicePressed = useCallback(
(device: CameraDevice) => {
setPreferredDevice(device)
navigation.navigate('CameraPage')
},
[navigation, setPreferredDevice],
)
const renderItem = useCallback(
({ item }: ListRenderItemInfo<CameraDevice>) => {
return <Device device={item} onPress={() => onDevicePressed(item)} />
},
[onDevicePressed],
)
const renderSectionHeader = useCallback(({ section }: { section: SectionData }) => {
if (section.data.length === 0) return null
return (
<View style={styles.sectionHeader}>
<Text style={styles.sectionHeaderText}>{section.position.toUpperCase()}</Text>
</View>
)
}, [])
return (
<View style={styles.container}>
<View style={styles.headerContainer}>
<View style={styles.horizontal}>
<PressableOpacity style={styles.backButton} onPress={navigation.goBack}>
<IonIcon name="chevron-back" size={35} color="black" />
</PressableOpacity>
<Text style={styles.header}>Camera Devices</Text>
</View>
<Text style={styles.subHeader}>
These are all detected Camera devices on your phone. This list will automatically update as you plug devices in or out.
</Text>
</View>
<SectionList
style={styles.list}
contentContainerStyle={styles.listContent}
sections={sections}
keyExtractor={keyExtractor}
renderItem={renderItem}
renderSectionHeader={renderSectionHeader}
stickySectionHeadersEnabled={false}
/>
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'white',
},
headerContainer: {
paddingTop: SAFE_AREA_PADDING.paddingTop,
paddingLeft: SAFE_AREA_PADDING.paddingLeft,
paddingRight: SAFE_AREA_PADDING.paddingRight,
},
header: {
fontSize: 38,
fontWeight: 'bold',
maxWidth: '80%',
},
subHeader: {
marginTop: 10,
fontSize: 18,
maxWidth: '80%',
},
list: {
marginTop: CONTENT_SPACING,
},
listContent: {
paddingBottom: SAFE_AREA_PADDING.paddingBottom,
},
sectionHeader: {
paddingHorizontal: CONTENT_SPACING / 2,
paddingVertical: 5,
},
sectionHeaderText: {
opacity: 0.4,
fontSize: 16,
},
itemContainer: {
paddingHorizontal: CONTENT_SPACING,
paddingVertical: 7,
},
deviceName: {
fontSize: 17,
marginLeft: 5,
flexShrink: 1,
fontWeight: 'bold',
},
devicePosition: {
opacity: 0.4,
},
deviceId: {
fontSize: 12,
opacity: 0.4,
},
deviceTypes: {
fontSize: 12,
opacity: 0.4,
},
horizontal: {
flexDirection: 'row',
alignItems: 'center',
},
backButton: {
width: 40,
height: 40,
marginTop: 7,
},
resolutionText: {
marginLeft: 5,
fontSize: 12,
},
})

View File

@@ -1,151 +0,0 @@
import React, { useCallback, useMemo, useState } from 'react'
import { StyleSheet, View, ActivityIndicator, PermissionsAndroid, Platform } from 'react-native'
import Video, { LoadError, OnLoadData } from 'react-native-video'
import { SAFE_AREA_PADDING } from './Constants'
import { useIsForeground } from './hooks/useIsForeground'
import { PressableOpacity } from 'react-native-pressable-opacity'
import IonIcon from 'react-native-vector-icons/Ionicons'
import { Alert } from 'react-native'
import { CameraRoll } from '@react-native-camera-roll/camera-roll'
import { StatusBarBlurBackground } from './views/StatusBarBlurBackground'
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import type { Routes } from './Routes'
import { useIsFocused } from '@react-navigation/core'
import FastImage, { OnLoadEvent } from 'react-native-fast-image'
const requestSavePermission = async (): Promise<boolean> => {
if (Platform.OS !== 'android') return true
const permission = PermissionsAndroid.PERMISSIONS.WRITE_EXTERNAL_STORAGE
if (permission == null) return false
let hasPermission = await PermissionsAndroid.check(permission)
if (!hasPermission) {
const permissionRequestResult = await PermissionsAndroid.request(permission)
hasPermission = permissionRequestResult === 'granted'
}
return hasPermission
}
const isVideoOnLoadEvent = (event: OnLoadData | OnLoadEvent): event is OnLoadData => 'duration' in event && 'naturalSize' in event
type Props = NativeStackScreenProps<Routes, 'MediaPage'>
export function MediaPage({ navigation, route }: Props): React.ReactElement {
const { path, type } = route.params
const [hasMediaLoaded, setHasMediaLoaded] = useState(false)
const isForeground = useIsForeground()
const isScreenFocused = useIsFocused()
const isVideoPaused = !isForeground || !isScreenFocused
const [savingState, setSavingState] = useState<'none' | 'saving' | 'saved'>('none')
const onMediaLoad = useCallback((event: OnLoadData | OnLoadEvent) => {
if (isVideoOnLoadEvent(event)) {
console.log(
`Video loaded. Size: ${event.naturalSize.width}x${event.naturalSize.height} (${event.naturalSize.orientation}, ${event.duration} seconds)`,
)
} else {
console.log(`Image loaded. Size: ${event.nativeEvent.width}x${event.nativeEvent.height}`)
}
}, [])
const onMediaLoadEnd = useCallback(() => {
console.log('media has loaded.')
setHasMediaLoaded(true)
}, [])
const onMediaLoadError = useCallback((error: LoadError) => {
console.log(`failed to load media: ${JSON.stringify(error)}`)
}, [])
const onSavePressed = useCallback(async () => {
try {
setSavingState('saving')
const hasPermission = await requestSavePermission()
if (!hasPermission) {
Alert.alert('Permission denied!', 'Vision Camera does not have permission to save the media to your camera roll.')
return
}
await CameraRoll.save(`file://${path}`, {
type: type,
})
setSavingState('saved')
} catch (e) {
const message = e instanceof Error ? e.message : JSON.stringify(e)
setSavingState('none')
Alert.alert('Failed to save!', `An unexpected error occurred while trying to save your ${type}. ${message}`)
}
}, [path, type])
const source = useMemo(() => ({ uri: `file://${path}` }), [path])
const screenStyle = useMemo(() => ({ opacity: hasMediaLoaded ? 1 : 0 }), [hasMediaLoaded])
return (
<View style={[styles.container, screenStyle]}>
{type === 'photo' && (
<FastImage source={source} style={StyleSheet.absoluteFill} resizeMode="cover" onLoadEnd={onMediaLoadEnd} onLoad={onMediaLoad} />
)}
{type === 'video' && (
<Video
source={source}
style={StyleSheet.absoluteFill}
paused={isVideoPaused}
resizeMode="cover"
posterResizeMode="cover"
allowsExternalPlayback={false}
automaticallyWaitsToMinimizeStalling={false}
disableFocus={true}
repeat={true}
useTextureView={false}
controls={false}
playWhenInactive={true}
ignoreSilentSwitch="ignore"
onReadyForDisplay={onMediaLoadEnd}
onLoad={onMediaLoad}
onError={onMediaLoadError}
/>
)}
<PressableOpacity style={styles.closeButton} onPress={navigation.goBack}>
<IonIcon name="close" size={35} color="white" style={styles.icon} />
</PressableOpacity>
<PressableOpacity style={styles.saveButton} onPress={onSavePressed} disabled={savingState !== 'none'}>
{savingState === 'none' && <IonIcon name="download" size={35} color="white" style={styles.icon} />}
{savingState === 'saved' && <IonIcon name="checkmark" size={35} color="white" style={styles.icon} />}
{savingState === 'saving' && <ActivityIndicator color="white" />}
</PressableOpacity>
<StatusBarBlurBackground />
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
backgroundColor: 'white',
},
closeButton: {
position: 'absolute',
top: SAFE_AREA_PADDING.paddingTop,
left: SAFE_AREA_PADDING.paddingLeft,
width: 40,
height: 40,
},
saveButton: {
position: 'absolute',
bottom: SAFE_AREA_PADDING.paddingBottom,
left: SAFE_AREA_PADDING.paddingLeft,
width: 40,
height: 40,
},
icon: {
textShadowColor: 'black',
textShadowOffset: {
height: 0,
width: 0,
},
textShadowRadius: 1,
},
})

View File

@@ -1,96 +0,0 @@
import type { NativeStackScreenProps } from '@react-navigation/native-stack'
import React, { useCallback, useEffect, useState } from 'react'
import { ImageRequireSource, Linking } from 'react-native'
import { StyleSheet, View, Text, Image } from 'react-native'
import { Camera, CameraPermissionStatus } from 'react-native-vision-camera'
import { CONTENT_SPACING, SAFE_AREA_PADDING } from './Constants'
import type { Routes } from './Routes'
// eslint-disable-next-line @typescript-eslint/no-var-requires
const BANNER_IMAGE = require('./img/11.png') as ImageRequireSource
type Props = NativeStackScreenProps<Routes, 'PermissionsPage'>
export function PermissionsPage({ navigation }: Props): React.ReactElement {
const [cameraPermissionStatus, setCameraPermissionStatus] = useState<CameraPermissionStatus>('not-determined')
const [microphonePermissionStatus, setMicrophonePermissionStatus] = useState<CameraPermissionStatus>('not-determined')
const requestMicrophonePermission = useCallback(async () => {
console.log('Requesting microphone permission...')
const permission = await Camera.requestMicrophonePermission()
console.log(`Microphone permission status: ${permission}`)
if (permission === 'denied') await Linking.openSettings()
setMicrophonePermissionStatus(permission)
}, [])
const requestCameraPermission = useCallback(async () => {
console.log('Requesting camera permission...')
const permission = await Camera.requestCameraPermission()
console.log(`Camera permission status: ${permission}`)
if (permission === 'denied') await Linking.openSettings()
setCameraPermissionStatus(permission)
}, [])
useEffect(() => {
if (cameraPermissionStatus === 'granted' && microphonePermissionStatus === 'granted') navigation.replace('CameraPage')
}, [cameraPermissionStatus, microphonePermissionStatus, navigation])
return (
<View style={styles.container}>
<Image source={BANNER_IMAGE} style={styles.banner} />
<Text style={styles.welcome}>Welcome to{'\n'}Vision Camera.</Text>
<View style={styles.permissionsContainer}>
{cameraPermissionStatus !== 'granted' && (
<Text style={styles.permissionText}>
Vision Camera needs <Text style={styles.bold}>Camera permission</Text>.{' '}
<Text style={styles.hyperlink} onPress={requestCameraPermission}>
Grant
</Text>
</Text>
)}
{microphonePermissionStatus !== 'granted' && (
<Text style={styles.permissionText}>
Vision Camera needs <Text style={styles.bold}>Microphone permission</Text>.{' '}
<Text style={styles.hyperlink} onPress={requestMicrophonePermission}>
Grant
</Text>
</Text>
)}
</View>
</View>
)
}
const styles = StyleSheet.create({
welcome: {
fontSize: 38,
fontWeight: 'bold',
maxWidth: '80%',
},
banner: {
position: 'absolute',
opacity: 0.4,
bottom: 0,
left: 0,
},
container: {
flex: 1,
backgroundColor: 'white',
...SAFE_AREA_PADDING,
},
permissionsContainer: {
marginTop: CONTENT_SPACING * 2,
},
permissionText: {
fontSize: 17,
},
hyperlink: {
color: '#007aff',
fontWeight: 'bold',
},
bold: {
fontWeight: 'bold',
},
})
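
For context: the imperative `Camera.requestCameraPermission()` / `Camera.requestMicrophonePermission()` calls in the file removed above have a hook-based equivalent in VisionCamera v3. A minimal sketch — the `usePermissions` name and the Settings fallback are assumptions, not part of this diff:

import { useEffect } from 'react'
import { Linking } from 'react-native'
import { useCameraPermission, useMicrophonePermission } from 'react-native-vision-camera'

export function usePermissions(): boolean {
  const camera = useCameraPermission()
  const microphone = useMicrophonePermission()

  useEffect(() => {
    // requestPermission() resolves to true if granted; fall back to the system Settings page otherwise
    const request = async (): Promise<void> => {
      if (!camera.hasPermission && !(await camera.requestPermission())) await Linking.openSettings()
      if (!microphone.hasPermission && !(await microphone.requestPermission())) await Linking.openSettings()
    }
    request()
  }, [camera, microphone])

  return camera.hasPermission && microphone.hasPermission
}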

View File

@@ -1,10 +0,0 @@
export type Routes = {
PermissionsPage: undefined
CameraPage: undefined
CodeScannerPage: undefined
MediaPage: {
path: string
type: 'video' | 'photo'
}
Devices: undefined
}

View File

@@ -0,0 +1,112 @@
import React, { useCallback, useEffect, useRef, useState } from 'react'
import { Button, StyleSheet, Text, View } from 'react-native'
import {
Camera,
useCameraPermission,
useCameraDevice,
useCameraFormat,
PhotoFile,
VideoFile,
CameraRuntimeError,
Orientation,
} from 'react-native-vision-camera'
import { RecordingButton } from './capture-button'
import { useIsForeground } from './is-foreground'
export default function CameraScreen() {
const camera = useRef<Camera>(null)
const { hasPermission, requestPermission } = useCameraPermission()
const [isCameraInitialized, setIsCameraInitialized] = useState<boolean>(false)
const isForeground: boolean = useIsForeground()
const isActive: boolean = isForeground // Should also check navigation focus — see the useIsActive sketch after this file
const onError = useCallback((error: CameraRuntimeError) => {
console.error(error)
}, [])
const onInitialized = useCallback(() => {
console.log('Camera initialized!')
setIsCameraInitialized(true)
}, [])
const onMediaCaptured = useCallback((media: PhotoFile | VideoFile) => {
console.log(`Media captured! ${JSON.stringify(media)}`)
}, [])
useEffect(() => {
if (!hasPermission) requestPermission()
// TODO: handle the case where the user denies the permission request
}, [hasPermission, requestPermission])
const device = useCameraDevice('back')
const format = useCameraFormat(device, [{ videoResolution: { width: 3048, height: 2160 } }, { fps: 60 }]) // targets only — the closest supported format is picked
// Orientation toggle (manual for now)
const [orientation, setOrientation] = useState<Orientation>('portrait')
const toggleOrientation = () => {
setOrientation(
(currentOrientation) => (currentOrientation === 'landscape-left' ? 'portrait' : 'landscape-left'), // Extend this (and the Orientation type) to cover the orientations you need
)
}
if (device == null) return <Text>Camera not available. Has camera permission: {String(hasPermission)}</Text>
return (
hasPermission && (
<View style={styles.container}>
<Camera
ref={camera}
style={StyleSheet.absoluteFill}
device={device}
format={format}
onInitialized={onInitialized}
onError={onError}
video={true}
orientation={orientation} // TODO: #60
isActive={isActive}
/>
<RecordingButton
style={[styles.captureButton, orientation === 'portrait' ? styles.portrait : styles.landscape]}
camera={camera}
onMediaCaptured={onMediaCaptured}
enabled={isCameraInitialized}
/>
<View style={[styles.button, orientation === 'portrait' ? styles.togglePortrait : styles.toggleLandscape]}>
<Button title="Toggle Orientation" onPress={toggleOrientation} color="#841584" accessibilityLabel="Toggle camera orientation" />
</View>
</View>
)
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'black',
},
captureButton: {
position: 'absolute',
alignSelf: 'center',
},
button: {
position: 'absolute',
alignSelf: 'center',
},
togglePortrait: {
bottom: 110, // needs refinement
},
toggleLandscape: {
transform: [{ rotate: '90deg' }],
bottom: '43%', // should come from SafeAreaProvider; hardcoded for now to appear roughly above the record button
left: 50, // needs refinement
},
portrait: {
bottom: 20, // needs refinement
},
landscape: {
bottom: '40%', // should come from SafeAreaProvider
left: 20, // needs refinement
},
})
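
A minimal sketch of how `isActive` above could also respect navigation focus, assuming this screen sits inside a React Navigation navigator (`useIsFocused` comes from @react-navigation/native; the `useIsActive` name is illustrative):

import { useIsFocused } from '@react-navigation/native'
import { useIsForeground } from './is-foreground'

export function useIsActive(): boolean {
  // Only run the camera while the app is foregrounded AND this screen is the focused route
  const isFocused = useIsFocused()
  const isForeground = useIsForeground()
  return isFocused && isForeground
}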

View File

@@ -0,0 +1,96 @@
import React, { useCallback, useRef, useState } from 'react'
import { TouchableOpacity, StyleSheet, View, StyleProp, ViewStyle } from 'react-native'
import { Camera, VideoFile } from 'react-native-vision-camera'
interface RecordingButtonProps {
style: StyleProp<ViewStyle>
camera: React.RefObject<Camera>
onMediaCaptured: (media: VideoFile, mediaType: string) => void
enabled: boolean
}
export const RecordingButton: React.FC<RecordingButtonProps> = ({ style, camera, onMediaCaptured, enabled }) => {
// A ref lets the press handler read the latest value without re-creating callbacks,
// but updating it won't re-render — so a dummy state setter forces the icon to update
const isRecording = useRef(false)
const [, setRecordingState] = useState(false)
const onStoppedRecording = useCallback(() => {
isRecording.current = false
setRecordingState(false)
console.log('stopped recording video!')
}, [])
const stopRecording = useCallback(async () => {
try {
if (camera.current === null) throw new Error('Camera ref is null!') // Error handling could be more graceful
console.log('calling stopRecording()...')
await camera.current.stopRecording()
console.log('called stopRecording()!')
} catch (e) {
console.error('failed to stop recording!', e)
}
}, [camera])
const startRecording = useCallback(() => {
console.log('press')
try {
if (camera.current === null) throw new Error('Camera ref is null!') // Error handling could be more graceful
console.log('calling startRecording()...')
camera.current.startRecording({
onRecordingError: (error) => {
console.error('Recording failed!', error)
onStoppedRecording()
},
onRecordingFinished: (video) => {
onMediaCaptured(video, 'video')
onStoppedRecording()
},
})
console.log('called startRecording()!')
isRecording.current = true
setRecordingState(true)
} catch (e) {
console.error('failed to start recording!', e)
}
}, [camera, onMediaCaptured, onStoppedRecording])
const handlePress = () => {
if (isRecording.current) stopRecording()
else startRecording()
}
return (
<TouchableOpacity style={[styles.captureButton, style]} onPress={handlePress} disabled={!enabled}>
<View style={isRecording.current ? styles.recordingSquare : styles.innerCircle} />
</TouchableOpacity>
)
}
const styles = StyleSheet.create({
captureButton: {
height: 80,
width: 80,
borderRadius: 40,
borderWidth: 3,
borderColor: 'white',
backgroundColor: 'transparent',
justifyContent: 'center',
alignItems: 'center',
},
innerCircle: {
height: 70,
width: 70,
borderRadius: 35,
backgroundColor: '#FF3B30',
},
recordingSquare: {
height: 40,
width: 40,
borderRadius: 10,
backgroundColor: '#FF3B30',
},
})
export default RecordingButton
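
The ref-plus-dummy-state pattern above keeps the callbacks stable, but when the flag is only read during render a plain state value is simpler. A hedged alternative sketch — `SimpleRecordToggle` is illustrative, not part of this diff:

import React, { useCallback, useState } from 'react'
import { StyleSheet, TouchableOpacity, View } from 'react-native'

// Hypothetical state-driven variant: because `isRecording` lives in state, the icon
// re-renders automatically and no dummy setState call is needed.
export function SimpleRecordToggle({ onToggle }: { onToggle: (recording: boolean) => void }): React.ReactElement {
  const [isRecording, setIsRecording] = useState(false)
  const handlePress = useCallback(() => {
    const next = !isRecording
    setIsRecording(next)
    onToggle(next) // caller starts/stops the actual camera recording
  }, [isRecording, onToggle])
  return (
    <TouchableOpacity onPress={handlePress}>
      <View style={isRecording ? styles.square : styles.circle} />
    </TouchableOpacity>
  )
}

const styles = StyleSheet.create({
  circle: { height: 70, width: 70, borderRadius: 35, backgroundColor: '#FF3B30' },
  square: { height: 40, width: 40, borderRadius: 10, backgroundColor: '#FF3B30' },
})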

View File

@@ -1,17 +0,0 @@
import { VisionCameraProxy, Frame } from 'react-native-vision-camera'
const plugin = VisionCameraProxy.initFrameProcessorPlugin('example_kotlin_swift_plugin', { foo: 'bar' })
export function exampleKotlinSwiftPlugin(frame: Frame): string[] {
'worklet'
if (plugin == null) throw new Error('Failed to load Frame Processor Plugin "example_kotlin_swift_plugin"!')
return plugin.call(frame, {
someString: 'hello!',
someBoolean: true,
someNumber: 42,
someObject: { test: 0, second: 'test' },
someArray: ['another test', 5],
}) as string[]
}

View File

@@ -1,25 +0,0 @@
import { VisionCameraProxy, Frame } from 'react-native-vision-camera'
const plugin = VisionCameraProxy.initFrameProcessorPlugin('example_plugin')
interface Result {
example_array: (string | number | boolean)[]
example_array_buffer: ArrayBuffer
example_str: string
example_bool: boolean
example_double: number
}
export function examplePlugin(frame: Frame): Result {
'worklet'
if (plugin == null) throw new Error('Failed to load Frame Processor Plugin "example_plugin"!')
return plugin.call(frame, {
someString: 'hello!',
someBoolean: true,
someNumber: 42,
someObject: { test: 0, second: 'test' },
someArray: ['another test', 5],
}) as unknown as Result
}
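
Both plugin wrappers removed above were meant to be called from a frame processor worklet. A usage sketch — the import path is hypothetical, while `useFrameProcessor` is VisionCamera's real hook:

import React from 'react'
import { StyleSheet } from 'react-native'
import { Camera, useCameraDevice, useFrameProcessor } from 'react-native-vision-camera'
import { examplePlugin } from './frame-processors/example-plugin' // hypothetical path

export function FrameProcessorExample(): React.ReactElement | null {
  const device = useCameraDevice('back')
  const frameProcessor = useFrameProcessor((frame) => {
    'worklet'
    const result = examplePlugin(frame)
    console.log(`example_str: ${result.example_str}`)
  }, [])
  if (device == null) return null
  return <Camera style={StyleSheet.absoluteFill} device={device} isActive={true} frameProcessor={frameProcessor} />
}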

View File

@@ -1,20 +0,0 @@
import { useMMKVString } from 'react-native-mmkv'
import { CameraDevice } from '../../../src/CameraDevice'
import { useCallback, useMemo } from 'react'
import { useCameraDevices } from '../../../src/hooks/useCameraDevices'
export function usePreferredCameraDevice(): [CameraDevice | undefined, (device: CameraDevice) => void] {
const [preferredDeviceId, setPreferredDeviceId] = useMMKVString('camera.preferredDeviceId')
const set = useCallback(
(device: CameraDevice) => {
setPreferredDeviceId(device.id)
},
[setPreferredDeviceId],
)
const devices = useCameraDevices()
const device = useMemo(() => devices.find((d) => d.id === preferredDeviceId), [devices, preferredDeviceId])
return [device, set]
}

View File

@@ -1,5 +1,4 @@
import { useState } from 'react'
import { useEffect } from 'react'
import { useState, useEffect } from 'react'
import { AppState, AppStateStatus } from 'react-native'
export const useIsForeground = (): boolean => {

View File

@@ -1,307 +0,0 @@
import React, { useCallback, useMemo, useRef } from 'react'
import { StyleSheet, View, ViewProps } from 'react-native'
import {
PanGestureHandler,
PanGestureHandlerGestureEvent,
State,
TapGestureHandler,
TapGestureHandlerStateChangeEvent,
} from 'react-native-gesture-handler'
import Reanimated, {
cancelAnimation,
Easing,
Extrapolate,
interpolate,
useAnimatedStyle,
withSpring,
withTiming,
useAnimatedGestureHandler,
useSharedValue,
withRepeat,
} from 'react-native-reanimated'
import type { Camera, PhotoFile, TakePhotoOptions, VideoFile } from 'react-native-vision-camera'
import { CAPTURE_BUTTON_SIZE, SCREEN_HEIGHT, SCREEN_WIDTH } from './../Constants'
const PAN_GESTURE_HANDLER_FAIL_X = [-SCREEN_WIDTH, SCREEN_WIDTH]
const PAN_GESTURE_HANDLER_ACTIVE_Y = [-2, 2]
const START_RECORDING_DELAY = 200
const BORDER_WIDTH = CAPTURE_BUTTON_SIZE * 0.1
interface Props extends ViewProps {
camera: React.RefObject<Camera>
onMediaCaptured: (media: PhotoFile | VideoFile, type: 'photo' | 'video') => void
minZoom: number
maxZoom: number
cameraZoom: Reanimated.SharedValue<number>
flash: 'off' | 'on'
enabled: boolean
setIsPressingButton: (isPressingButton: boolean) => void
}
const _CaptureButton: React.FC<Props> = ({
camera,
onMediaCaptured,
minZoom,
maxZoom,
cameraZoom,
flash,
enabled,
setIsPressingButton,
style,
...props
}): React.ReactElement => {
const pressDownDate = useRef<Date | undefined>(undefined)
const isRecording = useRef(false)
const recordingProgress = useSharedValue(0)
const takePhotoOptions = useMemo<TakePhotoOptions>(
() => ({
qualityPrioritization: 'speed',
flash: flash,
quality: 90,
enableShutterSound: false,
}),
[flash],
)
const isPressingButton = useSharedValue(false)
//#region Camera Capture
const takePhoto = useCallback(async () => {
try {
if (camera.current == null) throw new Error('Camera ref is null!')
console.log('Taking photo...')
const photo = await camera.current.takePhoto(takePhotoOptions)
onMediaCaptured(photo, 'photo')
} catch (e) {
console.error('Failed to take photo!', e)
}
}, [camera, onMediaCaptured, takePhotoOptions])
const onStoppedRecording = useCallback(() => {
isRecording.current = false
cancelAnimation(recordingProgress)
console.log('stopped recording video!')
}, [recordingProgress])
const stopRecording = useCallback(async () => {
try {
if (camera.current == null) throw new Error('Camera ref is null!')
console.log('calling stopRecording()...')
await camera.current.stopRecording()
console.log('called stopRecording()!')
} catch (e) {
console.error('failed to stop recording!', e)
}
}, [camera])
const startRecording = useCallback(() => {
try {
if (camera.current == null) throw new Error('Camera ref is null!')
console.log('calling startRecording()...')
camera.current.startRecording({
flash: flash,
onRecordingError: (error) => {
console.error('Recording failed!', error)
onStoppedRecording()
},
onRecordingFinished: (video) => {
console.log(`Recording successfully finished! ${video.path}`)
onMediaCaptured(video, 'video')
onStoppedRecording()
},
})
// TODO: wait until startRecording returns to actually find out if the recording has successfully started
console.log('called startRecording()!')
isRecording.current = true
} catch (e) {
console.error('failed to start recording!', e)
}
}, [camera, flash, onMediaCaptured, onStoppedRecording])
//#endregion
//#region Tap handler
const tapHandler = useRef<TapGestureHandler>()
const onHandlerStateChanged = useCallback(
async ({ nativeEvent: event }: TapGestureHandlerStateChangeEvent) => {
// This is the gesture handler for the circular "shutter" button.
// When the finger touches the button (State.BEGAN) we enter "capture mode" (the tab bar is disabled),
// store the press-down time in `pressDownDate`, and start a 200ms timeout. If `pressDownDate` is
// unchanged when the timeout fires, the user is still holding the button, so we start recording.
//
// When the finger releases the button (State.END/FAILED/CANCELLED) we leave "capture mode" (the tab
// bar is re-enabled) and check `pressDownDate`: if the press lasted less than 200ms the user wanted
// a single photo, so we call takePhoto(); otherwise a recording is already running, so we stop it.
console.debug(`state: ${Object.keys(State)[event.state]}`)
switch (event.state) {
case State.BEGAN: {
// enter "recording mode"
recordingProgress.value = 0
isPressingButton.value = true
const now = new Date()
pressDownDate.current = now
setTimeout(() => {
if (pressDownDate.current === now) {
// user is still pressing down after 200ms, so their intention is to record a video
startRecording()
}
}, START_RECORDING_DELAY)
setIsPressingButton(true)
return
}
case State.END:
case State.FAILED:
case State.CANCELLED: {
// exit "recording mode"
try {
if (pressDownDate.current == null) throw new Error('PressDownDate ref .current was null!')
const now = new Date()
const diff = now.getTime() - pressDownDate.current.getTime()
pressDownDate.current = undefined
if (diff < START_RECORDING_DELAY) {
// user released the button within 200ms, so their intention is to take a single picture.
await takePhoto()
} else {
// user held the button for more than 200ms, so a recording has been running this entire time.
await stopRecording()
}
} finally {
setTimeout(() => {
isPressingButton.value = false
setIsPressingButton(false)
}, 500)
}
return
}
default:
break
}
},
[isPressingButton, recordingProgress, setIsPressingButton, startRecording, stopRecording, takePhoto],
)
//#endregion
//#region Pan handler
const panHandler = useRef<PanGestureHandler>()
const onPanGestureEvent = useAnimatedGestureHandler<PanGestureHandlerGestureEvent, { offsetY?: number; startY?: number }>({
onStart: (event, context) => {
context.startY = event.absoluteY
const yForFullZoom = context.startY * 0.7
const offsetYForFullZoom = context.startY - yForFullZoom
// map zoom [minZoom ... maxZoom] -> finger offset [0 ... offsetYForFullZoom]
context.offsetY = interpolate(cameraZoom.value, [minZoom, maxZoom], [0, offsetYForFullZoom], Extrapolate.CLAMP)
},
onActive: (event, context) => {
const offset = context.offsetY ?? 0
const startY = context.startY ?? SCREEN_HEIGHT
const yForFullZoom = startY * 0.7
cameraZoom.value = interpolate(event.absoluteY - offset, [yForFullZoom, startY], [maxZoom, minZoom], Extrapolate.CLAMP)
},
})
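// Worked example of the mapping above, assuming the finger starts at absoluteY = 1000:
// yForFullZoom = 700 and offsetYForFullZoom = 300, so dragging up from y=1000 to y=700
// sweeps the zoom from minZoom to maxZoom. `offsetY` shifts that window so a gesture that
// starts while already zoomed in continues from the current zoom instead of snapping back.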
//#endregion
const shadowStyle = useAnimatedStyle(
() => ({
transform: [
{
scale: withSpring(isPressingButton.value ? 1 : 0, {
mass: 1,
damping: 35,
stiffness: 300,
}),
},
],
}),
[isPressingButton],
)
const buttonStyle = useAnimatedStyle(() => {
let scale: number
if (enabled) {
if (isPressingButton.value) {
scale = withRepeat(
withSpring(1, {
stiffness: 100,
damping: 1000,
}),
-1,
true,
)
} else {
scale = withSpring(0.9, {
stiffness: 500,
damping: 300,
})
}
} else {
scale = withSpring(0.6, {
stiffness: 500,
damping: 300,
})
}
return {
opacity: withTiming(enabled ? 1 : 0.3, {
duration: 100,
easing: Easing.linear,
}),
transform: [
{
scale: scale,
},
],
}
}, [enabled, isPressingButton])
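// Net effect of buttonStyle: disabled → scale 0.6 at 30% opacity; enabled but idle → scale 0.9;
// while pressed → withRepeat bounces the scale toward 1 and back, so the button "breathes"
// for the duration of a recording.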
return (
<TapGestureHandler
enabled={enabled}
ref={tapHandler}
onHandlerStateChange={onHandlerStateChanged}
shouldCancelWhenOutside={false}
maxDurationMs={99999999} // <-- prevents the TapGestureHandler from entering State.FAILED when the user moves their finger outside the child view (to zoom)
simultaneousHandlers={panHandler}>
<Reanimated.View {...props} style={[buttonStyle, style]}>
<PanGestureHandler
enabled={enabled}
ref={panHandler}
failOffsetX={PAN_GESTURE_HANDLER_FAIL_X}
activeOffsetY={PAN_GESTURE_HANDLER_ACTIVE_Y}
onGestureEvent={onPanGestureEvent}
simultaneousHandlers={tapHandler}>
<Reanimated.View style={styles.flex}>
<Reanimated.View style={[styles.shadow, shadowStyle]} />
<View style={styles.button} />
</Reanimated.View>
</PanGestureHandler>
</Reanimated.View>
</TapGestureHandler>
)
}
export const CaptureButton = React.memo(_CaptureButton)
const styles = StyleSheet.create({
flex: {
flex: 1,
},
shadow: {
position: 'absolute',
width: CAPTURE_BUTTON_SIZE,
height: CAPTURE_BUTTON_SIZE,
borderRadius: CAPTURE_BUTTON_SIZE / 2,
backgroundColor: '#e34077',
},
button: {
width: CAPTURE_BUTTON_SIZE,
height: CAPTURE_BUTTON_SIZE,
borderRadius: CAPTURE_BUTTON_SIZE / 2,
borderWidth: BORDER_WIDTH,
borderColor: 'white',
},
})

View File

@@ -1,32 +0,0 @@
import { BlurView, BlurViewProps } from '@react-native-community/blur'
import React from 'react'
import { Platform, StyleSheet } from 'react-native'
import StaticSafeAreaInsets from 'react-native-static-safe-area-insets'
const FALLBACK_COLOR = 'rgba(140, 140, 140, 0.3)'
const StatusBarBlurBackgroundImpl = ({ style, ...props }: BlurViewProps): React.ReactElement | null => {
if (Platform.OS !== 'ios') return null
return (
<BlurView
style={[styles.statusBarBackground, style]}
blurAmount={25}
blurType="light"
reducedTransparencyFallbackColor={FALLBACK_COLOR}
{...props}
/>
)
}
export const StatusBarBlurBackground = React.memo(StatusBarBlurBackgroundImpl)
const styles = StyleSheet.create({
statusBarBackground: {
position: 'absolute',
top: 0,
left: 0,
right: 0,
height: StaticSafeAreaInsets.safeAreaInsetsTop,
},
})

189
package/flake.lock generated Normal file
View File

@@ -0,0 +1,189 @@
{
"nodes": {
"android-nixpkgs": {
"inputs": {
"devshell": "devshell",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1701980274,
"narHash": "sha256-uKJIFvsahbWw52TsIht7g2iosXBgJDRMSMoCE1fvEAk=",
"owner": "tadfisher",
"repo": "android-nixpkgs",
"rev": "bce9d437ed54ee1425b66442a12814fee4cdbd51",
"type": "github"
},
"original": {
"owner": "tadfisher",
"repo": "android-nixpkgs",
"type": "github"
}
},
"devshell": {
"inputs": {
"nixpkgs": [
"android-nixpkgs",
"nixpkgs"
],
"systems": "systems"
},
"locked": {
"lastModified": 1701787589,
"narHash": "sha256-ce+oQR4Zq9VOsLoh9bZT8Ip9PaMLcjjBUHVPzW5d7Cw=",
"owner": "numtide",
"repo": "devshell",
"rev": "44ddedcbcfc2d52a76b64fb6122f209881bd3e1e",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "devshell",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1694102001,
"narHash": "sha256-vky6VPK1n1od6vXbqzOXnekrQpTL4hbPAwUhT5J9c9E=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "9e21c80adf67ebcb077d75bd5e7d724d21eeafd6",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1701718080,
"narHash": "sha256-6ovz0pG76dE0P170pmmZex1wWcQoeiomUZGggfH9XPs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2c7f3c0fb7c08a0814627611d9d7d45ab6d75335",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1702312524,
"narHash": "sha256-gkZJRDBUCpTPBvQk25G0B7vfbpEYM5s5OZqghkjZsnE=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "a9bf124c46ef298113270b1f84a164865987a91c",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"android-nixpkgs": "android-nixpkgs",
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": "nixpkgs_2"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

77
package/flake.nix Normal file
View File

@@ -0,0 +1,77 @@
{
description = "Sample Nix ts-node build";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
gitignore = {
url = "github:hercules-ci/gitignore.nix";
inputs.nixpkgs.follows = "nixpkgs";
};
android-nixpkgs = {
url = "github:tadfisher/android-nixpkgs";
};
};
outputs = {
self,
nixpkgs,
flake-utils,
gitignore,
android-nixpkgs,
...
}:
flake-utils.lib.eachDefaultSystem (system: let
pkgs = import nixpkgs {inherit system;};
nodejs = pkgs.nodejs-18_x;
# NOTE: this appBuild derivation does not currently build
appBuild = pkgs.stdenv.mkDerivation {
name = "example-ts-node";
version = "0.1.0";
src = gitignore.lib.gitignoreSource ./.; # uses the gitignore in the repo to only copy files git would see
buildInputs = [nodejs];
# https://nixos.org/manual/nixpkgs/stable/#sec-stdenv-phases
buildPhase = ''
# each phase has pre/postHooks. When you make your own phase be sure to still call the hooks
runHook preBuild
npm ci
npm run build
runHook postBuild
'';
installPhase = ''
runHook preInstall
cp -r node_modules $out/node_modules
cp package.json $out/package.json
cp -r dist $out/dist
runHook postInstall
'';
};
android-sdk = android-nixpkgs.sdk.${system} (sdkPkgs:
with sdkPkgs; [
cmdline-tools-latest
build-tools-30-0-3
build-tools-33-0-0
build-tools-33-0-1
build-tools-34-0-0
platform-tools
platforms-android-33
platforms-android-34
emulator
ndk-23-1-7779620
cmake-3-22-1
system-images-android-33-google-apis-x86-64
system-images-android-34-google-apis-x86-64
]);
in
with pkgs; {
defaultPackage = appBuild;
devShell = mkShell {
buildInputs = [nodejs yarn watchman gradle_7 alejandra nodePackages.prettier ktlint kotlin-language-server];
ANDROID_SDK_BIN = android-sdk;
shellHook = ''
export JAVA_HOME=${pkgs.jdk17.home}
source ${android-sdk.out}/nix-support/setup-hook
export PATH=${android-sdk}/bin:$PATH
export ORG_GRADLE_PROJECT_ANDROID_HOME="$ANDROID_HOME"
'';
};
});
}