Compare commits


28 Commits

Author SHA1 Message Date
d1d359d836 attempt to fix segment corruption w/ tfhd base data offset & tfdt injection, moof size updates -- very messy code, WIP 2025-12-23 15:14:45 -05:00
6b0a3cbb98 fix clockwise rotation error 2025-12-22 18:55:08 -05:00
49fba9ed60 Fix fMP4 video orientation by using raw sensor frames with Y-flip transform 2025-12-22 18:48:12 -05:00
a2d218580c feat: Add fragmented MP4 (fMP4) support for Android
Implements HLS-compatible fragmented MP4 recording on Android using
AndroidX Media3 FragmentedMp4Muxer, matching the iOS implementation.

Changes:
- Add FragmentedRecordingManager for fMP4 segment output
- Add ChunkedRecorderInterface to abstract recorder implementations
- Add onInitSegmentReady callback for init segment (init.mp4)
- Update onVideoChunkReady to include segment duration
- RecordingSession now uses FragmentedRecordingManager by default

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-18 12:29:03 -08:00
61863149c0 flip orientation in camera session 2025-12-11 14:17:34 -08:00
09b50938d2 get orientation change from WindowManager for android 2025-12-11 13:02:00 -08:00
a158ed8350 Merge pull request 'Bump react native w/ api 35 compatibility' (#10) from bump-react-native-with-api35 into main
Reviewed-on: #10
2025-12-11 18:04:51 +00:00
Dean
e7b295546a fix: Add null safety checks in ChunkedRecordingManager
Replace !! operators with proper null checks to prevent
NullPointerExceptions when encodedFormat or muxerContext are null.
This can happen if createNextMuxer is called before
onOutputFormatChanged sets the format.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-11 10:04:18 -08:00
Dean
d87ed8ced2 fix: Handle null error message in promise rejection
Prevents crash when an exception with null message is caught and rejected
through the React Native bridge.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-11 09:48:32 -08:00
f055119735 respect frame processor flag when compiling and force 16kb page alignment 2025-12-01 11:24:24 -07:00
35d80b13d6 disable frame processor jni bindings, preserve video pipeline registration 2025-11-24 10:30:33 -08:00
3d09106e45 skip native library loading for frame processors, wip: failing to launch app 2025-11-17 18:58:13 -08:00
b523e1884f Fix React Native 0.79 Kotlin compilation errors
- Fix currentActivity reference in CameraViewModule
- Convert Map to MutableMap in CameraViewManager for RN 0.79 compatibility
2025-11-17 13:38:30 -08:00
5fcc1a4f77 Bumps and fixes for react native version bump 2025-11-17 13:35:59 -08:00
364171a107 Update Java/Kotlin versions and add opt-in flags for RN 0.79 2025-11-17 13:35:25 -08:00
f90e11897f Fix CMake target for React Native 0.79 compatibility 2025-11-17 11:45:07 -08:00
4798aad464 Merge pull request 'fix/android-api-35-bitmap-config' (#9) from fix/android-api-35-bitmap-config into main
Reviewed-on: #9
2025-10-29 03:22:49 +00:00
Dean
2c8d503e66 Fix Bitmap.Config null-safety for Android API 35 2025-10-28 13:54:54 -07:00
5b52acda26 Gross hack to make things sort of work 2024-11-10 17:51:15 -07:00
17f675657e WIP 2024-11-09 19:52:05 -07:00
c64516693c Merge pull request 'Fix Preview View Aspect Ratio Orientation Issues in android' (#8) from ivan/fix-android-preview-view-aspect-ratio-orientation-issues into main
Reviewed-on: #8
2024-10-12 16:21:18 -06:00
e9f08ef488 Fix Preview View Aspect Ratio Orientation Issues in android 2024-10-12 16:20:23 -06:00
bf122db919 Merge pull request 'Ensure custom exposure mode is supported' (#7) from ivan/ensure-capture-mode-is-supported into main
Reviewed-on: #7
2024-10-10 15:18:12 -06:00
3319e48f7d Ensure custom exposure mode is supported 2024-10-10 15:17:55 -06:00
58714f9dac Merge pull request 'iOS Camera Settings' (#6) from volodymyr/ios-camera-settings into main
Reviewed-on: #6
Reviewed-by: Ivan Malison <ivanmalison@gmail.com>
2024-10-10 15:12:32 -06:00
8991779851 iOS Camera Settings 2024-10-08 15:53:47 +02:00
f8efa172ba Merge pull request 'Lock exposure on start recording' (#4) from volodymyr/ios-lock-exposure into main
Reviewed-on: #4
2024-09-27 11:52:36 -06:00
66f840eecb Lock exposure on start recording 2024-09-27 10:35:29 +02:00
36 changed files with 3674 additions and 3371 deletions

View File

@@ -1,5 +0,0 @@
use flake . --impure
if [ -f .envrc.local ]; then
source .envrc.local
fi

View File

@@ -19,9 +19,7 @@ endif()
# Add react-native-vision-camera sources
add_library(
${PACKAGE_NAME}
SHARED
set(SOURCES
# Shared C++
../cpp/MutableRawBuffer.cpp
# Java JNI
@@ -31,7 +29,11 @@ add_library(
src/main/cpp/OpenGLContext.cpp
src/main/cpp/OpenGLRenderer.cpp
src/main/cpp/MutableJByteBuffer.cpp
# Frame Processor
)
# Only add Frame Processor sources if enabled
if (ENABLE_FRAME_PROCESSORS)
list(APPEND SOURCES
src/main/cpp/frameprocessor/FrameHostObject.cpp
src/main/cpp/frameprocessor/FrameProcessorPluginHostObject.cpp
src/main/cpp/frameprocessor/JSIJNIConversion.cpp
@@ -43,6 +45,18 @@ add_library(
src/main/cpp/frameprocessor/java-bindings/JVisionCameraProxy.cpp
src/main/cpp/frameprocessor/java-bindings/JVisionCameraScheduler.cpp
)
endif()
add_library(
${PACKAGE_NAME}
SHARED
${SOURCES}
)
# Force 16KB page alignment for Android 15+ compatibility
set_target_properties(${PACKAGE_NAME} PROPERTIES
LINK_FLAGS "-Wl,-z,max-page-size=16384"
)
# Header Search Paths (includes)
target_include_directories(
@@ -63,7 +77,7 @@ target_link_libraries(
${LOG_LIB} # <-- Logcat logger
android # <-- Android JNI core
ReactAndroid::jsi # <-- RN: JSI
ReactAndroid::reactnativejni # <-- RN: React Native JNI bindings
ReactAndroid::reactnative # <-- RN: React Native JNI bindings (RN 0.76+)
fbjni::fbjni # <-- fbjni
GLESv2 # <-- OpenGL (for VideoPipeline)
EGL # <-- OpenGL (EGL) (for VideoPipeline)
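
A quick runtime cross-check for the 16 KB page-alignment flag above: Android exposes the kernel page size through android.system.Os, so a device with 16 KB pages reports 16384 here. This is a hypothetical debug helper, not part of the diff:

import android.system.Os
import android.system.OsConstants
import android.util.Log

// Logs the kernel page size. A library linked with
// -Wl,-z,max-page-size=16384 loads on both 4 KB and 16 KB devices.
fun logDevicePageSize() {
    val pageSize = Os.sysconf(OsConstants._SC_PAGESIZE)
    Log.i("VisionCamera", "Device page size: $pageSize bytes")
}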

View File

@@ -133,8 +133,16 @@ android {
}
compileOptions {
sourceCompatibility JavaVersion.VERSION_1_8
targetCompatibility JavaVersion.VERSION_1_8
sourceCompatibility JavaVersion.VERSION_17
targetCompatibility JavaVersion.VERSION_17
}
kotlinOptions {
jvmTarget = "17"
freeCompilerArgs += [
"-opt-in=kotlin.RequiresOptIn",
"-opt-in=com.facebook.react.annotations.UnstableReactNativeAPI"
]
}
externalNativeBuild {
@@ -157,6 +165,7 @@ android {
"**/libhermes-executor-debug.so",
"**/libhermes_executor.so",
"**/libreactnativejni.so",
"**/libreactnative.so",
"**/libturbomodulejsijni.so",
"**/libreact_nativemodule_core.so",
"**/libjscexecutor.so"
@@ -169,6 +178,10 @@ dependencies {
implementation "com.facebook.react:react-android:+"
implementation "org.jetbrains.kotlinx:kotlinx-coroutines-android:1.7.3"
// Media3 muxer for fragmented MP4 (HLS-compatible) recording
implementation "androidx.media3:media3-muxer:1.5.0"
implementation "androidx.media3:media3-common:1.5.0"
if (enableCodeScanner) {
// User enabled code-scanner, so we bundle the 2.4 MB model in the app.
implementation 'com.google.mlkit:barcode-scanning:17.2.0'

View File

@@ -1,5 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-8.13-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

View File

@@ -26,6 +26,7 @@ OpenGLRenderer::OpenGLRenderer(std::shared_ptr<OpenGLContext> context, ANativeWi
_outputSurface = surface;
_width = ANativeWindow_getWidth(surface);
_height = ANativeWindow_getHeight(surface);
__android_log_print(ANDROID_LOG_INFO, TAG, "ROTATION_DEBUG OpenGLRenderer created with output surface dimensions: %dx%d", _width, _height);
}
OpenGLRenderer::~OpenGLRenderer() {

View File

@@ -56,6 +56,11 @@ void VideoPipeline::setRecordingSessionOutputSurface(jobject surface) {
_recordingSessionOutput = OpenGLRenderer::CreateWithWindowSurface(_context, window);
}
void VideoPipeline::setRecordingOrientation(int orientation) {
_recordingOrientation = orientation;
__android_log_print(ANDROID_LOG_INFO, TAG, "Recording orientation set to: %d", orientation);
}
int VideoPipeline::getInputTextureId() {
if (_inputTexture == std::nullopt) {
_inputTexture = _context->createTexture(OpenGLTexture::Type::ExternalOES, _width, _height);
@@ -78,8 +83,29 @@ void VideoPipeline::onFrame(jni::alias_ref<jni::JArrayFloat> transformMatrixPara
OpenGLTexture& texture = _inputTexture.value();
if (_recordingSessionOutput) {
__android_log_print(ANDROID_LOG_INFO, TAG, "Rendering to RecordingSession..");
_recordingSessionOutput->renderTextureToSurface(texture, transformMatrix);
__android_log_print(ANDROID_LOG_INFO, TAG, "Rendering to RecordingSession.. orientation=%d", _recordingOrientation);
// For recording, use a simple transform matrix instead of the display transform.
// The display transform includes rotations for preview which we don't want in recordings.
float recordingMatrix[16];
if (_recordingOrientation == 1) {
// LANDSCAPE_RIGHT (CW): Y-flip composed with a 180° rotation.
// The Y flips cancel, so the net matrix negates X only and translates by (1,0).
recordingMatrix[0] = -1.0f; recordingMatrix[1] = 0.0f; recordingMatrix[2] = 0.0f; recordingMatrix[3] = 0.0f;
recordingMatrix[4] = 0.0f; recordingMatrix[5] = 1.0f; recordingMatrix[6] = 0.0f; recordingMatrix[7] = 0.0f;
recordingMatrix[8] = 0.0f; recordingMatrix[9] = 0.0f; recordingMatrix[10] = 1.0f; recordingMatrix[11] = 0.0f;
recordingMatrix[12] = 1.0f; recordingMatrix[13] = 0.0f; recordingMatrix[14] = 0.0f; recordingMatrix[15] = 1.0f;
} else {
// LANDSCAPE_LEFT (CCW): Simple Y-flip
// OpenGL origin is bottom-left, video expects top-left
recordingMatrix[0] = 1.0f; recordingMatrix[1] = 0.0f; recordingMatrix[2] = 0.0f; recordingMatrix[3] = 0.0f;
recordingMatrix[4] = 0.0f; recordingMatrix[5] = -1.0f; recordingMatrix[6] = 0.0f; recordingMatrix[7] = 0.0f;
recordingMatrix[8] = 0.0f; recordingMatrix[9] = 0.0f; recordingMatrix[10] = 1.0f; recordingMatrix[11] = 0.0f;
recordingMatrix[12] = 0.0f; recordingMatrix[13] = 1.0f; recordingMatrix[14] = 0.0f; recordingMatrix[15] = 1.0f;
}
_recordingSessionOutput->renderTextureToSurface(texture, recordingMatrix);
}
}
@@ -88,6 +114,7 @@ void VideoPipeline::registerNatives() {
makeNativeMethod("initHybrid", VideoPipeline::initHybrid),
makeNativeMethod("setRecordingSessionOutputSurface", VideoPipeline::setRecordingSessionOutputSurface),
makeNativeMethod("removeRecordingSessionOutputSurface", VideoPipeline::removeRecordingSessionOutputSurface),
makeNativeMethod("setRecordingOrientation", VideoPipeline::setRecordingOrientation),
makeNativeMethod("getInputTextureId", VideoPipeline::getInputTextureId),
makeNativeMethod("onBeforeFrame", VideoPipeline::onBeforeFrame),
makeNativeMethod("onFrame", VideoPipeline::onFrame),

View File

@@ -33,6 +33,7 @@ public:
// <- MediaRecorder output
void setRecordingSessionOutputSurface(jobject surface);
void removeRecordingSessionOutputSurface();
void setRecordingOrientation(int orientation);
// Frame callbacks
void onBeforeFrame();
@@ -47,6 +48,7 @@ private:
std::optional<OpenGLTexture> _inputTexture = std::nullopt;
int _width = 0;
int _height = 0;
int _recordingOrientation = 0; // 0=LANDSCAPE_LEFT, 1=LANDSCAPE_RIGHT
// Output Contexts
std::shared_ptr<OpenGLContext> _context = nullptr;

View File

@@ -9,11 +9,13 @@
JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void*) {
return facebook::jni::initialize(vm, [] {
// VideoPipeline is needed for video recording even without Frame Processors
vision::VideoPipeline::registerNatives();
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
// Frame Processor JNI bindings - only register when Frame Processors are enabled
vision::VisionCameraInstaller::registerNatives();
vision::JVisionCameraProxy::registerNatives();
vision::JVisionCameraScheduler::registerNatives();
vision::VideoPipeline::registerNatives();
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
vision::JFrameProcessor::registerNatives();
vision::JSharedArray::registerNatives();
#endif

View File

@@ -40,15 +40,26 @@ fun CameraView.invokeOnStopped() {
this.sendEvent(event)
}
fun CameraView.invokeOnChunkReady(filepath: File, index: Int) {
Log.e(CameraView.TAG, "invokeOnError(...):")
fun CameraView.invokeOnChunkReady(filepath: File, index: Int, durationUs: Long?) {
Log.i(CameraView.TAG, "invokeOnChunkReady(...): index=$index, filepath=$filepath, durationUs=$durationUs")
val event = Arguments.createMap()
event.putInt("index", index)
event.putString("filepath", filepath.toString())
if (durationUs != null) {
event.putDouble("duration", durationUs / 1_000_000.0) // Convert microseconds to seconds
}
val reactContext = context as ReactContext
reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "onVideoChunkReady", event)
}
fun CameraView.invokeOnInitReady(filepath: File) {
Log.i(CameraView.TAG, "invokeOnInitReady(...): filepath=$filepath")
val event = Arguments.createMap()
event.putString("filepath", filepath.toString())
val reactContext = context as ReactContext
reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "onInitReady", event)
}
fun CameraView.invokeOnError(error: Throwable) {
Log.e(CameraView.TAG, "invokeOnError(...):")
error.printStackTrace()

View File

@@ -13,70 +13,37 @@ import com.facebook.react.bridge.ReadableMap
import com.facebook.react.bridge.WritableMap
import com.mrousavy.camera.core.CameraSession
import com.mrousavy.camera.core.InsufficientStorageError
import com.mrousavy.camera.utils.FileUtils
import com.mrousavy.camera.types.Flash
import com.mrousavy.camera.types.QualityPrioritization
import com.mrousavy.camera.utils.*
import java.io.File
import java.io.FileOutputStream
import java.io.IOException
import kotlinx.coroutines.*
private const val TAG = "CameraView.takePhoto"
private const val TAG = "CameraView.takeSnapshot"
@SuppressLint("UnsafeOptInUsageError")
suspend fun CameraView.takePhoto(optionsMap: ReadableMap): WritableMap {
val options = optionsMap.toHashMap()
Log.i(TAG, "Taking photo... Options: $options")
Log.i(TAG, "Taking snapshot... Options: $options")
val bitmap = previewView.getBitmap() ?: throw Error()
val qualityPrioritization = options["qualityPrioritization"] as? String ?: "balanced"
val flash = options["flash"] as? String ?: "off"
val enableAutoStabilization = options["enableAutoStabilization"] == true
val enableShutterSound = options["enableShutterSound"] as? Boolean ?: true
val enablePrecapture = options["enablePrecapture"] as? Boolean ?: false
val file = FileUtils.createTempFile(context, "png");
// TODO: Implement Red Eye Reduction
options["enableAutoRedEyeReduction"]
// Write snapshot to .png file
FileUtils.writeBitmapTofile(bitmap, file, 100)
val flashMode = Flash.fromUnionValue(flash)
val qualityPrioritizationMode = QualityPrioritization.fromUnionValue(qualityPrioritization)
val photo = cameraSession.takePhoto(
qualityPrioritizationMode,
flashMode,
enableShutterSound,
enableAutoStabilization,
enablePrecapture,
orientation
)
photo.use {
Log.i(TAG, "Successfully captured ${photo.image.width} x ${photo.image.height} photo!")
val cameraCharacteristics = cameraManager.getCameraCharacteristics(cameraId!!)
val path = try {
savePhotoToFile(context, cameraCharacteristics, photo)
} catch (e: IOException) {
if (e.message?.contains("no space left", true) == true) {
throw InsufficientStorageError()
} else {
throw e
}
}
Log.i(TAG, "Successfully saved photo to file! $path")
Log.i(TAG, "Successfully saved snapshot to file!")
// Parse output data
val map = Arguments.createMap()
map.putString("path", path)
map.putInt("width", photo.image.width)
map.putInt("height", photo.image.height)
map.putString("orientation", photo.orientation.unionValue)
map.putBoolean("isRawPhoto", photo.format == ImageFormat.RAW_SENSOR)
map.putBoolean("isMirrored", photo.isMirrored)
map.putString("path", file.absolutePath)
map.putInt("width", bitmap.width)
map.putInt("height", bitmap.height)
map.putBoolean("isMirrored", false)
return map
}
}
private fun writePhotoToFile(photo: CameraSession.CapturedPhoto, file: File) {
val byteBuffer = photo.image.planes[0].buffer

View File

@@ -102,7 +102,7 @@ class CameraView(context: Context) :
// session
internal val cameraSession: CameraSession
private val previewView: PreviewView
val previewView: PreviewView
private var currentConfigureCall: Long = System.currentTimeMillis()
internal var frameProcessor: FrameProcessor? = null
@@ -271,8 +271,12 @@ class CameraView(context: Context) :
invokeOnStopped()
}
override fun onVideoChunkReady(filepath: File, index: Int) {
invokeOnChunkReady(filepath, index)
override fun onVideoChunkReady(filepath: File, index: Int, durationUs: Long?) {
invokeOnChunkReady(filepath, index, durationUs)
}
override fun onInitSegmentReady(filepath: File) {
invokeOnInitReady(filepath)
}
override fun onCodeScanned(codes: List<Barcode>, scannerFrame: CodeScannerFrame) {

View File

@@ -32,7 +32,8 @@ class CameraViewManager : ViewGroupManager<CameraView>() {
.put("cameraError", MapBuilder.of("registrationName", "onError"))
.put("cameraCodeScanned", MapBuilder.of("registrationName", "onCodeScanned"))
.put("onVideoChunkReady", MapBuilder.of("registrationName", "onVideoChunkReady"))
.build()
.put("onInitReady", MapBuilder.of("registrationName", "onInitReady"))
.build()?.toMutableMap()
override fun getName(): String = TAG

View File

@@ -31,10 +31,12 @@ class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBase
init {
try {
// Load the native part of VisionCamera.
// Includes the OpenGL VideoPipeline, as well as Frame Processor JSI bindings
// Includes the OpenGL VideoPipeline (needed for video recording)
// Frame Processors remain disabled for RN 0.79+ compatibility
System.loadLibrary("VisionCamera")
Log.i(TAG, "VisionCamera native library loaded successfully")
} catch (e: UnsatisfiedLinkError) {
Log.e(VisionCameraProxy.TAG, "Failed to load VisionCamera C++ library!", e)
Log.e(TAG, "Failed to load VisionCamera C++ library!", e)
throw e
}
}
@@ -73,14 +75,10 @@ class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBase
}
@ReactMethod(isBlockingSynchronousMethod = true)
fun installFrameProcessorBindings(): Boolean =
try {
val proxy = VisionCameraProxy(reactApplicationContext)
VisionCameraInstaller.install(proxy)
true
} catch (e: Error) {
Log.e(TAG, "Failed to install Frame Processor JSI Bindings!", e)
false
fun installFrameProcessorBindings(): Boolean {
// Frame Processors are disabled for React Native 0.79+ compatibility
Log.i(TAG, "Frame Processor bindings not installed - Frame Processors disabled for RN 0.79+ compatibility")
return false
}
@ReactMethod
@@ -157,7 +155,7 @@ class CameraViewModule(reactContext: ReactApplicationContext) : ReactContextBase
}
private fun canRequestPermission(permission: String): Boolean {
val activity = currentActivity as? PermissionAwareActivity
val activity = reactApplicationContext.currentActivity as? PermissionAwareActivity
return activity?.shouldShowRequestPermissionRationale(permission) ?: false
}

View File

@@ -15,6 +15,7 @@ import android.util.Log
import android.util.Size
import android.view.Surface
import android.view.SurfaceHolder
import android.view.WindowManager
import androidx.core.content.ContextCompat
import com.google.mlkit.vision.barcode.common.Barcode
import com.mrousavy.camera.core.capture.RepeatingCaptureRequest
@@ -408,7 +409,8 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
private fun updateVideoOutputs() {
val videoOutput = videoOutput ?: return
Log.i(TAG, "Updating Video Outputs...")
videoOutput.videoPipeline.setRecordingSessionOutput(recording)
val orientation = recording?.cameraOrientation ?: Orientation.LANDSCAPE_LEFT
videoOutput.videoPipeline.setRecordingSessionOutput(recording, orientation)
}
suspend fun startRecording(
@@ -425,6 +427,19 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
val fps = configuration?.fps ?: 30
// Get actual device rotation from WindowManager since the React Native orientation hook
// doesn't update when rotating between landscape-left and landscape-right on Android.
val windowManager = context.getSystemService(Context.WINDOW_SERVICE) as WindowManager
val deviceRotation = windowManager.defaultDisplay.rotation
val recordingOrientation = when (deviceRotation) {
Surface.ROTATION_0 -> Orientation.PORTRAIT
Surface.ROTATION_90 -> Orientation.LANDSCAPE_LEFT // CCW rotation, top to left
Surface.ROTATION_180 -> Orientation.PORTRAIT_UPSIDE_DOWN
Surface.ROTATION_270 -> Orientation.LANDSCAPE_RIGHT // CW rotation, top to right
else -> Orientation.PORTRAIT
}
Log.i(TAG, "ROTATION_DEBUG: deviceRotation=$deviceRotation, recordingOrientation=$recordingOrientation, options.orientation=${options.orientation}")
val recording = RecordingSession(
context,
cameraId,
@@ -432,7 +447,7 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
enableAudio,
fps,
videoOutput.enableHdr,
orientation,
recordingOrientation,
options,
filePath,
callback,
@@ -497,7 +512,8 @@ class CameraSession(private val context: Context, private val cameraManager: Cam
fun onInitialized()
fun onStarted()
fun onStopped()
fun onVideoChunkReady(filepath: File, index: Int)
fun onVideoChunkReady(filepath: File, index: Int, durationUs: Long?)
fun onInitSegmentReady(filepath: File)
fun onCodeScanned(codes: List<Barcode>, scannerFrame: CodeScannerFrame)
}
}
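
The two new callbacks carry exactly what an HLS packager needs: init.mp4 for #EXT-X-MAP and per-segment durations for #EXTINF. As a sketch of how a consumer might sit behind this interface (the class and file names here are hypothetical, not part of the library):

import java.io.File
import java.util.Locale

// Collects segment events and renders a minimal fMP4 HLS media playlist.
class HlsPlaylistWriter(private val outputDirectory: File, private val targetDuration: Int = 6) {
    private data class Segment(val name: String, val durationSec: Double)
    private val segments = mutableListOf<Segment>()
    private var initName: String? = null

    fun onInitSegmentReady(filepath: File) {
        initName = filepath.name
    }

    fun onVideoChunkReady(filepath: File, index: Int, durationUs: Long?) {
        // Fall back to the nominal target duration if none was reported.
        val seconds = (durationUs ?: targetDuration * 1_000_000L) / 1_000_000.0
        segments.add(Segment(filepath.name, seconds))
        render()
    }

    private fun render() {
        val playlist = buildString {
            appendLine("#EXTM3U")
            appendLine("#EXT-X-VERSION:7") // fMP4 segments require version 6+
            appendLine("#EXT-X-TARGETDURATION:$targetDuration")
            initName?.let { appendLine("#EXT-X-MAP:URI=\"$it\"") }
            for (segment in segments) {
                appendLine("#EXTINF:${"%.3f".format(Locale.US, segment.durationSec)},")
                appendLine(segment.name)
            }
        }
        File(outputDirectory, "index.m3u8").writeText(playlist)
    }
}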

View File

@@ -14,7 +14,7 @@ import java.io.File
import java.nio.ByteBuffer
class ChunkedRecordingManager(private val encoder: MediaCodec, private val outputDirectory: File, private val orientationHint: Int, private val iFrameInterval: Int, private val callbacks: CameraSession.Callback) :
MediaCodec.Callback() {
MediaCodec.Callback(), ChunkedRecorderInterface {
companion object {
private const val TAG = "ChunkedRecorder"
@@ -73,7 +73,7 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
private val targetDurationUs = iFrameInterval * 1000000
val surface: Surface = encoder.createInputSurface()
override val surface: Surface = encoder.createInputSurface()
init {
if (!this.outputDirectory.exists()) {
@@ -95,7 +95,9 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
fun finish() {
muxer.stop()
muxer.release()
callbacks.onVideoChunkReady(filepath, chunkIndex)
// This legacy recorder does not track chunk duration, so pass null.
// The new FragmentedRecordingManager reports accurate durations.
callbacks.onVideoChunkReady(filepath, chunkIndex, null)
}
}
@@ -105,6 +107,12 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
muxerContext?.finish()
chunkIndex++
val format = this.encodedFormat
if (format == null) {
Log.e(TAG, "Cannot create muxer: encodedFormat is null (onOutputFormatChanged not called yet)")
return
}
val newFileName = "$chunkIndex.mp4"
val newOutputFile = File(this.outputDirectory, newFileName)
Log.i(TAG, "Creating new muxer for file: $newFileName")
@@ -114,7 +122,7 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
)
muxer.setOrientationHint(orientationHint)
muxerContext = MuxerContext(
muxer, newOutputFile, chunkIndex, bufferInfo.presentationTimeUs, this.encodedFormat!!, this.callbacks
muxer, newOutputFile, chunkIndex, bufferInfo.presentationTimeUs, format, this.callbacks
)
}
@@ -123,15 +131,16 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
}
private fun chunkLengthUs(bufferInfo: BufferInfo): Long {
return bufferInfo.presentationTimeUs - muxerContext!!.startTimeUs
val context = muxerContext ?: return 0L
return bufferInfo.presentationTimeUs - context.startTimeUs
}
fun start() {
override fun start() {
encoder.start()
recording = true
}
fun finish() {
override fun finish() {
synchronized(this) {
muxerContext?.finish()
recording = false
@@ -155,7 +164,13 @@ class ChunkedRecordingManager(private val encoder: MediaCodec, private val outpu
if (muxerContext == null || (atKeyframe(bufferInfo) && chunkLengthUs(bufferInfo) >= targetDurationUs)) {
this.createNextMuxer(bufferInfo)
}
muxerContext!!.muxer.writeSampleData(muxerContext!!.videoTrack, encodedData, bufferInfo)
val context = muxerContext
if (context == null) {
Log.e(TAG, "Cannot write sample data: muxerContext is null")
encoder.releaseOutputBuffer(index, false)
return
}
context.muxer.writeSampleData(context.videoTrack, encodedData, bufferInfo)
encoder.releaseOutputBuffer(index, false)
}
}

View File

@@ -0,0 +1,15 @@
package com.mrousavy.camera.core
import android.view.Surface
/**
* Common interface for chunked video recorders.
* Implemented by both ChunkedRecordingManager (regular MP4) and
* FragmentedRecordingManager (HLS-compatible fMP4).
*/
interface ChunkedRecorderInterface {
val surface: Surface
fun start()
fun finish()
}
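
Because the interface is this small, a stub is enough to exercise the RecordingSession wiring without spinning up a MediaCodec encoder. A hypothetical test double (not part of the diff):

import android.graphics.SurfaceTexture
import android.util.Log
import android.view.Surface

// Satisfies ChunkedRecorderInterface with a dummy surface and no-op lifecycle.
class NoOpRecorder : ChunkedRecorderInterface {
    override val surface: Surface = Surface(SurfaceTexture(0))
    override fun start() { Log.i("NoOpRecorder", "start()") }
    override fun finish() { Log.i("NoOpRecorder", "finish()") }
}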

View File

@@ -0,0 +1,762 @@
package com.mrousavy.camera.core
import android.media.MediaCodec
import android.media.MediaCodec.BufferInfo
import android.media.MediaCodecInfo
import android.media.MediaFormat
import android.util.Log
import android.util.Size
import android.view.Surface
import androidx.media3.common.Format
import androidx.media3.common.MimeTypes
import androidx.media3.common.util.UnstableApi
import androidx.media3.muxer.FragmentedMp4Muxer
import androidx.media3.muxer.Muxer
import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.RecordVideoOptions
import java.io.File
import java.io.FileOutputStream
import java.io.RandomAccessFile
import java.nio.ByteBuffer
import java.nio.ByteOrder
/**
* A recording manager that produces HLS-compatible fragmented MP4 segments.
*
* This produces output similar to the iOS implementation:
* - An initialization segment (init.mp4) containing codec configuration
* - Numbered data segments (0.mp4, 1.mp4, ...) containing media data
*
* Uses AndroidX Media3's FragmentedMp4Muxer which produces proper fMP4 output.
*/
@UnstableApi
class FragmentedRecordingManager(
private val encoder: MediaCodec,
private val outputDirectory: File,
private val orientationDegrees: Int,
private val targetSegmentDurationUs: Long,
private val callbacks: CameraSession.Callback
) : MediaCodec.Callback(), ChunkedRecorderInterface {
companion object {
private const val TAG = "FragmentedRecorder"
fun fromParams(
callbacks: CameraSession.Callback,
size: Size,
enableAudio: Boolean,
fps: Int? = null,
cameraOrientation: Orientation,
bitRate: Int,
options: RecordVideoOptions,
outputDirectory: File,
segmentDurationSeconds: Int = 6
): FragmentedRecordingManager {
val mimeType = options.videoCodec.toMimeType()
// For fragmented MP4: DON'T swap dimensions, use camera's native dimensions.
// The C++ VideoPipeline uses a custom transform matrix (not the display transform).
// This gives us raw sensor frames, and we rely on rotation metadata for playback.
val cameraOrientationDegrees = when (cameraOrientation) {
Orientation.LANDSCAPE_LEFT -> 0 // CCW landscape
Orientation.LANDSCAPE_RIGHT -> 0 // CW landscape
Orientation.PORTRAIT -> 90
Orientation.PORTRAIT_UPSIDE_DOWN -> 270
}
Log.i(TAG, "ROTATION_DEBUG FragmentedRecordingManager: cameraOrientation=$cameraOrientation, cameraOrientationDegrees=$cameraOrientationDegrees, inputSize=${size.width}x${size.height}")
// Keep original dimensions - don't swap. Let rotation metadata handle orientation.
val width = size.width
val height = size.height
Log.i(TAG, "ROTATION_DEBUG FragmentedRecordingManager: outputDimensions=${width}x${height} (no swap)")
val format = MediaFormat.createVideoFormat(mimeType, width, height)
val codec = MediaCodec.createEncoderByType(mimeType)
format.setInteger(
MediaFormat.KEY_COLOR_FORMAT,
MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface
)
fps?.apply {
format.setInteger(MediaFormat.KEY_FRAME_RATE, this)
}
// I-frame interval affects segment boundaries
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, segmentDurationSeconds)
format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate)
Log.d(TAG, "Video Format: $format, orientationDegrees: $cameraOrientationDegrees")
codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE)
return FragmentedRecordingManager(
codec,
outputDirectory,
cameraOrientationDegrees,
segmentDurationSeconds * 1_000_000L,
callbacks
)
}
}
// State management
private var chunkIndex = 0
private var encodedFormat: MediaFormat? = null
private var recording = false
// Segment tracking
private var segmentContext: SegmentContext? = null
private var initSegmentEmitted = false
// Cumulative base time for HLS-compatible timestamps (in timescale units)
// Each segment's baseMediaDecodeTime should be the sum of all previous segment durations
private var cumulativeBaseTimeUs: Long = 0L
// Timescale used in the fMP4 (typically 1000000 for microseconds)
private val timescale: Long = 1_000_000L
override val surface: Surface = encoder.createInputSurface()
init {
if (!outputDirectory.exists()) {
outputDirectory.mkdirs()
}
encoder.setCallback(this)
}
/**
* Result from finishing a segment, used for tfdt patching.
*/
private data class SegmentResult(
val file: File,
val segmentIndex: Int,
val durationUs: Long
)
/**
* Context for a single data segment being written.
* Init segments are created separately via createInitSegment().
*/
private inner class SegmentContext(
private val format: MediaFormat,
val segmentIndex: Int,
private val baseTimeUs: Long // The baseMediaDecodeTime for this segment
) {
private val filename = "$segmentIndex.mp4"
val file = File(outputDirectory, filename)
private val outputStream = FileOutputStream(file)
private val muxer = FragmentedMp4Muxer.Builder(outputStream).build()
private lateinit var videoTrack: Muxer.TrackToken
private var startTimeUs: Long = -1L
private var lastTimeUs: Long = 0L
private var sampleCount = 0
init {
val media3Format = convertToMedia3Format(format)
videoTrack = muxer.addTrack(media3Format)
Log.d(TAG, "Created segment context: $filename with baseTimeUs=$baseTimeUs")
}
fun writeSample(buffer: ByteBuffer, bufferInfo: BufferInfo) {
if (startTimeUs < 0) {
startTimeUs = bufferInfo.presentationTimeUs
Log.i(TAG, "PTS_DEBUG Segment $segmentIndex FIRST sample: absolutePTS=${bufferInfo.presentationTimeUs}us, baseTimeUs=$baseTimeUs")
}
// Log first 3 samples and every keyframe for debugging
val isKeyFrame = (bufferInfo.flags and MediaCodec.BUFFER_FLAG_KEY_FRAME) != 0
if (sampleCount < 3 || isKeyFrame) {
Log.i(TAG, "PTS_DEBUG Segment $segmentIndex sample $sampleCount: PTS=${bufferInfo.presentationTimeUs}us, keyframe=$isKeyFrame")
}
lastTimeUs = bufferInfo.presentationTimeUs
muxer.writeSampleData(videoTrack, buffer, bufferInfo)
sampleCount++
}
/**
* Check if we've accumulated enough duration to start a new segment.
* Should only be called when we have a keyframe available.
*/
fun shouldStartNewSegmentOnKeyframe(): Boolean {
if (sampleCount == 0) return false // Need at least one sample first
val currentDurationUs = lastTimeUs - startTimeUs
return currentDurationUs >= targetSegmentDurationUs
}
fun finish(): SegmentResult {
try {
muxer.close()
outputStream.close()
} catch (e: Exception) {
Log.e(TAG, "Error closing segment", e)
}
val durationUs = if (lastTimeUs > startTimeUs) lastTimeUs - startTimeUs else 0L
Log.i(TAG, "PTS_DEBUG Segment $segmentIndex FINISHED: startPTS=${startTimeUs}us, lastPTS=${lastTimeUs}us, duration=${durationUs/1000}ms, samples=$sampleCount, baseTimeUs=$baseTimeUs")
return SegmentResult(file, segmentIndex, durationUs)
}
}
private fun createNewSegment() {
val format = encodedFormat
if (format == null) {
Log.e(TAG, "Cannot create segment: encodedFormat is null")
return
}
// Close previous segment and process it for HLS
segmentContext?.let { ctx ->
val result = ctx.finish()
// Process the segment: extract init (if first), strip headers, inject tfdt
processSegmentForHLS(result.file, result.segmentIndex, cumulativeBaseTimeUs)
// Update cumulative time for next segment
cumulativeBaseTimeUs += result.durationUs
// Notify callback
callbacks.onVideoChunkReady(result.file, result.segmentIndex, result.durationUs)
}
// Create new data segment with current cumulative base time
segmentContext = SegmentContext(format, chunkIndex, cumulativeBaseTimeUs)
chunkIndex++
}
override fun start() {
encoder.start()
recording = true
}
override fun finish() {
synchronized(this) {
recording = false
// Close final segment and process it for HLS
segmentContext?.let { ctx ->
val result = ctx.finish()
processSegmentForHLS(result.file, result.segmentIndex, cumulativeBaseTimeUs)
callbacks.onVideoChunkReady(result.file, result.segmentIndex, result.durationUs)
}
segmentContext = null
try {
encoder.stop()
encoder.release()
} catch (e: Exception) {
Log.e(TAG, "Error stopping encoder", e)
}
}
}
// MediaCodec.Callback methods
override fun onInputBufferAvailable(codec: MediaCodec, index: Int) {
// Not used for Surface input
}
override fun onOutputBufferAvailable(codec: MediaCodec, index: Int, bufferInfo: BufferInfo) {
synchronized(this) {
if (!recording) {
encoder.releaseOutputBuffer(index, false)
return
}
// Skip codec config buffers - these contain SPS/PPS with annex-b start codes
// and should NOT be written as samples (they're already in the Format's initializationData)
if ((bufferInfo.flags and MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
Log.d(TAG, "Skipping codec config buffer (size=${bufferInfo.size})")
encoder.releaseOutputBuffer(index, false)
return
}
val encodedData = encoder.getOutputBuffer(index)
if (encodedData == null) {
Log.e(TAG, "getOutputBuffer returned null")
encoder.releaseOutputBuffer(index, false)
return
}
// Create first data segment if needed
if (segmentContext == null) {
createNewSegment()
}
val context = segmentContext
if (context == null) {
encoder.releaseOutputBuffer(index, false)
return
}
try {
// Check if this keyframe should start a new segment BEFORE writing
val isKeyFrame = (bufferInfo.flags and MediaCodec.BUFFER_FLAG_KEY_FRAME) != 0
val shouldStartNewSegment = isKeyFrame && context.shouldStartNewSegmentOnKeyframe()
if (shouldStartNewSegment) {
// Finish old segment WITHOUT writing this keyframe to it
createNewSegment()
// Write keyframe to the NEW segment only
segmentContext?.writeSample(encodedData, bufferInfo)
} else {
// Write to current segment
context.writeSample(encodedData, bufferInfo)
}
} catch (e: Exception) {
Log.e(TAG, "Error writing sample", e)
}
encoder.releaseOutputBuffer(index, false)
}
}
override fun onError(codec: MediaCodec, e: MediaCodec.CodecException) {
Log.e(TAG, "Codec error: ${e.message}")
}
override fun onOutputFormatChanged(codec: MediaCodec, format: MediaFormat) {
Log.i(TAG, "Output format changed: $format")
encodedFormat = format
// Note: init segment is now extracted from the first segment's ftyp+moov
// rather than created separately (Media3's empty init was not working)
}
private fun convertToMedia3Format(mediaFormat: MediaFormat): Format {
val mimeType = mediaFormat.getString(MediaFormat.KEY_MIME) ?: MimeTypes.VIDEO_H264
val width = mediaFormat.getInteger(MediaFormat.KEY_WIDTH)
val height = mediaFormat.getInteger(MediaFormat.KEY_HEIGHT)
val bitRate = try { mediaFormat.getInteger(MediaFormat.KEY_BIT_RATE) } catch (e: Exception) { -1 }
val frameRate = try { mediaFormat.getInteger(MediaFormat.KEY_FRAME_RATE) } catch (e: Exception) { -1 }
// Get CSD (Codec Specific Data) if available - required for init segment
// csd-0 contains SPS (Sequence Parameter Set)
// csd-1 contains PPS (Picture Parameter Set)
val csd0 = mediaFormat.getByteBuffer("csd-0")
val csd1 = mediaFormat.getByteBuffer("csd-1")
val initData = mutableListOf<ByteArray>()
csd0?.let {
val bytes = ByteArray(it.remaining())
it.duplicate().get(bytes)
initData.add(bytes)
Log.i(TAG, "CSD_DEBUG: csd-0 (SPS) size=${bytes.size} bytes, hex=${bytes.take(32).joinToString("") { "%02x".format(it) }}...")
}
csd1?.let {
val bytes = ByteArray(it.remaining())
it.duplicate().get(bytes)
initData.add(bytes)
Log.i(TAG, "CSD_DEBUG: csd-1 (PPS) size=${bytes.size} bytes, hex=${bytes.joinToString("") { "%02x".format(it) }}")
}
val totalCsdSize = initData.sumOf { it.size }
Log.i(TAG, "CSD_DEBUG: Total CSD size=$totalCsdSize bytes (csd-0=${csd0?.remaining() ?: 0}, csd-1=${csd1?.remaining() ?: 0})")
Log.i(TAG, "ROTATION_DEBUG convertToMedia3Format: orientationDegrees=$orientationDegrees, width=$width, height=$height")
return Format.Builder()
.setSampleMimeType(mimeType)
.setWidth(width)
.setHeight(height)
.setRotationDegrees(orientationDegrees)
.apply {
if (bitRate > 0) setAverageBitrate(bitRate)
if (frameRate > 0) setFrameRate(frameRate.toFloat())
if (initData.isNotEmpty()) setInitializationData(initData)
}
.build()
}
/**
* Processes a segment file for HLS compatibility:
* 1. For segment 0: extracts ftyp+moov header as init.mp4
* 2. Strips ftyp+moov from segment, keeping only moof+mdat (the fragment)
* 3. Injects tfdt box into moof for proper HLS timing
*
* Media3's FragmentedMp4Muxer creates self-contained MP4s, but HLS needs:
* - init.mp4: ftyp + moov (codec configuration)
* - segments: moof + mdat only (fragments referencing init)
*/
private fun processSegmentForHLS(file: File, segmentIndex: Int, baseMediaDecodeTimeUs: Long) {
try {
val originalBytes = file.readBytes()
val buffer = ByteBuffer.wrap(originalBytes).order(ByteOrder.BIG_ENDIAN)
// Find where moof starts (everything before is header: ftyp + moov)
val moofStartPos = findMoofPosition(buffer)
if (moofStartPos < 0) {
Log.e(TAG, "HLS_PROCESS: Could not find moof in ${file.name}")
return
}
Log.i(TAG, "HLS_PROCESS: Segment $segmentIndex - moof starts at $moofStartPos, total size=${originalBytes.size}")
// For segment 0, extract header as init.mp4
if (segmentIndex == 0 && !initSegmentEmitted) {
val headerBytes = originalBytes.copyOfRange(0, moofStartPos)
val initFile = File(outputDirectory, "init.mp4")
initFile.writeBytes(headerBytes)
Log.i(TAG, "HLS_PROCESS: Created init.mp4 with ${headerBytes.size} bytes (ftyp+moov)")
// Debug: dump the init.mp4 structure
dumpMp4BoxStructure(headerBytes, "INIT_STRUCTURE")
callbacks.onInitSegmentReady(initFile)
initSegmentEmitted = true
}
// Extract fragment (moof + mdat only)
val fragmentBytes = originalBytes.copyOfRange(moofStartPos, originalBytes.size)
Log.d(TAG, "HLS_PROCESS: Extracted fragment of ${fragmentBytes.size} bytes")
// Inject tfdt into the fragment
// Note: in the fragment, moof is at position 0
val processedFragment = injectTfdtIntoFragment(fragmentBytes, baseMediaDecodeTimeUs)
// Write back the processed fragment (stripped of header)
file.writeBytes(processedFragment)
Log.i(TAG, "HLS_PROCESS: Segment $segmentIndex processed - header stripped, tfdt injected, final size=${processedFragment.size}")
} catch (e: Exception) {
Log.e(TAG, "Error processing segment ${file.name} for HLS", e)
}
}
/**
* Finds the position of the moof box in the file.
* Returns -1 if not found.
*/
private fun findMoofPosition(buffer: ByteBuffer): Int {
var pos = 0
while (pos < buffer.limit() - 8) {
buffer.position(pos)
val size = buffer.int.toLong() and 0xFFFFFFFFL
val type = buffer.int
if (size < 8) break
// 'moof' = 0x6D6F6F66
if (type == 0x6D6F6F66) {
return pos
}
pos += size.toInt()
}
return -1
}
/**
* Injects a tfdt box into a fragment (moof+mdat).
* The fragment has moof at position 0 (header already stripped).
* Also fixes tfhd.base_data_offset since we stripped the original file header.
*/
private fun injectTfdtIntoFragment(fragmentBytes: ByteArray, baseMediaDecodeTimeUs: Long): ByteArray {
val buffer = ByteBuffer.wrap(fragmentBytes).order(ByteOrder.BIG_ENDIAN)
// Find box positions within the fragment (moof is at position 0)
val positions = findBoxPositionsInFragment(buffer)
if (positions == null) {
Log.e(TAG, "TFDT_INJECT: Could not find required boxes in fragment")
return fragmentBytes
}
val (moofPos, moofSize, trafPos, trafSize, tfhdPos, tfhdEnd, trunPos) = positions
Log.d(TAG, "TFDT_INJECT: Fragment boxes - moof@$moofPos(size=$moofSize), traf@$trafPos, tfhd@$tfhdPos, trun@$trunPos")
// First, fix tfhd.base_data_offset - it was pointing to the original file position
// but now moof is at position 0, so base_data_offset should be 0
fixTfhdBaseDataOffset(buffer, tfhdPos.toInt())
// Create tfdt box (version 1, 64-bit baseMediaDecodeTime)
val tfdtSize = 20
val tfdtBytes = ByteBuffer.allocate(tfdtSize).order(ByteOrder.BIG_ENDIAN)
tfdtBytes.putInt(tfdtSize) // size
tfdtBytes.putInt(0x74666474) // 'tfdt'
tfdtBytes.put(1.toByte()) // version = 1
tfdtBytes.put(0.toByte()) // flags[0]
tfdtBytes.put(0.toByte()) // flags[1]
tfdtBytes.put(0.toByte()) // flags[2]
tfdtBytes.putLong(baseMediaDecodeTimeUs) // baseMediaDecodeTime
// Create new fragment with tfdt injected after tfhd
val newBytes = ByteArray(fragmentBytes.size + tfdtSize)
val insertPos = tfhdEnd.toInt()
// Copy bytes before insertion point
System.arraycopy(fragmentBytes, 0, newBytes, 0, insertPos)
// Insert tfdt
System.arraycopy(tfdtBytes.array(), 0, newBytes, insertPos, tfdtSize)
// Copy bytes after insertion point
System.arraycopy(fragmentBytes, insertPos, newBytes, insertPos + tfdtSize, fragmentBytes.size - insertPos)
// Update box sizes in the new buffer
val newBuffer = ByteBuffer.wrap(newBytes).order(ByteOrder.BIG_ENDIAN)
// Update moof size
val newMoofSize = moofSize + tfdtSize
newBuffer.putInt(moofPos.toInt(), newMoofSize.toInt())
// Update traf size
val newTrafSize = trafSize + tfdtSize
newBuffer.putInt(trafPos.toInt(), newTrafSize.toInt())
// Update trun data_offset if present
val newTrunPos = trunPos.toInt() + tfdtSize
updateTrunDataOffset(newBuffer, newTrunPos, tfdtSize)
Log.i(TAG, "TFDT_INJECT: Injected tfdt with baseMediaDecodeTime=$baseMediaDecodeTimeUs us")
return newBytes
}
/**
* Data class to hold box positions for tfdt injection.
*/
private data class BoxPositions(
val moofPos: Long,
val moofSize: Long,
val trafPos: Long,
val trafSize: Long,
val tfhdPos: Long, // Position of tfhd (need to fix base_data_offset)
val tfhdEnd: Long, // Position right after tfhd where we'll insert tfdt
val trunPos: Long // Position of trun (need to update its data_offset)
)
/**
* Finds the positions of moof, traf, tfhd, and trun boxes in a fragment.
* In a fragment, moof is expected to be at position 0.
*/
private fun findBoxPositionsInFragment(buffer: ByteBuffer): BoxPositions? {
val fileSize = buffer.limit()
var pos = 0
while (pos < fileSize - 8) {
buffer.position(pos)
val size = buffer.int.toLong() and 0xFFFFFFFFL
val type = buffer.int
if (size < 8) break
// 'moof' = 0x6D6F6F66
if (type == 0x6D6F6F66) {
val moofPos = pos.toLong()
val moofSize = size
val moofEnd = pos + size.toInt()
var childPos = pos + 8
while (childPos < moofEnd - 8) {
buffer.position(childPos)
val childSize = buffer.int.toLong() and 0xFFFFFFFFL
val childType = buffer.int
if (childSize < 8) break
// 'traf' = 0x74726166
if (childType == 0x74726166) {
val trafPos = childPos.toLong()
val trafSize = childSize
val trafEnd = childPos + childSize.toInt()
var trafChildPos = childPos + 8
var tfhdPos: Long = -1
var tfhdEnd: Long = -1
var trunPos: Long = -1
while (trafChildPos < trafEnd - 8) {
buffer.position(trafChildPos)
val trafChildSize = buffer.int.toLong() and 0xFFFFFFFFL
val trafChildType = buffer.int
if (trafChildSize < 8) break
// 'tfhd' = 0x74666864
if (trafChildType == 0x74666864) {
tfhdPos = trafChildPos.toLong()
tfhdEnd = trafChildPos + trafChildSize
}
// 'trun' = 0x7472756E
else if (trafChildType == 0x7472756E) {
trunPos = trafChildPos.toLong()
}
trafChildPos += trafChildSize.toInt()
}
if (tfhdPos > 0 && tfhdEnd > 0 && trunPos > 0) {
return BoxPositions(moofPos, moofSize, trafPos, trafSize, tfhdPos, tfhdEnd, trunPos)
}
}
childPos += childSize.toInt()
}
}
pos += size.toInt()
}
return null
}
/**
* Updates the trun box's data_offset field if present.
* The data_offset points to sample data in mdat, and needs to be
* increased by the size of the injected tfdt box.
*
* trun structure:
* - 4 bytes: size
* - 4 bytes: type ('trun')
* - 1 byte: version
* - 3 bytes: flags
* - 4 bytes: sample_count
* - [optional] 4 bytes: data_offset (if flags & 0x000001)
*/
private fun updateTrunDataOffset(buffer: ByteBuffer, trunPos: Int, offsetDelta: Int) {
buffer.position(trunPos + 8) // Skip size and type
val version = buffer.get().toInt() and 0xFF
val flags = ((buffer.get().toInt() and 0xFF) shl 16) or
((buffer.get().toInt() and 0xFF) shl 8) or
(buffer.get().toInt() and 0xFF)
// Check if data_offset_present flag (0x000001) is set
if ((flags and 0x000001) != 0) {
val sampleCount = buffer.int
val dataOffsetPos = trunPos + 16 // size(4) + type(4) + version(1) + flags(3) + sample_count(4)
buffer.position(dataOffsetPos)
val originalOffset = buffer.int
val newOffset = originalOffset + offsetDelta
buffer.putInt(dataOffsetPos, newOffset)
Log.d(TAG, "TFDT_INJECT: Updated trun data_offset: $originalOffset -> $newOffset")
} else {
Log.d(TAG, "TFDT_INJECT: trun has no data_offset field (flags=0x${flags.toString(16)})")
}
}
/**
* Fixes the tfhd box's base_data_offset field after stripping the file header.
* When we strip ftyp+moov from the original segment, the base_data_offset
* (which pointed to a position in the original file) becomes incorrect.
* We set it to 0 since moof is now at the start of the fragment.
*
* tfhd structure:
* - 4 bytes: size
* - 4 bytes: type ('tfhd')
* - 1 byte: version
* - 3 bytes: flags
* - 4 bytes: track_id
* - [optional] 8 bytes: base_data_offset (if flags & 0x000001)
*/
private fun fixTfhdBaseDataOffset(buffer: ByteBuffer, tfhdPos: Int) {
buffer.position(tfhdPos + 8) // Skip size and type
val version = buffer.get().toInt() and 0xFF
val flags = ((buffer.get().toInt() and 0xFF) shl 16) or
((buffer.get().toInt() and 0xFF) shl 8) or
(buffer.get().toInt() and 0xFF)
// Check if base_data_offset_present flag (0x000001) is set
if ((flags and 0x000001) != 0) {
val trackId = buffer.int
val baseDataOffsetPos = tfhdPos + 16 // size(4) + type(4) + version(1) + flags(3) + track_id(4)
buffer.position(baseDataOffsetPos)
val originalOffset = buffer.long
// Set to 0 since moof is now at start of fragment
buffer.putLong(baseDataOffsetPos, 0L)
Log.i(TAG, "TFHD_FIX: Fixed base_data_offset: $originalOffset -> 0")
} else {
Log.d(TAG, "TFHD_FIX: tfhd has no base_data_offset field (flags=0x${flags.toString(16)})")
}
}
/**
* Debug function to dump MP4 box structure and find avcC/stsd info.
*/
private fun dumpMp4BoxStructure(data: ByteArray, logPrefix: String) {
val buffer = ByteBuffer.wrap(data).order(ByteOrder.BIG_ENDIAN)
dumpBoxesRecursive(buffer, 0, data.size, 0, logPrefix)
}
private fun dumpBoxesRecursive(buffer: ByteBuffer, start: Int, end: Int, depth: Int, logPrefix: String) {
var pos = start
val indent = " ".repeat(depth)
while (pos < end - 8) {
buffer.position(pos)
val size = buffer.int.toLong() and 0xFFFFFFFFL
val typeInt = buffer.int
val typeBytes = ByteArray(4)
typeBytes[0] = ((typeInt shr 24) and 0xFF).toByte()
typeBytes[1] = ((typeInt shr 16) and 0xFF).toByte()
typeBytes[2] = ((typeInt shr 8) and 0xFF).toByte()
typeBytes[3] = (typeInt and 0xFF).toByte()
val typeStr = String(typeBytes, Charsets.US_ASCII)
if (size < 8 || pos + size > end) break
Log.i(TAG, "$logPrefix: $indent[$typeStr] size=$size @ $pos")
// For ftyp, dump the brands
if (typeStr == "ftyp" && size >= 16) {
buffer.position(pos + 8)
val majorBrand = ByteArray(4)
buffer.get(majorBrand)
val minorVersion = buffer.int
Log.i(TAG, "$logPrefix: $indent major_brand=${String(majorBrand)}, minor_version=$minorVersion")
val compatBrandsStart = pos + 16
val compatBrandsEnd = pos + size.toInt()
val brands = mutableListOf<String>()
var brandPos = compatBrandsStart
while (brandPos + 4 <= compatBrandsEnd) {
buffer.position(brandPos)
val brand = ByteArray(4)
buffer.get(brand)
brands.add(String(brand))
brandPos += 4
}
Log.i(TAG, "$logPrefix: $indent compatible_brands=${brands.joinToString(",")}")
}
// For avcC, dump the SPS/PPS info
if (typeStr == "avcC" && size >= 13) {
buffer.position(pos + 8)
val configVersion = buffer.get().toInt() and 0xFF
val profileIdc = buffer.get().toInt() and 0xFF
val profileCompat = buffer.get().toInt() and 0xFF
val levelIdc = buffer.get().toInt() and 0xFF
val lengthSizeMinusOne = buffer.get().toInt() and 0x03
val numSps = buffer.get().toInt() and 0x1F
Log.i(TAG, "$logPrefix: $indent avcC: version=$configVersion, profile=$profileIdc, level=$levelIdc, numSPS=$numSps")
// Read SPS lengths
var spsTotal = 0
for (i in 0 until numSps) {
val spsLen = buffer.short.toInt() and 0xFFFF
spsTotal += spsLen
Log.i(TAG, "$logPrefix: $indent SPS[$i] length=$spsLen")
buffer.position(buffer.position() + spsLen) // Skip SPS data
}
// Read PPS count and lengths
if (buffer.position() < pos + size) {
val numPps = buffer.get().toInt() and 0xFF
var ppsTotal = 0
for (i in 0 until numPps) {
if (buffer.position() + 2 <= pos + size) {
val ppsLen = buffer.short.toInt() and 0xFFFF
ppsTotal += ppsLen
Log.i(TAG, "$logPrefix: $indent PPS[$i] length=$ppsLen")
buffer.position(buffer.position() + ppsLen) // Skip PPS data
}
}
Log.i(TAG, "$logPrefix: $indent avcC total: ${size} bytes, SPS=$spsTotal bytes, PPS=$ppsTotal bytes")
}
}
// Recurse into container boxes
val containerBoxes = setOf("moov", "trak", "mdia", "minf", "stbl", "stsd", "mvex", "edts")
if (typeStr in containerBoxes) {
// stsd has 8 extra bytes (version/flags + entry_count) before children
val childStart = if (typeStr == "stsd") pos + 16 else pos + 8
dumpBoxesRecursive(buffer, childStart, pos + size.toInt(), depth + 1, logPrefix)
}
// avc1 is a sample entry, structure: 8 byte header + 78 byte fixed fields + child boxes
if (typeStr == "avc1") {
dumpBoxesRecursive(buffer, pos + 86, pos + size.toInt(), depth + 1, logPrefix)
}
pos += size.toInt()
}
}
}
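
The cumulative-timestamp scheme is easy to verify in isolation: each segment's baseMediaDecodeTime is the running sum of all previous segment durations in the 1 MHz timescale, and the injected tfdt is always exactly 20 bytes. A standalone sketch (durations invented for illustration, not from a real recording):

import java.nio.ByteBuffer
import java.nio.ByteOrder

// Builds the same 20-byte version-1 tfdt box the manager injects.
fun tfdtBox(baseMediaDecodeTimeUs: Long): ByteArray =
    ByteBuffer.allocate(20).order(ByteOrder.BIG_ENDIAN)
        .putInt(20)          // box size
        .putInt(0x74666474)  // 'tfdt'
        .putInt(0x01000000)  // version = 1, flags = 0
        .putLong(baseMediaDecodeTimeUs)
        .array()

fun main() {
    val segmentDurationsUs = longArrayOf(6_000_000, 6_033_333, 5_966_667)
    var baseUs = 0L
    segmentDurationsUs.forEachIndexed { index, durationUs ->
        println("segment $index: baseMediaDecodeTime = $baseUs us")
        check(tfdtBox(baseUs).size == 20)
        baseUs += durationUs
    }
}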

View File

@@ -2,19 +2,85 @@ package com.mrousavy.camera.core
import android.annotation.SuppressLint
import android.content.Context
import android.content.res.Configuration
import android.graphics.Point
import android.os.Handler
import android.os.Looper
import android.util.Log
import android.util.Size
import android.view.PixelCopy
import android.view.Surface
import android.view.SurfaceHolder
import android.view.SurfaceView
import android.view.WindowManager
import com.facebook.react.bridge.UiThreadUtil
import com.mrousavy.camera.extensions.resize
import com.mrousavy.camera.extensions.rotatedBy
import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.ResizeMode
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException
import kotlin.math.roundToInt
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.suspendCancellableCoroutine
import kotlinx.coroutines.withContext
import android.graphics.Bitmap
import android.graphics.Matrix
fun rotateBitmap90CounterClockwise(source: Bitmap): Bitmap {
val width = source.width
val height = source.height
// Create a new Bitmap with swapped width and height
val rotatedBitmap = Bitmap.createBitmap(height, width, source.config ?: Bitmap.Config.ARGB_8888)
for (y in 0 until height) {
for (x in 0 until width) {
// Set the pixel in the new position
rotatedBitmap.setPixel(y, width - 1 - x, source.getPixel(x, y))
}
}
return rotatedBitmap
}
fun Bitmap.transformBitmap(orientation: Orientation): Bitmap {
return when (orientation) {
Orientation.PORTRAIT -> this // No transformation needed
Orientation.LANDSCAPE_LEFT -> {
// Transpose (swap width and height)
val transposedBitmap = Bitmap.createBitmap(height, width, config ?: Bitmap.Config.ARGB_8888)
for (y in 0 until height) {
for (x in 0 until width) {
transposedBitmap.setPixel(y, width - 1 - x, getPixel(x, y))
}
}
transposedBitmap
}
Orientation.PORTRAIT_UPSIDE_DOWN -> {
// Invert vertically and horizontally (180-degree rotation)
val invertedBitmap = Bitmap.createBitmap(width, height, config ?: Bitmap.Config.ARGB_8888)
for (y in 0 until height) {
for (x in 0 until width) {
invertedBitmap.setPixel(width - 1 - x, height - 1 - y, getPixel(x, y))
}
}
invertedBitmap
}
Orientation.LANDSCAPE_RIGHT -> {
// Transpose (swap width and height) and invert vertically
val transposedBitmap = Bitmap.createBitmap(height, width, config ?: Bitmap.Config.ARGB_8888)
for (y in 0 until height) {
for (x in 0 until width) {
transposedBitmap.setPixel(height - 1 - y, x, getPixel(x, y))
}
}
transposedBitmap
}
}
}
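// Editor sketch, not part of the diff: the setPixel loops above make one JNI
// call per pixel and are very slow on full-resolution frames. The same
// transforms can be expressed with android.graphics.Matrix (already imported
// in this file), which the framework executes natively:
fun Bitmap.rotatedByMatrix(degrees: Float, flipVertical: Boolean = false): Bitmap {
    val matrix = Matrix().apply {
        postRotate(degrees)
        if (flipVertical) postScale(1f, -1f)
    }
    return Bitmap.createBitmap(this, 0, 0, width, height, matrix, true)
}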
@SuppressLint("ViewConstructor")
class PreviewView(context: Context, callback: SurfaceHolder.Callback) :
@@ -80,6 +146,52 @@ class PreviewView(context: Context, callback: SurfaceHolder.Callback) :
}
}
suspend fun getBitmap(): Bitmap? = withContext(Dispatchers.Main) {
val frame = holder.getSurfaceFrame()
val width = frame.width()
val height = frame.height()
// Create bitmap matching surface frame dimensions for PixelCopy
// The original code swapped dimensions assuming landscape input - keep that for consistency
val bitmap = Bitmap.createBitmap(height, width, Bitmap.Config.ARGB_8888)
// Use a coroutine to suspend until the PixelCopy request is complete
suspendCancellableCoroutine<Bitmap?> { continuation ->
PixelCopy.request(
holder.surface,
bitmap,
{ copyResult ->
if (copyResult == PixelCopy.SUCCESS) {
// Get actual device rotation from WindowManager instead of relying on
// the orientation prop, which may not update on Android when rotating
// between landscape-left and landscape-right.
val windowManager = context.getSystemService(Context.WINDOW_SERVICE) as WindowManager
val deviceRotation = windowManager.defaultDisplay.rotation
val actualOrientation = when (deviceRotation) {
Surface.ROTATION_0 -> Orientation.PORTRAIT
Surface.ROTATION_90 -> Orientation.LANDSCAPE_LEFT
Surface.ROTATION_180 -> Orientation.PORTRAIT_UPSIDE_DOWN
Surface.ROTATION_270 -> Orientation.LANDSCAPE_RIGHT
else -> Orientation.PORTRAIT
}
Log.i(TAG, "getBitmap: orientation prop = $orientation, deviceRotation = $deviceRotation, actualOrientation = $actualOrientation")
continuation.resume(bitmap.transformBitmap(actualOrientation))
} else {
continuation.resumeWithException(
RuntimeException("PixelCopy failed with error code $copyResult")
)
}
},
Handler(Looper.getMainLooper())
)
}
}
fun convertLayerPointToCameraCoordinates(point: Point, cameraDeviceDetails: CameraDeviceDetails): Point {
val sensorOrientation = cameraDeviceDetails.sensorOrientation
val cameraSize = Size(cameraDeviceDetails.activeSize.width(), cameraDeviceDetails.activeSize.height())
@@ -97,22 +209,14 @@ class PreviewView(context: Context, callback: SurfaceHolder.Callback) :
}
}
override fun requestLayout() {
super.requestLayout()
// Manually trigger measure & layout, as RN on Android skips those.
// See this issue: https://github.com/facebook/react-native/issues/17968#issuecomment-721958427
post {
measure(MeasureSpec.makeMeasureSpec(width, MeasureSpec.EXACTLY), MeasureSpec.makeMeasureSpec(height, MeasureSpec.EXACTLY))
layout(left, top, right, bottom)
}
}
private fun getSize(contentSize: Size, containerSize: Size, resizeMode: ResizeMode): Size {
var contentSize = contentSize
// Swap dimensions if orientation is landscape
if (orientation.isLandscape()) {
var androidOrientation = context.getResources().getConfiguration().orientation;
if (androidOrientation == Configuration.ORIENTATION_LANDSCAPE) {
contentSize = Size(contentSize.height, contentSize.width)
}
val contentAspectRatio = contentSize.width.toDouble() / contentSize.height
val containerAspectRatio = containerSize.width.toDouble() / containerSize.height
if (!(contentAspectRatio > 0 && containerAspectRatio > 0)) {

View File

@@ -4,6 +4,7 @@ import android.content.Context
import android.util.Log
import android.util.Size
import android.view.Surface
import androidx.media3.common.util.UnstableApi
import com.facebook.common.statfs.StatFsHelper
import com.mrousavy.camera.extensions.getRecommendedBitRate
import com.mrousavy.camera.types.Orientation
@@ -14,6 +15,8 @@ import android.os.Environment
import java.text.SimpleDateFormat
import java.util.Locale
import java.util.Date
@UnstableApi
class RecordingSession(
context: Context,
val cameraId: String,
@@ -21,12 +24,14 @@ class RecordingSession(
private val enableAudio: Boolean,
private val fps: Int? = null,
private val hdr: Boolean = false,
private val cameraOrientation: Orientation,
val cameraOrientation: Orientation,
private val options: RecordVideoOptions,
private val filePath: String,
private val callback: (video: Video) -> Unit,
private val onError: (error: CameraError) -> Unit,
private val allCallbacks: CameraSession.Callback,
// Use the new FragmentedMp4Muxer-based recorder for HLS-compatible output
private val useFragmentedMp4: Boolean = true
) {
companion object {
private const val TAG = "RecordingSession"
@@ -34,6 +39,9 @@ class RecordingSession(
private const val AUDIO_SAMPLING_RATE = 44_100
private const val AUDIO_BIT_RATE = 16 * AUDIO_SAMPLING_RATE
private const val AUDIO_CHANNELS = 1
// Segment duration in seconds (matching iOS default of 6 seconds)
private const val SEGMENT_DURATION_SECONDS = 6
}
data class Video(val path: String, val durationMs: Long, val size: Size)
@@ -41,7 +49,23 @@ class RecordingSession(
private val outputPath: File = File(filePath)
private val bitRate = getBitRate()
private val recorder = ChunkedRecordingManager.fromParams(
// Use FragmentedRecordingManager for HLS-compatible fMP4 output,
// or fall back to ChunkedRecordingManager for regular MP4 chunks
private val recorder: ChunkedRecorderInterface = if (useFragmentedMp4) {
FragmentedRecordingManager.fromParams(
allCallbacks,
size,
enableAudio,
fps,
cameraOrientation,
bitRate,
options,
outputPath,
SEGMENT_DURATION_SECONDS
)
} else {
ChunkedRecordingManager.fromParams(
allCallbacks,
size,
enableAudio,
@@ -51,6 +75,7 @@ class RecordingSession(
options,
outputPath
)
}
private var startTime: Long? = null
val surface: Surface
get() {

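Both branches assign to the same recorder property, so a shared interface is implied. Its definition is not shown in this diff; a minimal sketch of what ChunkedRecorderInterface might declare, with hypothetical member names:

// Hypothetical sketch; the real interface is defined elsewhere in the repo.
import android.view.Surface

interface ChunkedRecorderInterface {
    val surface: Surface // encoder input surface fed by the video pipeline
    fun start()          // begin emitting segments
    fun finish()         // flush and close the final segment
}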

@@ -162,6 +162,14 @@ class VideoPipeline(
// 4. Get the transform matrix from the SurfaceTexture (rotations/scales applied by Camera)
surfaceTexture.getTransformMatrix(transformMatrix)
// Log transform matrix for debugging rotation issues (only when recording)
if (recordingSession != null) {
Log.i(TAG, "ROTATION_DEBUG TransformMatrix: [${transformMatrix[0]}, ${transformMatrix[1]}, ${transformMatrix[2]}, ${transformMatrix[3]}], " +
"[${transformMatrix[4]}, ${transformMatrix[5]}, ${transformMatrix[6]}, ${transformMatrix[7]}], " +
"[${transformMatrix[8]}, ${transformMatrix[9]}, ${transformMatrix[10]}, ${transformMatrix[11]}], " +
"[${transformMatrix[12]}, ${transformMatrix[13]}, ${transformMatrix[14]}, ${transformMatrix[15]}]")
}
// 5. Draw it with applied rotation/mirroring
onFrame(transformMatrix)
@@ -181,11 +189,15 @@ class VideoPipeline(
/**
* Configures the Pipeline to also write Frames to a Surface from a `MediaRecorder` (or null)
*/
fun setRecordingSessionOutput(recordingSession: RecordingSession?) {
fun setRecordingSessionOutput(recordingSession: RecordingSession?, orientation: Orientation = Orientation.LANDSCAPE_LEFT) {
synchronized(this) {
if (recordingSession != null) {
// Configure OpenGL pipeline to stream Frames into the Recording Session's surface
Log.i(TAG, "Setting ${recordingSession.size} RecordingSession Output...")
Log.i(TAG, "Setting ${recordingSession.size} RecordingSession Output with orientation=$orientation...")
// Set the recording orientation for the native layer
// 0 = LANDSCAPE_LEFT (CCW), 1 = LANDSCAPE_RIGHT (CW)
val orientationValue = if (orientation == Orientation.LANDSCAPE_RIGHT) 1 else 0
setRecordingOrientation(orientationValue)
setRecordingSessionOutputSurface(recordingSession.surface)
this.recordingSession = recordingSession
} else {
@@ -252,5 +264,6 @@ class VideoPipeline(
private external fun onFrame(transformMatrix: FloatArray)
private external fun setRecordingSessionOutputSurface(surface: Any)
private external fun removeRecordingSessionOutputSurface()
private external fun setRecordingOrientation(orientation: Int)
private external fun initHybrid(width: Int, height: Int): HybridData
}

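The orientation handed to the native layer is a plain Int flag; extracted as a standalone function, the mapping above reads:

// Same mapping as the inline code above: 0 = LANDSCAPE_LEFT (CCW), 1 = LANDSCAPE_RIGHT (CW).
fun recordingOrientationFlag(orientation: Orientation): Int =
    if (orientation == Orientation.LANDSCAPE_RIGHT) 1 else 0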

@@ -1,3 +1,5 @@
@file:Suppress("DEPRECATION")
package com.mrousavy.camera.frameprocessor
import android.util.Log
@@ -7,7 +9,6 @@ import com.facebook.jni.HybridData
import com.facebook.proguard.annotations.DoNotStrip
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.UiThreadUtil
import com.facebook.react.turbomodule.core.CallInvokerHolderImpl
import com.facebook.react.uimanager.UIManagerHelper
import com.mrousavy.camera.CameraView
import com.mrousavy.camera.core.ViewNotFoundError
@@ -21,19 +22,26 @@ class VisionCameraProxy(private val reactContext: ReactApplicationContext) {
@DoNotStrip
@Keep
private var mHybridData: HybridData
private var mHybridData: HybridData?
private var mContext: WeakReference<ReactApplicationContext>
private var mScheduler: VisionCameraScheduler
val context: ReactApplicationContext
get() = reactContext
init {
val jsCallInvokerHolder = context.catalystInstance.jsCallInvokerHolder as CallInvokerHolderImpl
val jsRuntimeHolder =
context.javaScriptContextHolder?.get() ?: throw Error("JSI Runtime is null! VisionCamera does not yet support bridgeless mode..")
// TODO: Fix for React Native 0.79+ - these APIs are now framework-only
// Since Frame Processors are disabled anyway (react-native-worklets-core not found),
// we'll disable this functionality to allow the build to complete
Log.w(TAG, "Frame Processor initialization disabled due to React Native 0.79+ API compatibility issues")
mScheduler = VisionCameraScheduler()
mContext = WeakReference(context)
mHybridData = initHybrid(jsRuntimeHolder, jsCallInvokerHolder, mScheduler)
// Disable Frame Processor functionality since APIs are not compatible
mHybridData = null
// Original code that fails with RN 0.79+:
// val jsCallInvokerHolder = context.catalystInstance.jsCallInvokerHolder as CallInvokerHolderImpl
// val jsRuntimeHolder = context.javaScriptContextHolder?.get() ?: throw Error("JSI Runtime is null!")
// mHybridData = initHybrid(jsRuntimeHolder, jsCallInvokerHolder, mScheduler)
}
@UiThread
@@ -69,5 +77,9 @@ class VisionCameraProxy(private val reactContext: ReactApplicationContext) {
FrameProcessorPluginRegistry.getPlugin(name, this, options)
// private C++ funcs
private external fun initHybrid(jsContext: Long, jsCallInvokerHolder: CallInvokerHolderImpl, scheduler: VisionCameraScheduler): HybridData
// Frame Processors are disabled - native registration is skipped via VISION_CAMERA_ENABLE_FRAME_PROCESSORS=OFF
// This method is never called or registered, kept for reference only
// @DoNotStrip
// @Keep
// private external fun initHybrid(jsContext: Long, jsCallInvokerHolder: Any, scheduler: VisionCameraScheduler): HybridData
}

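With mHybridData now nullable and left null, every caller has to guard before touching native state. A hedged sketch of that call-site pattern (the method and its signature are hypothetical, not shown in this diff):

// Hypothetical call site; real callers live elsewhere in VisionCameraProxy.
fun setFrameProcessor(viewTag: Int) {
    if (mHybridData == null) {
        Log.w(TAG, "Frame Processors are disabled; ignoring setFrameProcessor($viewTag)")
        return
    }
    // ...the original native call would go here...
}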

@@ -1,10 +1,30 @@
package com.mrousavy.camera.utils
import android.content.Context
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.util.Size
import java.io.File
import java.io.FileOutputStream
class FileUtils {
companion object {
fun writeBitmapTofile(bitmap: Bitmap, file: File, quality: Int) {
FileOutputStream(file).use { stream ->
// Respect the caller-supplied quality instead of a hardcoded value
bitmap.compress(Bitmap.CompressFormat.JPEG, quality, stream)
}
}
fun getImageSize(imagePath: String): Size {
val bitmapOptions = BitmapFactory.Options().also {
it.inJustDecodeBounds = true
}
BitmapFactory.decodeFile(imagePath, bitmapOptions)
val width = bitmapOptions.outWidth
val height = bitmapOptions.outHeight
return Size(width, height)
}
fun createTempFile(context: Context, extension: String): File =
File.createTempFile("mrousavy", extension, context.cacheDir).also {
it.deleteOnExit()

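A short usage sketch for the helpers above; the bitmap, context, and quality value are illustrative stand-ins:

// Illustrative only: assumes a Bitmap `bitmap` and a Context `context` in scope.
val file = File(context.cacheDir, "snapshot.jpg")
FileUtils.writeBitmapTofile(bitmap, file, 90)
val size = FileUtils.getImageSize(file.absolutePath)
Log.i("FileUtils", "Wrote ${size.width}x${size.height} snapshot")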

@@ -11,6 +11,6 @@ inline fun withPromise(promise: Promise, closure: () -> Any?) {
} catch (e: Throwable) {
e.printStackTrace()
val error = if (e is CameraError) e else UnknownCameraError(e)
promise.reject("${error.domain}/${error.id}", error.message, error.cause)
promise.reject("${error.domain}/${error.id}", error.message ?: "Unknown error", error.cause)
}
}

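The ?: "Unknown error" fallback matters because Throwable.message is nullable on the JVM; a minimal Kotlin reproduction of the crash case:

// An exception constructed without a message has message == null,
// which previously propagated null into the React Native bridge rejection.
val e = RuntimeException()
println(e.message)                    // prints "null"
println(e.message ?: "Unknown error") // prints "Unknown error"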

@@ -17,8 +17,8 @@
"@react-native-community/blur": "^4.3.2",
"@react-navigation/native": "^6.1.7",
"@react-navigation/native-stack": "^6.9.13",
"react": "^18.2.0",
"react-native": "^0.72.3",
"react": "^19.1.1",
"react-native": "^0.81.0",
"react-native-fast-image": "^8.6.3",
"react-native-gesture-handler": "^2.12.1",
"react-native-mmkv": "^2.10.2",
@@ -42,7 +42,7 @@
"@types/react-native-vector-icons": "^6.4.13",
"@types/react-native-video": "^5.0.15",
"babel-plugin-module-resolver": "^5.0.0",
"eslint": "^8.46.0",
"eslint": "^9.33.0",
"eslint-plugin-prettier": "^5.0.0",
"metro-react-native-babel-preset": "^0.77.0",
"prettier": "^3.2.4",


@@ -47,7 +47,7 @@ export const RecordingButton: React.FC<RecordingButtonProps> = ({ style, camera,
onMediaCaptured(video, 'video')
onStoppedRecording()
},
})
}, 'video.mp4')
console.log('called startRecording()!')
isRecording.current = true
setRecordingState(true)

File diff suppressed because it is too large.

package/flake.lock (generated, 189 lines deleted)

@@ -1,189 +0,0 @@
{
"nodes": {
"android-nixpkgs": {
"inputs": {
"devshell": "devshell",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1701980274,
"narHash": "sha256-uKJIFvsahbWw52TsIht7g2iosXBgJDRMSMoCE1fvEAk=",
"owner": "tadfisher",
"repo": "android-nixpkgs",
"rev": "bce9d437ed54ee1425b66442a12814fee4cdbd51",
"type": "github"
},
"original": {
"owner": "tadfisher",
"repo": "android-nixpkgs",
"type": "github"
}
},
"devshell": {
"inputs": {
"nixpkgs": [
"android-nixpkgs",
"nixpkgs"
],
"systems": "systems"
},
"locked": {
"lastModified": 1701787589,
"narHash": "sha256-ce+oQR4Zq9VOsLoh9bZT8Ip9PaMLcjjBUHVPzW5d7Cw=",
"owner": "numtide",
"repo": "devshell",
"rev": "44ddedcbcfc2d52a76b64fb6122f209881bd3e1e",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "devshell",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1694102001,
"narHash": "sha256-vky6VPK1n1od6vXbqzOXnekrQpTL4hbPAwUhT5J9c9E=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "9e21c80adf67ebcb077d75bd5e7d724d21eeafd6",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1701718080,
"narHash": "sha256-6ovz0pG76dE0P170pmmZex1wWcQoeiomUZGggfH9XPs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2c7f3c0fb7c08a0814627611d9d7d45ab6d75335",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1702312524,
"narHash": "sha256-gkZJRDBUCpTPBvQk25G0B7vfbpEYM5s5OZqghkjZsnE=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "a9bf124c46ef298113270b1f84a164865987a91c",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"android-nixpkgs": "android-nixpkgs",
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": "nixpkgs_2"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}


@@ -1,77 +0,0 @@
{
description = "Sample Nix ts-node build";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
gitignore = {
url = "github:hercules-ci/gitignore.nix";
inputs.nixpkgs.follows = "nixpkgs";
};
android-nixpkgs = {
url = "github:tadfisher/android-nixpkgs";
};
};
outputs = {
self,
nixpkgs,
flake-utils,
gitignore,
android-nixpkgs,
...
}:
flake-utils.lib.eachDefaultSystem (system: let
pkgs = import nixpkgs {inherit system;};
nodejs = pkgs.nodejs-18_x;
# NOTE: this does not work
appBuild = pkgs.stdenv.mkDerivation {
name = "example-ts-node";
version = "0.1.0";
src = gitignore.lib.gitignoreSource ./.; # uses the gitignore in the repo to only copy files git would see
buildInputs = [nodejs];
# https://nixos.org/manual/nixpkgs/stable/#sec-stdenv-phases
buildPhase = ''
# each phase has pre/postHooks. When you make your own phase be sure to still call the hooks
runHook preBuild
npm ci
npm run build
runHook postBuild
'';
installPhase = ''
runHook preInstall
cp -r node_modules $out/node_modules
cp package.json $out/package.json
cp -r dist $out/dist
runHook postInstall
'';
};
android-sdk = android-nixpkgs.sdk.${system} (sdkPkgs:
with sdkPkgs; [
cmdline-tools-latest
build-tools-30-0-3
build-tools-33-0-0
build-tools-33-0-1
build-tools-34-0-0
platform-tools
platforms-android-33
platforms-android-34
emulator
ndk-23-1-7779620
cmake-3-22-1
system-images-android-33-google-apis-x86-64
system-images-android-34-google-apis-x86-64
]);
in
with pkgs; {
defaultPackage = appBuild;
devShell = mkShell {
buildInputs = [nodejs yarn watchman gradle_7 alejandra nodePackages.prettier ktlint kotlin-language-server];
ANDROID_SDK_BIN = android-sdk;
shellHook = ''
export JAVA_HOME=${pkgs.jdk17.home}
source ${android-sdk.out}/nix-support/setup-hook
export PATH=${android-sdk}/bin:$PATH
ORG_GRADLE_PROJECT_ANDROID_HOME="$ANDROID_HOME"
'';
};
});
}


@@ -50,4 +50,12 @@ extension CameraView: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAud
func resumeRecording(promise: Promise) {
cameraSession.resumeRecording(promise: promise)
}
func lockExposure(promise: Promise) {
cameraSession.lockCurrentExposure(promise: promise)
}
func unlockExposure(promise: Promise) {
cameraSession.unlockCurrentExposure(promise: promise)
}
}


@@ -87,7 +87,6 @@ public final class CameraView: UIView, CameraSessionDelegate {
var pinchGestureRecognizer: UIPinchGestureRecognizer?
var pinchScaleOffset: CGFloat = 1.0
private var currentConfigureCall: DispatchTime?
var lastProcessedTime: Date?
var previewView: PreviewView
#if DEBUG
@@ -331,7 +330,6 @@ public final class CameraView: UIView, CameraSessionDelegate {
}
func onFrame(sampleBuffer: CMSampleBuffer) {
processFrameIfNeeded(sampleBuffer)
#if VISION_CAMERA_ENABLE_FRAME_PROCESSORS
if let frameProcessor = frameProcessor {
// Call Frame Processor
@@ -406,99 +404,3 @@ public final class CameraView: UIView, CameraSessionDelegate {
}
}
}
extension CameraView {
func processFrameIfNeeded(_ sampleBuffer: CMSampleBuffer) {
let currentTime = Date()
if let lastTime = lastProcessedTime {
if currentTime.timeIntervalSince(lastTime) >= 10.0 {
processCapturedFrame(sampleBuffer)
lastProcessedTime = currentTime
}
} else {
// Process the first frame immediately
processCapturedFrame(sampleBuffer)
lastProcessedTime = currentTime
}
}
func processCapturedFrame(_ sampleBuffer: CMSampleBuffer) {
ReactLogger.log(level: .info, message: "processCapturedFrame")
// Your existing processing logic
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
// Analyze for white balance
let isWhiteBalanceIssue = analyzeFrameForWhiteBalance(ciImage: ciImage)
if isWhiteBalanceIssue {
ReactLogger.log(level: .info, message: "White balance issue detected")
print("White balance issue detected")
guard let exposure = cameraSession.configuration?.exposure else {
updateExposure(0.5)
return
}
updateExposure(exposure - 0.2)
ReactLogger.log(level: .info, message: "Exposure = \(exposure)")
} else {
ReactLogger.log(level: .info, message: "White balance is okay")
print("White balance is okay. Exposure = \(cameraSession.configuration?.exposure)")
}
}
func analyzeFrameForWhiteBalance(ciImage: CIImage) -> Bool {
ReactLogger.log(level: .info, message: "analyzeFrameForWhiteBalance")
let extent = ciImage.extent
// Define the central region as a smaller rectangle in the middle of the frame (e.g., 1/4 the size)
let centerRect = CGRect(
x: extent.origin.x + extent.size.width * 0.25,
y: extent.origin.y + extent.size.height * 0.25,
width: extent.size.width * 0.5,
height: extent.size.height * 0.5
)
// Crop the image to the centerRect
let croppedImage = ciImage.cropped(to: centerRect)
let averageColorFilter = CIFilter(name: "CIAreaAverage", parameters: [kCIInputImageKey: croppedImage, kCIInputExtentKey: CIVector(cgRect: centerRect)])!
guard let outputImage = averageColorFilter.outputImage else {
ReactLogger.log(level: .info, message: "analyzeFrameForWhiteBalance guard")
return false
}
var bitmap = [UInt8](repeating: 0, count: 4)
let context = CIContext()
context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: .RGBA8, colorSpace: nil)
let red = Float(bitmap[0]) / 255.0
let green = Float(bitmap[1]) / 255.0
let blue = Float(bitmap[2]) / 255.0
ReactLogger.log(level: .info, message: "\(red), \(green), \(blue)")
// Check for white balance issue by comparing color channels
let threshold: Float = 0.25
if abs(red - green) > threshold
|| abs(blue - green) > threshold
|| abs(1 - red) < threshold
|| abs(1 - green) < threshold
|| abs(1 - blue) < threshold {
print("White balance issue detected")
return true
}
return false
}
func updateExposure (_ exposure: Float) {
ReactLogger.log(level: .info, message: "Updating exposure: [\(exposure)]")
cameraSession.configure { config in
config.exposure = exposure
}
}
}


@@ -86,5 +86,13 @@ RCT_EXTERN_METHOD(focus
: (NSDictionary*)point resolve
: (RCTPromiseResolveBlock)resolve reject
: (RCTPromiseRejectBlock)reject);
RCT_EXTERN_METHOD(lockCurrentExposure
: (nonnull NSNumber*)node resolve
: (RCTPromiseResolveBlock)resolve reject
: (RCTPromiseRejectBlock)reject);
RCT_EXTERN_METHOD(unlockCurrentExposure
: (nonnull NSNumber*)node resolve
: (RCTPromiseResolveBlock)resolve reject
: (RCTPromiseRejectBlock)reject);
@end


@@ -111,6 +111,18 @@ final class CameraViewManager: RCTViewManager {
}
}
@objc
final func lockCurrentExposure(_ node: NSNumber, resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
let component = getCameraView(withTag: node)
component.lockExposure(promise: Promise(resolver: resolve, rejecter: reject))
}
@objc
final func unlockCurrentExposure(_ node: NSNumber, resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
let component = getCameraView(withTag: node)
component.unlockExposure(promise: Promise(resolver: resolve, rejecter: reject))
}
// MARK: Private
private func getCameraView(withTag tag: NSNumber) -> CameraView {


@@ -191,4 +191,68 @@ extension CameraSession {
}
}
}
func lockCurrentExposure(promise: Promise) {
CameraQueues.cameraQueue.async {
withPromise(promise) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
print("No capture device available")
return nil
}
guard captureDevice.isExposureModeSupported(.custom) else {
ReactLogger.log(level: .info, message: "Custom exposure mode not supported")
return nil
}
do {
// Lock the device for configuration
try captureDevice.lockForConfiguration()
// Get the current exposure duration and ISO
let currentExposureDuration = captureDevice.exposureDuration
let currentISO = captureDevice.iso
// The guard above already verified .custom support, so lock directly
captureDevice.setExposureModeCustom(duration: currentExposureDuration, iso: currentISO, completionHandler: nil)
ReactLogger.log(level: .info, message: "Exposure and ISO locked at current values")
// Unlock the device after configuration
captureDevice.unlockForConfiguration()
} catch {
ReactLogger.log(level: .warning, message: "Error locking exposure: \(error)")
}
return nil
}
}
}
func unlockCurrentExposure(promise: Promise) {
CameraQueues.cameraQueue.async {
withPromise(promise) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
print("No capture device available")
return nil
}
do {
// Check the mode that is actually set below (.continuousAutoExposure), not .autoExpose
if captureDevice.isExposureModeSupported(.continuousAutoExposure) {
try captureDevice.lockForConfiguration()
captureDevice.exposureMode = .continuousAutoExposure
captureDevice.unlockForConfiguration()
}
} catch {
ReactLogger.log(level: .warning, message: "Error unlocking exposure: \(error)")
}
return nil
}
}
}
}

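The exposure-lock methods above are iOS-only in this diff. For comparison, a hedged sketch of the Android analogue using Camera2's auto-exposure lock; this is not part of this changeset, and the session/builder plumbing is assumed:

// Hypothetical Android counterpart (Kotlin, Camera2); not included in this diff.
import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CaptureRequest
import android.os.Handler

fun setAutoExposureLock(
    session: CameraCaptureSession,
    builder: CaptureRequest.Builder,
    handler: Handler,
    locked: Boolean
) {
    // CONTROL_AE_LOCK freezes the current exposure; false returns to continuous auto-exposure
    builder.set(CaptureRequest.CONTROL_AE_LOCK, locked)
    session.setRepeatingRequest(builder.build(), null, handler)
}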

@@ -87,16 +87,15 @@
"devDependencies": {
"@expo/config-plugins": "^7.2.5",
"@jamesacarr/eslint-formatter-github-actions": "^0.2.0",
"@react-native/eslint-config": "^0.72.2",
"@react-native/typescript-config": "^0.74.0",
"@react-native/eslint-config": "^0.81.0",
"@react-native/typescript-config": "^0.81.0",
"@release-it/conventional-changelog": "^7.0.0",
"@types/react": "^18.2.19",
"@types/react-native": "^0.72.2",
"eslint": "^8.46.0",
"eslint-plugin-prettier": "^5.0.0",
"prettier": "^3.0.1",
"react": "^18.2.0",
"react-native": "^0.72.3",
"react": "^19.1.1",
"react-native": "^0.81.0",
"react-native-builder-bob": "^0.21.3",
"react-native-worklets-core": "^0.3.0",
"release-it": "^16.1.3",


@@ -319,6 +319,22 @@ export class Camera extends React.PureComponent<CameraProps, CameraState> {
throw tryParseNativeCameraError(e)
}
}
public async lockCurrentExposure(): Promise<void> {
try {
return await CameraModule.lockCurrentExposure(this.handle)
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
public async unlockCurrentExposure(): Promise<void> {
try {
return await CameraModule.unlockCurrentExposure(this.handle)
} catch (e) {
throw tryParseNativeCameraError(e)
}
}
//#endregion
//#region Static Functions (NativeModule)

File diff suppressed because it is too large.