chore: Move everything into package/
(#1745)
* Move everything into package * Remove .DS_Store * Move scripts and eslintrc to package * Create CODE_OF_CONDUCT.md * fix some links * Update all links (I think) * Update generated docs * Update notice-yarn-changes.yml * Update validate-android.yml * Update validate-cpp.yml * Delete notice-yarn-changes.yml * Update validate-cpp.yml * Update validate-cpp.yml * Update validate-js.yml * Update validate-cpp.yml * Update validate-cpp.yml * wrong c++ style * Revert "wrong c++ style" This reverts commit 55a3575589c6f13f8b05134d83384f55e0601ab2.
This commit is contained in:
@@ -0,0 +1,16 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import com.facebook.react.ReactPackage
|
||||
import com.facebook.react.bridge.NativeModule
|
||||
import com.facebook.react.bridge.ReactApplicationContext
|
||||
import com.facebook.react.uimanager.ViewManager
|
||||
|
||||
/**
 * React Native package entry point: registers the Camera native module
 * and the Camera view manager with the React bridge.
 */
class CameraPackage : ReactPackage {
  override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
    listOf(CameraViewModule(reactContext))

  override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> =
    listOf(CameraViewManager())
}
|
@@ -0,0 +1,35 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.os.Handler
|
||||
import android.os.HandlerThread
|
||||
import kotlinx.coroutines.CoroutineDispatcher
|
||||
import kotlinx.coroutines.android.asCoroutineDispatcher
|
||||
import kotlinx.coroutines.asExecutor
|
||||
import java.util.concurrent.Executor
|
||||
|
||||
/**
 * Holds the background queues (dedicated [HandlerThread]s) used by the camera.
 * Each [CameraQueue] exposes the same thread as a [Handler], an [Executor]
 * and a [CoroutineDispatcher].
 */
class CameraQueues {
  /** A single named camera queue, backed by its own [HandlerThread]. */
  class CameraQueue(name: String) {
    // The thread is started immediately so `looper` is available below.
    private val thread = HandlerThread(name).apply { start() }
    val handler = Handler(thread.looper)
    val coroutineDispatcher: CoroutineDispatcher = handler.asCoroutineDispatcher(name)
    val executor: Executor = coroutineDispatcher.asExecutor()

    // Best-effort cleanup: quit the backing thread once this queue is GC'd.
    protected fun finalize() {
      thread.quitSafely()
    }
  }

  companion object {
    /** Queue for general camera control/configuration work. */
    val cameraQueue = CameraQueue("mrousavy/VisionCamera.main")

    /** Queue for video/recording related work. */
    val videoQueue = CameraQueue("mrousavy/VisionCamera.video")
  }
}
|
||||
|
@@ -0,0 +1,48 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.util.Log
|
||||
import com.facebook.react.bridge.Arguments
|
||||
import com.facebook.react.bridge.ReactContext
|
||||
import com.facebook.react.bridge.WritableMap
|
||||
import com.facebook.react.uimanager.events.RCTEventEmitter
|
||||
|
||||
/** Emits the "cameraInitialized" event (no payload) to JS for this view. */
fun CameraView.invokeOnInitialized() {
  Log.i(CameraView.TAG, "invokeOnInitialized()")

  (context as ReactContext)
    .getJSModule(RCTEventEmitter::class.java)
    .receiveEvent(id, "cameraInitialized", null)
}
|
||||
|
||||
/**
 * Emits the "cameraError" event to JS for this view, wrapping non-[CameraError]
 * throwables in [UnknownCameraError]. The payload carries `code`, `message`
 * and (recursively) `cause`.
 */
fun CameraView.invokeOnError(error: Throwable) {
  Log.e(CameraView.TAG, "invokeOnError(...):")
  error.printStackTrace()

  // Normalize arbitrary throwables into a CameraError with a stable code.
  val cameraError = (error as? CameraError) ?: UnknownCameraError(error)
  val payload = Arguments.createMap().apply {
    putString("code", cameraError.code)
    putString("message", cameraError.message)
    cameraError.cause?.let { putMap("cause", errorToMap(it)) }
  }
  (context as ReactContext)
    .getJSModule(RCTEventEmitter::class.java)
    .receiveEvent(id, "cameraError", payload)
}
|
||||
|
||||
/** Emits the "cameraViewReady" event (empty payload) to JS for this view. */
fun CameraView.invokeOnViewReady() {
  val payload = Arguments.createMap()
  (context as ReactContext)
    .getJSModule(RCTEventEmitter::class.java)
    .receiveEvent(id, "cameraViewReady", payload)
}
|
||||
|
||||
/**
 * Recursively serializes a [Throwable] (message, stacktrace, and nested
 * `cause` chain) into a [WritableMap] suitable for sending to JS.
 */
private fun errorToMap(error: Throwable): WritableMap =
  Arguments.createMap().apply {
    putString("message", error.message)
    putString("stacktrace", error.stackTraceToString())
    error.cause?.let { putMap("cause", errorToMap(it)) }
  }
|
@@ -0,0 +1,9 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import com.facebook.react.bridge.ReadableMap
|
||||
|
||||
/**
 * Focuses the camera at the given point.
 * @param pointMap a map with integer `x` and `y` keys (view coordinates).
 */
suspend fun CameraView.focus(pointMap: ReadableMap) {
  cameraSession.focus(pointMap.getInt("x"), pointMap.getInt("y"))
}
|
@@ -0,0 +1,64 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.Manifest
|
||||
import android.annotation.SuppressLint
|
||||
import android.content.pm.PackageManager
|
||||
import androidx.core.content.ContextCompat
|
||||
import com.facebook.react.bridge.*
|
||||
import com.mrousavy.camera.parsers.Torch
|
||||
import com.mrousavy.camera.parsers.VideoCodec
|
||||
import com.mrousavy.camera.parsers.VideoFileType
|
||||
import com.mrousavy.camera.core.RecordingSession
|
||||
import com.mrousavy.camera.utils.makeErrorMap
|
||||
import java.util.*
|
||||
|
||||
/**
 * Starts a video recording.
 *
 * @param options may contain `flash` ("on"/"off"), `videoCodec` and `fileType`.
 * @param onRecordCallback invoked with `(video, null)` on success or
 *   `(null, errorMap)` on failure.
 * @throws MicrophonePermissionError if audio is enabled but RECORD_AUDIO
 *   permission was not granted.
 */
suspend fun CameraView.startRecording(options: ReadableMap, onRecordCallback: Callback) {
  // Recording with audio requires the RECORD_AUDIO runtime permission.
  if (audio == true &&
    ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED
  ) {
    throw MicrophonePermissionError()
  }

  if (options.hasKey("flash")) {
    // "Flash" during a recording is implemented by forcing the torch on/off;
    // stopRecording() restores the user-selected torch mode.
    cameraSession.setTorchMode(options.getString("flash") == "on")
  }

  val codec = if (options.hasKey("videoCodec")) {
    VideoCodec.fromUnionValue(options.getString("videoCodec"))
  } else {
    VideoCodec.H264
  }
  val fileType = if (options.hasKey("fileType")) {
    VideoFileType.fromUnionValue(options.getString("fileType"))
  } else {
    VideoFileType.MP4
  }

  val onSuccess = { video: RecordingSession.Video ->
    val result = Arguments.createMap()
    result.putString("path", video.path)
    result.putDouble("duration", video.durationMs.toDouble() / 1000.0)
    onRecordCallback(result, null)
  }
  val onFailure = { error: RecorderError ->
    onRecordCallback(null, makeErrorMap(error.code, error.message))
  }
  cameraSession.startRecording(audio == true, codec, fileType, onSuccess, onFailure)
}
|
||||
|
||||
/**
 * Pauses the currently running video recording.
 * Delegates to the camera session; the recording can later be resumed
 * with [CameraView.resumeRecording].
 */
@SuppressLint("RestrictedApi")
suspend fun CameraView.pauseRecording() {
  cameraSession.pauseRecording()
}
|
||||
|
||||
/**
 * Resumes a video recording previously paused with [CameraView.pauseRecording].
 */
@SuppressLint("RestrictedApi")
suspend fun CameraView.resumeRecording() {
  cameraSession.resumeRecording()
}
|
||||
|
||||
/**
 * Stops the current video recording and restores the user-selected torch mode.
 */
@SuppressLint("RestrictedApi")
suspend fun CameraView.stopRecording() {
  cameraSession.stopRecording()
  // startRecording() may have overridden the torch to act as a flash while
  // recording — restore it to whatever the `torch` prop currently says.
  cameraSession.setTorchMode(torch == Torch.ON)
}
|
@@ -0,0 +1,115 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.annotation.SuppressLint
|
||||
import android.content.Context
|
||||
import android.graphics.Bitmap
|
||||
import android.graphics.BitmapFactory
|
||||
import android.graphics.ImageFormat
|
||||
import android.graphics.Matrix
|
||||
import android.hardware.camera2.*
|
||||
import android.util.Log
|
||||
import com.facebook.react.bridge.Arguments
|
||||
import com.facebook.react.bridge.ReadableMap
|
||||
import com.facebook.react.bridge.WritableMap
|
||||
import com.mrousavy.camera.core.CameraSession
|
||||
import com.mrousavy.camera.parsers.Flash
|
||||
import com.mrousavy.camera.parsers.QualityPrioritization
|
||||
import com.mrousavy.camera.utils.*
|
||||
import kotlinx.coroutines.*
|
||||
import java.io.File
|
||||
import java.io.FileOutputStream
|
||||
import java.io.OutputStream
|
||||
|
||||
private const val TAG = "CameraView.takePhoto"
|
||||
|
||||
/**
 * Captures a photo, saves it to a temporary file, and returns a map with
 * `path`, `width`, `height`, `orientation`, `isRawPhoto` and `isMirrored`.
 *
 * @param optionsMap may contain `qualityPrioritization`, `flash`,
 *   `enableAutoRedEyeReduction`, `enableAutoStabilization`, `enableShutterSound`.
 */
@SuppressLint("UnsafeOptInUsageError")
suspend fun CameraView.takePhoto(optionsMap: ReadableMap): WritableMap {
  val options = optionsMap.toHashMap()
  Log.i(TAG, "Taking photo... Options: $options")

  // Optional settings with their defaults.
  val qualityPrioritization = options["qualityPrioritization"] as? String ?: "balanced"
  val flash = options["flash"] as? String ?: "off"
  val enableAutoRedEyeReduction = options["enableAutoRedEyeReduction"] == true
  val enableAutoStabilization = options["enableAutoStabilization"] == true
  val enableShutterSound = options["enableShutterSound"] as? Boolean ?: true

  val flashMode = Flash.fromUnionValue(flash)
  val qualityPrioritizationMode = QualityPrioritization.fromUnionValue(qualityPrioritization)

  val photo = cameraSession.takePhoto(
    qualityPrioritizationMode,
    flashMode,
    enableShutterSound,
    enableAutoRedEyeReduction,
    enableAutoStabilization,
    outputOrientation
  )

  // `use` guarantees the captured image is closed even if saving fails.
  photo.use {
    Log.i(TAG, "Successfully captured ${photo.image.width} x ${photo.image.height} photo!")

    val cameraCharacteristics = cameraManager.getCameraCharacteristics(cameraId!!)
    val path = savePhotoToFile(context, cameraCharacteristics, photo)
    Log.i(TAG, "Successfully saved photo to file! $path")

    return Arguments.createMap().apply {
      putString("path", path)
      putInt("width", photo.image.width)
      putInt("height", photo.image.height)
      putString("orientation", photo.orientation.unionValue)
      putBoolean("isRawPhoto", photo.format == ImageFormat.RAW_SENSOR)
      putBoolean("isMirrored", photo.isMirrored)
    }
  }
}
|
||||
|
||||
/**
 * Writes JPEG bytes to [stream]. If [isMirrored], the image is decoded,
 * flipped horizontally and re-encoded as JPEG (quality 100) first.
 */
private fun writeImageToStream(imageBytes: ByteArray, stream: OutputStream, isMirrored: Boolean) {
  if (!isMirrored) {
    // Fast path: write the encoded bytes straight through.
    stream.write(imageBytes)
    return
  }
  val decoded = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size)
  val flip = Matrix().apply { preScale(-1f, 1f) }
  val mirrored = Bitmap.createBitmap(decoded, 0, 0, decoded.width, decoded.height, flip, false)
  mirrored.compress(Bitmap.CompressFormat.JPEG, 100, stream)
}
|
||||
|
||||
/**
 * Persists a captured photo to a temporary file on the IO dispatcher and
 * returns the absolute file path.
 *
 * JPEG / DEPTH_JPEG images are written as-is (mirroring applied if needed);
 * RAW_SENSOR images are written via [DngCreator].
 *
 * @throws Error for any other, unsupported image format.
 */
private suspend fun savePhotoToFile(context: Context,
                                    cameraCharacteristics: CameraCharacteristics,
                                    photo: CameraSession.CapturedPhoto): String =
  withContext(Dispatchers.IO) {
    when (photo.format) {
      // When the format is JPEG or DEPTH JPEG we can simply save the bytes as-is
      ImageFormat.JPEG, ImageFormat.DEPTH_JPEG -> {
        val buffer = photo.image.planes[0].buffer
        val bytes = ByteArray(buffer.remaining()).also { buffer.get(it) }
        val file = createFile(context, ".jpg")
        FileOutputStream(file).use { stream ->
          writeImageToStream(bytes, stream, photo.isMirrored)
        }
        file.absolutePath
      }

      // When the format is RAW we use the DngCreator utility library
      ImageFormat.RAW_SENSOR -> {
        val dngCreator = DngCreator(cameraCharacteristics, photo.metadata)
        val file = createFile(context, ".dng")
        FileOutputStream(file).use { stream ->
          // TODO: Make sure orientation is loaded properly here?
          dngCreator.writeImage(stream, photo.image)
        }
        file.absolutePath
      }

      else -> throw Error("Failed to save Photo to file, image format is not supported! ${photo.format}")
    }
  }
|
||||
|
||||
/**
 * Creates a temp file ("mrousavy" prefix + [extension]) in the app's cache
 * directory, marked for deletion on JVM exit.
 */
private fun createFile(context: Context, extension: String): File {
  val file = File.createTempFile("mrousavy", extension, context.cacheDir)
  file.deleteOnExit()
  return file
}
|
247
package/android/src/main/java/com/mrousavy/camera/CameraView.kt
Normal file
247
package/android/src/main/java/com/mrousavy/camera/CameraView.kt
Normal file
@@ -0,0 +1,247 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.Manifest
|
||||
import android.annotation.SuppressLint
|
||||
import android.content.Context
|
||||
import android.content.pm.PackageManager
|
||||
import android.content.res.Configuration
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import android.view.ScaleGestureDetector
|
||||
import android.view.Surface
|
||||
import android.view.View
|
||||
import android.widget.FrameLayout
|
||||
import androidx.core.content.ContextCompat
|
||||
import com.facebook.react.bridge.ReadableMap
|
||||
import com.mrousavy.camera.core.CameraSession
|
||||
import com.mrousavy.camera.core.PreviewView
|
||||
import com.mrousavy.camera.extensions.containsAny
|
||||
import com.mrousavy.camera.extensions.installHierarchyFitter
|
||||
import com.mrousavy.camera.frameprocessor.FrameProcessor
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.PixelFormat
|
||||
import com.mrousavy.camera.parsers.Torch
|
||||
import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import kotlinx.coroutines.CoroutineScope
|
||||
import kotlinx.coroutines.Dispatchers
|
||||
import kotlinx.coroutines.launch
|
||||
|
||||
//
|
||||
// TODOs for the CameraView which are currently too hard to implement either because of CameraX' limitations, or my brain capacity.
|
||||
//
|
||||
// CameraView
|
||||
// TODO: High-speed video recordings (export in CameraViewModule::getAvailableVideoDevices(), and set in CameraView::configurePreview()) (120FPS+)
|
||||
|
||||
// CameraView+RecordVideo
|
||||
// TODO: Better startRecording()/stopRecording() (promise + callback, wait for TurboModules/JSI)
|
||||
|
||||
// CameraView+TakePhoto
|
||||
// TODO: takePhoto() depth data
|
||||
// TODO: takePhoto() raw capture
|
||||
// TODO: takePhoto() return with jsi::Value Image reference for faster capture
|
||||
|
||||
/**
 * The native Camera view. Hosts a [PreviewView] child and owns a
 * [CameraSession]; React props are pushed in by [CameraViewManager] and
 * applied in batches via [update].
 */
@SuppressLint("ClickableViewAccessibility", "ViewConstructor", "MissingPermission")
class CameraView(context: Context) : FrameLayout(context) {
  companion object {
    const val TAG = "CameraView"

    // Prop groups: changing a prop in a group triggers the corresponding
    // reconfiguration step in update(). Preview ⊂ Session ⊂ Format (see update()).
    private val propsThatRequirePreviewReconfiguration = arrayListOf("cameraId")
    private val propsThatRequireSessionReconfiguration = arrayListOf("cameraId", "format", "photo", "video", "enableFrameProcessor", "pixelFormat")
    private val propsThatRequireFormatReconfiguration = arrayListOf("fps", "hdr", "videoStabilizationMode", "lowLightBoost")
  }

  // react properties
  // props that require reconfiguring
  var cameraId: String? = null
  var enableDepthData = false
  var enableHighQualityPhotos: Boolean? = null
  var enablePortraitEffectsMatteDelivery = false
  // use-cases
  var photo: Boolean? = null
  var video: Boolean? = null
  var audio: Boolean? = null
  var enableFrameProcessor = false
  var pixelFormat: PixelFormat = PixelFormat.NATIVE
  // props that require format reconfiguring
  var format: ReadableMap? = null
  var fps: Int? = null
  var videoStabilizationMode: VideoStabilizationMode? = null
  var hdr: Boolean? = null // nullable bool
  var lowLightBoost: Boolean? = null // nullable bool
  // other props
  var isActive = false
  var torch: Torch = Torch.OFF
  var zoom: Float = 1f // in "factor"
  var orientation: Orientation? = null
  var enableZoomGesture: Boolean = false

  // private properties
  private var isMounted = false
  internal val cameraManager = context.getSystemService(Context.CAMERA_SERVICE) as CameraManager

  // session
  internal val cameraSession: CameraSession
  private var previewView: View? = null
  private var previewSurface: Surface? = null

  // Forwards the frame processor straight into the session whenever it is set.
  internal var frameProcessor: FrameProcessor? = null
    set(value) {
      field = value
      cameraSession.frameProcessor = frameProcessor
    }

  // Sensor orientation as reported by the session.
  private val inputOrientation: Orientation
    get() = cameraSession.orientation
  // The `orientation` prop wins over the sensor orientation when set.
  internal val outputOrientation: Orientation
    get() = orientation ?: inputOrientation

  init {
    this.installHierarchyFitter()
    // Safe before cameraSession is assigned: at construction time cameraId is
    // still null, so setupPreviewView() returns early without touching it.
    setupPreviewView()
    cameraSession = CameraSession(context, cameraManager, { invokeOnInitialized() }, { error -> invokeOnError(error) })
  }

  override fun onConfigurationChanged(newConfig: Configuration?) {
    super.onConfigurationChanged(newConfig)
    // TODO: updateOrientation()
  }

  override fun onAttachedToWindow() {
    super.onAttachedToWindow()
    // Fire onViewReady exactly once, on first attach.
    if (!isMounted) {
      isMounted = true
      invokeOnViewReady()
    }
    updateLifecycle()
  }

  override fun onDetachedFromWindow() {
    super.onDetachedFromWindow()
    updateLifecycle()
  }

  /**
   * (Re)creates the PreviewView child for the current cameraId. The session is
   * configured once the preview surface becomes available (async callback).
   */
  private fun setupPreviewView() {
    removeView(previewView)
    this.previewSurface = null

    val cameraId = cameraId ?: return
    val previewView = PreviewView(context, cameraManager, cameraId) { surface ->
      previewSurface = surface
      configureSession()
    }
    previewView.layoutParams = LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT)
    addView(previewView)
    this.previewView = previewView
  }

  /**
   * Applies a batch of changed React props. Derives which reconfiguration
   * steps are needed (preview implies session implies format) and runs them
   * in order. Errors are reported to JS via invokeOnError instead of thrown.
   */
  fun update(changedProps: ArrayList<String>) {
    Log.i(TAG, "Props changed: $changedProps")
    try {
      val shouldReconfigurePreview = changedProps.containsAny(propsThatRequirePreviewReconfiguration)
      val shouldReconfigureSession = shouldReconfigurePreview || changedProps.containsAny(propsThatRequireSessionReconfiguration)
      val shouldReconfigureFormat = shouldReconfigureSession || changedProps.containsAny(propsThatRequireFormatReconfiguration)
      val shouldReconfigureZoom = shouldReconfigureSession || changedProps.contains("zoom")
      val shouldReconfigureTorch = shouldReconfigureSession || changedProps.contains("torch")
      val shouldUpdateOrientation = /* TODO: When should we reconfigure this? */ shouldReconfigureSession || changedProps.contains("orientation")
      val shouldCheckActive = shouldReconfigureFormat || changedProps.contains("isActive")
      val shouldReconfigureZoomGesture = changedProps.contains("enableZoomGesture")

      if (shouldReconfigurePreview) {
        setupPreviewView()
      }
      if (shouldReconfigureSession) {
        configureSession()
      }
      if (shouldReconfigureFormat) {
        configureFormat()
      }
      if (shouldCheckActive) {
        updateLifecycle()
      }

      if (shouldReconfigureZoom) {
        updateZoom()
      }
      if (shouldReconfigureTorch) {
        updateTorch()
      }
      if (shouldUpdateOrientation) {
        // TODO: updateOrientation()
      }
      if (shouldReconfigureZoomGesture) {
        updateZoomGesture()
      }
    } catch (e: Throwable) {
      Log.e(TAG, "update() threw: ${e.message}")
      invokeOnError(e)
    }
  }

  /**
   * Configures the CameraSession outputs (preview/photo/video) from the
   * current props. Requires CAMERA permission and a cameraId; silently
   * returns if no preview surface is available yet (the surface callback
   * in setupPreviewView() will call this again).
   */
  private fun configureSession() {
    try {
      Log.i(TAG, "Configuring Camera Device...")

      if (ContextCompat.checkSelfPermission(context, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
        throw CameraPermissionError()
      }
      val cameraId = cameraId ?: throw NoCameraDeviceError()

      val format = format
      val targetVideoSize = if (format != null) Size(format.getInt("videoWidth"), format.getInt("videoHeight")) else null
      val targetPhotoSize = if (format != null) Size(format.getInt("photoWidth"), format.getInt("photoHeight")) else null
      // TODO: Allow previewSurface to be null/none
      val previewSurface = previewSurface ?: return

      val previewOutput = CameraOutputs.PreviewOutput(previewSurface)
      val photoOutput = if (photo == true) {
        CameraOutputs.PhotoOutput(targetPhotoSize)
      } else null
      // The video output is also needed for frame processors, even if `video` is off.
      val videoOutput = if (video == true || enableFrameProcessor) {
        CameraOutputs.VideoOutput(targetVideoSize, video == true, enableFrameProcessor, pixelFormat.toImageFormat())
      } else null

      cameraSession.configureSession(cameraId, previewOutput, photoOutput, videoOutput)
    } catch (e: Throwable) {
      Log.e(TAG, "Failed to configure session: ${e.message}", e)
      invokeOnError(e)
    }
  }

  private fun configureFormat() {
    cameraSession.configureFormat(fps, videoStabilizationMode, hdr, lowLightBoost)
  }

  // The session is only active while the view is attached AND isActive is set.
  private fun updateLifecycle() {
    cameraSession.setIsActive(isActive && isAttachedToWindow)
  }

  private fun updateZoom() {
    cameraSession.setZoom(zoom)
  }

  private fun updateTorch() {
    // setTorchMode is a suspend call, so hop onto a background coroutine.
    CoroutineScope(Dispatchers.Default).launch {
      cameraSession.setTorchMode(torch == Torch.ON)
    }
  }

  /** Installs (or removes) a pinch-to-zoom gesture recognizer on this view. */
  @SuppressLint("ClickableViewAccessibility")
  private fun updateZoomGesture() {
    if (enableZoomGesture) {
      val scaleGestureDetector = ScaleGestureDetector(context, object: ScaleGestureDetector.SimpleOnScaleGestureListener() {
        override fun onScale(detector: ScaleGestureDetector): Boolean {
          zoom *= detector.scaleFactor
          cameraSession.setZoom(zoom)
          return true
        }
      })
      setOnTouchListener { _, event ->
        scaleGestureDetector.onTouchEvent(event)
      }
    } else {
      setOnTouchListener(null)
    }
  }
}
|
@@ -0,0 +1,192 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import com.facebook.react.bridge.ReadableMap
|
||||
import com.facebook.react.common.MapBuilder
|
||||
import com.facebook.react.uimanager.ThemedReactContext
|
||||
import com.facebook.react.uimanager.ViewGroupManager
|
||||
import com.facebook.react.uimanager.annotations.ReactProp
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.PixelFormat
|
||||
import com.mrousavy.camera.parsers.Torch
|
||||
import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
|
||||
/**
 * React view manager for [CameraView]. Each @ReactProp setter records the
 * prop name in a per-view "transaction" when its value actually changed;
 * after React finishes updating all props, [onAfterUpdateTransaction] flushes
 * the collected names to [CameraView.update] in one batch.
 */
@Suppress("unused")
class CameraViewManager : ViewGroupManager<CameraView>() {

  public override fun createViewInstance(context: ThemedReactContext): CameraView {
    return CameraView(context)
  }

  // Flush all props changed in this transaction to the view, then clear the
  // transaction so the map does not retain the view.
  override fun onAfterUpdateTransaction(view: CameraView) {
    super.onAfterUpdateTransaction(view)
    val changedProps = cameraViewTransactions[view] ?: ArrayList()
    view.update(changedProps)
    cameraViewTransactions.remove(view)
  }

  // Maps native event names to the JS `onX` callback prop names.
  override fun getExportedCustomDirectEventTypeConstants(): MutableMap<String, Any>? {
    return MapBuilder.builder<String, Any>()
      .put("cameraViewReady", MapBuilder.of("registrationName", "onViewReady"))
      .put("cameraInitialized", MapBuilder.of("registrationName", "onInitialized"))
      .put("cameraError", MapBuilder.of("registrationName", "onError"))
      .build()
  }

  override fun getName(): String {
    return TAG
  }

  // Each setter below follows the same pattern: record the prop in the
  // current transaction only if the value changed, then store it on the view.
  @ReactProp(name = "cameraId")
  fun setCameraId(view: CameraView, cameraId: String) {
    if (view.cameraId != cameraId)
      addChangedPropToTransaction(view, "cameraId")
    view.cameraId = cameraId
  }

  @ReactProp(name = "photo")
  fun setPhoto(view: CameraView, photo: Boolean?) {
    if (view.photo != photo)
      addChangedPropToTransaction(view, "photo")
    view.photo = photo
  }

  @ReactProp(name = "video")
  fun setVideo(view: CameraView, video: Boolean?) {
    if (view.video != video)
      addChangedPropToTransaction(view, "video")
    view.video = video
  }

  @ReactProp(name = "audio")
  fun setAudio(view: CameraView, audio: Boolean?) {
    if (view.audio != audio)
      addChangedPropToTransaction(view, "audio")
    view.audio = audio
  }

  @ReactProp(name = "enableFrameProcessor")
  fun setEnableFrameProcessor(view: CameraView, enableFrameProcessor: Boolean) {
    if (view.enableFrameProcessor != enableFrameProcessor)
      addChangedPropToTransaction(view, "enableFrameProcessor")
    view.enableFrameProcessor = enableFrameProcessor
  }

  @ReactProp(name = "pixelFormat")
  fun setPixelFormat(view: CameraView, pixelFormat: String?) {
    val newPixelFormat = PixelFormat.fromUnionValue(pixelFormat)
    if (view.pixelFormat != newPixelFormat)
      addChangedPropToTransaction(view, "pixelFormat")
    // Fall back to NATIVE when the union value couldn't be parsed.
    view.pixelFormat = newPixelFormat ?: PixelFormat.NATIVE
  }

  @ReactProp(name = "enableDepthData")
  fun setEnableDepthData(view: CameraView, enableDepthData: Boolean) {
    if (view.enableDepthData != enableDepthData)
      addChangedPropToTransaction(view, "enableDepthData")
    view.enableDepthData = enableDepthData
  }

  @ReactProp(name = "enableZoomGesture")
  fun setEnableZoomGesture(view: CameraView, enableZoomGesture: Boolean) {
    if (view.enableZoomGesture != enableZoomGesture)
      addChangedPropToTransaction(view, "enableZoomGesture")
    view.enableZoomGesture = enableZoomGesture
  }

  @ReactProp(name = "videoStabilizationMode")
  fun setVideoStabilizationMode(view: CameraView, videoStabilizationMode: String?) {
    val newMode = VideoStabilizationMode.fromUnionValue(videoStabilizationMode)
    if (view.videoStabilizationMode != newMode)
      addChangedPropToTransaction(view, "videoStabilizationMode")
    view.videoStabilizationMode = newMode
  }

  @ReactProp(name = "enableHighQualityPhotos")
  fun setEnableHighQualityPhotos(view: CameraView, enableHighQualityPhotos: Boolean?) {
    if (view.enableHighQualityPhotos != enableHighQualityPhotos)
      addChangedPropToTransaction(view, "enableHighQualityPhotos")
    view.enableHighQualityPhotos = enableHighQualityPhotos
  }

  @ReactProp(name = "enablePortraitEffectsMatteDelivery")
  fun setEnablePortraitEffectsMatteDelivery(view: CameraView, enablePortraitEffectsMatteDelivery: Boolean) {
    if (view.enablePortraitEffectsMatteDelivery != enablePortraitEffectsMatteDelivery)
      addChangedPropToTransaction(view, "enablePortraitEffectsMatteDelivery")
    view.enablePortraitEffectsMatteDelivery = enablePortraitEffectsMatteDelivery
  }

  @ReactProp(name = "format")
  fun setFormat(view: CameraView, format: ReadableMap?) {
    if (view.format != format)
      addChangedPropToTransaction(view, "format")
    view.format = format
  }

  // TODO: Change when TurboModules release.
  // We're treating -1 as "null" here, because when I make the fps parameter
  // of type "Int?" the react bridge throws an error.
  @ReactProp(name = "fps", defaultInt = -1)
  fun setFps(view: CameraView, fps: Int) {
    if (view.fps != fps)
      addChangedPropToTransaction(view, "fps")
    view.fps = if (fps > 0) fps else null
  }

  @ReactProp(name = "hdr")
  fun setHdr(view: CameraView, hdr: Boolean?) {
    if (view.hdr != hdr)
      addChangedPropToTransaction(view, "hdr")
    view.hdr = hdr
  }

  @ReactProp(name = "lowLightBoost")
  fun setLowLightBoost(view: CameraView, lowLightBoost: Boolean?) {
    if (view.lowLightBoost != lowLightBoost)
      addChangedPropToTransaction(view, "lowLightBoost")
    view.lowLightBoost = lowLightBoost
  }

  @ReactProp(name = "isActive")
  fun setIsActive(view: CameraView, isActive: Boolean) {
    if (view.isActive != isActive)
      addChangedPropToTransaction(view, "isActive")
    view.isActive = isActive
  }

  @ReactProp(name = "torch")
  fun setTorch(view: CameraView, torch: String) {
    val newMode = Torch.fromUnionValue(torch)
    if (view.torch != newMode)
      addChangedPropToTransaction(view, "torch")
    view.torch = newMode
  }

  @ReactProp(name = "zoom")
  fun setZoom(view: CameraView, zoom: Double) {
    val zoomFloat = zoom.toFloat()
    if (view.zoom != zoomFloat)
      addChangedPropToTransaction(view, "zoom")
    view.zoom = zoomFloat
  }

  @ReactProp(name = "orientation")
  fun setOrientation(view: CameraView, orientation: String?) {
    val newMode = Orientation.fromUnionValue(orientation)
    if (view.orientation != newMode)
      addChangedPropToTransaction(view, "orientation")
    view.orientation = newMode
  }

  companion object {
    const val TAG = "CameraView"

    // Per-view list of prop names changed during the current update transaction.
    val cameraViewTransactions: HashMap<CameraView, ArrayList<String>> = HashMap()

    // Lazily creates the transaction list for a view and appends the prop name.
    private fun addChangedPropToTransaction(view: CameraView, changedProp: String) {
      if (cameraViewTransactions[view] == null) {
        cameraViewTransactions[view] = ArrayList()
      }
      cameraViewTransactions[view]!!.add(changedProp)
    }
  }
}
|
@@ -0,0 +1,220 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import android.Manifest
|
||||
import android.content.Context
|
||||
import android.content.pm.PackageManager
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.util.Log
|
||||
import androidx.core.content.ContextCompat
|
||||
import com.facebook.react.bridge.*
|
||||
import com.facebook.react.module.annotations.ReactModule
|
||||
import com.facebook.react.modules.core.PermissionAwareActivity
|
||||
import com.facebook.react.modules.core.PermissionListener
|
||||
import com.facebook.react.uimanager.UIManagerHelper
|
||||
import com.mrousavy.camera.core.CameraDeviceDetails
|
||||
import com.mrousavy.camera.frameprocessor.VisionCameraInstaller
|
||||
import com.mrousavy.camera.frameprocessor.VisionCameraProxy
|
||||
import com.mrousavy.camera.parsers.*
|
||||
import com.mrousavy.camera.utils.*
|
||||
import kotlinx.coroutines.*
|
||||
import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
import kotlin.coroutines.suspendCoroutine
|
||||
|
||||
/**
 * The React Native module backing VisionCamera ("CameraView").
 *
 * Exposes photo/video capture, focus, Camera device enumeration and
 * Camera/Microphone permission handling to JS. View-related calls are
 * dispatched on [coroutineScope] and settle their [Promise] asynchronously.
 */
@ReactModule(name = CameraViewModule.TAG)
@Suppress("unused")
class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJavaModule(reactContext) {
  companion object {
    const val TAG = "CameraView"
    // Monotonically increasing request code so each in-flight permission
    // request can be matched to its own PermissionListener callback.
    var RequestCode = 10
  }

  private val coroutineScope = CoroutineScope(Dispatchers.Default) // TODO: or Dispatchers.Main?

  override fun invalidate() {
    super.invalidate()
    if (coroutineScope.isActive) {
      coroutineScope.cancel("CameraViewModule has been destroyed.")
    }
  }

  override fun getName(): String {
    return TAG
  }

  /**
   * Resolves the [CameraView] with the given React view tag on the UI thread.
   * @throws ViewNotFoundError if no view with that tag could be resolved.
   */
  private suspend fun findCameraView(viewId: Int): CameraView {
    return suspendCoroutine { continuation ->
      UiThreadUtil.runOnUiThread {
        Log.d(TAG, "Finding view $viewId...")
        val view = if (reactApplicationContext != null) UIManagerHelper.getUIManager(reactApplicationContext, viewId)?.resolveView(viewId) as CameraView? else null
        Log.d(TAG, if (reactApplicationContext != null) "Found view $viewId!" else "Couldn't find view $viewId!")
        if (view != null) continuation.resume(view)
        else continuation.resumeWithException(ViewNotFoundError(viewId))
      }
    }
  }

  /**
   * Installs the Frame Processor JSI bindings synchronously.
   *
   * Deliberately catches [Error] (not [Exception]): a failed native library
   * load surfaces as UnsatisfiedLinkError, which is an Error subclass.
   * @return true on success, false if installation failed.
   */
  @ReactMethod(isBlockingSynchronousMethod = true)
  fun installFrameProcessorBindings(): Boolean {
    return try {
      val proxy = VisionCameraProxy(reactApplicationContext)
      VisionCameraInstaller.install(proxy)
      true
    } catch (e: Error) {
      Log.e(TAG, "Failed to install Frame Processor JSI Bindings!", e)
      false
    }
  }

  @ReactMethod
  fun takePhoto(viewTag: Int, options: ReadableMap, promise: Promise) {
    coroutineScope.launch {
      // Resolve the view inside withPromise so a ViewNotFoundError rejects
      // the Promise instead of escaping the coroutine (matches pauseRecording).
      withPromise(promise) {
        val view = findCameraView(viewTag)
        view.takePhoto(options)
      }
    }
  }

  // TODO: startRecording() cannot be awaited, because I can't have a Promise and a onRecordedCallback in the same function. Hopefully TurboModules allows that
  @ReactMethod
  fun startRecording(viewTag: Int, options: ReadableMap, onRecordCallback: Callback) {
    coroutineScope.launch {
      val view = findCameraView(viewTag)
      try {
        view.startRecording(options, onRecordCallback)
      } catch (error: CameraError) {
        val map = makeErrorMap("${error.domain}/${error.id}", error.message, error)
        onRecordCallback(null, map)
      } catch (error: Throwable) {
        val map = makeErrorMap("capture/unknown", "An unknown error occurred while trying to start a video recording! ${error.message}", error)
        onRecordCallback(null, map)
      }
    }
  }

  @ReactMethod
  fun pauseRecording(viewTag: Int, promise: Promise) {
    coroutineScope.launch {
      withPromise(promise) {
        val view = findCameraView(viewTag)
        view.pauseRecording()
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun resumeRecording(viewTag: Int, promise: Promise) {
    coroutineScope.launch {
      withPromise(promise) {
        val view = findCameraView(viewTag)
        view.resumeRecording()
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun stopRecording(viewTag: Int, promise: Promise) {
    coroutineScope.launch {
      withPromise(promise) {
        val view = findCameraView(viewTag)
        view.stopRecording()
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun focus(viewTag: Int, point: ReadableMap, promise: Promise) {
    coroutineScope.launch {
      withPromise(promise) {
        val view = findCameraView(viewTag)
        view.focus(point)
        return@withPromise null
      }
    }
  }

  @ReactMethod
  fun getAvailableCameraDevices(promise: Promise) {
    coroutineScope.launch {
      withPromise(promise) {
        val manager = reactApplicationContext.getSystemService(Context.CAMERA_SERVICE) as CameraManager

        val devices = Arguments.createArray()
        manager.cameraIdList.forEach { cameraId ->
          val device = CameraDeviceDetails(manager, cameraId)
          devices.pushMap(device.toMap())
        }
        // withPromise resolves the Promise with this lambda's result (see the
        // `return@withPromise null` pattern above) — resolving manually here
        // as well would settle the Promise twice, so just return the array.
        return@withPromise devices
      }
    }
  }

  // Whether we are still allowed to show the system permission dialog for
  // the given permission (i.e. the user hasn't selected "don't ask again").
  private fun canRequestPermission(permission: String): Boolean {
    val activity = currentActivity as? PermissionAwareActivity
    return activity?.shouldShowRequestPermissionRationale(permission) ?: false
  }

  /**
   * Shared implementation for [getCameraPermissionStatus] and
   * [getMicrophonePermissionStatus]: maps the Android permission state to a
   * JS union value, reporting DENIED as NOT_DETERMINED while the system
   * dialog can still be shown.
   */
  private fun getPermissionStatus(permission: String, promise: Promise) {
    val status = ContextCompat.checkSelfPermission(reactApplicationContext, permission)
    var parsed = PermissionStatus.fromPermissionStatus(status)
    if (parsed == PermissionStatus.DENIED && canRequestPermission(permission)) {
      parsed = PermissionStatus.NOT_DETERMINED
    }
    promise.resolve(parsed.unionValue)
  }

  @ReactMethod
  fun getCameraPermissionStatus(promise: Promise) {
    getPermissionStatus(Manifest.permission.CAMERA, promise)
  }

  @ReactMethod
  fun getMicrophonePermissionStatus(promise: Promise) {
    getPermissionStatus(Manifest.permission.RECORD_AUDIO, promise)
  }

  /**
   * Shared implementation for [requestCameraPermission] and
   * [requestMicrophonePermission]: shows the system permission dialog and
   * resolves the Promise with the resulting permission status union value.
   * Rejects with "NO_ACTIVITY" when no PermissionAwareActivity is available.
   */
  private fun requestPermission(permission: String, promise: Promise) {
    val activity = reactApplicationContext.currentActivity
    if (activity is PermissionAwareActivity) {
      val currentRequestCode = RequestCode++
      val listener = PermissionListener { requestCode: Int, _: Array<String>, grantResults: IntArray ->
        if (requestCode == currentRequestCode) {
          // grantResults can be empty if the request was interrupted — treat that as denied.
          val permissionStatus = if (grantResults.isNotEmpty()) grantResults[0] else PackageManager.PERMISSION_DENIED
          val parsed = PermissionStatus.fromPermissionStatus(permissionStatus)
          promise.resolve(parsed.unionValue)
          return@PermissionListener true
        }
        return@PermissionListener false
      }
      activity.requestPermissions(arrayOf(permission), currentRequestCode, listener)
    } else {
      promise.reject("NO_ACTIVITY", "No PermissionAwareActivity was found! Make sure the app has launched before calling this function.")
    }
  }

  @ReactMethod
  fun requestCameraPermission(promise: Promise) {
    requestPermission(Manifest.permission.CAMERA, promise)
  }

  @ReactMethod
  fun requestMicrophonePermission(promise: Promise) {
    requestPermission(Manifest.permission.RECORD_AUDIO, promise)
  }
}
|
67
package/android/src/main/java/com/mrousavy/camera/Errors.kt
Normal file
67
package/android/src/main/java/com/mrousavy/camera/Errors.kt
Normal file
@@ -0,0 +1,67 @@
|
||||
package com.mrousavy.camera
|
||||
|
||||
import com.mrousavy.camera.parsers.CameraDeviceError
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
|
||||
/**
 * Base class for every error VisionCamera can throw.
 *
 * The rendered [Throwable] message has the shape "[domain/id] message", so
 * callers (and ultimately JS) receive a stable, parseable error code next to
 * the human-readable description.
 */
abstract class CameraError(
  /**
   * The domain of the error. Error domains are used to group errors.
   *
   * Example: "permission"
   */
  val domain: String,
  /**
   * The id of the error. Errors are uniquely identified under a given domain.
   *
   * Example: "microphone-permission-denied"
   */
  val id: String,
  /**
   * A detailed error description of "what went wrong".
   *
   * Example: "The microphone permission was denied!"
   */
  message: String,
  /**
   * A throwable that caused this error.
   */
  cause: Throwable? = null
) : Throwable("[$domain/$id] $message", cause)

/** The combined "domain/id" error code, e.g. "permission/camera-permission-denied". */
val CameraError.code: String
  get() {
    return "$domain/$id"
  }
|
||||
|
||||
// Permission errors ("permission" domain)
class MicrophonePermissionError : CameraError("permission", "microphone-permission-denied", "The Microphone permission was denied! If you want to record Video without sound, pass `audio={false}`.")
class CameraPermissionError : CameraError("permission", "camera-permission-denied", "The Camera permission was denied!")

// Parameter errors ("parameter" domain)
class InvalidTypeScriptUnionError(unionName: String, unionValue: String) : CameraError("parameter", "invalid-parameter", "The given value for $unionName could not be parsed! (Received: $unionValue)")

// Device errors ("device" domain)
class NoCameraDeviceError : CameraError("device", "no-device", "No device was set! Use `getAvailableCameraDevices()` to select a suitable Camera device.")
class NoFlashAvailableError : CameraError("device", "flash-unavailable", "The Camera Device does not have a flash unit! Make sure you select a device where `hasFlash`/`hasTorch` is true!")
class PixelFormatNotSupportedError(format: String) : CameraError("device", "pixel-format-not-supported", "The pixelFormat $format is not supported on the given Camera Device!")

// Format errors ("format" domain)
class HdrNotContainedInFormatError : CameraError(
  "format", "invalid-hdr",
  "The currently selected format does not support HDR capture! " +
    "Make sure you select a format which includes `supportsPhotoHDR`!"
)

// Session errors ("session" domain)
class CameraNotReadyError : CameraError("session", "camera-not-ready", "The Camera is not ready yet! Wait for the onInitialized() callback!")
class CameraCannotBeOpenedError(cameraId: String, error: CameraDeviceError) : CameraError("session", "camera-cannot-be-opened", "The given Camera device (id: $cameraId) could not be opened! Error: $error")
class CameraSessionCannotBeConfiguredError(cameraId: String, outputs: CameraOutputs) : CameraError("session", "cannot-create-session", "Failed to create a Camera Session for Camera $cameraId! Outputs: $outputs")
class CameraDisconnectedError(cameraId: String, error: CameraDeviceError) : CameraError("session", "camera-has-been-disconnected", "The given Camera device (id: $cameraId) has been disconnected! Error: $error")

// Capture errors ("capture" domain)
class VideoNotEnabledError : CameraError("capture", "video-not-enabled", "Video capture is disabled! Pass `video={true}` to enable video recordings.")
class PhotoNotEnabledError : CameraError("capture", "photo-not-enabled", "Photo capture is disabled! Pass `photo={true}` to enable photo capture.")
class CaptureAbortedError(wasImageCaptured: Boolean) : CameraError("capture", "aborted", "The image capture was aborted! Was Image captured: $wasImageCaptured")
class UnknownCaptureError(wasImageCaptured: Boolean) : CameraError("capture", "unknown", "An unknown error occurred while trying to capture an Image! Was Image captured: $wasImageCaptured")

// (typo fix: "occured" -> "occurred" in the user-facing message)
class RecorderError(name: String, extra: Int) : CameraError("capture", "recorder-error", "An error occurred while recording a video! $name $extra")

class NoRecordingInProgressError : CameraError("capture", "no-recording-in-progress", "There was no active video recording in progress! Did you call stopRecording() twice?")
class RecordingInProgressError : CameraError("capture", "recording-in-progress", "There is already an active video recording in progress! Did you call startRecording() twice?")

// System errors ("system" domain)
class ViewNotFoundError(viewId: Int) : CameraError("system", "view-not-found", "The given view (ID $viewId) was not found in the view manager.")

// Fallback ("unknown" domain) — typo fix: "occured" -> "occurred".
class UnknownCameraError(cause: Throwable?) : CameraError("unknown", "unknown", cause?.message ?: "An unknown camera error occurred.", cause)
|
||||
|
@@ -0,0 +1,243 @@
|
||||
package com.mrousavy.camera.core
|
||||
|
||||
import android.graphics.ImageFormat
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.hardware.camera2.CameraMetadata
|
||||
import android.hardware.camera2.params.DynamicRangeProfiles
|
||||
import android.os.Build
|
||||
import android.util.Range
|
||||
import android.util.Size
|
||||
import com.facebook.react.bridge.Arguments
|
||||
import com.facebook.react.bridge.ReadableArray
|
||||
import com.facebook.react.bridge.ReadableMap
|
||||
import com.mrousavy.camera.extensions.bigger
|
||||
import com.mrousavy.camera.extensions.getPhotoSizes
|
||||
import com.mrousavy.camera.extensions.getVideoSizes
|
||||
import com.mrousavy.camera.parsers.PixelFormat
|
||||
import com.mrousavy.camera.parsers.HardwareLevel
|
||||
import com.mrousavy.camera.parsers.LensFacing
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
import kotlin.math.PI
|
||||
import kotlin.math.atan
|
||||
|
||||
/**
 * Reads all relevant Camera2 [CameraCharacteristics] for a single camera ID
 * and converts them into a React Native map (see [toMap]) describing the
 * device: lens types, zoom range, supported formats, pixel formats, HDR and
 * stabilization capabilities.
 *
 * NOTE(review): every property below is read eagerly in its initializer, so
 * constructing an instance performs a full characteristics query.
 */
class CameraDeviceDetails(private val cameraManager: CameraManager, private val cameraId: String) {
  private val characteristics = cameraManager.getCameraCharacteristics(cameraId)
  private val hardwareLevel = HardwareLevel.fromCameraCharacteristics(characteristics)
  // Capabilities may be absent on some devices — default to "none".
  private val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES) ?: IntArray(0)
  private val extensions = getSupportedExtensions()

  // device characteristics
  // Raw integer constants are used where the symbolic name requires a newer
  // compile SDK — see the inline TODOs for the intended constant names.
  private val isMultiCam = capabilities.contains(11 /* TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA */)
  private val supportsDepthCapture = capabilities.contains(8 /* TODO: CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT */)
  private val supportsRawCapture = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_RAW)
  private val supportsLowLightBoost = extensions.contains(4 /* TODO: CameraExtensionCharacteristics.EXTENSION_NIGHT */)
  private val lensFacing = LensFacing.fromCameraCharacteristics(characteristics)
  private val hasFlash = characteristics.get(CameraCharacteristics.FLASH_INFO_AVAILABLE) ?: false
  private val focalLengths = characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_FOCAL_LENGTHS) ?: floatArrayOf(35f /* 35mm default */)
  // NOTE(review): SENSOR_INFO_PHYSICAL_SIZE / SENSOR_ORIENTATION are assumed
  // to always be reported by the HAL (`!!`) — confirm for exotic devices.
  private val sensorSize = characteristics.get(CameraCharacteristics.SENSOR_INFO_PHYSICAL_SIZE)!!
  private val sensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION)!!
  // INFO_VERSION (a human-readable device string) only exists on API 28+;
  // fall back to "facing (id)".
  private val name = (if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) characteristics.get(CameraCharacteristics.INFO_VERSION)
    else null) ?: "$lensFacing (${cameraId})"

  // "formats" (all possible configurations for this device)
  // CONTROL_ZOOM_RATIO_RANGE only exists on API 30+; older devices fall back
  // to [1, max digital zoom].
  private val zoomRange = (if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) characteristics.get(CameraCharacteristics.CONTROL_ZOOM_RATIO_RANGE)
    else null) ?: Range(1f, characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_MAX_DIGITAL_ZOOM) ?: 1f)
  private val minZoom = zoomRange.lower.toDouble()
  private val maxZoom = zoomRange.upper.toDouble()

  private val cameraConfig = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  private val isoRange = characteristics.get(CameraCharacteristics.SENSOR_INFO_SENSITIVITY_RANGE) ?: Range(0, 0)
  private val digitalStabilizationModes = characteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES) ?: IntArray(0)
  private val opticalStabilizationModes = characteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION) ?: IntArray(0)
  private val supportsPhotoHdr = extensions.contains(3 /* TODO: CameraExtensionCharacteristics.EXTENSION_HDR */)
  private val supportsVideoHdr = getHasVideoHdr()

  // Pixel format used when probing the video output sizes below.
  private val videoFormat = ImageFormat.YUV_420_888

  // get extensions (HDR, Night Mode, ..)
  // Camera Extensions only exist on API 31+; older devices report none.
  private fun getSupportedExtensions(): List<Int> {
    return if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
      val extensions = cameraManager.getCameraExtensionCharacteristics(cameraId)
      extensions.supportedExtensions
    } else {
      emptyList()
    }
  }

  // Video HDR requires API 33+ with 10-bit dynamic range support and an
  // HLG10 or HDR10 profile advertised by the device.
  private fun getHasVideoHdr(): Boolean {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      if (capabilities.contains(CameraMetadata.REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT)) {
        val availableProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
          ?: DynamicRangeProfiles(LongArray(0))
        return availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HLG10)
          || availableProfiles.supportedProfiles.contains(DynamicRangeProfiles.HDR10)
      }
    }
    return false
  }

  // Collects digital + optical stabilization modes as JS union strings.
  // NOTE(review): a mode available both digitally and optically is pushed
  // twice — confirm duplicates are acceptable to the JS side.
  private fun createStabilizationModes(): ReadableArray {
    val array = Arguments.createArray()
    digitalStabilizationModes.forEach { videoStabilizationMode ->
      val mode = VideoStabilizationMode.fromDigitalVideoStabilizationMode(videoStabilizationMode)
      array.pushString(mode.unionValue)
    }
    opticalStabilizationModes.forEach { videoStabilizationMode ->
      val mode = VideoStabilizationMode.fromOpticalVideoStabilizationMode(videoStabilizationMode)
      array.pushString(mode.unionValue)
    }
    return array
  }


  // 35mm is 135 film format, a standard in which focal lengths are usually measured
  private val size35mm = Size(36, 24)

  // Classifies the lenses into the JS "device type" strings
  // ("telephoto-camera" / "wide-angle-camera" / "ultra-wide-angle-camera")
  // by upscaling each focal length to its 35mm-equivalent.
  private fun getDeviceTypes(): ReadableArray {
    // TODO: Check if getDeviceType() works correctly, even for logical multi-cameras

    // To get valid focal length standards we have to upscale to the 35mm measurement (film standard)
    val cropFactor = size35mm.bigger / sensorSize.bigger

    val deviceTypes = Arguments.createArray()

    // https://en.wikipedia.org/wiki/Telephoto_lens
    val containsTelephoto = focalLengths.any { l -> (l * cropFactor) > 35 } // TODO: Telephoto lenses are > 85mm, but we don't have anything between that range..
    // val containsNormalLens = focalLengths.any { l -> (l * cropFactor) > 35 && (l * cropFactor) <= 55 }
    // https://en.wikipedia.org/wiki/Wide-angle_lens
    val containsWideAngle = focalLengths.any { l -> (l * cropFactor) >= 24 && (l * cropFactor) <= 35 }
    // https://en.wikipedia.org/wiki/Ultra_wide_angle_lens
    val containsUltraWideAngle = focalLengths.any { l -> (l * cropFactor) < 24 }

    if (containsTelephoto)
      deviceTypes.pushString("telephoto-camera")
    if (containsWideAngle)
      deviceTypes.pushString("wide-angle-camera")
    if (containsUltraWideAngle)
      deviceTypes.pushString("ultra-wide-angle-camera")

    return deviceTypes
  }

  // Field of view in degrees, computed from the sensor's larger physical
  // dimension and the first reported focal length.
  private fun getFieldOfView(): Double {
    return 2 * atan(sensorSize.bigger / (focalLengths[0] * 2)) * (180 / PI)
  }

  // Output sizes for the video pixel format (project extension on
  // CameraCharacteristics — see extensions/getVideoSizes).
  private fun getVideoSizes(): List<Size> {
    return characteristics.getVideoSizes(cameraId, videoFormat)
  }
  // Output sizes for JPEG still capture (project extension).
  private fun getPhotoSizes(): List<Size> {
    return characteristics.getPhotoSizes(ImageFormat.JPEG)
  }

  // Builds the cross-product of every (video size x photo size) combination
  // as "format" maps, capping fps at the minimum frame duration the stream
  // configuration allows for that video size.
  private fun getFormats(): ReadableArray {
    val array = Arguments.createArray()

    val videoSizes = getVideoSizes()
    val photoSizes = getPhotoSizes()

    videoSizes.forEach { videoSize ->
      // Min frame duration is in nanoseconds; invert to get max fps.
      val frameDuration = cameraConfig.getOutputMinFrameDuration(videoFormat, videoSize)
      val maxFps = (1.0 / (frameDuration.toDouble() / 1_000_000_000)).toInt()

      photoSizes.forEach { photoSize ->
        val map = buildFormatMap(photoSize, videoSize, Range(1, maxFps))
        array.pushMap(map)
      }
    }

    // TODO: Add high-speed video ranges (high-fps / slow-motion)

    return array
  }

  // Get available pixel formats for the given Size
  private fun createPixelFormats(size: Size): ReadableArray {
    val formats = cameraConfig.outputFormats
    val array = Arguments.createArray()
    formats.forEach { format ->
      val sizes = cameraConfig.getOutputSizes(format)
      val hasSize = sizes.any { it.width == size.width && it.height == size.height }
      if (hasSize) {
        array.pushString(PixelFormat.fromImageFormat(format).unionValue)
      }
    }
    return array
  }

  // Serializes one photo/video size combination into the JS "CameraDeviceFormat" shape.
  private fun buildFormatMap(photoSize: Size, videoSize: Size, fpsRange: Range<Int>): ReadableMap {
    val map = Arguments.createMap()
    map.putInt("photoHeight", photoSize.height)
    map.putInt("photoWidth", photoSize.width)
    map.putInt("videoHeight", videoSize.height)
    map.putInt("videoWidth", videoSize.width)
    map.putInt("minISO", isoRange.lower)
    map.putInt("maxISO", isoRange.upper)
    map.putInt("minFps", fpsRange.lower)
    map.putInt("maxFps", fpsRange.upper)
    map.putDouble("fieldOfView", getFieldOfView())
    map.putBoolean("supportsVideoHDR", supportsVideoHdr)
    map.putBoolean("supportsPhotoHDR", supportsPhotoHdr)
    map.putString("autoFocusSystem", "contrast-detection") // TODO: Is this wrong?
    map.putArray("videoStabilizationModes", createStabilizationModes())
    map.putArray("pixelFormats", createPixelFormats(videoSize))
    return map
  }

  // convert to React Native JS object (map)
  fun toMap(): ReadableMap {
    val map = Arguments.createMap()
    map.putString("id", cameraId)
    map.putArray("devices", getDeviceTypes())
    map.putString("position", lensFacing.unionValue)
    map.putString("name", name)
    map.putBoolean("hasFlash", hasFlash)
    // hasTorch mirrors hasFlash: on Android a flash unit doubles as the torch.
    map.putBoolean("hasTorch", hasFlash)
    map.putBoolean("isMultiCam", isMultiCam)
    map.putBoolean("supportsRawCapture", supportsRawCapture)
    map.putBoolean("supportsDepthCapture", supportsDepthCapture)
    map.putBoolean("supportsLowLightBoost", supportsLowLightBoost)
    map.putBoolean("supportsFocus", true) // I believe every device here supports focussing
    map.putDouble("minZoom", minZoom)
    map.putDouble("maxZoom", maxZoom)
    map.putDouble("neutralZoom", 1.0) // Zoom is always relative to 1.0 on Android
    map.putString("hardwareLevel", hardwareLevel.unionValue)
    map.putString("sensorOrientation", Orientation.fromRotationDegrees(sensorOrientation).unionValue)

    // Raw list of every ImageFormat the stream configuration can output,
    // as human-readable names.
    val array = Arguments.createArray()
    cameraConfig.outputFormats.forEach { f ->
      val str = when (f) {
        ImageFormat.YUV_420_888 -> "YUV_420_888"
        ImageFormat.YUV_422_888 -> "YUV_422_888"
        ImageFormat.YUV_444_888 -> "YUV_444_888"
        ImageFormat.JPEG -> "JPEG"
        ImageFormat.DEPTH16 -> "DEPTH16"
        ImageFormat.DEPTH_JPEG -> "DEPTH_JPEG"
        ImageFormat.FLEX_RGBA_8888 -> "FLEX_RGBA_8888"
        ImageFormat.FLEX_RGB_888 -> "FLEX_RGB_888"
        ImageFormat.YUY2 -> "YUY2"
        ImageFormat.Y8 -> "Y8"
        ImageFormat.YV12 -> "YV12"
        ImageFormat.HEIC -> "HEIC"
        ImageFormat.PRIVATE -> "PRIVATE"
        ImageFormat.RAW_PRIVATE -> "RAW_PRIVATE"
        ImageFormat.RAW_SENSOR -> "RAW_SENSOR"
        ImageFormat.RAW10 -> "RAW10"
        ImageFormat.RAW12 -> "RAW12"
        ImageFormat.NV16 -> "NV16"
        ImageFormat.NV21 -> "NV21"
        ImageFormat.UNKNOWN -> "UNKNOWN"
        ImageFormat.YCBCR_P010 -> "YCBCR_P010"
        else -> "unknown ($f)"
      }
      array.pushString(str)
    }
    map.putArray("pixelFormats", array)

    map.putArray("formats", getFormats())

    return map
  }
}
|
@@ -0,0 +1,563 @@
|
||||
package com.mrousavy.camera.core
|
||||
|
||||
import android.content.Context
|
||||
import android.graphics.Point
|
||||
import android.hardware.camera2.CameraCaptureSession
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
import android.hardware.camera2.CameraDevice
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.hardware.camera2.CameraMetadata
|
||||
import android.hardware.camera2.CaptureRequest
|
||||
import android.hardware.camera2.CaptureResult
|
||||
import android.hardware.camera2.TotalCaptureResult
|
||||
import android.hardware.camera2.params.MeteringRectangle
|
||||
import android.media.Image
|
||||
import android.os.Build
|
||||
import android.util.Log
|
||||
import android.util.Range
|
||||
import android.util.Size
|
||||
import com.mrousavy.camera.CameraNotReadyError
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.CameraView
|
||||
import com.mrousavy.camera.CaptureAbortedError
|
||||
import com.mrousavy.camera.NoRecordingInProgressError
|
||||
import com.mrousavy.camera.PhotoNotEnabledError
|
||||
import com.mrousavy.camera.RecorderError
|
||||
import com.mrousavy.camera.RecordingInProgressError
|
||||
import com.mrousavy.camera.VideoNotEnabledError
|
||||
import com.mrousavy.camera.extensions.SessionType
|
||||
import com.mrousavy.camera.extensions.capture
|
||||
import com.mrousavy.camera.extensions.createCaptureSession
|
||||
import com.mrousavy.camera.extensions.createPhotoCaptureRequest
|
||||
import com.mrousavy.camera.extensions.openCamera
|
||||
import com.mrousavy.camera.extensions.tryClose
|
||||
import com.mrousavy.camera.extensions.zoomed
|
||||
import com.mrousavy.camera.frameprocessor.FrameProcessor
|
||||
import com.mrousavy.camera.parsers.Flash
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.QualityPrioritization
|
||||
import com.mrousavy.camera.parsers.VideoCodec
|
||||
import com.mrousavy.camera.parsers.VideoFileType
|
||||
import com.mrousavy.camera.parsers.VideoStabilizationMode
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import kotlinx.coroutines.CoroutineScope
|
||||
import kotlinx.coroutines.launch
|
||||
import kotlinx.coroutines.sync.Mutex
|
||||
import kotlinx.coroutines.sync.withLock
|
||||
import java.io.Closeable
|
||||
import java.util.concurrent.CancellationException
|
||||
import kotlin.coroutines.CoroutineContext
|
||||
|
||||
class CameraSession(private val context: Context,
|
||||
private val cameraManager: CameraManager,
|
||||
private val onInitialized: () -> Unit,
|
||||
private val onError: (e: Throwable) -> Unit): CoroutineScope, Closeable, CameraOutputs.Callback, CameraManager.AvailabilityCallback() {
|
||||
companion object {
|
||||
private const val TAG = "CameraSession"
|
||||
|
||||
// TODO: Samsung advertises 60 FPS but only allows 30 FPS for some reason.
|
||||
private val CAN_SET_FPS = !Build.MANUFACTURER.equals("samsung", true)
|
||||
}
|
||||
|
||||
data class CapturedPhoto(val image: Image,
|
||||
val metadata: TotalCaptureResult,
|
||||
val orientation: Orientation,
|
||||
val isMirrored: Boolean,
|
||||
val format: Int): Closeable {
|
||||
override fun close() {
|
||||
image.close()
|
||||
}
|
||||
}
|
||||
|
||||
// setInput(..)
|
||||
private var cameraId: String? = null
|
||||
|
||||
// setOutputs(..)
|
||||
private var outputs: CameraOutputs? = null
|
||||
|
||||
// setIsActive(..)
|
||||
private var isActive = false
|
||||
|
||||
// configureFormat(..)
|
||||
private var fps: Int? = null
|
||||
private var videoStabilizationMode: VideoStabilizationMode? = null
|
||||
private var lowLightBoost: Boolean? = null
|
||||
private var hdr: Boolean? = null
|
||||
|
||||
// zoom(..)
|
||||
private var zoom: Float = 1.0f
|
||||
|
||||
private var captureSession: CameraCaptureSession? = null
|
||||
private var cameraDevice: CameraDevice? = null
|
||||
private var previewRequest: CaptureRequest.Builder? = null
|
||||
private val photoOutputSynchronizer = PhotoOutputSynchronizer()
|
||||
private val mutex = Mutex()
|
||||
private var isRunning = false
|
||||
private var enableTorch = false
|
||||
// Video Outputs
|
||||
private var recording: RecordingSession? = null
|
||||
set(value) {
|
||||
field = value
|
||||
updateVideoOutputs()
|
||||
}
|
||||
var frameProcessor: FrameProcessor? = null
|
||||
set(value) {
|
||||
field = value
|
||||
updateVideoOutputs()
|
||||
}
|
||||
|
||||
override val coroutineContext: CoroutineContext = CameraQueues.cameraQueue.coroutineDispatcher
|
||||
|
||||
init {
|
||||
cameraManager.registerAvailabilityCallback(this, CameraQueues.cameraQueue.handler)
|
||||
}
|
||||
|
||||
override fun close() {
|
||||
cameraManager.unregisterAvailabilityCallback(this)
|
||||
photoOutputSynchronizer.clear()
|
||||
captureSession?.close()
|
||||
cameraDevice?.tryClose()
|
||||
outputs?.close()
|
||||
isRunning = false
|
||||
}
|
||||
|
||||
val orientation: Orientation
|
||||
get() {
|
||||
val cameraId = cameraId ?: return Orientation.PORTRAIT
|
||||
val characteristics = cameraManager.getCameraCharacteristics(cameraId)
|
||||
val sensorRotation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION) ?: 0
|
||||
return Orientation.fromRotationDegrees(sensorRotation)
|
||||
}
|
||||
|
||||
fun configureSession(cameraId: String,
|
||||
preview: CameraOutputs.PreviewOutput? = null,
|
||||
photo: CameraOutputs.PhotoOutput? = null,
|
||||
video: CameraOutputs.VideoOutput? = null) {
|
||||
Log.i(TAG, "Configuring Session for Camera $cameraId...")
|
||||
val outputs = CameraOutputs(cameraId,
|
||||
cameraManager,
|
||||
preview,
|
||||
photo,
|
||||
video,
|
||||
hdr == true,
|
||||
this)
|
||||
if (this.cameraId == cameraId && this.outputs == outputs && isActive == isRunning) {
|
||||
Log.i(TAG, "Nothing changed in configuration, canceling..")
|
||||
}
|
||||
|
||||
// 1. Close previous outputs
|
||||
this.outputs?.close()
|
||||
// 2. Assign new outputs
|
||||
this.outputs = outputs
|
||||
// 3. Update with existing render targets (surfaces)
|
||||
updateVideoOutputs()
|
||||
|
||||
this.cameraId = cameraId
|
||||
launch {
|
||||
startRunning()
|
||||
}
|
||||
}
|
||||
|
||||
fun configureFormat(fps: Int? = null,
|
||||
videoStabilizationMode: VideoStabilizationMode? = null,
|
||||
hdr: Boolean? = null,
|
||||
lowLightBoost: Boolean? = null) {
|
||||
Log.i(TAG, "Setting Format (fps: $fps | videoStabilization: $videoStabilizationMode | hdr: $hdr | lowLightBoost: $lowLightBoost)...")
|
||||
this.fps = fps
|
||||
this.videoStabilizationMode = videoStabilizationMode
|
||||
this.hdr = hdr
|
||||
this.lowLightBoost = lowLightBoost
|
||||
|
||||
var needsReconfiguration = false
|
||||
val currentOutputs = outputs
|
||||
if (currentOutputs != null && currentOutputs.enableHdr != hdr) {
|
||||
// Update existing HDR for Outputs
|
||||
this.outputs = CameraOutputs(currentOutputs.cameraId,
|
||||
cameraManager,
|
||||
currentOutputs.preview,
|
||||
currentOutputs.photo,
|
||||
currentOutputs.video,
|
||||
hdr,
|
||||
this)
|
||||
needsReconfiguration = true
|
||||
}
|
||||
launch {
|
||||
if (needsReconfiguration) startRunning()
|
||||
else updateRepeatingRequest()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts or stops the Camera.
|
||||
*/
|
||||
fun setIsActive(isActive: Boolean) {
|
||||
Log.i(TAG, "Setting isActive: $isActive (isRunning: $isRunning)")
|
||||
this.isActive = isActive
|
||||
if (isActive == isRunning) return
|
||||
|
||||
launch {
|
||||
if (isActive) {
|
||||
startRunning()
|
||||
} else {
|
||||
stopRunning()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Re-attaches the currently configured recording session and frame processor to the
 * video pipeline. No-op when there is no video output.
 *
 * Fix: removed the unused local `previewOutput` — it was read but never used.
 */
private fun updateVideoOutputs() {
  val videoPipeline = outputs?.videoOutput?.videoPipeline ?: return
  videoPipeline.setRecordingSessionOutput(this.recording)
  videoPipeline.setFrameProcessorOutput(this.frameProcessor)
}
|
||||
|
||||
/**
 * Captures a single still photo through the photo output.
 *
 * Issues a single capture request, waits for the metadata result, then awaits the
 * matching [Image] (matched by sensor timestamp) from the photo-output synchronizer.
 *
 * @throws CameraNotReadyError if no capture session or outputs exist yet.
 * @throws PhotoNotEnabledError if photo capture was not configured.
 * @throws CaptureAbortedError if awaiting the image was cancelled.
 */
suspend fun takePhoto(qualityPrioritization: QualityPrioritization,
                      flashMode: Flash,
                      enableShutterSound: Boolean,
                      enableRedEyeReduction: Boolean,
                      enableAutoStabilization: Boolean,
                      outputOrientation: Orientation): CapturedPhoto {
  val session = captureSession ?: throw CameraNotReadyError()
  val currentOutputs = outputs ?: throw CameraNotReadyError()
  val photoOutput = currentOutputs.photoOutput ?: throw PhotoNotEnabledError()

  Log.i(TAG, "Photo capture 0/3 - preparing capture request (${photoOutput.size.width}x${photoOutput.size.height})...")

  val characteristics = cameraManager.getCameraCharacteristics(session.device.id)
  val orientation = outputOrientation.toSensorRelativeOrientation(characteristics)
  val captureRequest = session.device.createPhotoCaptureRequest(cameraManager,
                                                                photoOutput.surface,
                                                                zoom,
                                                                qualityPrioritization,
                                                                flashMode,
                                                                enableRedEyeReduction,
                                                                enableAutoStabilization,
                                                                orientation)
  Log.i(TAG, "Photo capture 1/3 - starting capture...")
  val result = session.capture(captureRequest, enableShutterSound)
  // Timestamp links this metadata result to the Image delivered via onPhotoCaptured().
  val timestamp = result[CaptureResult.SENSOR_TIMESTAMP]!!
  Log.i(TAG, "Photo capture 2/3 complete - received metadata with timestamp $timestamp")
  try {
    val image = photoOutputSynchronizer.await(timestamp)

    val isMirrored = characteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT

    Log.i(TAG, "Photo capture 3/3 complete - received ${image.width} x ${image.height} image.")
    return CapturedPhoto(image, result, orientation, isMirrored, image.format)
  } catch (e: CancellationException) {
    throw CaptureAbortedError(false)
  }
}
|
||||
|
||||
/**
 * Callback from the photo output: a captured [Image] arrived.
 * Hands it to the synchronizer so the matching [takePhoto] call (keyed by timestamp) resumes.
 */
override fun onPhotoCaptured(image: Image) {
  Log.i(CameraView.TAG, "Photo captured! ${image.width} x ${image.height}")
  photoOutputSynchronizer.set(image.timestamp, image)
}
|
||||
|
||||
/**
 * Starts a new video recording into a temporary file.
 *
 * @throws RecordingInProgressError if a recording is already active.
 * @throws CameraNotReadyError if outputs have not been configured yet.
 * @throws VideoNotEnabledError if no video output was configured.
 */
suspend fun startRecording(enableAudio: Boolean,
                           codec: VideoCodec,
                           fileType: VideoFileType,
                           callback: (video: RecordingSession.Video) -> Unit,
                           onError: (error: RecorderError) -> Unit) {
  mutex.withLock {
    if (recording != null) throw RecordingInProgressError()
    val currentOutputs = outputs ?: throw CameraNotReadyError()
    val videoOutput = currentOutputs.videoOutput ?: throw VideoNotEnabledError()

    val session = RecordingSession(context, videoOutput.size, enableAudio, fps, codec, orientation, fileType, callback, onError)
    session.start()
    this.recording = session
  }
}
|
||||
|
||||
/**
 * Stops the active recording and clears it.
 * @throws NoRecordingInProgressError if nothing is being recorded.
 */
suspend fun stopRecording() {
  mutex.withLock {
    val session = recording ?: throw NoRecordingInProgressError()
    session.stop()
    this.recording = null
  }
}
|
||||
|
||||
/**
 * Pauses the active recording.
 * @throws NoRecordingInProgressError if nothing is being recorded.
 */
suspend fun pauseRecording() {
  mutex.withLock {
    val session = recording ?: throw NoRecordingInProgressError()
    session.pause()
  }
}
|
||||
|
||||
/**
 * Resumes a previously paused recording.
 * @throws NoRecordingInProgressError if nothing is being recorded.
 */
suspend fun resumeRecording() {
  mutex.withLock {
    val session = recording ?: throw NoRecordingInProgressError()
    session.resume()
  }
}
|
||||
|
||||
/**
 * Turns the torch (flashlight) on or off by refreshing the repeating request.
 * No-op when the torch is already in the requested state.
 */
suspend fun setTorchMode(enableTorch: Boolean) {
  if (this.enableTorch == enableTorch) return
  this.enableTorch = enableTorch
  updateRepeatingRequest()
}
|
||||
|
||||
/**
 * Applies a new zoom factor by refreshing the repeating request on the session's coroutine.
 * No-op when the zoom is unchanged.
 */
fun setZoom(zoom: Float) {
  if (this.zoom == zoom) return
  this.zoom = zoom
  launch { updateRepeatingRequest() }
}
|
||||
|
||||
/**
 * Focuses the camera at a point given in preview-view coordinates.
 * Converts the view point into sensor active-array coordinates, then delegates to [focus].
 *
 * @throws CameraNotReadyError if the session or preview output is not ready yet.
 */
suspend fun focus(x: Int, y: Int) {
  val session = captureSession ?: throw CameraNotReadyError()
  val preview = outputs?.previewOutput ?: throw CameraNotReadyError()
  val characteristics = cameraManager.getCameraCharacteristics(session.device.id)
  val sensorSize = characteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)!!
  val previewSize = preview.size
  // NOTE(review): width/height axes are intentionally swapped in this mapping —
  // presumably because the sensor is rotated 90° relative to the portrait preview.
  // Confirm behavior on landscape-natural devices.
  val sensorX = x.toDouble() / previewSize.width * sensorSize.height()
  val sensorY = y.toDouble() / previewSize.height * sensorSize.width()
  val point = Point(sensorX.toInt(), sensorY.toInt())

  Log.i(TAG, "Focusing (${point.x}, ${point.y})...")
  focus(point)
}
|
||||
|
||||
/** CameraManager availability callback — logs when a camera becomes available. */
override fun onCameraAvailable(cameraId: String) {
  super.onCameraAvailable(cameraId)
  Log.i(TAG, "Camera became available: $cameraId")
}
|
||||
|
||||
/** CameraManager availability callback — logs when a camera becomes unavailable. */
override fun onCameraUnavailable(cameraId: String) {
  super.onCameraUnavailable(cameraId)
  Log.i(TAG, "Camera became un-available: $cameraId")
}
|
||||
|
||||
/**
 * Runs a touch-to-focus sequence at [point] (sensor active-array coordinates):
 * cancel any in-flight AF trigger, set an AF metering region (if supported),
 * fire a new AF trigger, then resume the repeating preview request.
 *
 * Fix: the region-support check previously read CONTROL_MAX_REGIONS_AE (auto-exposure)
 * although the code sets CONTROL_AF_REGIONS (auto-focus) — it now reads
 * CONTROL_MAX_REGIONS_AF, so AF regions are applied exactly on devices that support them.
 *
 * @throws CameraNotReadyError if the session or preview request is not ready yet.
 */
private suspend fun focus(point: Point) {
  mutex.withLock {
    val session = captureSession ?: throw CameraNotReadyError()
    val request = previewRequest ?: throw CameraNotReadyError()

    val weight = MeteringRectangle.METERING_WEIGHT_MAX - 1
    val focusAreaTouch = MeteringRectangle(point, Size(150, 150), weight)

    // Quickly pause preview
    session.stopRepeating()

    // Cancel any previously running AF trigger before starting a new one.
    request.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_CANCEL)
    request.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_OFF)
    session.capture(request.build(), null, null)

    // Add AF trigger with focus region (only on devices that support at least one AF region)
    val characteristics = cameraManager.getCameraCharacteristics(session.device.id)
    val maxSupportedFocusRegions = characteristics.get(CameraCharacteristics.CONTROL_MAX_REGIONS_AF) ?: 0
    if (maxSupportedFocusRegions >= 1) {
      request.set(CaptureRequest.CONTROL_AF_REGIONS, arrayOf(focusAreaTouch))
    }
    request.set(CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_AUTO)
    request.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_AUTO)
    request.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_START)

    session.capture(request.build(), false)

    // Resume preview
    request.set(CaptureRequest.CONTROL_AF_TRIGGER, CaptureRequest.CONTROL_AF_TRIGGER_IDLE)
    session.setRepeatingRequest(request.build(), null, null)
  }
}
|
||||
|
||||
/**
 * Opens a [CameraDevice]. If there already is an open Camera for the given [cameraId], use that.
 *
 * The returned device is cached in [cameraDevice]; [onClosed] fires only if the cached
 * device (not a superseded one) is closed by the system.
 */
private suspend fun getCameraDevice(cameraId: String, onClosed: (error: Throwable) -> Unit): CameraDevice {
  val cached = cameraDevice
  if (cached?.id == cameraId) {
    // Requested device is already open — reuse it.
    return cached
  }
  // A different device was open before; close it first.
  cameraDevice?.tryClose()
  cameraDevice = null

  val device = cameraManager.openCamera(cameraId, { closedDevice, reason ->
    Log.d(TAG, "Camera Closed ($cameraDevice == $closedDevice)")
    if (cameraDevice == closedDevice) {
      // Our current device was closed — propagate the error and drop the cache.
      onClosed(reason)
      cameraDevice = null
    } else {
      // A newer device replaced this one; ignore the stale close event.
    }
  }, CameraQueues.cameraQueue)

  // Cache device in memory
  cameraDevice = device
  return device
}
|
||||
|
||||
// Caches outputs.hashCode() from the last getCaptureSession call, so an unchanged
// output configuration on the same device can reuse the existing session.
private var lastOutputsHashCode: Int? = null

/**
 * Creates (or reuses) a [CameraCaptureSession] for [cameraDevice] with the given [outputs].
 * [onClosed] fires only if the cached session (not a superseded one) gets closed.
 */
private suspend fun getCaptureSession(cameraDevice: CameraDevice,
                                      outputs: CameraOutputs,
                                      onClosed: () -> Unit): CameraCaptureSession {
  val cached = captureSession
  if (cached?.device == cameraDevice && outputs.hashCode() == lastOutputsHashCode) {
    // Same device, same output configuration — reuse the open session.
    return cached
  }
  captureSession?.close()
  captureSession = null

  val newSession = cameraDevice.createCaptureSession(cameraManager, SessionType.REGULAR, outputs, { closedSession ->
    Log.d(TAG, "Capture Session Closed ($captureSession == $closedSession)")
    if (captureSession == closedSession) {
      // Our current session was closed — notify and drop the cache.
      onClosed()
      captureSession = null
    } else {
      // A newer session replaced this one; ignore the stale close event.
    }
  }, CameraQueues.cameraQueue)

  // Cache session in memory
  captureSession = newSession
  lastOutputsHashCode = outputs.hashCode()
  // New session initialized
  onInitialized()
  return newSession
}
|
||||
|
||||
/**
 * Builds the repeating preview [CaptureRequest] from the current settings:
 * frame rate, video stabilization, HDR/night scene mode, zoom and torch.
 *
 * @throws CameraNotReadyError if no preview request builder exists yet.
 */
private fun getPreviewCaptureRequest(fps: Int? = null,
                                     videoStabilizationMode: VideoStabilizationMode? = null,
                                     lowLightBoost: Boolean? = null,
                                     hdr: Boolean? = null,
                                     torch: Boolean? = null): CaptureRequest {
  val builder = previewRequest ?: throw CameraNotReadyError()

  // FPS — fall back to a fixed 30 FPS range when no (usable) fps was requested.
  val fpsRange = if (fps != null && CAN_SET_FPS) Range(fps, fps) else Range(30, 30)
  builder.set(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, fpsRange)

  // Video Stabilization (digital + optical)
  builder.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, videoStabilizationMode?.toDigitalStabilizationMode())
  builder.set(CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE, videoStabilizationMode?.toOpticalStabilizationMode())

  // Night/HDR Mode — HDR takes precedence over low-light boost.
  val sceneMode = when {
    hdr == true -> CaptureRequest.CONTROL_SCENE_MODE_HDR
    lowLightBoost == true -> CaptureRequest.CONTROL_SCENE_MODE_NIGHT
    else -> null
  }
  builder.set(CaptureRequest.CONTROL_SCENE_MODE, sceneMode)
  builder.set(CaptureRequest.CONTROL_MODE, if (sceneMode != null) CaptureRequest.CONTROL_MODE_USE_SCENE_MODE else CaptureRequest.CONTROL_MODE_AUTO)

  // Zoom — native zoom ratio on Android 11+, cropped sensor region before that.
  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
    builder.set(CaptureRequest.CONTROL_ZOOM_RATIO, zoom)
  } else {
    val cameraCharacteristics = cameraManager.getCameraCharacteristics(cameraId!!)
    val size = cameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)!!
    builder.set(CaptureRequest.SCALER_CROP_REGION, size.zoomed(zoom))
  }

  // Torch Mode
  val torchMode = if (torch == true) CaptureRequest.FLASH_MODE_TORCH else CaptureRequest.FLASH_MODE_OFF
  builder.set(CaptureRequest.FLASH_MODE, torchMode)

  return builder.build()
}
|
||||
|
||||
/**
 * Tears down the session: stops and closes the capture session, closes the camera
 * device, and marks the session as not running. Caller must hold [mutex].
 */
private fun destroy() {
  Log.i(TAG, "Destroying session..")
  captureSession?.let {
    it.stopRepeating()
    it.close()
  }
  captureSession = null

  cameraDevice?.close()
  cameraDevice = null

  isRunning = false
}
|
||||
|
||||
/**
 * (Re-)starts the camera session: opens the device, builds a capture session for the
 * current outputs, prepares the preview/record request, and starts the repeating request.
 * Returns early when no cameraId is set or the session is not active.
 */
private suspend fun startRunning() {
  isRunning = false
  val cameraId = cameraId ?: return
  if (!isActive) return

  Log.i(TAG, "Starting Camera Session...")

  try {
    mutex.withLock {
      val currentOutputs = outputs
      if (currentOutputs == null || currentOutputs.size == 0) {
        Log.i(TAG, "CameraSession doesn't have any Outputs, canceling..")
        destroy()
        return@withLock
      }

      // 1. Open the Camera Device (reused when already open)
      val device = getCameraDevice(cameraId) { reason ->
        isRunning = false
        onError(reason)
      }

      // 2. Create (or reuse) the capture session for the current outputs
      val session = getCaptureSession(device, currentOutputs) {
        isRunning = false
      }

      // 3. Build the request template — RECORD when video is enabled, PREVIEW otherwise
      val template = if (currentOutputs.videoOutput != null) CameraDevice.TEMPLATE_RECORD else CameraDevice.TEMPLATE_PREVIEW
      val requestBuilder = device.createCaptureRequest(template)
      currentOutputs.previewOutput?.let { output ->
        Log.i(TAG, "Adding output surface ${output.outputType}..")
        requestBuilder.addTarget(output.surface)
      }
      currentOutputs.videoOutput?.let { output ->
        Log.i(TAG, "Adding output surface ${output.outputType}..")
        requestBuilder.addTarget(output.surface)
      }

      Log.i(TAG, "Camera Session initialized! Starting repeating request..")
      isRunning = true
      this.previewRequest = requestBuilder
      this.captureSession = session
      this.cameraDevice = device
    }

    updateRepeatingRequest()
  } catch (e: IllegalStateException) {
    Log.e(TAG, "Failed to start Camera Session, this session is already closed.", e)
  }
}
|
||||
|
||||
/**
 * Rebuilds and re-installs the repeating preview request from the current settings.
 * When the session isn't running yet, starts it instead (which in turn calls back here).
 */
private suspend fun updateRepeatingRequest() {
  mutex.withLock {
    val session = captureSession
    if (session == null) {
      // Not yet ready. Start session first, then it will update repeating request.
      startRunning()
      return
    }

    val repeatingRequest = getPreviewCaptureRequest(fps, videoStabilizationMode, lowLightBoost, hdr)
    Log.d(TAG, "Setting Repeating Request..")
    session.setRepeatingRequest(repeatingRequest, null, null)
  }
}
|
||||
|
||||
/**
 * Stops the camera session and releases the device/session under the mutex.
 * Swallows [IllegalStateException] when the session was already closed.
 */
private suspend fun stopRunning() {
  Log.i(TAG, "Stopping Camera Session...")
  try {
    mutex.withLock {
      destroy()
      Log.i(TAG, "Camera Session stopped!")
    }
  } catch (e: IllegalStateException) {
    Log.e(TAG, "Failed to stop Camera Session, this session is already closed.", e)
  }
}
|
||||
}
|
@@ -0,0 +1,32 @@
|
||||
package com.mrousavy.camera.core;
|
||||
|
||||
import android.media.Image
|
||||
import kotlinx.coroutines.CompletableDeferred
|
||||
|
||||
/**
 * Matches captured [Image]s to waiting [await] calls by sensor timestamp.
 * Either side (producer via [set], consumer via [await]) may arrive first;
 * a [CompletableDeferred] is lazily created for each timestamp on first access.
 */
class PhotoOutputSynchronizer {
  private val pending = HashMap<Long, CompletableDeferred<Image>>()

  private operator fun get(key: Long): CompletableDeferred<Image> =
    pending.getOrPut(key) { CompletableDeferred() }

  /** Suspends until the image for [timestamp] arrives, then removes it from the queue. */
  suspend fun await(timestamp: Long): Image {
    val image = this[timestamp].await()
    pending.remove(timestamp)
    return image
  }

  /** Delivers [image] for [timestamp], resuming any waiting [await] call. */
  fun set(timestamp: Long, image: Image) {
    this[timestamp].complete(image)
  }

  /** Cancels all pending waits and empties the queue. */
  fun clear() {
    pending.values.forEach { it.cancel() }
    pending.clear()
  }
}
|
@@ -0,0 +1,71 @@
|
||||
package com.mrousavy.camera.core
|
||||
|
||||
import android.annotation.SuppressLint
|
||||
import android.content.Context
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import android.view.Surface
|
||||
import android.view.SurfaceHolder
|
||||
import android.view.SurfaceView
|
||||
import com.mrousavy.camera.extensions.getPreviewSize
|
||||
import kotlin.math.roundToInt
|
||||
|
||||
/**
 * A [SurfaceView] that displays the camera preview. Sizes its buffer to the camera's
 * preferred preview size and measures itself with a center-crop transformation so the
 * preview fills the view. Surface lifecycle changes are reported via [onSurfaceChanged].
 */
@SuppressLint("ViewConstructor")
class PreviewView(context: Context,
                  cameraManager: CameraManager,
                  cameraId: String,
                  private val onSurfaceChanged: (surface: Surface?) -> Unit): SurfaceView(context) {
  // The camera's preferred preview stream size; drives the fixed buffer size below.
  private val targetSize: Size
  private val aspectRatio: Float
    get() = targetSize.width.toFloat() / targetSize.height.toFloat()

  init {
    val characteristics = cameraManager.getCameraCharacteristics(cameraId)
    targetSize = characteristics.getPreviewSize()

    Log.i(TAG, "Using Preview Size ${targetSize.width} x ${targetSize.height}.")
    holder.setFixedSize(targetSize.width, targetSize.height)
    holder.addCallback(object: SurfaceHolder.Callback {
      override fun surfaceCreated(holder: SurfaceHolder) {
        Log.i(TAG, "Surface created! ${holder.surface}")
        onSurfaceChanged(holder.surface)
      }

      override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {
        Log.i(TAG, "Surface resized! ${holder.surface} ($width x $height in format #$format)")
      }

      override fun surfaceDestroyed(holder: SurfaceHolder) {
        Log.i(TAG, "Surface destroyed! ${holder.surface}")
        onSurfaceChanged(null)
      }
    })
  }

  override fun onMeasure(widthMeasureSpec: Int, heightMeasureSpec: Int) {
    super.onMeasure(widthMeasureSpec, heightMeasureSpec)
    val width = MeasureSpec.getSize(widthMeasureSpec)
    val height = MeasureSpec.getSize(heightMeasureSpec)
    Log.d(TAG, "onMeasure($width, $height)")

    // Performs center-crop transformation of the camera frames:
    // scale up the shorter axis so the preview fills the view, cropping the overflow.
    val fittedWidth: Int
    val fittedHeight: Int
    val actualRatio = if (width > height) aspectRatio else 1f / aspectRatio
    if (width < height * actualRatio) {
      fittedHeight = height
      fittedWidth = (height * actualRatio).roundToInt()
    } else {
      fittedWidth = width
      fittedHeight = (width / actualRatio).roundToInt()
    }

    Log.d(TAG, "Measured dimensions set: $fittedWidth x $fittedHeight")
    setMeasuredDimension(fittedWidth, fittedHeight)
  }

  companion object {
    private const val TAG = "NativePreviewView"
  }
}
|
@@ -0,0 +1,135 @@
|
||||
package com.mrousavy.camera.core
|
||||
|
||||
import android.content.Context
|
||||
import android.media.ImageWriter
|
||||
import android.media.MediaCodec
|
||||
import android.media.MediaRecorder
|
||||
import android.os.Build
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import android.view.Surface
|
||||
import com.mrousavy.camera.RecorderError
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.VideoCodec
|
||||
import com.mrousavy.camera.parsers.VideoFileType
|
||||
import java.io.File
|
||||
|
||||
/**
 * Wraps a [MediaRecorder] that records the camera's video stream (and optionally audio)
 * into a temporary file in the app's cache directory.
 *
 * Fix: [stop] previously caught `Error`, but `MediaRecorder.stop()` throws
 * `IllegalStateException`/`RuntimeException` (subclasses of `Exception`), so a failing
 * stop (e.g. no frames recorded yet) crashed instead of being logged. It now catches
 * `Exception`.
 *
 * @param size Video resolution of the camera stream feeding [surface].
 * @param enableAudio Whether to also record from the microphone.
 * @param fps Target frame rate, or null for the recorder's default.
 * @param callback Invoked with the finished [Video] after [stop].
 * @param onError Invoked when the MediaRecorder reports an asynchronous error.
 */
class RecordingSession(context: Context,
                       val size: Size,
                       private val enableAudio: Boolean,
                       private val fps: Int? = null,
                       private val codec: VideoCodec = VideoCodec.H264,
                       private val orientation: Orientation,
                       private val fileType: VideoFileType = VideoFileType.MP4,
                       private val callback: (video: Video) -> Unit,
                       private val onError: (error: RecorderError) -> Unit) {
  companion object {
    private const val TAG = "RecordingSession"
    // bits per second
    private const val VIDEO_BIT_RATE = 10_000_000
    private const val AUDIO_SAMPLING_RATE = 44_100
    private const val AUDIO_BIT_RATE = 16 * AUDIO_SAMPLING_RATE
    private const val AUDIO_CHANNELS = 1
  }

  /** Result of a finished recording. */
  data class Video(val path: String, val durationMs: Long)

  private val recorder: MediaRecorder
  private val outputFile: File
  private var startTime: Long? = null
  private var imageWriter: ImageWriter? = null
  // Persistent input surface so the camera can attach before prepare() is called.
  val surface: Surface = MediaCodec.createPersistentInputSurface()

  init {
    outputFile = File.createTempFile("mrousavy", fileType.toExtension(), context.cacheDir)

    Log.i(TAG, "Creating RecordingSession for ${outputFile.absolutePath}")

    recorder = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) MediaRecorder(context) else MediaRecorder()

    // Sources must be set before output format / encoders.
    if (enableAudio) recorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER)
    recorder.setVideoSource(MediaRecorder.VideoSource.SURFACE)

    recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4)
    recorder.setOutputFile(outputFile.absolutePath)
    recorder.setVideoEncodingBitRate(VIDEO_BIT_RATE)
    // NOTE(review): width/height are swapped here — presumably to account for sensor
    // rotation relative to portrait output. Confirm for landscape recordings.
    recorder.setVideoSize(size.height, size.width)
    if (fps != null) recorder.setVideoFrameRate(fps)

    Log.i(TAG, "Using $codec Video Codec..")
    recorder.setVideoEncoder(codec.toVideoCodec())
    if (enableAudio) {
      Log.i(TAG, "Adding Audio Channel..")
      recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC)
      recorder.setAudioEncodingBitRate(AUDIO_BIT_RATE)
      recorder.setAudioSamplingRate(AUDIO_SAMPLING_RATE)
      recorder.setAudioChannels(AUDIO_CHANNELS)
    }
    recorder.setInputSurface(surface)
    //recorder.setOrientationHint(orientation.toDegrees())

    recorder.setOnErrorListener { _, what, extra ->
      Log.e(TAG, "MediaRecorder Error: $what ($extra)")
      stop()
      val name = when (what) {
        MediaRecorder.MEDIA_RECORDER_ERROR_UNKNOWN -> "unknown"
        MediaRecorder.MEDIA_ERROR_SERVER_DIED -> "server-died"
        else -> "unknown"
      }
      onError(RecorderError(name, extra))
    }
    recorder.setOnInfoListener { _, what, extra ->
      Log.i(TAG, "MediaRecorder Info: $what ($extra)")
    }

    Log.i(TAG, "Created $this!")
  }

  /** Prepares and starts the recorder; records the start time for duration bookkeeping. */
  fun start() {
    synchronized(this) {
      Log.i(TAG, "Starting RecordingSession..")
      recorder.prepare()
      recorder.start()
      startTime = System.currentTimeMillis()
    }
  }

  /**
   * Stops and releases the recorder, then reports the finished [Video] via [callback].
   * A failing stop (e.g. nothing was recorded) is logged, not rethrown.
   */
  fun stop() {
    synchronized(this) {
      Log.i(TAG, "Stopping RecordingSession..")
      try {
        recorder.stop()
        recorder.release()

        imageWriter?.close()
        imageWriter = null
      } catch (e: Exception) {
        // MediaRecorder.stop() throws IllegalStateException/RuntimeException on failure.
        Log.e(TAG, "Failed to stop MediaRecorder!", e)
      }

      val stopTime = System.currentTimeMillis()
      val durationMs = stopTime - (startTime ?: stopTime)
      callback(Video(outputFile.absolutePath, durationMs))
    }
  }

  /** Pauses the recorder (API 24+ behavior of [MediaRecorder.pause]). */
  fun pause() {
    synchronized(this) {
      Log.i(TAG, "Pausing Recording Session..")
      recorder.pause()
    }
  }

  /** Resumes a previously paused recorder. */
  fun resume() {
    synchronized(this) {
      Log.i(TAG, "Resuming Recording Session..")
      recorder.resume()
    }
  }

  override fun toString(): String {
    val audio = if (enableAudio) "with audio" else "without audio"
    return "${size.width} x ${size.height} @ $fps FPS $codec $fileType $orientation RecordingSession ($audio)"
  }
}
|
@@ -0,0 +1,168 @@
|
||||
package com.mrousavy.camera.core
|
||||
|
||||
import android.graphics.ImageFormat
|
||||
import android.graphics.SurfaceTexture
|
||||
import android.media.ImageReader
|
||||
import android.media.ImageWriter
|
||||
import android.media.MediaRecorder
|
||||
import android.util.Log
|
||||
import android.view.Surface
|
||||
import com.facebook.jni.HybridData
|
||||
import com.mrousavy.camera.frameprocessor.Frame
|
||||
import com.mrousavy.camera.frameprocessor.FrameProcessor
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import java.io.Closeable
|
||||
|
||||
/**
|
||||
* An OpenGL pipeline for streaming Camera Frames to one or more outputs.
|
||||
* Currently, [VideoPipeline] can stream to a [FrameProcessor] and a [MediaRecorder].
|
||||
*
|
||||
* @param [width] The width of the Frames to stream (> 0)
|
||||
* @param [height] The height of the Frames to stream (> 0)
|
||||
* @param [format] The format of the Frames to stream. ([ImageFormat.PRIVATE], [ImageFormat.YUV_420_888] or [ImageFormat.JPEG])
|
||||
*/
|
||||
/**
 * An OpenGL pipeline for streaming Camera Frames to one or more outputs.
 * Currently, [VideoPipeline] can stream to a [FrameProcessor] and a [MediaRecorder].
 *
 * Fix: [getImageReader] logged with the literal string "VideoPipeline" instead of the
 * shared [TAG] constant — now consistent with the rest of the class.
 *
 * @param [width] The width of the Frames to stream (> 0)
 * @param [height] The height of the Frames to stream (> 0)
 * @param [format] The format of the Frames to stream. ([ImageFormat.PRIVATE], [ImageFormat.YUV_420_888] or [ImageFormat.JPEG])
 */
@Suppress("KotlinJniMissingFunction")
class VideoPipeline(val width: Int,
                    val height: Int,
                    val format: Int = ImageFormat.PRIVATE,
                    private val isMirrored: Boolean = false): SurfaceTexture.OnFrameAvailableListener, Closeable {
  companion object {
    private const val MAX_IMAGES = 3
    private const val TAG = "VideoPipeline"
  }

  // Keeps the native (C++) counterpart alive; reset in close().
  private val mHybridData: HybridData
  private var openGLTextureId: Int? = null
  private var transformMatrix = FloatArray(16)
  private var isActive = true

  // Output 1: Frame Processor (via an ImageReader)
  private var frameProcessor: FrameProcessor? = null
  private var imageReader: ImageReader? = null

  // Output 2: Recording Session (via a MediaRecorder surface)
  private var recordingSession: RecordingSession? = null

  // Input: the Camera renders into this SurfaceTexture/Surface pair.
  private val surfaceTexture: SurfaceTexture
  val surface: Surface

  init {
    mHybridData = initHybrid(width, height)
    surfaceTexture = SurfaceTexture(false)
    surfaceTexture.setDefaultBufferSize(width, height)
    surfaceTexture.setOnFrameAvailableListener(this)
    surface = Surface(surfaceTexture)
  }

  override fun close() {
    synchronized(this) {
      // Stop processing new frames before releasing resources.
      isActive = false
      imageReader?.close()
      imageReader = null
      frameProcessor = null
      recordingSession = null
      surfaceTexture.release()
      mHybridData.resetNative()
    }
  }

  override fun onFrameAvailable(surfaceTexture: SurfaceTexture) {
    synchronized(this) {
      if (!isActive) return@synchronized

      // 1. Attach Surface to OpenGL context (first frame only)
      if (openGLTextureId == null) {
        openGLTextureId = getInputTextureId()
        surfaceTexture.attachToGLContext(openGLTextureId!!)
        Log.i(TAG, "Attached Texture to Context $openGLTextureId")
      }

      // 2. Prepare the OpenGL context (eglMakeCurrent)
      onBeforeFrame()

      // 3. Update the OpenGL texture
      surfaceTexture.updateTexImage()

      // 4. Get the transform matrix from the SurfaceTexture (rotations/scales applied by Camera)
      surfaceTexture.getTransformMatrix(transformMatrix)

      // 5. Draw it with applied rotation/mirroring
      onFrame(transformMatrix)
    }
  }

  /** Creates the ImageReader that feeds incoming frames into the Frame Processor. */
  private fun getImageReader(): ImageReader {
    val imageReader = ImageReader.newInstance(width, height, format, MAX_IMAGES)
    imageReader.setOnImageAvailableListener({ reader ->
      Log.i(TAG, "ImageReader::onImageAvailable!")
      val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener

      // TODO: Get correct orientation and isMirrored
      val frame = Frame(image, image.timestamp, Orientation.PORTRAIT, isMirrored)
      frame.incrementRefCount()
      frameProcessor?.call(frame)
      frame.decrementRefCount()
    }, null)
    return imageReader
  }

  /**
   * Configures the Pipeline to also call the given [FrameProcessor].
   * * If the [frameProcessor] is `null`, this output channel will be removed.
   * * If the [frameProcessor] is not `null`, the [VideoPipeline] will create Frames
   * using an [ImageWriter] and call the [FrameProcessor] with those Frames.
   */
  fun setFrameProcessorOutput(frameProcessor: FrameProcessor?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height FrameProcessor Output...")
      this.frameProcessor = frameProcessor

      if (frameProcessor != null) {
        if (this.imageReader == null) {
          // 1. Create new ImageReader that just calls the Frame Processor
          this.imageReader = getImageReader()
        }

        // 2. Configure OpenGL pipeline to stream Frames into the ImageReader's surface
        setFrameProcessorOutputSurface(imageReader!!.surface)
      } else {
        // 1. Configure OpenGL pipeline to stop streaming Frames into the ImageReader's surface
        removeFrameProcessorOutputSurface()

        // 2. Close the ImageReader
        this.imageReader?.close()
        this.imageReader = null
      }
    }
  }

  /**
   * Configures the Pipeline to also write Frames to a Surface from a [MediaRecorder].
   * * If the [surface] is `null`, this output channel will be removed.
   * * If the [surface] is not `null`, the [VideoPipeline] will write Frames to this Surface.
   */
  fun setRecordingSessionOutput(recordingSession: RecordingSession?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height RecordingSession Output...")
      if (recordingSession != null) {
        // Configure OpenGL pipeline to stream Frames into the Recording Session's surface
        setRecordingSessionOutputSurface(recordingSession.surface)
        this.recordingSession = recordingSession
      } else {
        // Configure OpenGL pipeline to stop streaming Frames into the Recording Session's surface
        removeRecordingSessionOutputSurface()
        this.recordingSession = null
      }
    }
  }

  private external fun getInputTextureId(): Int
  private external fun onBeforeFrame()
  private external fun onFrame(transformMatrix: FloatArray)
  private external fun setFrameProcessorOutputSurface(surface: Any)
  private external fun removeFrameProcessorOutputSurface()
  private external fun setRecordingSessionOutputSurface(surface: Any)
  private external fun removeRecordingSessionOutputSurface()
  private external fun initHybrid(width: Int, height: Int): HybridData
}
|
@@ -0,0 +1,130 @@
|
||||
package com.mrousavy.camera.core.outputs
|
||||
|
||||
import android.graphics.ImageFormat
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.media.Image
|
||||
import android.media.ImageReader
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import android.view.Surface
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.extensions.closestToOrMax
|
||||
import com.mrousavy.camera.extensions.getPhotoSizes
|
||||
import com.mrousavy.camera.extensions.getPreviewSize
|
||||
import com.mrousavy.camera.extensions.getVideoSizes
|
||||
import com.mrousavy.camera.core.VideoPipeline
|
||||
import java.io.Closeable
|
||||
|
||||
class CameraOutputs(val cameraId: String,
|
||||
cameraManager: CameraManager,
|
||||
val preview: PreviewOutput? = null,
|
||||
val photo: PhotoOutput? = null,
|
||||
val video: VideoOutput? = null,
|
||||
val enableHdr: Boolean? = false,
|
||||
val callback: Callback
|
||||
): Closeable {
|
||||
companion object {
  private const val TAG = "CameraOutputs"
  // Max number of Images buffered in the photo ImageReader at once.
  const val PHOTO_OUTPUT_BUFFER_SIZE = 3
}

// Requested configuration for each stream; actual sizes are resolved in init.
data class PreviewOutput(val surface: Surface)
data class PhotoOutput(val targetSize: Size? = null,
                       val format: Int = ImageFormat.JPEG)
data class VideoOutput(val targetSize: Size? = null,
                       val enableRecording: Boolean = false,
                       val enableFrameProcessor: Boolean? = false,
                       val format: Int = ImageFormat.PRIVATE)

// Receives photos delivered by the photo output's ImageReader.
interface Callback {
  fun onPhotoCaptured(image: Image)
}

// The concrete outputs, created in init from the configurations above.
var previewOutput: SurfaceOutput? = null
  private set
var photoOutput: ImageReaderOutput? = null
  private set
var videoOutput: VideoPipelineOutput? = null
  private set

// Number of currently attached outputs (0..3).
val size: Int
  get() {
    var count = 0
    if (previewOutput != null) count++
    if (photoOutput != null) count++
    if (videoOutput != null) count++
    return count
  }
|
||||
|
||||
/** Two CameraOutputs are equal when every configuration field (incl. HDR) matches. */
override fun equals(other: Any?): Boolean {
  if (other !is CameraOutputs) return false
  return cameraId == other.cameraId &&
    preview?.surface == other.preview?.surface &&
    photo?.targetSize == other.photo?.targetSize &&
    photo?.format == other.photo?.format &&
    video?.enableRecording == other.video?.enableRecording &&
    video?.targetSize == other.video?.targetSize &&
    video?.format == other.video?.format &&
    enableHdr == other.enableHdr
}
|
||||
|
||||
/**
 * Hash of the output configuration.
 *
 * Fix: now includes [enableHdr] — [equals] compares it, and CameraSession uses this hash
 * (via `lastOutputsHashCode`) to decide whether the capture session must be re-created,
 * so an HDR toggle previously went undetected. Also switched from additive mixing to the
 * conventional 31-multiplier to reduce collisions.
 */
override fun hashCode(): Int {
  var result = cameraId.hashCode()
  result = 31 * result + (preview?.hashCode() ?: 0)
  result = 31 * result + (photo?.hashCode() ?: 0)
  result = 31 * result + (video?.hashCode() ?: 0)
  result = 31 * result + (enableHdr?.hashCode() ?: 0)
  return result
}
|
||||
|
||||
override fun close() {
|
||||
photoOutput?.close()
|
||||
videoOutput?.close()
|
||||
}
|
||||
|
||||
override fun toString(): String {
|
||||
val strings = arrayListOf<String>()
|
||||
previewOutput?.let { strings.add(it.toString()) }
|
||||
photoOutput?.let { strings.add(it.toString()) }
|
||||
videoOutput?.let { strings.add(it.toString()) }
|
||||
return strings.joinToString(", ", "[", "]")
|
||||
}
|
||||
|
||||
init {
|
||||
val characteristics = cameraManager.getCameraCharacteristics(cameraId)
|
||||
val isMirrored = characteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT
|
||||
|
||||
Log.i(TAG, "Preparing Outputs for Camera $cameraId...")
|
||||
|
||||
// Preview output: Low resolution repeating images (SurfaceView)
|
||||
if (preview != null) {
|
||||
Log.i(TAG, "Adding native preview view output.")
|
||||
previewOutput = SurfaceOutput(preview.surface, characteristics.getPreviewSize(), SurfaceOutput.OutputType.PREVIEW)
|
||||
}
|
||||
|
||||
// Photo output: High quality still images (takePhoto())
|
||||
if (photo != null) {
|
||||
val size = characteristics.getPhotoSizes(photo.format).closestToOrMax(photo.targetSize)
|
||||
|
||||
val imageReader = ImageReader.newInstance(size.width, size.height, photo.format, PHOTO_OUTPUT_BUFFER_SIZE)
|
||||
imageReader.setOnImageAvailableListener({ reader ->
|
||||
val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener
|
||||
callback.onPhotoCaptured(image)
|
||||
}, CameraQueues.cameraQueue.handler)
|
||||
|
||||
Log.i(TAG, "Adding ${size.width}x${size.height} photo output. (Format: ${photo.format})")
|
||||
photoOutput = ImageReaderOutput(imageReader, SurfaceOutput.OutputType.PHOTO)
|
||||
}
|
||||
|
||||
// Video output: High resolution repeating images (startRecording() or useFrameProcessor())
|
||||
if (video != null) {
|
||||
val size = characteristics.getVideoSizes(cameraId, video.format).closestToOrMax(video.targetSize)
|
||||
val videoPipeline = VideoPipeline(size.width, size.height, video.format, isMirrored)
|
||||
|
||||
Log.i(TAG, "Adding ${size.width}x${size.height} video output. (Format: ${video.format})")
|
||||
videoOutput = VideoPipelineOutput(videoPipeline, SurfaceOutput.OutputType.VIDEO)
|
||||
}
|
||||
|
||||
Log.i(TAG, "Prepared $size Outputs for Camera $cameraId!")
|
||||
}
|
||||
}
|
@@ -0,0 +1,22 @@
|
||||
package com.mrousavy.camera.core.outputs
|
||||
|
||||
import android.media.ImageReader
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import java.io.Closeable
|
||||
|
||||
/**
|
||||
* A [SurfaceOutput] that uses an [ImageReader] as it's surface.
|
||||
*/
|
||||
/**
 * A [SurfaceOutput] backed by an [ImageReader]'s surface.
 * Closing this output closes the underlying ImageReader.
 */
class ImageReaderOutput(private val imageReader: ImageReader,
                        outputType: OutputType,
                        dynamicRangeProfile: Long? = null): Closeable, SurfaceOutput(imageReader.surface, Size(imageReader.width, imageReader.height), outputType, dynamicRangeProfile) {
  override fun close() {
    Log.i(TAG, "Closing ${imageReader.width}x${imageReader.height} $outputType ImageReader..")
    imageReader.close()
  }

  override fun toString() = "$outputType (${imageReader.width} x ${imageReader.height} in format #${imageReader.imageFormat})"
}
|
@@ -0,0 +1,79 @@
|
||||
package com.mrousavy.camera.core.outputs
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
import android.hardware.camera2.CameraMetadata
|
||||
import android.hardware.camera2.params.OutputConfiguration
|
||||
import android.os.Build
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import android.view.Surface
|
||||
import androidx.annotation.RequiresApi
|
||||
import java.io.Closeable
|
||||
|
||||
/**
|
||||
* A general-purpose Camera Output that writes to a [Surface]
|
||||
*/
|
||||
/**
 * A general-purpose Camera Output that writes to a [Surface].
 *
 * When [closeSurfaceOnEnd] is true, [close] releases the Surface; otherwise the
 * caller keeps ownership of it.
 */
open class SurfaceOutput(val surface: Surface,
                         val size: Size,
                         val outputType: OutputType,
                         private val dynamicRangeProfile: Long? = null,
                         private val closeSurfaceOnEnd: Boolean = false): Closeable {

  /**
   * Builds the [OutputConfiguration] for this output. On API 33+ this applies
   * the HDR dynamic-range profile (if any) and an optimized stream use-case
   * when the device advertises one matching [outputType].
   */
  fun toOutputConfiguration(characteristics: CameraCharacteristics): OutputConfiguration {
    val configuration = OutputConfiguration(surface)
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      if (dynamicRangeProfile != null) {
        configuration.dynamicRangeProfile = dynamicRangeProfile
        Log.i(TAG, "Using dynamic range profile ${configuration.dynamicRangeProfile} for $outputType output.")
      }
      if (supportsOutputType(characteristics, outputType)) {
        configuration.streamUseCase = outputType.toOutputType().toLong()
        Log.i(TAG, "Using optimized stream use case ${configuration.streamUseCase} for $outputType output.")
      }
    }
    return configuration
  }

  override fun toString(): String {
    return "$outputType (${size.width} x ${size.height})"
  }

  override fun close() {
    if (closeSurfaceOnEnd) {
      surface.release()
    }
  }

  enum class OutputType {
    PHOTO,
    VIDEO,
    PREVIEW,
    VIDEO_AND_PREVIEW;

    // Maps this output type to its Camera2 stream use-case constant (API 33+).
    @RequiresApi(Build.VERSION_CODES.TIRAMISU)
    fun toOutputType(): Int {
      return when(this) {
        PHOTO -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_STILL_CAPTURE
        VIDEO -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_RECORD
        PREVIEW -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW
        VIDEO_AND_PREVIEW -> CameraMetadata.SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW_VIDEO_STILL
      }
    }
  }

  companion object {
    const val TAG = "SurfaceOutput"

    // True only on API 33+ when the device lists a stream use-case matching the given output type.
    private fun supportsOutputType(characteristics: CameraCharacteristics, outputType: OutputType): Boolean {
      if (Build.VERSION.SDK_INT < Build.VERSION_CODES.TIRAMISU) return false
      val availableUseCases = characteristics.get(CameraCharacteristics.SCALER_AVAILABLE_STREAM_USE_CASES) ?: return false
      return availableUseCases.contains(outputType.toOutputType().toLong())
    }
  }
}
|
@@ -0,0 +1,22 @@
|
||||
package com.mrousavy.camera.core.outputs
|
||||
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import com.mrousavy.camera.core.VideoPipeline
|
||||
import java.io.Closeable
|
||||
|
||||
/**
|
||||
* A [SurfaceOutput] that uses a [VideoPipeline] as it's surface.
|
||||
*/
|
||||
/**
 * A [SurfaceOutput] backed by a [VideoPipeline]'s surface.
 * Closing this output closes the underlying pipeline.
 */
class VideoPipelineOutput(val videoPipeline: VideoPipeline,
                          outputType: OutputType,
                          dynamicRangeProfile: Long? = null): Closeable, SurfaceOutput(videoPipeline.surface, Size(videoPipeline.width, videoPipeline.height), outputType, dynamicRangeProfile) {
  override fun close() {
    Log.i(TAG, "Closing ${videoPipeline.width}x${videoPipeline.height} Video Pipeline..")
    videoPipeline.close()
  }

  override fun toString() = "$outputType (${videoPipeline.width} x ${videoPipeline.height} in format #${videoPipeline.format})"
}
|
@@ -0,0 +1,53 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.hardware.camera2.CameraCaptureSession
|
||||
import android.hardware.camera2.CaptureFailure
|
||||
import android.hardware.camera2.CaptureRequest
|
||||
import android.hardware.camera2.TotalCaptureResult
|
||||
import android.media.MediaActionSound
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.CaptureAbortedError
|
||||
import com.mrousavy.camera.UnknownCaptureError
|
||||
import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
import kotlin.coroutines.suspendCoroutine
|
||||
|
||||
/**
 * Submits a single [captureRequest] to this session and suspends until the capture
 * either completes (resumes with the [TotalCaptureResult]) or fails (resumes with a
 * [CaptureAbortedError] or [UnknownCaptureError]).
 */
suspend fun CameraCaptureSession.capture(captureRequest: CaptureRequest, enableShutterSound: Boolean): TotalCaptureResult {
  return suspendCoroutine { cont ->
    val captureCallback = object: CameraCaptureSession.CaptureCallback() {
      override fun onCaptureCompleted(
        session: CameraCaptureSession,
        request: CaptureRequest,
        result: TotalCaptureResult
      ) {
        super.onCaptureCompleted(session, request, result)
        cont.resume(result)
      }

      override fun onCaptureStarted(session: CameraCaptureSession, request: CaptureRequest, timestamp: Long, frameNumber: Long) {
        super.onCaptureStarted(session, request, timestamp, frameNumber)
        if (enableShutterSound) {
          // NOTE(review): a fresh MediaActionSound is allocated for every capture and
          // never release()d — consider hoisting/releasing it. Kept as-is to preserve behavior.
          MediaActionSound().play(MediaActionSound.SHUTTER_CLICK)
        }
      }

      override fun onCaptureFailed(
        session: CameraCaptureSession,
        request: CaptureRequest,
        failure: CaptureFailure
      ) {
        super.onCaptureFailed(session, request, failure)
        val wasImageCaptured = failure.wasImageCaptured()
        // REASON_ERROR and unknown reasons both map to UnknownCaptureError;
        // only REASON_FLUSHED means the capture was deliberately aborted.
        val error = when (failure.reason) {
          CaptureFailure.REASON_FLUSHED -> CaptureAbortedError(wasImageCaptured)
          else -> UnknownCaptureError(wasImageCaptured)
        }
        cont.resumeWithException(error)
      }
    }
    this.capture(captureRequest, captureCallback, CameraQueues.cameraQueue.handler)
  }
}
|
@@ -0,0 +1,68 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.content.res.Resources
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
import android.media.CamcorderProfile
|
||||
import android.os.Build
|
||||
import android.util.Log
|
||||
import android.util.Size
|
||||
import android.view.SurfaceHolder
|
||||
import android.view.SurfaceView
|
||||
|
||||
// Upper bound for PREVIEW stream resolutions.
// See https://developer.android.com/reference/android/hardware/camera2/params/StreamConfigurationMap
// PREVIEW streams can have a resolution of up to the phone's display's resolution,
// with a maximum of 1920x1080.
private fun getMaximumPreviewSize(): Size {
  val display1080p = Size(1920, 1080)
  val displaySize = Size(Resources.getSystem().displayMetrics.widthPixels, Resources.getSystem().displayMetrics.heightPixels)
  val isHighResScreen = displaySize.bigger >= display1080p.bigger || displaySize.smaller >= display1080p.smaller
  Log.i("PreviewSize", "Phone has a ${displaySize.width} x ${displaySize.height} screen.")
  return if (isHighResScreen) display1080p else displaySize
}

/**
 * Gets the maximum Preview Resolution this device is capable of streaming at. (For [SurfaceView])
 *
 * Picks the largest SurfaceHolder output size that still fits within the preview cap;
 * if no size fits, falls back to the smallest available size instead of crashing
 * (the previous `first { }` threw NoSuchElementException in that case).
 */
fun CameraCharacteristics.getPreviewSize(): Size {
  val config = this.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  val previewSize = getMaximumPreviewSize()
  val outputSizes = config.getOutputSizes(SurfaceHolder::class.java).sortedByDescending { it.width * it.height }
  return outputSizes.firstOrNull { it.bigger <= previewSize.bigger && it.smaller <= previewSize.smaller }
    ?: outputSizes.last()
}
|
||||
|
||||
// Largest recordable video size as reported by CamcorderProfile (QUALITY_HIGH),
// or null if no profile could be resolved for this camera ID.
private fun getMaximumVideoSize(cameraId: String): Size? {
  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
    val profiles = CamcorderProfile.getAll(cameraId, CamcorderProfile.QUALITY_HIGH)
    if (profiles != null) {
      val largestProfile = profiles.videoProfiles.filterNotNull().maxByOrNull { it.width * it.height }
      if (largestProfile != null) return Size(largestProfile.width, largestProfile.height)
    }
  }

  // Legacy fallback: CamcorderProfile.get only accepts integer camera IDs.
  val numericCameraId = cameraId.toIntOrNull() ?: return null
  val profile = CamcorderProfile.get(numericCameraId, CamcorderProfile.QUALITY_HIGH)
  return Size(profile.videoFrameWidth, profile.videoFrameHeight)
}

/**
 * All output sizes this camera supports for video in the given [format],
 * limited to the maximum recordable size (when one can be determined).
 */
fun CameraCharacteristics.getVideoSizes(cameraId: String, format: Int): List<Size> {
  val config = this.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  val sizes = config.getOutputSizes(format) ?: emptyArray()
  val maxVideoSize = getMaximumVideoSize(cameraId) ?: return sizes.toList()
  return sizes.filter { it.bigger <= maxVideoSize.bigger }
}

/**
 * All output sizes this camera supports for photos in the given [format],
 * including high-resolution (slower) capture sizes.
 */
fun CameraCharacteristics.getPhotoSizes(format: Int): List<Size> {
  val config = this.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
  val sizes = config.getOutputSizes(format) ?: emptyArray()
  // NOTE(review): getHighResolutionOutputSizes requires API 23+ — confirm the module's minSdk.
  val highResSizes = config.getHighResolutionOutputSizes(format) ?: emptyArray()
  return sizes.plus(highResSizes).toList()
}
|
@@ -0,0 +1,95 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.hardware.camera2.CameraCaptureSession
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
import android.hardware.camera2.CameraDevice
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.hardware.camera2.params.OutputConfiguration
|
||||
import android.hardware.camera2.params.SessionConfiguration
|
||||
import android.os.Build
|
||||
import android.util.Log
|
||||
import androidx.annotation.RequiresApi
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.CameraSessionCannotBeConfiguredError
|
||||
import com.mrousavy.camera.core.outputs.CameraOutputs
|
||||
import kotlinx.coroutines.suspendCancellableCoroutine
|
||||
import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
|
||||
enum class SessionType {
  REGULAR,
  HIGH_SPEED;

  // Maps to the Camera2 SessionConfiguration constant (API 28+).
  @RequiresApi(Build.VERSION_CODES.P)
  fun toSessionType(): Int {
    return when(this) {
      REGULAR -> SessionConfiguration.SESSION_REGULAR
      HIGH_SPEED -> SessionConfiguration.SESSION_HIGH_SPEED
    }
  }
}

private const val TAG = "CreateCaptureSession"
// Monotonically increasing ID used only for log correlation.
private var sessionId = 1000

/**
 * Creates a [CameraCaptureSession] for this device with the given [outputs] and
 * suspends until the session is configured. Throws
 * [CameraSessionCannotBeConfiguredError] if configuration fails; [onClosed] is
 * invoked when the session is later closed.
 */
suspend fun CameraDevice.createCaptureSession(cameraManager: CameraManager,
                                              sessionType: SessionType,
                                              outputs: CameraOutputs,
                                              onClosed: (session: CameraCaptureSession) -> Unit,
                                              queue: CameraQueues.CameraQueue): CameraCaptureSession {
  return suspendCancellableCoroutine { continuation ->
    val characteristics = cameraManager.getCameraCharacteristics(id)
    val hardwareLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!!
    // Renamed from `sessionId` to avoid shadowing the file-level counter.
    val currentSessionId = sessionId++
    // Fixed: the previous format string contained a stray '}' after $hardwareLevel.
    Log.i(TAG, "Camera $id: Creating Capture Session #$currentSessionId... " +
      "Hardware Level: $hardwareLevel | Outputs: $outputs")

    val callback = object: CameraCaptureSession.StateCallback() {
      override fun onConfigured(session: CameraCaptureSession) {
        Log.i(TAG, "Camera $id: Capture Session #$currentSessionId configured!")
        continuation.resume(session)
      }

      override fun onConfigureFailed(session: CameraCaptureSession) {
        Log.e(TAG, "Camera $id: Failed to configure Capture Session #$currentSessionId!")
        continuation.resumeWithException(CameraSessionCannotBeConfiguredError(id, outputs))
      }

      override fun onClosed(session: CameraCaptureSession) {
        super.onClosed(session)
        Log.i(TAG, "Camera $id: Capture Session #$currentSessionId closed!")
        onClosed(session)
      }
    }

    // Collect an OutputConfiguration for every configured output.
    val outputConfigurations = arrayListOf<OutputConfiguration>()
    outputs.previewOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    outputs.photoOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    outputs.videoOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    // Apply an HDR dynamic range profile to all outputs when requested and supported (API 33+).
    if (outputs.enableHdr == true && Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      val supportedProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
      val hdrProfile = supportedProfiles?.bestProfile ?: supportedProfiles?.supportedProfiles?.firstOrNull()
      if (hdrProfile != null) {
        Log.i(TAG, "Camera $id: Using HDR Profile $hdrProfile...")
        outputConfigurations.forEach { it.dynamicRangeProfile = hdrProfile }
      } else {
        Log.w(TAG, "Camera $id: HDR was enabled, but the device does not support any matching HDR profile!")
      }
    }

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      Log.i(TAG, "Using new API (>=28)")
      val config = SessionConfiguration(sessionType.toSessionType(), outputConfigurations, queue.executor, callback)
      this.createCaptureSession(config)
    } else {
      Log.i(TAG, "Using legacy API (<28)")
      this.createCaptureSessionByOutputConfigurations(outputConfigurations, callback, queue.handler)
    }
  }
}
|
@@ -0,0 +1,97 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
import android.hardware.camera2.CameraDevice
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.hardware.camera2.CaptureRequest
|
||||
import android.os.Build
|
||||
import android.view.Surface
|
||||
import com.mrousavy.camera.parsers.Flash
|
||||
import com.mrousavy.camera.parsers.Orientation
|
||||
import com.mrousavy.camera.parsers.QualityPrioritization
|
||||
|
||||
// Whether this device supports CameraDevice.TEMPLATE_VIDEO_SNAPSHOT.
// As per CameraDevice.TEMPLATE_VIDEO_SNAPSHOT in documentation: LEGACY devices,
// and DEPTH_OUTPUT devices that are not BACKWARD_COMPATIBLE, do not support it.
private fun supportsSnapshotCapture(cameraCharacteristics: CameraCharacteristics): Boolean {
  val hardwareLevel = cameraCharacteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)!!
  if (hardwareLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) return false

  val capabilities = cameraCharacteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)!!
  val hasDepth = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT)
  // Fixed: the previous code negated this flag at assignment AND at use, which
  // rejected snapshot capture exactly for depth devices that ARE backward
  // compatible — the inverse of what the Camera2 documentation specifies.
  val isBackwardsCompatible = capabilities.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE)
  if (hasDepth && !isBackwardsCompatible) return false

  return true
}

/**
 * Builds a still-photo [CaptureRequest] targeting [surface].
 *
 * Chooses TEMPLATE_VIDEO_SNAPSHOT for speed-prioritized captures when supported,
 * otherwise TEMPLATE_STILL_CAPTURE, then applies JPEG quality/orientation, flash,
 * stabilization, and zoom.
 */
fun CameraDevice.createPhotoCaptureRequest(cameraManager: CameraManager,
                                           surface: Surface,
                                           zoom: Float,
                                           qualityPrioritization: QualityPrioritization,
                                           flashMode: Flash,
                                           enableRedEyeReduction: Boolean,
                                           enableAutoStabilization: Boolean,
                                           orientation: Orientation): CaptureRequest {
  val cameraCharacteristics = cameraManager.getCameraCharacteristics(this.id)

  val template = if (qualityPrioritization == QualityPrioritization.SPEED && supportsSnapshotCapture(cameraCharacteristics)) {
    CameraDevice.TEMPLATE_VIDEO_SNAPSHOT
  } else {
    CameraDevice.TEMPLATE_STILL_CAPTURE
  }
  val captureRequest = this.createCaptureRequest(template)

  // TODO: Maybe we can even expose that prop directly?
  val jpegQuality = when (qualityPrioritization) {
    QualityPrioritization.SPEED -> 85
    QualityPrioritization.BALANCED -> 92
    QualityPrioritization.QUALITY -> 100
  }
  captureRequest.set(CaptureRequest.JPEG_QUALITY, jpegQuality.toByte())

  captureRequest.set(CaptureRequest.JPEG_ORIENTATION, orientation.toDegrees())

  // Set the Flash Mode
  when (flashMode) {
    Flash.OFF -> {
      captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON)
    }
    Flash.ON -> {
      captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_ALWAYS_FLASH)
    }
    Flash.AUTO -> {
      if (enableRedEyeReduction) {
        captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE)
      } else {
        captureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH)
      }
    }
  }

  if (enableAutoStabilization) {
    // Enable optical or digital image stabilization (optical preferred when available).
    val digitalStabilization = cameraCharacteristics.get(CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES)
    val hasDigitalStabilization = digitalStabilization?.contains(CameraCharacteristics.CONTROL_VIDEO_STABILIZATION_MODE_ON) ?: false

    val opticalStabilization = cameraCharacteristics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION)
    val hasOpticalStabilization = opticalStabilization?.contains(CameraCharacteristics.LENS_OPTICAL_STABILIZATION_MODE_ON) ?: false
    if (hasOpticalStabilization) {
      captureRequest.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE_OFF)
      captureRequest.set(CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE, CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE_ON)
    } else if (hasDigitalStabilization) {
      captureRequest.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE_ON)
    } else {
      // no stabilization is supported. ignore it
    }
  }

  // Zoom: API 30+ supports CONTROL_ZOOM_RATIO directly; older APIs crop the sensor region.
  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
    captureRequest.set(CaptureRequest.CONTROL_ZOOM_RATIO, zoom)
  } else {
    val size = cameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)!!
    captureRequest.set(CaptureRequest.SCALER_CROP_REGION, size.zoomed(zoom))
  }

  captureRequest.addTarget(surface)

  return captureRequest.build()
}
|
@@ -0,0 +1,68 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.annotation.SuppressLint
|
||||
import android.hardware.camera2.CameraDevice
|
||||
import android.hardware.camera2.CameraManager
|
||||
import android.os.Build
|
||||
import android.util.Log
|
||||
import com.mrousavy.camera.CameraCannotBeOpenedError
|
||||
import com.mrousavy.camera.CameraDisconnectedError
|
||||
import com.mrousavy.camera.CameraQueues
|
||||
import com.mrousavy.camera.parsers.CameraDeviceError
|
||||
import kotlinx.coroutines.suspendCancellableCoroutine
|
||||
import kotlin.coroutines.resume
|
||||
import kotlin.coroutines.resumeWithException
|
||||
|
||||
private const val TAG = "CameraManager"
|
||||
|
||||
/**
 * Opens the camera with the given [cameraId] and suspends until it is available.
 *
 * Resumes with the opened [CameraDevice], or throws [CameraCannotBeOpenedError]
 * if the camera disconnects or errors before opening. After a successful open,
 * later disconnects/errors are delivered through [onDisconnected] instead.
 */
@SuppressLint("MissingPermission")
suspend fun CameraManager.openCamera(cameraId: String,
                                     onDisconnected: (camera: CameraDevice, reason: Throwable) -> Unit,
                                     queue: CameraQueues.CameraQueue): CameraDevice {
  return suspendCancellableCoroutine { continuation ->
    Log.i(TAG, "Camera $cameraId: Opening...")

    val stateCallback = object: CameraDevice.StateCallback() {
      override fun onOpened(camera: CameraDevice) {
        Log.i(TAG, "Camera $cameraId: Opened!")
        continuation.resume(camera)
      }

      override fun onDisconnected(camera: CameraDevice) {
        Log.i(TAG, "Camera $cameraId: Disconnected!")
        // Before the coroutine resumed: fail the open. Afterwards: notify the caller.
        if (continuation.isActive) {
          continuation.resumeWithException(CameraCannotBeOpenedError(cameraId, CameraDeviceError.DISCONNECTED))
        } else {
          onDisconnected(camera, CameraDisconnectedError(cameraId, CameraDeviceError.DISCONNECTED))
        }
        camera.tryClose()
      }

      override fun onError(camera: CameraDevice, errorCode: Int) {
        Log.e(TAG, "Camera $cameraId: Error! $errorCode")
        val error = CameraDeviceError.fromCameraDeviceError(errorCode)
        if (continuation.isActive) {
          continuation.resumeWithException(CameraCannotBeOpenedError(cameraId, error))
        } else {
          onDisconnected(camera, CameraDisconnectedError(cameraId, error))
        }
        camera.tryClose()
      }
    }

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      this.openCamera(cameraId, queue.executor, stateCallback)
    } else {
      this.openCamera(cameraId, stateCallback, queue.handler)
    }
  }
}

/** Closes this camera device, swallowing (but logging) any exception thrown while closing. */
fun CameraDevice.tryClose() {
  try {
    Log.i(TAG, "Camera $id: Closing...")
    this.close()
  } catch (e: Throwable) {
    Log.e(TAG, "Camera $id: Failed to close!", e)
  }
}
|
@@ -0,0 +1,27 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.hardware.camera2.params.DynamicRangeProfiles
|
||||
import android.os.Build
|
||||
import androidx.annotation.RequiresApi
|
||||
|
||||
// Returns the first element of [filter] (in filter's iteration order) that is
// also contained in this set, or null. Rewritten from a manual forEach/return
// loop to the idiomatic firstOrNull.
private fun Set<Long>.firstMatch(filter: Set<Long>): Long? =
  filter.firstOrNull { this.contains(it) }

// HDR profiles in order of preference (best first).
@RequiresApi(Build.VERSION_CODES.TIRAMISU)
private val bestProfiles = setOf(
  DynamicRangeProfiles.HDR10_PLUS,
  DynamicRangeProfiles.HDR10,
  DynamicRangeProfiles.HLG10
)

/** The best supported HDR profile of this device, or null if none of the preferred profiles is supported. */
val DynamicRangeProfiles.bestProfile: Long?
  @RequiresApi(Build.VERSION_CODES.TIRAMISU)
  get() = supportedProfiles.firstMatch(bestProfiles)
|
@@ -0,0 +1,21 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.os.Handler
|
||||
import java.util.concurrent.Semaphore
|
||||
|
||||
/**
|
||||
* Posts a Message to this Handler and blocks the calling Thread until the Handler finished executing the given job.
|
||||
*/
|
||||
/**
 * Posts a Message to this Handler and blocks the calling Thread until the Handler finished executing the given job.
 *
 * The semaphore is released in a finally block, so the caller is unblocked even
 * if [job] throws. Do not call this from the Handler's own thread — it would deadlock.
 */
fun Handler.postAndWait(job: () -> Unit) {
  val done = Semaphore(0)

  this.post {
    try {
      job()
    } finally {
      done.release()
    }
  }

  done.acquire()
}
|
@@ -0,0 +1,5 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
/** Returns true if this list contains at least one of the given [elements]. */
fun <T> List<T>.containsAny(elements: List<T>): Boolean {
  return elements.any { it in this }
}
|
@@ -0,0 +1,14 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.graphics.Rect
|
||||
|
||||
/**
 * Returns a copy of this Rect cropped symmetrically towards its center by the
 * given [zoomFactor] (e.g. 2.0 keeps the centered half in each dimension).
 */
fun Rect.zoomed(zoomFactor: Float): Rect {
  val fullWidth = right - left
  val fullHeight = bottom - top

  // Inset the same amount from opposite sides so the center stays fixed.
  val horizontalInset = fullWidth / zoomFactor / 2
  val verticalInset = fullHeight / zoomFactor / 2

  return Rect((left + horizontalInset).toInt(),
              (top + verticalInset).toInt(),
              (right - horizontalInset).toInt(),
              (bottom - verticalInset).toInt())
}
|
@@ -0,0 +1,44 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.util.Size
|
||||
import android.util.SizeF
|
||||
import android.view.Surface
|
||||
import kotlin.math.abs
|
||||
import kotlin.math.max
|
||||
import kotlin.math.min
|
||||
|
||||
/**
 * Picks the Size closest (by Manhattan distance of width/height) to [size],
 * or the largest available Size when [size] is null.
 */
fun List<Size>.closestToOrMax(size: Size?): Size =
  if (size == null) {
    this.maxBy { it.width * it.height }
  } else {
    this.minBy { abs(it.width - size.width) + abs(it.height - size.height) }
  }

/**
 * Rotate by a given Surface Rotation
 */
fun Size.rotated(surfaceRotation: Int): Size = when (surfaceRotation) {
  // 90°/270° swap width and height; 0°/180°/unknown keep them.
  Surface.ROTATION_90, Surface.ROTATION_270 -> Size(height, width)
  else -> Size(width, height)
}

/** The larger of width/height. */
val Size.bigger: Int
  get() = max(width, height)
/** The smaller of width/height. */
val Size.smaller: Int
  get() = min(width, height)

val SizeF.bigger: Float
  get() = max(this.width, this.height)
val SizeF.smaller: Float
  get() = min(this.width, this.height)

/** Compares Sizes by their pixel area. */
operator fun Size.compareTo(other: Size): Int =
  (this.width * this.height).compareTo(other.width * other.height)
|
||||
|
@@ -0,0 +1,20 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import android.view.View
|
||||
import android.view.ViewGroup
|
||||
|
||||
// React does not trigger onLayout events for dynamically added views (`addView`).
|
||||
// This fixes that.
|
||||
// https://github.com/facebook/react-native/issues/17968#issuecomment-633308615
|
||||
// React does not trigger onLayout events for dynamically added views (`addView`).
// This fixes that by re-measuring and re-laying-out the parent whenever a child is added.
// https://github.com/facebook/react-native/issues/17968#issuecomment-633308615
fun ViewGroup.installHierarchyFitter() {
  setOnHierarchyChangeListener(object : ViewGroup.OnHierarchyChangeListener {
    override fun onChildViewRemoved(parent: View?, child: View?) = Unit

    override fun onChildViewAdded(parent: View?, child: View?) {
      // Force the parent to exactly this ViewGroup's measured dimensions.
      parent?.measure(
        View.MeasureSpec.makeMeasureSpec(measuredWidth, View.MeasureSpec.EXACTLY),
        View.MeasureSpec.makeMeasureSpec(measuredHeight, View.MeasureSpec.EXACTLY)
      )
      parent?.layout(0, 0, parent.measuredWidth, parent.measuredHeight)
    }
  })
}
|
@@ -0,0 +1,17 @@
|
||||
package com.mrousavy.camera.extensions
|
||||
|
||||
import com.facebook.react.bridge.WritableMap
|
||||
|
||||
/** Puts [value] under [key], writing an explicit null entry when the value is absent. */
fun WritableMap.putInt(key: String, value: Int?) {
  if (value != null) this.putInt(key, value) else this.putNull(key)
}

/** Puts [value] under [key], writing an explicit null entry when the value is absent. */
fun WritableMap.putDouble(key: String, value: Double?) {
  if (value != null) this.putDouble(key, value) else this.putNull(key)
}
|
@@ -0,0 +1,147 @@
|
||||
package com.mrousavy.camera.frameprocessor;
|
||||
|
||||
import android.graphics.ImageFormat;
|
||||
import android.media.Image;
|
||||
import com.facebook.proguard.annotations.DoNotStrip;
|
||||
import com.mrousavy.camera.parsers.PixelFormat;
|
||||
import com.mrousavy.camera.parsers.Orientation;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
public class Frame {
|
||||
private final Image image;
|
||||
private final boolean isMirrored;
|
||||
private final long timestamp;
|
||||
private final Orientation orientation;
|
||||
private int refCount = 0;
|
||||
|
||||
public Frame(Image image, long timestamp, Orientation orientation, boolean isMirrored) {
|
||||
this.image = image;
|
||||
this.timestamp = timestamp;
|
||||
this.orientation = orientation;
|
||||
this.isMirrored = isMirrored;
|
||||
}
|
||||
|
||||
public Image getImage() {
|
||||
return image;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public int getWidth() {
|
||||
return image.getWidth();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public int getHeight() {
|
||||
return image.getHeight();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public boolean getIsValid() {
|
||||
try {
|
||||
// will throw an exception if the image is already closed
|
||||
image.getCropRect();
|
||||
// no exception thrown, image must still be valid.
|
||||
return true;
|
||||
} catch (Exception e) {
|
||||
// exception thrown, image has already been closed.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public boolean getIsMirrored() {
|
||||
return isMirrored;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public String getOrientation() {
|
||||
return orientation.getUnionValue();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public String getPixelFormat() {
|
||||
PixelFormat format = PixelFormat.Companion.fromImageFormat(image.getFormat());
|
||||
return format.getUnionValue();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public int getPlanesCount() {
|
||||
return image.getPlanes().length;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
@DoNotStrip
|
||||
public int getBytesPerRow() {
|
||||
return image.getPlanes()[0].getRowStride();
|
||||
}
|
||||
|
||||
    // Shared scratch buffer reused across toByteBuffer() calls to avoid re-allocating
    // a direct ByteBuffer per frame.
    // NOTE(review): this is static, so it is shared by ALL Frame instances and is not
    // thread-safe; concurrent toByteBuffer() calls would corrupt each other's data —
    // confirm all callers run on a single frame-processor thread.
    private static ByteBuffer byteArrayCache;

    /**
     * Copies the Frame's pixel data into a (cached, reused) direct ByteBuffer.
     *
     * <p>For YUV_420_888 the three planes are concatenated back-to-back exactly as the
     * camera delivered them (including any row-stride padding); for JPEG the single
     * plane's buffer is returned directly without copying.
     *
     * @throws RuntimeException for any other image format.
     */
    @SuppressWarnings("unused")
    @DoNotStrip
    public ByteBuffer toByteBuffer() {
        switch (image.getFormat()) {
            case ImageFormat.YUV_420_888:
                ByteBuffer yBuffer = image.getPlanes()[0].getBuffer();
                ByteBuffer uBuffer = image.getPlanes()[1].getBuffer();
                ByteBuffer vBuffer = image.getPlanes()[2].getBuffer();
                int ySize = yBuffer.remaining();
                int uSize = uBuffer.remaining();
                int vSize = vBuffer.remaining();
                int totalSize = ySize + uSize + vSize;

                // Rewind first so remaining() equals capacity for the size check below.
                if (byteArrayCache != null) byteArrayCache.rewind();
                if (byteArrayCache == null || byteArrayCache.remaining() != totalSize) {
                    byteArrayCache = ByteBuffer.allocateDirect(totalSize);
                }

                byteArrayCache.put(yBuffer).put(uBuffer).put(vBuffer);

                return byteArrayCache;
            case ImageFormat.JPEG:
                // JPEG has a single plane; return its buffer directly (no copy).
                return image.getPlanes()[0].getBuffer();
            default:
                throw new RuntimeException("Cannot convert Frame with Format " + image.getFormat() + " to byte array!");
        }
    }
|
||||
|
||||
    /**
     * Increments the reference count on this Frame.
     * Synchronized on {@code this} so increments/decrements from different threads
     * don't race.
     */
    @SuppressWarnings("unused")
    @DoNotStrip
    public void incrementRefCount() {
        synchronized (this) {
            refCount++;
        }
    }

    /**
     * Decrements the reference count; closes the underlying Image once it reaches zero.
     * After the Image is closed, accessors like getWidth() will throw (see getIsValid()).
     */
    @SuppressWarnings("unused")
    @DoNotStrip
    public void decrementRefCount() {
        synchronized (this) {
            refCount--;
            if (refCount <= 0) {
                // If no reference is held on this Image, close it.
                image.close();
            }
        }
    }
|
||||
|
||||
    /**
     * Force-closes the underlying Image, bypassing the reference count.
     * Private — presumably only invoked from native code via @DoNotStrip; TODO confirm.
     */
    @SuppressWarnings("unused")
    @DoNotStrip
    private void close() {
        image.close();
    }
|
||||
}
|
@@ -0,0 +1,27 @@
|
||||
package com.mrousavy.camera.frameprocessor;
|
||||
|
||||
import androidx.annotation.Keep;
|
||||
import androidx.annotation.NonNull;
|
||||
import androidx.annotation.Nullable;
|
||||
|
||||
import com.facebook.jni.HybridData;
|
||||
import com.facebook.proguard.annotations.DoNotStrip;
|
||||
|
||||
/**
 * Represents a JS Frame Processor.
 *
 * <p>Thin Java handle around a C++ hybrid object (fbjni); the actual JS function
 * invocation happens in native code.
 */
@SuppressWarnings("JavaJniMissingFunction") // we're using fbjni.
public final class FrameProcessor {
    /**
     * Call the JS Frame Processor function with the given Frame
     */
    public native void call(Frame frame);

    // Owns the C++ counterpart; kept alive for the lifetime of this object.
    @DoNotStrip
    @Keep
    private final HybridData mHybridData;

    public FrameProcessor(HybridData hybridData) {
        mHybridData = hybridData;
    }
}
|
@@ -0,0 +1,25 @@
|
||||
package com.mrousavy.camera.frameprocessor;
|
||||
|
||||
import androidx.annotation.Keep;
|
||||
import androidx.annotation.NonNull;
|
||||
import androidx.annotation.Nullable;
|
||||
import com.facebook.proguard.annotations.DoNotStrip;
|
||||
import java.util.Map;
|
||||
|
||||
/**
 * Declares a Frame Processor Plugin.
 *
 * <p>Subclass this and register an initializer via
 * {@code FrameProcessorPluginRegistry.addFrameProcessorPlugin} to expose the plugin to JS.
 */
@DoNotStrip
@Keep
public abstract class FrameProcessorPlugin {
    /**
     * The actual Frame Processor plugin callback. Called for every frame the ImageAnalyzer receives.
     * @param frame The Frame from the Camera. Don't call .close() on this, as VisionCamera handles that.
     * @param params Optional parameters passed from the JS call site; may be null.
     * @return You can return any primitive, map or array you want. See the
     * <a href="https://react-native-vision-camera.com/docs/guides/frame-processors-plugins-overview#types">Types</a>
     * table for a list of supported types.
     */
    @DoNotStrip
    @Keep
    public abstract @Nullable Object callback(@NonNull Frame frame, @Nullable Map<String, Object> params);
}
|
@@ -0,0 +1,35 @@
|
||||
package com.mrousavy.camera.frameprocessor;
|
||||
|
||||
import androidx.annotation.Keep;
|
||||
import androidx.annotation.Nullable;
|
||||
import com.facebook.proguard.annotations.DoNotStrip;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
|
||||
@DoNotStrip
|
||||
@Keep
|
||||
public class FrameProcessorPluginRegistry {
|
||||
private static final Map<String, PluginInitializer> Plugins = new HashMap<>();
|
||||
|
||||
@DoNotStrip
|
||||
@Keep
|
||||
public static void addFrameProcessorPlugin(String name, PluginInitializer pluginInitializer) {
|
||||
assert !Plugins.containsKey(name) : "Tried to add a Frame Processor Plugin with a name that already exists! " +
|
||||
"Either choose unique names, or remove the unused plugin. Name: ";
|
||||
Plugins.put(name, pluginInitializer);
|
||||
}
|
||||
|
||||
@DoNotStrip
|
||||
@Keep
|
||||
public static FrameProcessorPlugin getPlugin(String name, Map<String, Object> options) {
|
||||
PluginInitializer initializer = Plugins.get(name);
|
||||
if (initializer == null) {
|
||||
return null;
|
||||
}
|
||||
return initializer.initializePlugin(options);
|
||||
}
|
||||
|
||||
public interface PluginInitializer {
|
||||
FrameProcessorPlugin initializePlugin(@Nullable Map<String, Object> options);
|
||||
}
|
||||
}
|
@@ -0,0 +1,6 @@
|
||||
package com.mrousavy.camera.frameprocessor;
|
||||
|
||||
/**
 * JNI entry point that installs the VisionCamera bindings into the JS runtime.
 * Implemented in C++ via fbjni.
 */
@SuppressWarnings("JavaJniMissingFunction") // we use fbjni
public class VisionCameraInstaller {
    /** Installs the native VisionCamera module, wiring it to the given proxy. */
    public static native void install(VisionCameraProxy proxy);
}
|
@@ -0,0 +1,82 @@
|
||||
package com.mrousavy.camera.frameprocessor
|
||||
|
||||
import android.util.Log
|
||||
import androidx.annotation.Keep
|
||||
import androidx.annotation.UiThread
|
||||
import com.facebook.jni.HybridData
|
||||
import com.facebook.proguard.annotations.DoNotStrip
|
||||
import com.facebook.react.bridge.ReactApplicationContext
|
||||
import com.facebook.react.bridge.UiThreadUtil
|
||||
import com.facebook.react.turbomodule.core.CallInvokerHolderImpl
|
||||
import com.facebook.react.uimanager.UIManagerHelper
|
||||
import com.mrousavy.camera.CameraView
|
||||
import com.mrousavy.camera.ViewNotFoundError
|
||||
import java.lang.ref.WeakReference
|
||||
|
||||
@Suppress("KotlinJniMissingFunction") // we use fbjni.
/**
 * Java/Kotlin-side proxy exposed to the native VisionCamera module.
 *
 * Bridges the JS runtime (via a fbjni hybrid object) with CameraView instances
 * resolved through the React UIManager. Loads the C++ library on first class load.
 */
class VisionCameraProxy(context: ReactApplicationContext) {
  companion object {
    const val TAG = "VisionCameraProxy"
    init {
      // Loaded once per process; a failure here is fatal for frame processors,
      // so rethrow after logging.
      try {
        System.loadLibrary("VisionCamera")
      } catch (e: UnsatisfiedLinkError) {
        Log.e(TAG, "Failed to load VisionCamera C++ library!", e)
        throw e
      }
    }
  }
  // Owns the C++ counterpart; kept alive for the lifetime of this proxy.
  @DoNotStrip
  @Keep
  private var mHybridData: HybridData
  // Weak so this proxy does not leak the React context.
  private var mContext: WeakReference<ReactApplicationContext>
  private var mScheduler: VisionCameraScheduler

  init {
    // Order matters: the scheduler must exist before initHybrid captures it.
    val jsCallInvokerHolder = context.catalystInstance.jsCallInvokerHolder as CallInvokerHolderImpl
    val jsRuntimeHolder = context.javaScriptContextHolder.get()
    mScheduler = VisionCameraScheduler()
    mContext = WeakReference(context)
    mHybridData = initHybrid(jsRuntimeHolder, jsCallInvokerHolder, mScheduler)
  }

  /**
   * Resolves the CameraView with the given React tag.
   * Must run on the UI thread (UIManager requirement).
   * @throws ViewNotFoundError if the view cannot be resolved (or the context is gone).
   */
  @UiThread
  private fun findCameraViewById(viewId: Int): CameraView {
    Log.d(TAG, "Finding view $viewId...")
    val ctx = mContext.get()
    val view = if (ctx != null) UIManagerHelper.getUIManager(ctx, viewId)?.resolveView(viewId) as CameraView? else null
    Log.d(TAG, if (view != null) "Found view $viewId!" else "Couldn't find view $viewId!")
    return view ?: throw ViewNotFoundError(viewId)
  }

  /** Attaches [frameProcessor] to the CameraView with the given tag (async, on UI thread). */
  @DoNotStrip
  @Keep
  fun setFrameProcessor(viewId: Int, frameProcessor: FrameProcessor) {
    UiThreadUtil.runOnUiThread {
      val view = findCameraViewById(viewId)
      view.frameProcessor = frameProcessor
    }
  }

  /** Detaches any frame processor from the CameraView with the given tag (async, on UI thread). */
  @DoNotStrip
  @Keep
  fun removeFrameProcessor(viewId: Int) {
    UiThreadUtil.runOnUiThread {
      val view = findCameraViewById(viewId)
      view.frameProcessor = null
    }
  }

  // NOTE(review): the Java registry can return null for an unknown plugin name, but
  // this declares a non-null return — an unknown name would surface as a NullPointerException
  // at this platform-type boundary. Confirm whether native callers handle that.
  @DoNotStrip
  @Keep
  fun getFrameProcessorPlugin(name: String, options: Map<String, Any>): FrameProcessorPlugin {
    return FrameProcessorPluginRegistry.getPlugin(name, options)
  }

  // private C++ funcs
  private external fun initHybrid(
    jsContext: Long,
    jsCallInvokerHolder: CallInvokerHolderImpl,
    scheduler: VisionCameraScheduler
  ): HybridData
}
|
@@ -0,0 +1,29 @@
|
||||
package com.mrousavy.camera.frameprocessor;
|
||||
|
||||
import com.facebook.jni.HybridData;
|
||||
import com.facebook.proguard.annotations.DoNotStrip;
|
||||
import com.mrousavy.camera.CameraQueues;
|
||||
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
||||
/**
 * Schedules native frame-processor work onto the video queue.
 *
 * <p>Native code calls {@link #scheduleTrigger()}; this posts {@link #trigger()}
 * (implemented in C++ via fbjni) to the shared video Handler thread.
 */
@SuppressWarnings("JavaJniMissingFunction") // using fbjni here
public class VisionCameraScheduler {
    // Owns the C++ counterpart; kept alive for the lifetime of this scheduler.
    @SuppressWarnings({"unused", "FieldCanBeLocal"})
    @DoNotStrip
    private final HybridData mHybridData;

    public VisionCameraScheduler() {
        mHybridData = initHybrid();
    }

    private native HybridData initHybrid();
    private native void trigger();

    /** Called from native code; hops onto the video queue before triggering. */
    @SuppressWarnings("unused")
    @DoNotStrip
    private void scheduleTrigger() {
        CameraQueues.CameraQueue videoQueue = CameraQueues.Companion.getVideoQueue();
        // TODO: Make sure post(this::trigger) works.
        videoQueue.getHandler().post(this::trigger);
    }
}
|
@@ -0,0 +1,25 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraDevice
|
||||
|
||||
/**
 * JS-facing error categories for camera2 device failures.
 * [DISCONNECTED] is not produced by [fromCameraDeviceError]; it is used for the
 * separate disconnect callback.
 */
enum class CameraDeviceError(override val unionValue: String): JSUnionValue {
  CAMERA_ALREADY_IN_USE("camera-already-in-use"),
  TOO_MANY_OPEN_CAMERAS("too-many-open-cameras"),
  CAMERA_IS_DISABLED_BY_ANDROID("camera-is-disabled-by-android"),
  UNKNOWN_CAMERA_DEVICE_ERROR("unknown-camera-device-error"),
  UNKNOWN_FATAL_CAMERA_SERVICE_ERROR("unknown-fatal-camera-service-error"),
  DISCONNECTED("camera-has-been-disconnected");

  companion object {
    /** Maps a [CameraDevice.StateCallback] onError code; unknown codes fall back to [UNKNOWN_CAMERA_DEVICE_ERROR]. */
    fun fromCameraDeviceError(cameraDeviceError: Int): CameraDeviceError =
      when (cameraDeviceError) {
        CameraDevice.StateCallback.ERROR_CAMERA_IN_USE -> CAMERA_ALREADY_IN_USE
        CameraDevice.StateCallback.ERROR_MAX_CAMERAS_IN_USE -> TOO_MANY_OPEN_CAMERAS
        CameraDevice.StateCallback.ERROR_CAMERA_DISABLED -> CAMERA_IS_DISABLED_BY_ANDROID
        CameraDevice.StateCallback.ERROR_CAMERA_DEVICE -> UNKNOWN_CAMERA_DEVICE_ERROR
        CameraDevice.StateCallback.ERROR_CAMERA_SERVICE -> UNKNOWN_FATAL_CAMERA_SERVICE_ERROR
        else -> UNKNOWN_CAMERA_DEVICE_ERROR
      }
  }
}
|
@@ -0,0 +1,20 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import com.mrousavy.camera.InvalidTypeScriptUnionError
|
||||
|
||||
/** JS union for the flash setting of a photo capture. */
enum class Flash(override val unionValue: String): JSUnionValue {
  OFF("off"),
  ON("on"),
  AUTO("auto");

  companion object: JSUnionValue.Companion<Flash> {
    /** Parses the JS union string; throws [InvalidTypeScriptUnionError] for any unknown value. */
    override fun fromUnionValue(unionValue: String?): Flash =
      values().firstOrNull { it.unionValue == unionValue }
        ?: throw InvalidTypeScriptUnionError("flash", unionValue ?: "(null)")
  }
}
|
@@ -0,0 +1,24 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
|
||||
/** JS union for the camera2 supported-hardware-level of a device. */
enum class HardwareLevel(override val unionValue: String): JSUnionValue {
  LEGACY("legacy"),
  LIMITED("limited"),
  EXTERNAL("external"),
  FULL("full"),
  LEVEL_3("level-3");

  companion object {
    /**
     * Reads INFO_SUPPORTED_HARDWARE_LEVEL from the characteristics.
     * Unknown (or absent) levels conservatively map to [LEGACY].
     */
    fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): HardwareLevel =
      when (cameraCharacteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)) {
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY -> LEGACY
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED -> LIMITED
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL -> EXTERNAL
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_FULL -> FULL
        CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_3 -> LEVEL_3
        else -> LEGACY
      }
  }
}
|
@@ -0,0 +1,9 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
/**
 * A value that has a 1:1 string representation in a TypeScript string-union type.
 */
interface JSUnionValue {
  // The exact string used on the JS side (e.g. "portrait", "off").
  val unionValue: String

  /** Companion contract for parsing a union string back into the typed value. */
  interface Companion<T> {
    // Returns null (or throws, per implementation) when the string is not a member of the union.
    fun fromUnionValue(unionValue: String?): T?
  }
}
|
@@ -0,0 +1,20 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
|
||||
/** JS union for the direction a camera lens faces. */
enum class LensFacing(override val unionValue: String): JSUnionValue {
  BACK("back"),
  FRONT("front"),
  EXTERNAL("external");

  companion object {
    /**
     * Reads LENS_FACING from the characteristics.
     * LENS_FACING is documented as always present, hence the non-null assertion;
     * any future unknown constant maps to [EXTERNAL].
     */
    fun fromCameraCharacteristics(cameraCharacteristics: CameraCharacteristics): LensFacing =
      when (cameraCharacteristics.get(CameraCharacteristics.LENS_FACING)!!) {
        CameraCharacteristics.LENS_FACING_BACK -> BACK
        CameraCharacteristics.LENS_FACING_FRONT -> FRONT
        CameraCharacteristics.LENS_FACING_EXTERNAL -> EXTERNAL
        else -> EXTERNAL
      }
  }
}
|
@@ -0,0 +1,56 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraCharacteristics
|
||||
|
||||
/**
 * JS union for a device/frame orientation, with helpers to convert between
 * rotation degrees and sensor-relative orientations.
 */
enum class Orientation(override val unionValue: String): JSUnionValue {
  PORTRAIT("portrait"),
  LANDSCAPE_RIGHT("landscape-right"),
  PORTRAIT_UPSIDE_DOWN("portrait-upside-down"),
  LANDSCAPE_LEFT("landscape-left");

  /** Clockwise rotation in degrees for this orientation (0/90/180/270). */
  fun toDegrees(): Int {
    return when(this) {
      PORTRAIT -> 0
      LANDSCAPE_RIGHT -> 90
      PORTRAIT_UPSIDE_DOWN -> 180
      LANDSCAPE_LEFT -> 270
    }
  }

  /**
   * Combines this (target) orientation with the camera sensor's fixed mounting
   * rotation to get the orientation relative to the sensor.
   * SENSOR_ORIENTATION is documented as always present, hence the non-null assertion.
   */
  fun toSensorRelativeOrientation(cameraCharacteristics: CameraCharacteristics): Orientation {
    val sensorOrientation = cameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION)!!

    // Convert target orientation to rotation degrees (0, 90, 180, 270)
    var rotationDegrees = this.toDegrees()

    // Reverse device orientation for front-facing cameras
    val facingFront = cameraCharacteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT
    if (facingFront) rotationDegrees = -rotationDegrees

    // Rotate sensor rotation by target rotation
    // (+360 keeps the modulo result non-negative when rotationDegrees was negated above)
    val newRotationDegrees = (sensorOrientation + rotationDegrees + 360) % 360

    return fromRotationDegrees(newRotationDegrees)
  }

  companion object: JSUnionValue.Companion<Orientation> {
    /** Parses the JS union string; returns null for unknown values. */
    override fun fromUnionValue(unionValue: String?): Orientation? {
      return when (unionValue) {
        "portrait" -> PORTRAIT
        "landscape-right" -> LANDSCAPE_RIGHT
        "portrait-upside-down" -> PORTRAIT_UPSIDE_DOWN
        "landscape-left" -> LANDSCAPE_LEFT
        else -> PORTRAIT
      }
    }

    /**
     * Snaps an arbitrary rotation (degrees) to the nearest of the four orientations.
     * NOTE(review): the ranges overlap at their shared endpoints (135, 225, 315);
     * `when` takes the first match, so 135 -> LANDSCAPE_RIGHT and 225 -> PORTRAIT_UPSIDE_DOWN.
     * Presumably intentional tie-breaking — confirm before changing.
     */
    fun fromRotationDegrees(rotationDegrees: Int): Orientation {
      return when (rotationDegrees) {
        in 45..135 -> LANDSCAPE_RIGHT
        in 135..225 -> PORTRAIT_UPSIDE_DOWN
        in 225..315 -> LANDSCAPE_LEFT
        else -> PORTRAIT
      }
    }
  }
}
|
@@ -0,0 +1,19 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.content.pm.PackageManager
|
||||
|
||||
/** JS union for an Android runtime-permission status. */
enum class PermissionStatus(override val unionValue: String): JSUnionValue {
  DENIED("denied"),
  NOT_DETERMINED("not-determined"),
  GRANTED("granted");

  companion object {
    /** Maps a PackageManager permission constant; anything unrecognized is [NOT_DETERMINED]. */
    fun fromPermissionStatus(status: Int): PermissionStatus =
      when (status) {
        PackageManager.PERMISSION_DENIED -> DENIED
        PackageManager.PERMISSION_GRANTED -> GRANTED
        else -> NOT_DETERMINED
      }
  }
}
|
@@ -0,0 +1,50 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.graphics.ImageFormat
|
||||
import com.mrousavy.camera.PixelFormatNotSupportedError
|
||||
|
||||
/**
 * JS union for a Frame's pixel format, convertible to/from Android [ImageFormat] constants.
 */
enum class PixelFormat(override val unionValue: String): JSUnionValue {
  YUV("yuv"),
  RGB("rgb"),
  DNG("dng"),
  NATIVE("native"),
  UNKNOWN("unknown");

  /**
   * Maps this format to its [ImageFormat] constant.
   * @throws PixelFormatNotSupportedError for [UNKNOWN], which has no ImageFormat equivalent.
   */
  fun toImageFormat(): Int {
    val imageFormat = when (this) {
      YUV -> ImageFormat.YUV_420_888
      RGB -> ImageFormat.JPEG
      DNG -> ImageFormat.RAW_SENSOR
      NATIVE -> ImageFormat.PRIVATE
      UNKNOWN -> null
    }
    return imageFormat ?: throw PixelFormatNotSupportedError(this.unionValue)
  }

  companion object: JSUnionValue.Companion<PixelFormat> {
    /** Maps an [ImageFormat] constant back to a [PixelFormat]; unrecognized formats are [UNKNOWN]. */
    fun fromImageFormat(imageFormat: Int): PixelFormat =
      when (imageFormat) {
        ImageFormat.YUV_420_888 -> YUV
        ImageFormat.JPEG, ImageFormat.DEPTH_JPEG -> RGB
        ImageFormat.RAW_SENSOR -> DNG
        ImageFormat.PRIVATE -> NATIVE
        else -> UNKNOWN
      }

    /** Parses the JS union string; returns null for values outside the union. */
    override fun fromUnionValue(unionValue: String?): PixelFormat? =
      values().firstOrNull { it.unionValue == unionValue }
  }
}
|
@@ -0,0 +1,18 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
/** JS union for photo-capture quality prioritization. */
enum class QualityPrioritization(override val unionValue: String): JSUnionValue {
  SPEED("speed"),
  BALANCED("balanced"),
  QUALITY("quality");

  companion object: JSUnionValue.Companion<QualityPrioritization> {
    /** Parses the JS union string; unknown values default to [BALANCED]. */
    override fun fromUnionValue(unionValue: String?): QualityPrioritization =
      values().firstOrNull { it.unionValue == unionValue } ?: BALANCED
  }
}
|
@@ -0,0 +1,16 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
/** JS union for the torch (continuous flash) setting. */
enum class Torch(override val unionValue: String): JSUnionValue {
  OFF("off"),
  ON("on");

  companion object: JSUnionValue.Companion<Torch> {
    /** Parses the JS union string; unknown values default to [OFF]. */
    override fun fromUnionValue(unionValue: String?): Torch =
      values().firstOrNull { it.unionValue == unionValue } ?: OFF
  }
}
|
@@ -0,0 +1,25 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.media.MediaRecorder
|
||||
|
||||
/** JS union for the video codec used when recording. */
enum class VideoCodec(override val unionValue: String): JSUnionValue {
  H264("h264"),
  H265("h265");

  /** Maps this codec to the [MediaRecorder.VideoEncoder] constant (H265 -> HEVC). */
  fun toVideoCodec(): Int =
    when (this) {
      H264 -> MediaRecorder.VideoEncoder.H264
      H265 -> MediaRecorder.VideoEncoder.HEVC
    }

  companion object: JSUnionValue.Companion<VideoCodec> {
    /** Parses the JS union string; unknown values default to [H264]. */
    override fun fromUnionValue(unionValue: String?): VideoCodec =
      values().firstOrNull { it.unionValue == unionValue } ?: H264
  }
}
|
@@ -0,0 +1,25 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import com.mrousavy.camera.InvalidTypeScriptUnionError
|
||||
|
||||
/** JS union for the container/file type of a video recording. */
enum class VideoFileType(override val unionValue: String): JSUnionValue {
  MOV("mov"),
  MP4("mp4");

  /** File extension for this container, including the leading dot. */
  fun toExtension(): String =
    when (this) {
      MOV -> ".mov"
      MP4 -> ".mp4"
    }

  companion object: JSUnionValue.Companion<VideoFileType> {
    /** Parses the JS union string; throws [InvalidTypeScriptUnionError] for any unknown value. */
    override fun fromUnionValue(unionValue: String?): VideoFileType =
      values().firstOrNull { it.unionValue == unionValue }
        ?: throw InvalidTypeScriptUnionError("fileType", unionValue ?: "(null)")
  }
}
|
@@ -0,0 +1,59 @@
|
||||
package com.mrousavy.camera.parsers
|
||||
|
||||
import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_OFF
|
||||
import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_ON
|
||||
import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION
|
||||
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_OFF
|
||||
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_ON
|
||||
|
||||
/**
 * JS union for video stabilization, convertible to/from both the camera2 digital
 * (EIS) and optical (OIS) stabilization constants.
 *
 * Mapping convention: STANDARD/CINEMATIC are digital modes, CINEMATIC_EXTENDED is
 * the optical mode; conversions to the "other" kind fall back to OFF.
 */
enum class VideoStabilizationMode(override val unionValue: String): JSUnionValue {
  OFF("off"),
  STANDARD("standard"),
  CINEMATIC("cinematic"),
  CINEMATIC_EXTENDED("cinematic-extended");

  /**
   * Digital (EIS) CONTROL_VIDEO_STABILIZATION_MODE_* constant for this mode.
   * CINEMATIC_EXTENDED is an optical mode, so it maps to OFF here.
   */
  fun toDigitalStabilizationMode(): Int {
    return when (this) {
      OFF -> CONTROL_VIDEO_STABILIZATION_MODE_OFF
      STANDARD -> CONTROL_VIDEO_STABILIZATION_MODE_ON
      // Consistency fix: use the already-imported named constant (value 2, API 33+)
      // instead of the magic literal; fromDigitalVideoStabilizationMode already uses it.
      CINEMATIC -> CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION
      CINEMATIC_EXTENDED -> CONTROL_VIDEO_STABILIZATION_MODE_OFF
    }
  }

  /**
   * Optical (OIS) LENS_OPTICAL_STABILIZATION_MODE_* constant for this mode.
   * Only CINEMATIC_EXTENDED enables OIS; all other modes map to OFF.
   */
  fun toOpticalStabilizationMode(): Int {
    return when (this) {
      CINEMATIC_EXTENDED -> LENS_OPTICAL_STABILIZATION_MODE_ON
      else -> LENS_OPTICAL_STABILIZATION_MODE_OFF
    }
  }

  companion object: JSUnionValue.Companion<VideoStabilizationMode> {
    /** Parses the JS union string; returns null for unknown values. */
    override fun fromUnionValue(unionValue: String?): VideoStabilizationMode? {
      return when (unionValue) {
        "off" -> OFF
        "standard" -> STANDARD
        "cinematic" -> CINEMATIC
        "cinematic-extended" -> CINEMATIC_EXTENDED
        else -> null
      }
    }

    // NOTE: parameter name "stabiliazionMode" (sic) is kept to avoid breaking
    // named-argument callers.
    /** Maps a digital (EIS) constant back to a mode; unknown values map to [OFF]. */
    fun fromDigitalVideoStabilizationMode(stabiliazionMode: Int): VideoStabilizationMode {
      return when (stabiliazionMode) {
        CONTROL_VIDEO_STABILIZATION_MODE_OFF -> OFF
        CONTROL_VIDEO_STABILIZATION_MODE_ON -> STANDARD
        CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION -> CINEMATIC
        else -> OFF
      }
    }

    /** Maps an optical (OIS) constant back to a mode; unknown values map to [OFF]. */
    fun fromOpticalVideoStabilizationMode(stabiliazionMode: Int): VideoStabilizationMode {
      return when (stabiliazionMode) {
        LENS_OPTICAL_STABILIZATION_MODE_OFF -> OFF
        LENS_OPTICAL_STABILIZATION_MODE_ON -> CINEMATIC_EXTENDED
        else -> OFF
      }
    }
  }
}
|
@@ -0,0 +1,22 @@
|
||||
package com.mrousavy.camera.utils
|
||||
|
||||
import com.facebook.react.bridge.*
|
||||
|
||||
/**
 * Recursively converts a [Throwable] (and its cause chain) into a ReadableMap
 * with "message", "stacktrace" and an optional nested "cause".
 */
private fun makeErrorCauseMap(throwable: Throwable): ReadableMap =
  Arguments.createMap().apply {
    putString("message", throwable.message)
    putString("stacktrace", throwable.stackTraceToString())
    // Recurse into the cause chain, if any.
    throwable.cause?.let { putMap("cause", makeErrorCauseMap(it)) }
  }
|
||||
|
||||
/**
 * Builds the standard JS-facing error map with "code", "message", an optional
 * recursive "cause" (from [throwable]) and optional "userInfo".
 * Absent values are stored as null keys, matching the original behavior.
 */
fun makeErrorMap(code: String? = null, message: String? = null, throwable: Throwable? = null, userInfo: WritableMap? = null): ReadableMap =
  Arguments.createMap().apply {
    putString("code", code)
    putString("message", message)
    putMap("cause", if (throwable != null) makeErrorCauseMap(throwable) else null)
    putMap("userInfo", userInfo)
  }
|
@@ -0,0 +1,16 @@
|
||||
package com.mrousavy.camera.utils
|
||||
|
||||
import com.facebook.react.bridge.Promise
|
||||
import com.mrousavy.camera.CameraError
|
||||
import com.mrousavy.camera.UnknownCameraError
|
||||
|
||||
/**
 * Runs [closure] and resolves [promise] with its result; any thrown [Throwable]
 * is wrapped as a [CameraError] (or [UnknownCameraError]) and rejected with the
 * "domain/id" code convention.
 */
inline fun withPromise(promise: Promise, closure: () -> Any?) {
  try {
    promise.resolve(closure())
  } catch (e: Throwable) {
    e.printStackTrace()
    val error = (e as? CameraError) ?: UnknownCameraError(e)
    promise.reject("${error.domain}/${error.id}", error.message, error.cause)
  }
}
|
Reference in New Issue
Block a user