feat: Implement atomically single-lock core/ library on Android (#2049)
* feat: Create base for `CameraConfiguration` diff
* Fix
* Write three configure methods
* Build?
* More
* Update CameraView+RecordVideo.kt
* Fix errors
* Update CameraDeviceDetails.kt
* Update CameraSession.kt
* Auto-resize Preview View
* More
* Make it work? idk
* Format
* Call `configure` under mutex, and change isActive
* fix: Make Outputs comparable
* fix: Make CodeScanner comparable
* Format
* fix: Update outputs after reconfiguring
* Update CameraPage.tsx
* fix: Close CaptureSession before
parent 23d173f6fc
commit de0d6cda5d
@@ -23,10 +23,12 @@ suspend fun CameraView.startRecording(options: ReadableMap, onRecordCallback: Ca
}
}

if (options.hasKey("flash")) {
val enableFlash = options.getString("flash") == "on"
val enableFlash = options.getString("flash") == "on"
if (enableFlash) {
// overrides current torch mode value to enable flash while recording
cameraSession.setTorchMode(enableFlash)
cameraSession.configure { config ->
config.torch = Torch.ON
}
}
var codec = VideoCodec.H264
if (options.hasKey("videoCodec")) {
@@ -67,5 +69,8 @@ suspend fun CameraView.resumeRecording() {
@SuppressLint("RestrictedApi")
suspend fun CameraView.stopRecording() {
cameraSession.stopRecording()
cameraSession.setTorchMode(torch == Torch.ON)
// Set torch back to it's original value in case we just used it as a flash for the recording.
cameraSession.configure { config ->
config.torch = torch
}
}
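The change above replaces the imperative cameraSession.setTorchMode() call with the new atomic configure block. A minimal sketch of the same pattern as a reusable helper — the function name and structure are illustrative only, not part of this commit:

// Hypothetical helper: turn the torch on for the duration of a recording and
// restore the view's `torch` prop afterwards, both through the atomic configure block.
suspend fun CameraView.withTorchForRecording(record: suspend () -> Unit) {
  cameraSession.configure { config -> config.torch = Torch.ON }
  try {
    record()
  } finally {
    cameraSession.configure { config -> config.torch = torch }
  }
}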
@@ -41,7 +41,7 @@ suspend fun CameraView.takePhoto(optionsMap: ReadableMap): WritableMap {
enableShutterSound,
enableAutoRedEyeReduction,
enableAutoStabilization,
outputOrientation
orientation
)

photo.use {
@@ -1,31 +1,20 @@
package com.mrousavy.camera

import android.Manifest
import android.annotation.SuppressLint
import android.content.Context
import android.content.pm.PackageManager
import android.hardware.camera2.CameraManager
import android.util.Log
import android.util.Size
import android.view.Gravity
import android.view.ScaleGestureDetector
import android.view.Surface
import android.widget.FrameLayout
import androidx.core.content.ContextCompat
import com.facebook.react.bridge.ReadableMap
import com.facebook.react.bridge.UiThreadUtil
import com.mrousavy.camera.core.CameraPermissionError
import com.google.mlkit.vision.barcode.common.Barcode
import com.mrousavy.camera.core.CameraConfiguration
import com.mrousavy.camera.core.CameraQueues
import com.mrousavy.camera.core.CameraSession
import com.mrousavy.camera.core.NoCameraDeviceError
import com.mrousavy.camera.core.PreviewView
import com.mrousavy.camera.core.outputs.CameraOutputs
import com.mrousavy.camera.extensions.bigger
import com.mrousavy.camera.extensions.containsAny
import com.mrousavy.camera.extensions.getPreviewTargetSize
import com.mrousavy.camera.extensions.installHierarchyFitter
import com.mrousavy.camera.extensions.smaller
import com.mrousavy.camera.frameprocessor.FrameProcessor
import com.mrousavy.camera.types.CameraDeviceFormat
import com.mrousavy.camera.types.CodeScannerOptions
import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.PixelFormat
@@ -48,19 +37,23 @@ import kotlinx.coroutines.launch
@SuppressLint("ClickableViewAccessibility", "ViewConstructor", "MissingPermission")
class CameraView(context: Context) :
FrameLayout(context),
CoroutineScope {
CoroutineScope,
CameraSession.CameraSessionCallback {
companion object {
const val TAG = "CameraView"

private val propsThatRequirePreviewReconfiguration = arrayListOf("cameraId", "format", "resizeMode")
private val propsThatRequireSessionReconfiguration =
arrayListOf("cameraId", "format", "photo", "video", "enableFrameProcessor", "codeScannerOptions", "pixelFormat")
private val propsThatRequireFormatReconfiguration = arrayListOf("fps", "hdr", "videoStabilizationMode", "lowLightBoost")
}

// react properties
// props that require reconfiguring
var cameraId: String? = null
set(value) {
if (value != null) {
// TODO: Move this into CameraSession
val f = if (format != null) CameraDeviceFormat.fromJSValue(format!!) else null
previewView.resizeToInputCamera(value, cameraManager, f)
}
field = value
}
var enableDepthData = false
var enableHighQualityPhotos: Boolean? = null
var enablePortraitEffectsMatteDelivery = false
@@ -74,7 +67,6 @@ class CameraView(context: Context) :

// props that require format reconfiguring
var format: ReadableMap? = null
var resizeMode: ResizeMode = ResizeMode.COVER
var fps: Int? = null
var videoStabilizationMode: VideoStabilizationMode? = null
var hdr: Boolean? = null // nullable bool
@@ -84,8 +76,17 @@ class CameraView(context: Context) :
var isActive = false
var torch: Torch = Torch.OFF
var zoom: Float = 1f // in "factor"
var orientation: Orientation? = null
var orientation: Orientation = Orientation.PORTRAIT
var enableZoomGesture: Boolean = false
set(value) {
field = value
updateZoomGesture()
}
var resizeMode: ResizeMode = ResizeMode.COVER
set(value) {
previewView.resizeMode = value
field = value
}

// code scanner
var codeScannerOptions: CodeScannerOptions? = null
@@ -96,8 +97,7 @@ class CameraView(context: Context) :

// session
internal val cameraSession: CameraSession
private var previewView: PreviewView? = null
private var previewSurface: Surface? = null
private val previewView: PreviewView

internal var frameProcessor: FrameProcessor? = null
set(value) {
@@ -105,167 +105,100 @@ class CameraView(context: Context) :
cameraSession.frameProcessor = frameProcessor
}

private val inputOrientation: Orientation
get() = cameraSession.orientation
internal val outputOrientation: Orientation
get() = orientation ?: inputOrientation

override val coroutineContext: CoroutineContext = CameraQueues.cameraQueue.coroutineDispatcher

init {
this.installHierarchyFitter()
clipToOutline = true
setupPreviewView()
cameraSession = CameraSession(context, cameraManager, { invokeOnInitialized() }, { error -> invokeOnError(error) })
cameraSession = CameraSession(context, cameraManager, this)
previewView = cameraSession.createPreviewView(context)
addView(previewView)
}

override fun onAttachedToWindow() {
super.onAttachedToWindow()
if (!isMounted) {
isMounted = true
invokeOnViewReady()
}
launch { updateLifecycle() }
update()
super.onAttachedToWindow()
}

override fun onDetachedFromWindow() {
update()
super.onDetachedFromWindow()
launch { updateLifecycle() }
}

private fun getPreviewTargetSize(): Size {
val cameraId = cameraId ?: throw NoCameraDeviceError()

val format = format
val targetPreviewSize = if (format != null) Size(format.getInt("videoWidth"), format.getInt("videoHeight")) else null
val formatAspectRatio = if (targetPreviewSize != null) targetPreviewSize.bigger.toDouble() / targetPreviewSize.smaller else null

return this.cameraManager.getCameraCharacteristics(cameraId).getPreviewTargetSize(formatAspectRatio)
}

private fun setupPreviewView() {
removeView(previewView)
this.previewSurface = null

if (cameraId == null) return

val previewView = PreviewView(context, this.getPreviewTargetSize(), resizeMode) { surface ->
previewSurface = surface
launch { configureSession() }
}
previewView.layoutParams = LayoutParams(
LayoutParams.MATCH_PARENT,
LayoutParams.MATCH_PARENT,
Gravity.CENTER
)
this.previewView = previewView
UiThreadUtil.runOnUiThread {
addView(previewView)
}
}

fun update(changedProps: ArrayList<String>) {
Log.i(TAG, "Props changed: $changedProps")
val shouldReconfigurePreview = changedProps.containsAny(propsThatRequirePreviewReconfiguration)
val shouldReconfigureSession = shouldReconfigurePreview || changedProps.containsAny(propsThatRequireSessionReconfiguration)
val shouldReconfigureFormat = shouldReconfigureSession || changedProps.containsAny(propsThatRequireFormatReconfiguration)
val shouldReconfigureZoom = shouldReconfigureSession || changedProps.contains("zoom")
val shouldReconfigureTorch = shouldReconfigureSession || changedProps.contains("torch")
val shouldCheckActive = shouldReconfigureFormat || changedProps.contains("isActive")
val shouldReconfigureZoomGesture = changedProps.contains("enableZoomGesture")
fun update() {
Log.i(TAG, "Updating CameraSession...")

launch {
try {
// Expensive Calls
if (shouldReconfigurePreview) {
setupPreviewView()
cameraSession.configure { config ->
// Input Camera Device
config.cameraId = cameraId

// Photo
if (photo == true) {
config.photo = CameraConfiguration.Output.Enabled.create(CameraConfiguration.Photo(Unit))
} else {
config.photo = CameraConfiguration.Output.Disabled.create()
}
if (shouldReconfigureSession) {
configureSession()

// Video/Frame Processor
if (video == true || enableFrameProcessor) {
config.video = CameraConfiguration.Output.Enabled.create(
CameraConfiguration.Video(
pixelFormat,
enableFrameProcessor
)
)
} else {
config.video = CameraConfiguration.Output.Disabled.create()
}
if (shouldReconfigureFormat) {
configureFormat()

// Audio
if (audio == true) {
config.audio = CameraConfiguration.Output.Enabled.create(CameraConfiguration.Audio(Unit))
} else {
config.audio = CameraConfiguration.Output.Disabled.create()
}
if (shouldCheckActive) {
updateLifecycle()

// Code Scanner
val codeScanner = codeScannerOptions
if (codeScanner != null) {
config.codeScanner = CameraConfiguration.Output.Enabled.create(
CameraConfiguration.CodeScanner(codeScanner.codeTypes)
)
} else {
config.codeScanner = CameraConfiguration.Output.Disabled.create()
}
// Fast Calls
if (shouldReconfigureZoom) {
updateZoom()

// Orientation
config.orientation = orientation

// Format
val format = format
if (format != null) {
config.format = CameraDeviceFormat.fromJSValue(format)
} else {
config.format = null
}
if (shouldReconfigureTorch) {
updateTorch()
}
if (shouldReconfigureZoomGesture) {
updateZoomGesture()
}
} catch (e: Throwable) {
Log.e(TAG, "update() threw: ${e.message}")
invokeOnError(e)

// Side-Props
config.fps = fps
config.enableLowLightBoost = lowLightBoost ?: false
config.enableHdr = hdr ?: false
config.torch = torch

// Zoom
config.zoom = zoom

// isActive
config.isActive = isActive && isAttachedToWindow
}
}
}

private suspend fun configureSession() {
try {
Log.i(TAG, "Configuring Camera Device...")

if (ContextCompat.checkSelfPermission(context, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
throw CameraPermissionError()
}
val cameraId = cameraId ?: throw NoCameraDeviceError()

val format = format
val targetVideoSize = if (format != null) Size(format.getInt("videoWidth"), format.getInt("videoHeight")) else null
val targetPhotoSize = if (format != null) Size(format.getInt("photoWidth"), format.getInt("photoHeight")) else null
// TODO: Allow previewSurface to be null/none
val previewSurface = previewSurface ?: return
val codeScannerOptions = codeScannerOptions

val previewOutput = CameraOutputs.PreviewOutput(previewSurface, previewView?.targetSize)
val photoOutput = if (photo == true) {
CameraOutputs.PhotoOutput(targetPhotoSize)
} else {
null
}
val videoOutput = if (video == true || enableFrameProcessor) {
CameraOutputs.VideoOutput(targetVideoSize, video == true, enableFrameProcessor, pixelFormat)
} else {
null
}
val codeScanner = if (codeScannerOptions != null) {
CameraOutputs.CodeScannerOutput(
codeScannerOptions,
{ codes -> invokeOnCodeScanned(codes) },
{ error -> invokeOnError(error) }
)
} else {
null
}

cameraSession.configureSession(cameraId, previewOutput, photoOutput, videoOutput, codeScanner)
} catch (e: Throwable) {
Log.e(TAG, "Failed to configure session: ${e.message}", e)
invokeOnError(e)
}
}

private suspend fun configureFormat() {
cameraSession.configureFormat(fps, videoStabilizationMode, hdr, lowLightBoost)
}

private suspend fun updateLifecycle() {
cameraSession.setIsActive(isActive && isAttachedToWindow)
}

private suspend fun updateZoom() {
cameraSession.setZoom(zoom)
}

private suspend fun updateTorch() {
cameraSession.setTorchMode(torch == Torch.ON)
}

@SuppressLint("ClickableViewAccessibility")
private fun updateZoomGesture() {
if (enableZoomGesture) {
@@ -274,7 +207,7 @@ class CameraView(context: Context) :
object : ScaleGestureDetector.SimpleOnScaleGestureListener() {
override fun onScale(detector: ScaleGestureDetector): Boolean {
zoom *= detector.scaleFactor
launch { updateZoom() }
update()
return true
}
}
@@ -286,4 +219,16 @@ class CameraView(context: Context) :
setOnTouchListener(null)
}
}

override fun onError(error: Throwable) {
invokeOnError(error)
}

override fun onInitialized() {
invokeOnInitialized()
}

override fun onCodeScanned(codes: List<Barcode>) {
invokeOnCodeScanned(codes)
}
}
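CameraView now implements CameraSession.CameraSessionCallback instead of passing onInitialized/onError lambdas into the CameraSession constructor. The interface itself is not part of this diff; a minimal sketch of what it presumably declares, inferred from the three overrides above and from the callback.onError / callback.onInitialized / callback.onCodeScanned call sites in CameraSession.kt further down (assumed shape):

// Assumed shape of the callback interface (not shown in this diff).
interface CameraSessionCallback {
  fun onError(error: Throwable)
  fun onInitialized()
  fun onCodeScanned(codes: List<Barcode>)
}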
@@ -18,9 +18,7 @@ class CameraViewManager : ViewGroupManager<CameraView>() {

override fun onAfterUpdateTransaction(view: CameraView) {
super.onAfterUpdateTransaction(view)
val changedProps = cameraViewTransactions[view] ?: ArrayList()
view.update(changedProps)
cameraViewTransactions.remove(view)
view.update()
}

override fun getExportedCustomDirectEventTypeConstants(): MutableMap<String, Any>? =
@@ -35,108 +33,69 @@ class CameraViewManager : ViewGroupManager<CameraView>() {

@ReactProp(name = "cameraId")
fun setCameraId(view: CameraView, cameraId: String) {
if (view.cameraId != cameraId) {
addChangedPropToTransaction(view, "cameraId")
}
view.cameraId = cameraId
}

@ReactProp(name = "photo")
fun setPhoto(view: CameraView, photo: Boolean?) {
if (view.photo != photo) {
addChangedPropToTransaction(view, "photo")
}
view.photo = photo
}

@ReactProp(name = "video")
fun setVideo(view: CameraView, video: Boolean?) {
if (view.video != video) {
addChangedPropToTransaction(view, "video")
}
view.video = video
}

@ReactProp(name = "audio")
fun setAudio(view: CameraView, audio: Boolean?) {
if (view.audio != audio) {
addChangedPropToTransaction(view, "audio")
}
view.audio = audio
}

@ReactProp(name = "enableFrameProcessor")
fun setEnableFrameProcessor(view: CameraView, enableFrameProcessor: Boolean) {
if (view.enableFrameProcessor != enableFrameProcessor) {
addChangedPropToTransaction(view, "enableFrameProcessor")
}
view.enableFrameProcessor = enableFrameProcessor
}

@ReactProp(name = "pixelFormat")
fun setPixelFormat(view: CameraView, pixelFormat: String?) {
val newPixelFormat = PixelFormat.fromUnionValue(pixelFormat)
if (view.pixelFormat != newPixelFormat) {
addChangedPropToTransaction(view, "pixelFormat")
}
view.pixelFormat = newPixelFormat ?: PixelFormat.NATIVE
view.pixelFormat = newPixelFormat
}

@ReactProp(name = "enableDepthData")
fun setEnableDepthData(view: CameraView, enableDepthData: Boolean) {
if (view.enableDepthData != enableDepthData) {
addChangedPropToTransaction(view, "enableDepthData")
}
view.enableDepthData = enableDepthData
}

@ReactProp(name = "enableZoomGesture")
fun setEnableZoomGesture(view: CameraView, enableZoomGesture: Boolean) {
if (view.enableZoomGesture != enableZoomGesture) {
addChangedPropToTransaction(view, "enableZoomGesture")
}
view.enableZoomGesture = enableZoomGesture
}

@ReactProp(name = "videoStabilizationMode")
fun setVideoStabilizationMode(view: CameraView, videoStabilizationMode: String?) {
val newMode = VideoStabilizationMode.fromUnionValue(videoStabilizationMode)
if (view.videoStabilizationMode != newMode) {
addChangedPropToTransaction(view, "videoStabilizationMode")
}
view.videoStabilizationMode = newMode
}

@ReactProp(name = "enableHighQualityPhotos")
fun setEnableHighQualityPhotos(view: CameraView, enableHighQualityPhotos: Boolean?) {
if (view.enableHighQualityPhotos != enableHighQualityPhotos) {
addChangedPropToTransaction(view, "enableHighQualityPhotos")
}
view.enableHighQualityPhotos = enableHighQualityPhotos
}

@ReactProp(name = "enablePortraitEffectsMatteDelivery")
fun setEnablePortraitEffectsMatteDelivery(view: CameraView, enablePortraitEffectsMatteDelivery: Boolean) {
if (view.enablePortraitEffectsMatteDelivery != enablePortraitEffectsMatteDelivery) {
addChangedPropToTransaction(view, "enablePortraitEffectsMatteDelivery")
}
view.enablePortraitEffectsMatteDelivery = enablePortraitEffectsMatteDelivery
}

@ReactProp(name = "format")
fun setFormat(view: CameraView, format: ReadableMap?) {
if (view.format != format) {
addChangedPropToTransaction(view, "format")
}
view.format = format
}

@ReactProp(name = "resizeMode")
fun setResizeMode(view: CameraView, resizeMode: String) {
val newMode = ResizeMode.fromUnionValue(resizeMode)
if (view.resizeMode != newMode) {
addChangedPropToTransaction(view, "resizeMode")
}
view.resizeMode = newMode
}

@@ -145,82 +104,49 @@ class CameraViewManager : ViewGroupManager<CameraView>() {
// of type "Int?" the react bridge throws an error.
@ReactProp(name = "fps", defaultInt = -1)
fun setFps(view: CameraView, fps: Int) {
if (view.fps != fps) {
addChangedPropToTransaction(view, "fps")
}
view.fps = if (fps > 0) fps else null
}

@ReactProp(name = "hdr")
fun setHdr(view: CameraView, hdr: Boolean?) {
if (view.hdr != hdr) {
addChangedPropToTransaction(view, "hdr")
}
view.hdr = hdr
}

@ReactProp(name = "lowLightBoost")
fun setLowLightBoost(view: CameraView, lowLightBoost: Boolean?) {
if (view.lowLightBoost != lowLightBoost) {
addChangedPropToTransaction(view, "lowLightBoost")
}
view.lowLightBoost = lowLightBoost
}

@ReactProp(name = "isActive")
fun setIsActive(view: CameraView, isActive: Boolean) {
if (view.isActive != isActive) {
addChangedPropToTransaction(view, "isActive")
}
view.isActive = isActive
}

@ReactProp(name = "torch")
fun setTorch(view: CameraView, torch: String) {
val newMode = Torch.fromUnionValue(torch)
if (view.torch != newMode) {
addChangedPropToTransaction(view, "torch")
}
view.torch = newMode
}

@ReactProp(name = "zoom")
fun setZoom(view: CameraView, zoom: Double) {
val zoomFloat = zoom.toFloat()
if (view.zoom != zoomFloat) {
addChangedPropToTransaction(view, "zoom")
}
view.zoom = zoomFloat
}

@ReactProp(name = "orientation")
fun setOrientation(view: CameraView, orientation: String?) {
val newMode = Orientation.fromUnionValue(orientation)
if (view.orientation != newMode) {
addChangedPropToTransaction(view, "orientation")
}
view.orientation = newMode
}

@ReactProp(name = "codeScannerOptions")
fun setCodeScanner(view: CameraView, codeScannerOptions: ReadableMap) {
val newCodeScannerOptions = CodeScannerOptions(codeScannerOptions)
if (view.codeScannerOptions != newCodeScannerOptions) {
addChangedPropToTransaction(view, "codeScannerOptions")
}
view.codeScannerOptions = newCodeScannerOptions
}

companion object {
const val TAG = "CameraView"

val cameraViewTransactions: HashMap<CameraView, ArrayList<String>> = HashMap()

private fun addChangedPropToTransaction(view: CameraView, changedProp: String) {
if (cameraViewTransactions[view] == null) {
cameraViewTransactions[view] = ArrayList()
}
cameraViewTransactions[view]!!.add(changedProp)
}
}
}
@@ -0,0 +1,103 @@
package com.mrousavy.camera.core

import android.view.Surface
import com.mrousavy.camera.types.CameraDeviceFormat
import com.mrousavy.camera.types.CodeType
import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.PixelFormat
import com.mrousavy.camera.types.Torch
import com.mrousavy.camera.types.VideoStabilizationMode

data class CameraConfiguration(
// Input
var cameraId: String? = null,

// Outputs
var preview: Output<Preview> = Output.Disabled.create(),
var photo: Output<Photo> = Output.Disabled.create(),
var video: Output<Video> = Output.Disabled.create(),
var codeScanner: Output<CodeScanner> = Output.Disabled.create(),
var enableHdr: Boolean = false,

// Orientation
var orientation: Orientation = Orientation.PORTRAIT,

// Format
var format: CameraDeviceFormat? = null,

// Side-Props
var fps: Int? = null,
var enableLowLightBoost: Boolean = false,
var torch: Torch = Torch.OFF,
var videoStabilizationMode: VideoStabilizationMode = VideoStabilizationMode.OFF,

// Zoom
var zoom: Float = 1f,

// isActive (Start/Stop)
var isActive: Boolean = false,

// Audio Session
var audio: Output<Audio> = Output.Disabled.create()
) {

// Output<T> types, those need to be comparable
data class CodeScanner(val codeTypes: List<CodeType>)
data class Photo(val nothing: Unit)
data class Video(val pixelFormat: PixelFormat, val enableFrameProcessor: Boolean)
data class Audio(val nothing: Unit)
data class Preview(val surface: Surface)

@Suppress("EqualsOrHashCode")
sealed class Output<T> {
val isEnabled: Boolean
get() = this is Enabled<*>
class Disabled<T> private constructor() : Output<T>() {
override fun equals(other: Any?): Boolean = other is Disabled<*>
companion object {
fun <T> create(): Disabled<T> = Disabled()
}
}
class Enabled<T> private constructor(val config: T) : Output<T>() {
override fun equals(other: Any?): Boolean = other is Enabled<*> && config == other.config
companion object {
fun <T> create(config: T): Enabled<T> = Enabled(config)
}
}
}

data class Difference(
// Input Camera (cameraId and isActive)
val deviceChanged: Boolean,
// Outputs & Session (Photo, Video, CodeScanner, HDR, Format)
val outputsChanged: Boolean,
// Side-Props for CaptureRequest (fps, low-light-boost, torch, zoom, videoStabilization)
val sidePropsChanged: Boolean
) {
val hasAnyDifference: Boolean
get() = sidePropsChanged || outputsChanged || deviceChanged
}

companion object {
fun copyOf(other: CameraConfiguration?): CameraConfiguration = other?.copy() ?: CameraConfiguration()

fun difference(left: CameraConfiguration?, right: CameraConfiguration): Difference {
val deviceChanged = left?.cameraId != right.cameraId

val outputsChanged = deviceChanged || // input device
left?.photo != right.photo || left.video != right.video || left.codeScanner != right.codeScanner ||
left.preview != right.preview || // outputs
left.enableHdr != right.enableHdr || left.format != right.format // props that affect the outputs (hdr, format, ..)

val sidePropsChanged = outputsChanged || // depend on outputs
left?.torch != right.torch || left.enableLowLightBoost != right.enableLowLightBoost || left.fps != right.fps ||
left.zoom != right.zoom || left.videoStabilizationMode != right.videoStabilizationMode || left.isActive != right.isActive

return Difference(
deviceChanged,
outputsChanged,
sidePropsChanged
)
}
}
}
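Because CameraConfiguration is a data class whose outputs are comparable Output<T> values, configure() can copy the current configuration, let the caller mutate the copy, and then diff the two to decide how much of the Camera2 pipeline must be rebuilt. A small usage sketch of copyOf/difference (illustrative values only):

// Sketch: flipping only the torch marks just the side-props group as changed.
val current = CameraConfiguration(cameraId = "0", torch = Torch.OFF)
val next = CameraConfiguration.copyOf(current)
next.torch = Torch.ON

val diff = CameraConfiguration.difference(current, next)
// diff.deviceChanged == false, diff.outputsChanged == false, diff.sidePropsChanged == true
// -> configure() only rebuilds the repeating CaptureRequest, not the CameraDevice or CaptureSession.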
@@ -14,6 +14,7 @@ import com.facebook.react.bridge.ReadableMap
import com.mrousavy.camera.extensions.bigger
import com.mrousavy.camera.extensions.getPhotoSizes
import com.mrousavy.camera.extensions.getVideoSizes
import com.mrousavy.camera.types.AutoFocusSystem
import com.mrousavy.camera.types.HardwareLevel
import com.mrousavy.camera.types.LensFacing
import com.mrousavy.camera.types.Orientation
@@ -175,11 +176,12 @@ class CameraDeviceDetails(private val cameraManager: CameraManager, private val
map.putInt("maxISO", isoRange.upper)
map.putInt("minFps", fpsRange.lower)
map.putInt("maxFps", fpsRange.upper)
map.putDouble("maxZoom", maxZoom)
map.putDouble("fieldOfView", getFieldOfView())
map.putBoolean("supportsVideoHDR", supportsVideoHdr)
map.putBoolean("supportsPhotoHDR", supportsPhotoHdr)
map.putBoolean("supportsDepthCapture", supportsDepthCapture)
map.putString("autoFocusSystem", "contrast-detection") // TODO: Is this wrong?
map.putString("autoFocusSystem", AutoFocusSystem.CONTRAST_DETECTION.unionValue)
map.putArray("videoStabilizationModes", createStabilizationModes())
map.putArray("pixelFormats", createPixelFormats())
return map
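The hard-coded "contrast-detection" string is replaced with AutoFocusSystem.CONTRAST_DETECTION.unionValue. The enum itself is not included in this diff; a plausible sketch of its shape, assuming it follows the same unionValue pattern as the other types.* enums such as Torch and PixelFormat:

// Assumed shape (not part of this diff): a TypeScript-union-backed enum.
enum class AutoFocusSystem(val unionValue: String) {
  CONTRAST_DETECTION("contrast-detection"),
  NONE("none")
}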
@@ -1,6 +1,5 @@
package com.mrousavy.camera.core

import com.mrousavy.camera.core.outputs.CameraOutputs
import com.mrousavy.camera.types.CameraDeviceError

abstract class CameraError(
@@ -39,7 +38,7 @@ class MicrophonePermissionError :
)
class CameraPermissionError : CameraError("permission", "camera-permission-denied", "The Camera permission was denied!")

class InvalidTypeScriptUnionError(unionName: String, unionValue: String) :
class InvalidTypeScriptUnionError(unionName: String, unionValue: String?) :
CameraError("parameter", "invalid-parameter", "The given value for $unionName could not be parsed! (Received: $unionValue)")

class NoCameraDeviceError :
@@ -55,8 +54,8 @@ class CameraNotReadyError :
CameraError("session", "camera-not-ready", "The Camera is not ready yet! Wait for the onInitialized() callback!")
class CameraCannotBeOpenedError(cameraId: String, error: CameraDeviceError) :
CameraError("session", "camera-cannot-be-opened", "The given Camera device (id: $cameraId) could not be opened! Error: $error")
class CameraSessionCannotBeConfiguredError(cameraId: String, outputs: CameraOutputs) :
CameraError("session", "cannot-create-session", "Failed to create a Camera Session for Camera $cameraId! Outputs: $outputs")
class CameraSessionCannotBeConfiguredError(cameraId: String) :
CameraError("session", "cannot-create-session", "Failed to create a Camera Session for Camera $cameraId!")
class CameraDisconnectedError(cameraId: String, error: CameraDeviceError) :
CameraError("session", "camera-has-been-disconnected", "The given Camera device (id: $cameraId) has been disconnected! Error: $error")
@@ -1,6 +1,7 @@
package com.mrousavy.camera.core

import android.content.Context
import android.graphics.ImageFormat
import android.graphics.Point
import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CameraCharacteristics
@@ -11,38 +12,51 @@ import android.hardware.camera2.CaptureRequest
import android.hardware.camera2.CaptureResult
import android.hardware.camera2.TotalCaptureResult
import android.hardware.camera2.params.MeteringRectangle
import android.hardware.camera2.params.OutputConfiguration
import android.media.Image
import android.media.ImageReader
import android.os.Build
import android.util.Log
import android.util.Range
import android.util.Size
import com.mrousavy.camera.CameraView
import com.mrousavy.camera.core.outputs.CameraOutputs
import android.view.Surface
import android.view.SurfaceHolder
import com.google.mlkit.vision.barcode.common.Barcode
import com.mrousavy.camera.core.outputs.BarcodeScannerOutput
import com.mrousavy.camera.core.outputs.PhotoOutput
import com.mrousavy.camera.core.outputs.SurfaceOutput
import com.mrousavy.camera.core.outputs.VideoPipelineOutput
import com.mrousavy.camera.extensions.bigger
import com.mrousavy.camera.extensions.capture
import com.mrousavy.camera.extensions.closestToOrMax
import com.mrousavy.camera.extensions.createCaptureSession
import com.mrousavy.camera.extensions.createPhotoCaptureRequest
import com.mrousavy.camera.extensions.getPhotoSizes
import com.mrousavy.camera.extensions.getPreviewTargetSize
import com.mrousavy.camera.extensions.getVideoSizes
import com.mrousavy.camera.extensions.openCamera
import com.mrousavy.camera.extensions.setZoom
import com.mrousavy.camera.extensions.smaller
import com.mrousavy.camera.frameprocessor.FrameProcessor
import com.mrousavy.camera.types.Flash
import com.mrousavy.camera.types.Orientation
import com.mrousavy.camera.types.QualityPrioritization
import com.mrousavy.camera.types.Torch
import com.mrousavy.camera.types.VideoCodec
import com.mrousavy.camera.types.VideoFileType
import com.mrousavy.camera.types.VideoStabilizationMode
import java.io.Closeable
import java.util.concurrent.CancellationException
import kotlin.coroutines.CoroutineContext
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.launch
import kotlinx.coroutines.runBlocking
import kotlinx.coroutines.sync.Mutex
import kotlinx.coroutines.sync.withLock

class CameraSession(
private val context: Context,
private val cameraManager: CameraManager,
private val onInitialized: () -> Unit,
private val onError: (e: Throwable) -> Unit
) : CameraManager.AvailabilityCallback(),
class CameraSession(private val context: Context, private val cameraManager: CameraManager, private val callback: CameraSessionCallback) :
Closeable,
CameraOutputs.Callback {
CoroutineScope {
companion object {
private const val TAG = "CameraSession"

@@ -50,43 +64,24 @@ class CameraSession(
private val CAN_SET_FPS = !Build.MANUFACTURER.equals("samsung", true)
}

data class CapturedPhoto(
val image: Image,
val metadata: TotalCaptureResult,
val orientation: Orientation,
val isMirrored: Boolean,
val format: Int
) : Closeable {
override fun close() {
image.close()
}
}

// setInput(..)
private var cameraId: String? = null

// setOutputs(..)
private var outputs: CameraOutputs? = null

// setIsActive(..)
private var isActive = false

// configureFormat(..)
private var fps: Int? = null
private var videoStabilizationMode: VideoStabilizationMode? = null
private var lowLightBoost: Boolean? = null
private var hdr: Boolean? = null

// zoom(..)
private var zoom: Float = 1.0f
// Camera Configuration
private var configuration: CameraConfiguration? = null

// Camera State
private var captureSession: CameraCaptureSession? = null
private var cameraDevice: CameraDevice? = null
private var previewRequest: CaptureRequest.Builder? = null
private var photoOutput: PhotoOutput? = null
private var videoOutput: VideoPipelineOutput? = null
private var previewOutput: SurfaceOutput? = null
private var codeScannerOutput: BarcodeScannerOutput? = null
private var previewView: PreviewView? = null
private val photoOutputSynchronizer = PhotoOutputSynchronizer()
private val mutex = Mutex()
private var isRunning = false
private var enableTorch = false

override val coroutineContext: CoroutineContext
get() = CameraQueues.cameraQueue.coroutineDispatcher

// Video Outputs
private var recording: RecordingSession? = null
@@ -100,115 +95,341 @@ class CameraSession(
updateVideoOutputs()
}

init {
cameraManager.registerAvailabilityCallback(this, CameraQueues.cameraQueue.handler)
}

override fun close() {
cameraManager.unregisterAvailabilityCallback(this)
photoOutputSynchronizer.clear()
captureSession?.close()
cameraDevice?.close()
outputs?.close()
isRunning = false
runBlocking {
mutex.withLock {
destroy()
photoOutputSynchronizer.clear()
}
}
}

val orientation: Orientation
get() {
val cameraId = cameraId ?: return Orientation.PORTRAIT
val cameraId = configuration?.cameraId ?: return Orientation.PORTRAIT
val characteristics = cameraManager.getCameraCharacteristics(cameraId)
val sensorRotation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION) ?: 0
return Orientation.fromRotationDegrees(sensorRotation)
}

suspend fun configureSession(
cameraId: String,
preview: CameraOutputs.PreviewOutput? = null,
photo: CameraOutputs.PhotoOutput? = null,
video: CameraOutputs.VideoOutput? = null,
codeScanner: CameraOutputs.CodeScannerOutput? = null
) {
Log.i(TAG, "Configuring Session for Camera $cameraId...")
val outputs = CameraOutputs(
cameraId,
cameraManager,
preview,
photo,
video,
codeScanner,
hdr == true,
this
)
if (this.cameraId == cameraId && this.outputs == outputs && isActive == isRunning) {
Log.i(TAG, "Nothing changed in configuration, canceling..")
suspend fun configure(lambda: (configuration: CameraConfiguration) -> Unit) {
mutex.withLock {
Log.i(TAG, "Updating CameraSession Configuration...")

val config = CameraConfiguration.copyOf(this.configuration)
lambda(config)
val diff = CameraConfiguration.difference(this.configuration, config)

if (!diff.hasAnyDifference) {
Log.w(TAG, "Called configure(...) but nothing changed...")
return
}

try {
// Build up session or update any props
if (diff.deviceChanged) {
// 1. cameraId changed, open device
configureCameraDevice(config)
}
if (diff.outputsChanged) {
// 2. outputs changed, build new session
configureOutputs(config)
}
if (diff.sidePropsChanged) {
// 3. zoom etc changed, update repeating request
configureCaptureRequest(config)
}

Log.i(TAG, "Successfully updated CameraSession Configuration! isActive: ${config.isActive}")
this.configuration = config
} catch (error: Throwable) {
Log.e(TAG, "Failed to configure CameraSession! Error: ${error.message}, Config-Diff: $diff", error)
callback.onError(error)
}
}

// 1. Close previous outputs
this.outputs?.close()
// 2. Assign new outputs
this.outputs = outputs
// 3. Update with existing render targets (surfaces)
updateVideoOutputs()

this.cameraId = cameraId
startRunning()
}
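configure() is now the single entry point for every configuration change: it takes the Mutex, copies the last committed CameraConfiguration, lets the caller mutate the copy, diffs it, and applies only the changed groups (device, outputs, side-props). Concurrent callers are therefore serialized and each sees the state the previous call committed. A small caller-side sketch (values are illustrative, not part of the commit):

// Two racing updates from different coroutines: the Mutex inside configure()
// applies them one after another, each against the latest committed configuration.
fun CameraView.sketchConcurrentUpdates() {
  launch { cameraSession.configure { config -> config.zoom = 2f } }
  launch { cameraSession.configure { config -> config.torch = Torch.ON } }
}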
suspend fun configureFormat(
fps: Int? = null,
videoStabilizationMode: VideoStabilizationMode? = null,
hdr: Boolean? = null,
lowLightBoost: Boolean? = null
) {
Log.i(TAG, "Setting Format (fps: $fps | videoStabilization: $videoStabilizationMode | hdr: $hdr | lowLightBoost: $lowLightBoost)...")
this.fps = fps
this.videoStabilizationMode = videoStabilizationMode
this.hdr = hdr
this.lowLightBoost = lowLightBoost
private fun destroy() {
Log.i(TAG, "Destroying session..")
captureSession?.stopRepeating()
captureSession?.close()
captureSession = null

var needsReconfiguration = false
val currentOutputs = outputs
if (currentOutputs != null && currentOutputs.enableHdr != hdr) {
// Update existing HDR for Outputs
this.outputs?.close()
this.outputs = CameraOutputs(
currentOutputs.cameraId,
cameraManager,
currentOutputs.preview,
currentOutputs.photo,
currentOutputs.video,
currentOutputs.codeScanner,
hdr,
this
)
needsReconfiguration = true
cameraDevice?.close()
cameraDevice = null

previewOutput?.close()
previewOutput = null
photoOutput?.close()
photoOutput = null
videoOutput?.close()
videoOutput = null
codeScannerOutput?.close()
codeScannerOutput = null

isRunning = false
}

fun createPreviewView(context: Context): PreviewView {
val previewView = PreviewView(
context,
object : SurfaceHolder.Callback {
override fun surfaceCreated(holder: SurfaceHolder) {
Log.i(TAG, "PreviewView Surface created! ${holder.surface}")
createPreviewOutput(holder.surface)
}

override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {
Log.i(TAG, "PreviewView Surface updated! ${holder.surface} $width x $height")
}

override fun surfaceDestroyed(holder: SurfaceHolder) {
Log.i(TAG, "PreviewView Surface destroyed! ${holder.surface}")
destroyPreviewOutputSync()
}
}
)
this.previewView = previewView
return previewView
}

private fun createPreviewOutput(surface: Surface) {
Log.i(TAG, "Setting Preview Output...")
launch {
configure { config ->
config.preview = CameraConfiguration.Output.Enabled.create(CameraConfiguration.Preview(surface))
}
}
if (needsReconfiguration) {
startRunning()
} else {
updateRepeatingRequest()
}

private fun destroyPreviewOutputSync() {
Log.i(TAG, "Destroying Preview Output...")
runBlocking {
configure { config ->
config.preview = CameraConfiguration.Output.Disabled.create()
}
}
}

/**
* Starts or stops the Camera.
* Set up the `CameraDevice` (`cameraId`)
*/
suspend fun setIsActive(isActive: Boolean) {
Log.i(TAG, "Setting isActive: $isActive (isRunning: $isRunning)")
this.isActive = isActive
if (isActive == isRunning) return
private suspend fun configureCameraDevice(configuration: CameraConfiguration) {
val cameraId = configuration.cameraId ?: throw NoCameraDeviceError()

if (isActive) {
startRunning()
} else {
stopRunning()
}
Log.i(TAG, "Configuring Camera #$cameraId...")

cameraDevice?.close()
cameraDevice = cameraManager.openCamera(cameraId, { device, error ->
if (this.cameraDevice == device) {
Log.e(TAG, "Camera Device $device has been disconnected!", error)
callback.onError(error)
} else {
// a previous device has been disconnected, but we already have a new one.
// this is just normal behavior
}
}, CameraQueues.cameraQueue)

// Update PreviewView's Surface Size to a supported value from this Capture Device
previewView?.resizeToInputCamera(cameraId, cameraManager, configuration.format)

Log.i(TAG, "Successfully configured Camera #$cameraId!")
}

private fun updateVideoOutputs() {
val videoPipeline = outputs?.videoOutput?.videoPipeline ?: return
videoPipeline.setRecordingSessionOutput(this.recording)
videoPipeline.setFrameProcessorOutput(this.frameProcessor)
/**
* Set up the `CaptureSession` with all outputs (preview, photo, video, codeScanner) and their HDR/Format settings.
*/
private suspend fun configureOutputs(configuration: CameraConfiguration) {
val cameraDevice = cameraDevice ?: throw NoCameraDeviceError()
val characteristics = cameraManager.getCameraCharacteristics(cameraDevice.id)
val format = configuration.format

Log.i(TAG, "Configuring Session for Camera #${cameraDevice.id}...")

// TODO: Do we want to skip this is this.cameraSession already contains all outputs?
// Destroy previous CaptureSession
captureSession?.close()
captureSession = null
// Destroy previous outputs
photoOutput?.close()
photoOutput = null
videoOutput?.close()
videoOutput = null
previewOutput?.close()
previewOutput = null
codeScannerOutput?.close()
codeScannerOutput = null

val isSelfie = characteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT

val outputs = mutableListOf<OutputConfiguration>()

// Photo Output
val photo = configuration.photo as? CameraConfiguration.Output.Enabled<CameraConfiguration.Photo>
if (photo != null) {
val imageFormat = ImageFormat.JPEG
val sizes = characteristics.getPhotoSizes(imageFormat)
val size = sizes.closestToOrMax(format?.photoSize)
val maxImages = 3

Log.i(TAG, "Adding ${size.width} x ${size.height} Photo Output in Format #$imageFormat...")
val imageReader = ImageReader.newInstance(size.width, size.height, imageFormat, maxImages)
imageReader.setOnImageAvailableListener({ reader ->
Log.i(TAG, "Photo Captured!")
val image = reader.acquireLatestImage()
onPhotoCaptured(image)
}, CameraQueues.cameraQueue.handler)
val output = PhotoOutput(imageReader, configuration.enableHdr)
outputs.add(output.toOutputConfiguration(characteristics))
photoOutput = output
}

// Video Output
val video = configuration.video as? CameraConfiguration.Output.Enabled<CameraConfiguration.Video>
if (video != null) {
val imageFormat = video.config.pixelFormat.toImageFormat()
val sizes = characteristics.getVideoSizes(cameraDevice.id, imageFormat)
val size = sizes.closestToOrMax(format?.videoSize)

Log.i(TAG, "Adding ${size.width} x ${size.height} Video Output in Format #$imageFormat...")
val videoPipeline = VideoPipeline(
size.width,
size.height,
video.config.pixelFormat,
isSelfie,
video.config.enableFrameProcessor
)
val output = VideoPipelineOutput(videoPipeline, configuration.enableHdr)
outputs.add(output.toOutputConfiguration(characteristics))
videoOutput = output
}

// Preview Output
val preview = configuration.preview as? CameraConfiguration.Output.Enabled<CameraConfiguration.Preview>
if (preview != null) {
// Compute Preview Size based on chosen video size
val videoSize = videoOutput?.size ?: format?.videoSize
val size = if (videoSize != null) {
val formatAspectRatio = videoSize.bigger.toDouble() / videoSize.smaller
characteristics.getPreviewTargetSize(formatAspectRatio)
} else {
characteristics.getPreviewTargetSize(null)
}

Log.i(TAG, "Adding ${size.width} x ${size.height} Preview Output...")
val output = SurfaceOutput(
preview.config.surface,
size,
SurfaceOutput.OutputType.PREVIEW,
configuration.enableHdr
)
outputs.add(output.toOutputConfiguration(characteristics))
previewOutput = output
previewView?.size = size
}

// CodeScanner Output
val codeScanner = configuration.codeScanner as? CameraConfiguration.Output.Enabled<CameraConfiguration.CodeScanner>
if (codeScanner != null) {
val imageFormat = ImageFormat.YUV_420_888
val sizes = characteristics.getVideoSizes(cameraDevice.id, imageFormat)
val size = sizes.closestToOrMax(Size(1280, 720))

Log.i(TAG, "Adding ${size.width} x ${size.height} CodeScanner Output in Format #$imageFormat...")
val pipeline = CodeScannerPipeline(size, imageFormat, codeScanner.config, callback)
val output = BarcodeScannerOutput(pipeline)
outputs.add(output.toOutputConfiguration(characteristics))
codeScannerOutput = output
}

// Create new session
captureSession = cameraDevice.createCaptureSession(cameraManager, outputs, { session ->
if (this.captureSession == session) {
Log.i(TAG, "Camera Session $session has been closed!")
isRunning = false
}
}, CameraQueues.cameraQueue)

Log.i(TAG, "Successfully configured Session with ${outputs.size} outputs for Camera #${cameraDevice.id}!")
callback.onInitialized()

// Update Frame Processor and RecordingSession for newly changed output
updateVideoOutputs()
}

private fun configureCaptureRequest(config: CameraConfiguration) {
val device = cameraDevice ?: throw NoCameraDeviceError()
val captureSession = captureSession ?: throw CameraNotReadyError()

if (!config.isActive) {
// TODO: Do we want to do stopRepeating() or entirely destroy the session?
// If the Camera is not active, we don't do anything.
captureSession.stopRepeating()
return
}

val cameraCharacteristics = cameraManager.getCameraCharacteristics(device.id)

val template = if (config.video.isEnabled) CameraDevice.TEMPLATE_RECORD else CameraDevice.TEMPLATE_PREVIEW
val captureRequest = device.createCaptureRequest(template)

previewOutput?.let { output ->
captureRequest.addTarget(output.surface)
}
videoOutput?.let { output ->
captureRequest.addTarget(output.surface)
}

// Set FPS
// TODO: Check if the FPS range is actually supported in the current configuration.
val fps = config.fps
if (fps != null && CAN_SET_FPS) {
captureRequest.set(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, Range(fps, fps))
}

// Set Video Stabilization
when (config.videoStabilizationMode) {
VideoStabilizationMode.OFF -> {
// do nothing
}
VideoStabilizationMode.STANDARD -> {
// TODO: Check if that stabilization mode is even supported
val mode = if (Build.VERSION.SDK_INT >=
Build.VERSION_CODES.TIRAMISU
) {
CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION
} else {
CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE_ON
}
captureRequest.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, mode)
}
VideoStabilizationMode.CINEMATIC, VideoStabilizationMode.CINEMATIC_EXTENDED -> {
// TODO: Check if that stabilization mode is even supported
captureRequest.set(CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE, CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE_ON)
}
}

// Set HDR
// TODO: Check if that value is even supported
if (config.enableHdr) {
captureRequest.set(CaptureRequest.CONTROL_SCENE_MODE, CaptureRequest.CONTROL_SCENE_MODE_HDR)
} else if (config.enableLowLightBoost) {
captureRequest.set(CaptureRequest.CONTROL_SCENE_MODE, CaptureRequest.CONTROL_SCENE_MODE_NIGHT)
}

// Set Zoom
// TODO: Check if that zoom value is even supported
captureRequest.setZoom(config.zoom, cameraCharacteristics)

// Set Torch
// TODO: Check if torch is even supported
if (config.torch == Torch.ON) {
captureRequest.set(CaptureRequest.FLASH_MODE, CaptureRequest.FLASH_MODE_TORCH)
}

// Start repeating request if the Camera is active
val request = captureRequest.build()
captureSession.setRepeatingRequest(request, null, null)
}
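Because the Difference groups are applied incrementally, a cheap change such as zoom never tears down the CaptureSession: only sidePropsChanged is true, so configure() falls through to configureCaptureRequest(), which rebuilds and re-installs the repeating request. A short sketch of that fast path (illustrative only):

// Pinch-to-zoom fast path: only the repeating CaptureRequest is rebuilt.
suspend fun CameraSession.setZoomFast(factor: Float) {
  configure { config -> config.zoom = factor }
}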
suspend fun takePhoto(
@@ -220,12 +441,12 @@ class CameraSession(
outputOrientation: Orientation
): CapturedPhoto {
val captureSession = captureSession ?: throw CameraNotReadyError()
val outputs = outputs ?: throw CameraNotReadyError()

val photoOutput = outputs.photoOutput ?: throw PhotoNotEnabledError()
val photoOutput = photoOutput ?: throw PhotoNotEnabledError()

Log.i(TAG, "Photo capture 0/3 - preparing capture request (${photoOutput.size.width}x${photoOutput.size.height})...")

val zoom = configuration?.zoom ?: 1f

val cameraCharacteristics = cameraManager.getCameraCharacteristics(captureSession.device.id)
val orientation = outputOrientation.toSensorRelativeOrientation(cameraCharacteristics)
val captureRequest = captureSession.device.createPhotoCaptureRequest(
@@ -254,11 +475,18 @@ class CameraSession(
}
}

override fun onPhotoCaptured(image: Image) {
Log.i(CameraView.TAG, "Photo captured! ${image.width} x ${image.height}")
private fun onPhotoCaptured(image: Image) {
Log.i(TAG, "Photo captured! ${image.width} x ${image.height}")
photoOutputSynchronizer.set(image.timestamp, image)
}

private fun updateVideoOutputs() {
val videoOutput = videoOutput ?: return
Log.i(TAG, "Updating Video Outputs...")
videoOutput.videoPipeline.setFrameProcessorOutput(frameProcessor)
videoOutput.videoPipeline.setRecordingSessionOutput(recording)
}

suspend fun startRecording(
enableAudio: Boolean,
codec: VideoCodec,
@@ -269,8 +497,9 @@ class CameraSession(
) {
mutex.withLock {
if (recording != null) throw RecordingInProgressError()
val outputs = outputs ?: throw CameraNotReadyError()
val videoOutput = outputs.videoOutput ?: throw VideoNotEnabledError()
val videoOutput = videoOutput ?: throw VideoNotEnabledError()

val fps = configuration?.fps ?: 30

val recording =
RecordingSession(context, videoOutput.size, enableAudio, fps, codec, orientation, fileType, bitRate, callback, onError)
@@ -302,23 +531,9 @@ class CameraSession(
}
}

suspend fun setTorchMode(enableTorch: Boolean) {
if (this.enableTorch != enableTorch) {
this.enableTorch = enableTorch
updateRepeatingRequest()
}
}

suspend fun setZoom(zoom: Float) {
if (this.zoom != zoom) {
this.zoom = zoom
updateRepeatingRequest()
}
}

suspend fun focus(x: Int, y: Int) {
val captureSession = captureSession ?: throw CameraNotReadyError()
val previewOutput = outputs?.previewOutput ?: throw CameraNotReadyError()
val previewOutput = previewOutput ?: throw CameraNotReadyError()
val characteristics = cameraManager.getCameraCharacteristics(captureSession.device.id)
val sensorSize = characteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE)!!
val previewSize = previewOutput.size
@@ -330,18 +545,9 @@ class CameraSession(
focus(point)
}

override fun onCameraAvailable(cameraId: String) {
super.onCameraAvailable(cameraId)
Log.i(TAG, "Camera became available: $cameraId")
}

override fun onCameraUnavailable(cameraId: String) {
super.onCameraUnavailable(cameraId)
Log.i(TAG, "Camera became un-available: $cameraId")
}

private suspend fun focus(point: Point) {
mutex.withLock {
// TODO: Fix this method
val captureSession = captureSession ?: throw CameraNotReadyError()
val request = previewRequest ?: throw CameraNotReadyError()

@@ -373,207 +579,21 @@ class CameraSession(
}
}

/**
* Opens a [CameraDevice]. If there already is an open Camera for the given [cameraId], use that.
*/
private suspend fun getCameraDevice(cameraId: String, onClosed: (error: Throwable) -> Unit): CameraDevice {
val currentDevice = cameraDevice
if (currentDevice?.id == cameraId) {
// We already opened that device
return currentDevice
}
// Close previous device
cameraDevice?.close()
cameraDevice = null

val device = cameraManager.openCamera(cameraId, { camera, reason ->
Log.d(TAG, "Camera Closed ($cameraDevice == $camera)")
if (cameraDevice == camera) {
// The current CameraDevice has been closed, handle that!
onClosed(reason)
cameraDevice = null
} else {
// A new CameraDevice has been opened, we don't care about this one anymore.
}
}, CameraQueues.cameraQueue)

// Cache device in memory
cameraDevice = device
return device
}

// Caches the result of outputs.hashCode() of the last getCaptureSession call
private var lastOutputsHashCode: Int? = null

private suspend fun getCaptureSession(cameraDevice: CameraDevice, outputs: CameraOutputs, onClosed: () -> Unit): CameraCaptureSession {
val currentSession = captureSession
if (currentSession?.device == cameraDevice && outputs.hashCode() == lastOutputsHashCode) {
// We already opened a CameraCaptureSession on this device
return currentSession
}
captureSession?.close()
captureSession = null

val session = cameraDevice.createCaptureSession(cameraManager, outputs, { session ->
Log.d(TAG, "Capture Session Closed ($captureSession == $session)")
if (captureSession == session) {
// The current CameraCaptureSession has been closed, handle that!
onClosed()
captureSession = null
} else {
// A new CameraCaptureSession has been opened, we don't care about this one anymore.
}
}, CameraQueues.cameraQueue)

// Cache session in memory
captureSession = session
lastOutputsHashCode = outputs.hashCode()
// New session initialized
onInitialized()
return session
}

private fun getPreviewCaptureRequest(
fps: Int? = null,
videoStabilizationMode: VideoStabilizationMode? = null,
lowLightBoost: Boolean? = null,
hdr: Boolean? = null,
torch: Boolean? = null
): CaptureRequest {
val captureRequest = previewRequest ?: throw CameraNotReadyError()

// FPS
val fpsRange = if (fps != null && CAN_SET_FPS) Range(fps, fps) else Range(30, 30)
captureRequest.set(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, fpsRange)

// Video Stabilization
captureRequest.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE, videoStabilizationMode?.toDigitalStabilizationMode())
captureRequest.set(CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE, videoStabilizationMode?.toOpticalStabilizationMode())

// Night/HDR Mode
val sceneMode = if (hdr ==
true
) {
CaptureRequest.CONTROL_SCENE_MODE_HDR
} else if (lowLightBoost == true) CaptureRequest.CONTROL_SCENE_MODE_NIGHT else null
captureRequest.set(CaptureRequest.CONTROL_SCENE_MODE, sceneMode)
captureRequest.set(
CaptureRequest.CONTROL_MODE,
if (sceneMode != null) CaptureRequest.CONTROL_MODE_USE_SCENE_MODE else CaptureRequest.CONTROL_MODE_AUTO
)

// Zoom
val cameraCharacteristics = cameraManager.getCameraCharacteristics(cameraId!!)
captureRequest.setZoom(zoom, cameraCharacteristics)

// Torch Mode
val torchMode = if (torch == true) CaptureRequest.FLASH_MODE_TORCH else CaptureRequest.FLASH_MODE_OFF
captureRequest.set(CaptureRequest.FLASH_MODE, torchMode)

return captureRequest.build()
}

private fun destroy() {
Log.i(TAG, "Destroying session..")
captureSession?.stopRepeating()
captureSession?.close()
captureSession = null

cameraDevice?.close()
cameraDevice = null

outputs?.close()
outputs = null

isRunning = false
}

private suspend fun startRunning() {
isRunning = false
val cameraId = cameraId ?: return
if (!isActive) return

Log.i(TAG, "Starting Camera Session...")

try {
mutex.withLock {
val outputs = outputs
if (outputs == null || outputs.size == 0) {
Log.i(TAG, "CameraSession doesn't have any Outputs, canceling..")
destroy()
return@withLock
}

// 1. Open Camera Device
val camera = getCameraDevice(cameraId) { reason ->
isRunning = false
onError(reason)
}

// 2. Create capture session with outputs
val session = getCaptureSession(camera, outputs) {
isRunning = false
}

// 3. Create request template
val template = if (outputs.videoOutput != null) CameraDevice.TEMPLATE_RECORD else CameraDevice.TEMPLATE_PREVIEW
|
||||
val captureRequest = camera.createCaptureRequest(template)
|
||||
outputs.previewOutput?.let { output ->
|
||||
Log.i(TAG, "Adding preview output surface ${output.outputType}..")
|
||||
captureRequest.addTarget(output.surface)
|
||||
}
|
||||
outputs.videoOutput?.let { output ->
|
||||
Log.i(TAG, "Adding video output surface ${output.outputType}..")
|
||||
captureRequest.addTarget(output.surface)
|
||||
}
|
||||
outputs.codeScannerOutput?.let { output ->
|
||||
Log.i(TAG, "Adding code scanner output surface ${output.outputType}")
|
||||
captureRequest.addTarget(output.surface)
|
||||
}
|
||||
|
||||
Log.i(TAG, "Camera Session initialized! Starting repeating request..")
|
||||
isRunning = true
|
||||
this.previewRequest = captureRequest
|
||||
this.captureSession = session
|
||||
this.cameraDevice = camera
|
||||
}
|
||||
|
||||
updateRepeatingRequest()
|
||||
} catch (e: IllegalStateException) {
|
||||
Log.e(TAG, "Failed to start Camera Session, this session is already closed.", e)
|
||||
data class CapturedPhoto(
|
||||
val image: Image,
|
||||
val metadata: TotalCaptureResult,
|
||||
val orientation: Orientation,
|
||||
val isMirrored: Boolean,
|
||||
val format: Int
|
||||
) : Closeable {
|
||||
override fun close() {
|
||||
image.close()
|
||||
}
|
||||
}
|
||||
|
||||
private suspend fun updateRepeatingRequest() {
|
||||
mutex.withLock {
|
||||
val session = captureSession
|
||||
if (session == null) {
|
||||
// Not yet ready. Start session first, then it will update repeating request.
|
||||
startRunning()
|
||||
return
|
||||
}
|
||||
|
||||
val fps = fps
|
||||
val videoStabilizationMode = videoStabilizationMode
|
||||
val lowLightBoost = lowLightBoost
|
||||
val hdr = hdr
|
||||
val enableTorch = enableTorch
|
||||
|
||||
val repeatingRequest = getPreviewCaptureRequest(fps, videoStabilizationMode, lowLightBoost, hdr, enableTorch)
|
||||
Log.d(TAG, "Setting Repeating Request..")
|
||||
session.setRepeatingRequest(repeatingRequest, null, null)
|
||||
}
|
||||
}
|
||||
|
||||
private suspend fun stopRunning() {
|
||||
Log.i(TAG, "Stopping Camera Session...")
|
||||
try {
|
||||
mutex.withLock {
|
||||
destroy()
|
||||
Log.i(TAG, "Camera Session stopped!")
|
||||
}
|
||||
} catch (e: IllegalStateException) {
|
||||
Log.e(TAG, "Failed to stop Camera Session, this session is already closed.", e)
|
||||
}
|
||||
interface CameraSessionCallback {
|
||||
fun onError(error: Throwable)
|
||||
fun onInitialized()
|
||||
fun onCodeScanned(codes: List<Barcode>)
|
||||
}
|
||||
}
|
||||
|
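The per-property setters removed above (setTorchMode(), setZoom()) are what the new single-lock configure flow replaces. A minimal sketch of the new call-site pattern, assuming the new CameraConfiguration exposes torch and zoom fields and that CameraSession.configure() applies the diff atomically under one mutex (field names are assumptions, not verified against the final API):

// Hedged sketch - not part of this diff.
suspend fun updateTorchAndZoom(session: CameraSession, torch: Torch, zoom: Float) {
  session.configure { config ->
    config.torch = torch // replaces the old setTorchMode(Boolean)
    config.zoom = zoom   // replaces the old setZoom(Float)
  }
}
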
@@ -7,11 +7,15 @@ import com.google.mlkit.vision.barcode.BarcodeScanner
import com.google.mlkit.vision.barcode.BarcodeScannerOptions
import com.google.mlkit.vision.barcode.BarcodeScanning
import com.google.mlkit.vision.common.InputImage
import com.mrousavy.camera.core.outputs.CameraOutputs
import com.mrousavy.camera.types.Orientation
import java.io.Closeable

class CodeScannerPipeline(val size: Size, val format: Int, val output: CameraOutputs.CodeScannerOutput) : Closeable {
class CodeScannerPipeline(
  val size: Size,
  val format: Int,
  val configuration: CameraConfiguration.CodeScanner,
  val callback: CameraSession.CameraSessionCallback
) : Closeable {
  companion object {
    // We want to have a buffer of 2 images, but we always only acquire one.
    // That way the pipeline is free to stream one frame into the unused buffer,
@@ -26,7 +30,7 @@ class CodeScannerPipeline(val size: Size, val format: Int, val output: CameraOut
    get() = imageReader.surface

  init {
    val types = output.options.codeTypes.map { it.toBarcodeType() }
    val types = configuration.codeTypes.map { it.toBarcodeType() }
    val barcodeScannerOptions = BarcodeScannerOptions.Builder()
      .setBarcodeFormats(types[0], *types.toIntArray())
      .build()
@@ -52,13 +56,13 @@ class CodeScannerPipeline(val size: Size, val format: Int, val output: CameraOut
          image.close()
          isBusy = false
          if (barcodes.isNotEmpty()) {
            output.onCodeScanned(barcodes)
            callback.onCodeScanned(barcodes)
          }
        }
        .addOnFailureListener { error ->
          image.close()
          isBusy = false
          output.onError(error)
          callback.onError(error)
        }
    }, CameraQueues.videoQueue.handler)
  }
@@ -69,7 +73,7 @@ class CodeScannerPipeline(val size: Size, val format: Int, val output: CameraOut
  }

  override fun toString(): String {
    val codeTypes = output.options.codeTypes.joinToString(", ")
    val codeTypes = configuration.codeTypes.joinToString(", ")
    return "${size.width} x ${size.height} CodeScanner for [$codeTypes] ($format)"
  }
}

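The pipeline now reports scan results and errors through the shared CameraSessionCallback instead of per-output lambdas. A minimal construction sketch, assuming a CameraConfiguration.CodeScanner holding a codeTypes list and a CodeType enum with a QR case (both names are assumptions here):

// Hedged sketch - illustrative only.
val pipeline = CodeScannerPipeline(
  size = Size(1280, 720),
  format = ImageFormat.YUV_420_888,
  configuration = CameraConfiguration.CodeScanner(listOf(CodeType.QR)),
  callback = object : CameraSession.CameraSessionCallback {
    override fun onError(error: Throwable) = Log.e("Scanner", "Scan failed!", error)
    override fun onInitialized() = Unit
    override fun onCodeScanned(codes: List<Barcode>) = Log.i("Scanner", "Scanned ${codes.size} codes")
  }
)
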
@@ -2,87 +2,89 @@ package com.mrousavy.camera.core

import android.annotation.SuppressLint
import android.content.Context
import android.hardware.camera2.CameraManager
import android.util.Log
import android.util.Size
import android.view.Surface
import android.view.Gravity
import android.view.SurfaceHolder
import android.view.SurfaceView
import android.widget.FrameLayout
import com.mrousavy.camera.extensions.bigger
import com.mrousavy.camera.extensions.getMaximumPreviewSize
import com.mrousavy.camera.extensions.getPreviewTargetSize
import com.mrousavy.camera.extensions.smaller
import com.mrousavy.camera.types.CameraDeviceFormat
import com.mrousavy.camera.types.ResizeMode
import kotlin.math.roundToInt

@SuppressLint("ViewConstructor")
class PreviewView(
  context: Context,
  val targetSize: Size,
  private val resizeMode: ResizeMode,
  private val onSurfaceChanged: (surface: Surface?) -> Unit
) : SurfaceView(context) {
class PreviewView(context: Context, callback: SurfaceHolder.Callback) : SurfaceView(context) {
  var size: Size = getMaximumPreviewSize()
    set(value) {
      Log.i(TAG, "Resizing PreviewView to ${value.width} x ${value.height}...")
      holder.setFixedSize(value.width, value.height)
      requestLayout()
      invalidate()
      field = value
    }
  var resizeMode: ResizeMode = ResizeMode.COVER
    set(value) {
      if (value != field) {
        requestLayout()
        invalidate()
      }
      field = value
    }

  init {
    Log.i(TAG, "Using Preview Size ${targetSize.width} x ${targetSize.height}.")
    holder.setFixedSize(targetSize.width, targetSize.height)
    holder.addCallback(object : SurfaceHolder.Callback {
      override fun surfaceCreated(holder: SurfaceHolder) {
        Log.i(TAG, "Surface created! ${holder.surface}")
        onSurfaceChanged(holder.surface)
      }

      override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {
        Log.i(TAG, "Surface resized! ${holder.surface} ($width x $height in format #$format)")
      }

      override fun surfaceDestroyed(holder: SurfaceHolder) {
        Log.i(TAG, "Surface destroyed! ${holder.surface}")
        onSurfaceChanged(null)
      }
    })
    Log.i(TAG, "Creating PreviewView...")
    layoutParams = FrameLayout.LayoutParams(
      FrameLayout.LayoutParams.MATCH_PARENT,
      FrameLayout.LayoutParams.MATCH_PARENT,
      Gravity.CENTER
    )
    holder.addCallback(callback)
  }

  private fun coverSize(contentSize: Size, containerWidth: Int, containerHeight: Int): Size {
  fun resizeToInputCamera(cameraId: String, cameraManager: CameraManager, format: CameraDeviceFormat?) {
    val characteristics = cameraManager.getCameraCharacteristics(cameraId)

    val targetPreviewSize = format?.videoSize
    val formatAspectRatio = if (targetPreviewSize != null) targetPreviewSize.bigger.toDouble() / targetPreviewSize.smaller else null
    size = characteristics.getPreviewTargetSize(formatAspectRatio)
  }

  private fun getSize(contentSize: Size, containerSize: Size, resizeMode: ResizeMode): Size {
    val contentAspectRatio = contentSize.height.toDouble() / contentSize.width
    val containerAspectRatio = containerWidth.toDouble() / containerHeight
    val containerAspectRatio = containerSize.width.toDouble() / containerSize.height

    Log.d(TAG, "coverSize :: $contentSize ($contentAspectRatio), ${containerWidth}x$containerHeight ($containerAspectRatio)")
    Log.d(TAG, "coverSize :: $contentSize ($contentAspectRatio), ${containerSize.width}x${containerSize.height} ($containerAspectRatio)")

    return if (contentAspectRatio > containerAspectRatio) {
    val widthOverHeight = when (resizeMode) {
      ResizeMode.COVER -> contentAspectRatio > containerAspectRatio
      ResizeMode.CONTAIN -> contentAspectRatio < containerAspectRatio
    }

    return if (widthOverHeight) {
      // Scale by width to cover height
      val scaledWidth = containerHeight * contentAspectRatio
      Size(scaledWidth.roundToInt(), containerHeight)
      val scaledWidth = containerSize.height * contentAspectRatio
      Size(scaledWidth.roundToInt(), containerSize.height)
    } else {
      // Scale by height to cover width
      val scaledHeight = containerWidth / contentAspectRatio
      Size(containerWidth, scaledHeight.roundToInt())
    }
  }

  private fun containSize(contentSize: Size, containerWidth: Int, containerHeight: Int): Size {
    val contentAspectRatio = contentSize.height.toDouble() / contentSize.width
    val containerAspectRatio = containerWidth.toDouble() / containerHeight

    Log.d(TAG, "containSize :: $contentSize ($contentAspectRatio), ${containerWidth}x$containerHeight ($containerAspectRatio)")

    return if (contentAspectRatio > containerAspectRatio) {
      // Scale by height to fit within width
      val scaledHeight = containerWidth / contentAspectRatio
      return Size(containerWidth, scaledHeight.roundToInt())
    } else {
      // Scale by width to fit within height
      val scaledWidth = containerHeight * contentAspectRatio
      return Size(scaledWidth.roundToInt(), containerHeight)
      val scaledHeight = containerSize.width / contentAspectRatio
      Size(containerSize.width, scaledHeight.roundToInt())
    }
  }

  @SuppressLint("DrawAllocation")
  override fun onMeasure(widthMeasureSpec: Int, heightMeasureSpec: Int) {
    super.onMeasure(widthMeasureSpec, heightMeasureSpec)
    val viewWidth = MeasureSpec.getSize(widthMeasureSpec)
    val viewHeight = MeasureSpec.getSize(heightMeasureSpec)

    Log.d(TAG, "onMeasure($viewWidth, $viewHeight)")
    Log.i(TAG, "PreviewView onMeasure($viewWidth, $viewHeight)")

    val fittedSize = when (resizeMode) {
      ResizeMode.COVER -> this.coverSize(targetSize, viewWidth, viewHeight)
      ResizeMode.CONTAIN -> this.containSize(targetSize, viewWidth, viewHeight)
    }
    val fittedSize = getSize(size, Size(viewWidth, viewHeight), resizeMode)

    Log.d(TAG, "Fitted dimensions set: $fittedSize")
    setMeasuredDimension(fittedSize.width, fittedSize.height)

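PreviewView now owns its size and resizeMode as mutable state and can re-target itself to whichever camera/format is selected. A minimal usage sketch, grounded in the constructor and methods shown above; the surrounding cameraManager, cameraId and format values are assumed to exist at the call site:

// Hedged sketch - illustrative only.
val previewView = PreviewView(context, object : SurfaceHolder.Callback {
  override fun surfaceCreated(holder: SurfaceHolder) { /* hand the surface to the CameraSession */ }
  override fun surfaceChanged(holder: SurfaceHolder, format: Int, width: Int, height: Int) {}
  override fun surfaceDestroyed(holder: SurfaceHolder) { /* detach the surface again */ }
})
previewView.resizeMode = ResizeMode.COVER
// Pick a preview resolution that matches the selected camera/format aspect ratio:
previewView.resizeToInputCamera(cameraId, cameraManager, format)
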
@@ -105,9 +105,7 @@ class VideoPipeline(
      Log.i(TAG, "ImageReader::onImageAvailable!")
      val image = reader.acquireNextImage() ?: return@setOnImageAvailableListener

      Log.i(TAG, "Image Format: ${image.format}")

      // // TODO: Get correct orientation and isMirrored
      // TODO: Get correct orientation and isMirrored
      val frame = Frame(image, image.timestamp, Orientation.PORTRAIT, isMirrored)
      frame.incrementRefCount()
      frameProcessor?.call(frame)
@@ -177,7 +175,11 @@ class VideoPipeline(
   */
  fun setFrameProcessorOutput(frameProcessor: FrameProcessor?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height FrameProcessor Output...")
      if (frameProcessor != null) {
        Log.i(TAG, "Setting $width x $height FrameProcessor Output...")
      } else {
        Log.i(TAG, "Removing FrameProcessor Output...")
      }
      this.frameProcessor = frameProcessor
    }
  }
@@ -187,13 +189,14 @@ class VideoPipeline(
   */
  fun setRecordingSessionOutput(recordingSession: RecordingSession?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height RecordingSession Output...")
      if (recordingSession != null) {
        // Configure OpenGL pipeline to stream Frames into the Recording Session's surface
        Log.i(TAG, "Setting $width x $height RecordingSession Output...")
        setRecordingSessionOutputSurface(recordingSession.surface)
        this.recordingSession = recordingSession
      } else {
        // Configure OpenGL pipeline to stop streaming Frames into the Recording Session's surface
        Log.i(TAG, "Removing RecordingSession Output...")
        removeRecordingSessionOutputSurface()
        this.recordingSession = null
      }

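Both output setters now distinguish the attach and detach cases explicitly. A short sketch of how a caller toggles recording on the shared pipeline, based on the setter shown above:

// Hedged sketch - illustrative only.
fun attachRecording(pipeline: VideoPipeline, recording: RecordingSession?) {
  // Passing null detaches the recording surface from the OpenGL pipeline again.
  pipeline.setRecordingSessionOutput(recording)
}
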
@@ -1,177 +0,0 @@
package com.mrousavy.camera.core.outputs

import android.graphics.ImageFormat
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraManager
import android.media.Image
import android.media.ImageReader
import android.util.Log
import android.util.Size
import android.view.Surface
import com.google.mlkit.vision.barcode.common.Barcode
import com.mrousavy.camera.core.CameraQueues
import com.mrousavy.camera.core.CodeScannerPipeline
import com.mrousavy.camera.core.VideoPipeline
import com.mrousavy.camera.extensions.bigger
import com.mrousavy.camera.extensions.closestToOrMax
import com.mrousavy.camera.extensions.getPhotoSizes
import com.mrousavy.camera.extensions.getPreviewTargetSize
import com.mrousavy.camera.extensions.getVideoSizes
import com.mrousavy.camera.extensions.smaller
import com.mrousavy.camera.types.CodeScannerOptions
import com.mrousavy.camera.types.PixelFormat
import java.io.Closeable

class CameraOutputs(
  val cameraId: String,
  cameraManager: CameraManager,
  val preview: PreviewOutput? = null,
  val photo: PhotoOutput? = null,
  val video: VideoOutput? = null,
  val codeScanner: CodeScannerOutput? = null,
  val enableHdr: Boolean? = false,
  val callback: Callback
) : Closeable {
  companion object {
    private const val TAG = "CameraOutputs"
    const val PHOTO_OUTPUT_BUFFER_SIZE = 3
  }

  data class PreviewOutput(val surface: Surface, val targetSize: Size? = null)
  data class PhotoOutput(val targetSize: Size? = null, val format: Int = ImageFormat.JPEG)
  data class VideoOutput(
    val targetSize: Size? = null,
    val enableRecording: Boolean = false,
    val enableFrameProcessor: Boolean? = false,
    val format: PixelFormat = PixelFormat.NATIVE
  )
  data class CodeScannerOutput(
    val options: CodeScannerOptions,
    val onCodeScanned: (codes: List<Barcode>) -> Unit,
    val onError: (error: Throwable) -> Unit
  )

  interface Callback {
    fun onPhotoCaptured(image: Image)
  }

  var previewOutput: SurfaceOutput? = null
    private set
  var photoOutput: ImageReaderOutput? = null
    private set
  var videoOutput: VideoPipelineOutput? = null
    private set
  var codeScannerOutput: BarcodeScannerOutput? = null
    private set

  val size: Int
    get() {
      var size = 0
      if (previewOutput != null) size++
      if (photoOutput != null) size++
      if (videoOutput != null) size++
      if (codeScannerOutput != null) size++
      return size
    }

  override fun equals(other: Any?): Boolean {
    if (other !is CameraOutputs) return false
    return this.cameraId == other.cameraId &&
      this.preview?.surface == other.preview?.surface &&
      this.preview?.targetSize == other.preview?.targetSize &&
      this.photo?.targetSize == other.photo?.targetSize &&
      this.photo?.format == other.photo?.format &&
      this.video?.enableRecording == other.video?.enableRecording &&
      this.video?.targetSize == other.video?.targetSize &&
      this.video?.format == other.video?.format &&
      this.codeScanner?.options == other.codeScanner?.options &&
      this.enableHdr == other.enableHdr
  }

  override fun hashCode(): Int {
    var result = cameraId.hashCode()
    result += (preview?.hashCode() ?: 0)
    result += (photo?.hashCode() ?: 0)
    result += (video?.hashCode() ?: 0)
    result += (codeScanner?.hashCode() ?: 0)
    return result
  }

  override fun close() {
    previewOutput?.close()
    photoOutput?.close()
    videoOutput?.close()
    codeScannerOutput?.close()
  }

  override fun toString(): String {
    val strings = arrayListOf<String>()
    previewOutput?.let { strings.add(it.toString()) }
    photoOutput?.let { strings.add(it.toString()) }
    videoOutput?.let { strings.add(it.toString()) }
    codeScannerOutput?.let { strings.add(it.toString()) }
    return strings.joinToString(", ", "[", "]")
  }

  init {
    val characteristics = cameraManager.getCameraCharacteristics(cameraId)
    val isMirrored = characteristics.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT

    Log.i(TAG, "Preparing Outputs for Camera $cameraId...")

    // Preview output: Low resolution repeating images (SurfaceView)
    if (preview != null) {
      Log.i(TAG, "Adding native preview view output.")
      val previewSizeAspectRatio = if (preview.targetSize != null) {
        preview.targetSize.bigger.toDouble() / preview.targetSize.smaller
      } else {
        null
      }
      previewOutput = SurfaceOutput(
        preview.surface,
        characteristics.getPreviewTargetSize(previewSizeAspectRatio),
        SurfaceOutput.OutputType.PREVIEW
      )
    }

    // Photo output: High quality still images (takePhoto())
    if (photo != null) {
      val size = characteristics.getPhotoSizes(photo.format).closestToOrMax(photo.targetSize)

      val imageReader = ImageReader.newInstance(size.width, size.height, photo.format, PHOTO_OUTPUT_BUFFER_SIZE)
      imageReader.setOnImageAvailableListener({ reader ->
        val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener
        callback.onPhotoCaptured(image)
      }, CameraQueues.cameraQueue.handler)

      Log.i(TAG, "Adding ${size.width}x${size.height} photo output. (Format: ${photo.format})")
      photoOutput = ImageReaderOutput(imageReader, SurfaceOutput.OutputType.PHOTO)
    }

    // Video output: High resolution repeating images (startRecording() or useFrameProcessor())
    if (video != null) {
      val format = video.format.toImageFormat()
      val size = characteristics.getVideoSizes(cameraId, format).closestToOrMax(video.targetSize)
      val enableFrameProcessor = video.enableFrameProcessor ?: false
      val videoPipeline = VideoPipeline(size.width, size.height, video.format, isMirrored, enableFrameProcessor)

      Log.i(TAG, "Adding ${size.width}x${size.height} video output. (Format: ${video.format})")
      videoOutput = VideoPipelineOutput(videoPipeline, SurfaceOutput.OutputType.VIDEO)
    }

    // Code Scanner
    if (codeScanner != null) {
      val format = ImageFormat.YUV_420_888
      val targetSize = Size(1280, 720)
      val size = characteristics.getVideoSizes(cameraId, format).closestToOrMax(targetSize)
      val pipeline = CodeScannerPipeline(size, format, codeScanner)

      Log.i(TAG, "Adding ${size.width}x${size.height} code scanner output. (Code Types: ${codeScanner.options.codeTypes})")
      codeScannerOutput = BarcodeScannerOutput(pipeline)
    }

    Log.i(TAG, "Prepared $size Outputs for Camera $cameraId!")
  }
}

@@ -5,12 +5,12 @@ import android.util.Log
import android.util.Size
import java.io.Closeable

open class ImageReaderOutput(private val imageReader: ImageReader, outputType: OutputType, dynamicRangeProfile: Long? = null) :
open class PhotoOutput(private val imageReader: ImageReader, enableHdr: Boolean = false) :
  SurfaceOutput(
    imageReader.surface,
    Size(imageReader.width, imageReader.height),
    outputType,
    dynamicRangeProfile
    OutputType.PHOTO,
    enableHdr
  ),
  Closeable {
  override fun close() {

@@ -14,7 +14,7 @@ open class SurfaceOutput(
  val surface: Surface,
  val size: Size,
  val outputType: OutputType,
  private val dynamicRangeProfile: Long? = null,
  private val enableHdr: Boolean = false,
  private val closeSurfaceOnEnd: Boolean = false
) : Closeable {
  companion object {
@@ -37,9 +37,12 @@ open class SurfaceOutput(
  fun toOutputConfiguration(characteristics: CameraCharacteristics): OutputConfiguration {
    val result = OutputConfiguration(surface)
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      if (dynamicRangeProfile != null) {
        result.dynamicRangeProfile = dynamicRangeProfile
        Log.i(TAG, "Using dynamic range profile ${result.dynamicRangeProfile} for $outputType output.")
      if (enableHdr) {
        val profile = characteristics.get(CameraCharacteristics.REQUEST_RECOMMENDED_TEN_BIT_DYNAMIC_RANGE_PROFILE)
        if (profile != null) {
          result.dynamicRangeProfile = profile
          Log.i(TAG, "Using dynamic range profile ${result.dynamicRangeProfile} for $outputType output.")
        }
      }
      if (supportsOutputType(characteristics, outputType)) {
        result.streamUseCase = outputType.toOutputType().toLong()

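Outputs now resolve the 10-bit dynamic-range profile themselves from a simple enableHdr flag instead of being handed a raw profile value. A minimal sketch of building the Camera2 OutputConfiguration for an HDR photo output, based on the constructors and method above; the ImageReader and CameraCharacteristics setup is assumed:

// Hedged sketch - illustrative only.
val photoOutput = PhotoOutput(imageReader, enableHdr = true)
val outputConfiguration = photoOutput.toOutputConfiguration(characteristics)
// On API 33+ this picks the camera's recommended 10-bit profile, if the device reports one.
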
@@ -5,12 +5,12 @@ import android.util.Size
import com.mrousavy.camera.core.VideoPipeline
import java.io.Closeable

class VideoPipelineOutput(val videoPipeline: VideoPipeline, outputType: OutputType, dynamicRangeProfile: Long? = null) :
class VideoPipelineOutput(val videoPipeline: VideoPipeline, enableHdr: Boolean = false) :
  SurfaceOutput(
    videoPipeline.surface,
    Size(videoPipeline.width, videoPipeline.height),
    outputType,
    dynamicRangeProfile
    OutputType.VIDEO,
    enableHdr
  ),
  Closeable {
  override fun close() {

@@ -6,7 +6,7 @@ import android.util.Size
import android.view.SurfaceHolder
import kotlin.math.abs

private fun getMaximumPreviewSize(): Size {
fun getMaximumPreviewSize(): Size {
  // See https://developer.android.com/reference/android/hardware/camera2/params/StreamConfigurationMap
  // According to the Android Developer documentation, PREVIEW streams can have a resolution
  // of up to the phone's display's resolution, with a maximum of 1920x1080.

@@ -10,7 +10,6 @@ import android.os.Build
import android.util.Log
import com.mrousavy.camera.core.CameraQueues
import com.mrousavy.camera.core.CameraSessionCannotBeConfiguredError
import com.mrousavy.camera.core.outputs.CameraOutputs
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException
import kotlinx.coroutines.suspendCancellableCoroutine
@@ -20,7 +19,7 @@ private var sessionId = 1000

suspend fun CameraDevice.createCaptureSession(
  cameraManager: CameraManager,
  outputs: CameraOutputs,
  outputs: List<OutputConfiguration>,
  onClosed: (session: CameraCaptureSession) -> Unit,
  queue: CameraQueues.CameraQueue
): CameraCaptureSession =
@@ -42,7 +41,7 @@ suspend fun CameraDevice.createCaptureSession(

      override fun onConfigureFailed(session: CameraCaptureSession) {
        Log.e(TAG, "Camera $id: Failed to configure Capture Session #$sessionId!")
        continuation.resumeWithException(CameraSessionCannotBeConfiguredError(id, outputs))
        continuation.resumeWithException(CameraSessionCannotBeConfiguredError(id))
      }

      override fun onClosed(session: CameraCaptureSession) {
@@ -52,36 +51,12 @@ suspend fun CameraDevice.createCaptureSession(
      }
    }

    val outputConfigurations = arrayListOf<OutputConfiguration>()
    outputs.previewOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    outputs.photoOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    outputs.videoOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    outputs.codeScannerOutput?.let { output ->
      outputConfigurations.add(output.toOutputConfiguration(characteristics))
    }
    if (outputs.enableHdr == true && Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
      val supportedProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
      val hdrProfile = supportedProfiles?.bestProfile ?: supportedProfiles?.supportedProfiles?.firstOrNull()
      if (hdrProfile != null) {
        Log.i(TAG, "Camera $id: Using HDR Profile $hdrProfile...")
        outputConfigurations.forEach { it.dynamicRangeProfile = hdrProfile }
      } else {
        Log.w(TAG, "Camera $id: HDR was enabled, but the device does not support any matching HDR profile!")
      }
    }

    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      Log.i(TAG, "Using new API (>=28)")
      val config = SessionConfiguration(SessionConfiguration.SESSION_REGULAR, outputConfigurations, queue.executor, callback)
      val config = SessionConfiguration(SessionConfiguration.SESSION_REGULAR, outputs, queue.executor, callback)
      this.createCaptureSession(config)
    } else {
      Log.i(TAG, "Using legacy API (<28)")
      this.createCaptureSessionByOutputConfigurations(outputConfigurations, callback, queue.handler)
      this.createCaptureSessionByOutputConfigurations(outputs, callback, queue.handler)
    }
  }

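The extension no longer unpacks a CameraOutputs bundle; the caller builds the OutputConfigurations itself and passes a plain list. A minimal call-site sketch, grounded in the new signature above (the previewOutput/photoOutput and characteristics values are assumed to already exist):

// Hedged sketch - illustrative only.
val configurations = listOf(
  previewOutput.toOutputConfiguration(characteristics),
  photoOutput.toOutputConfiguration(characteristics)
)
val session = cameraDevice.createCaptureSession(
  cameraManager,
  configurations,
  onClosed = { closedSession -> Log.i(TAG, "Session $closedSession closed") },
  queue = CameraQueues.cameraQueue
)
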
@@ -0,0 +1,14 @@
package com.mrousavy.camera.types

enum class AutoFocusSystem(override val unionValue: String) : JSUnionValue {
  CONTRAST_DETECTION("contrast-detection"),
  NONE("none");

  companion object : JSUnionValue.Companion<AutoFocusSystem> {
    override fun fromUnionValue(unionValue: String?): AutoFocusSystem =
      when (unionValue) {
        "contrast-detection" -> CONTRAST_DETECTION
        else -> NONE
      }
  }
}

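Unknown union strings fall back to NONE instead of producing a nullable result. A tiny sketch of the behaviour above:

// Hedged sketch - illustrative only.
val af1 = AutoFocusSystem.fromUnionValue("contrast-detection") // CONTRAST_DETECTION
val af2 = AutoFocusSystem.fromUnionValue("phase-detection")    // falls back to NONE
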
@@ -0,0 +1,106 @@
package com.mrousavy.camera.types

import android.util.Size
import com.facebook.react.bridge.ReadableMap
import com.mrousavy.camera.core.InvalidTypeScriptUnionError

data class CameraDeviceFormat(
  val videoWidth: Int,
  val videoHeight: Int,
  val photoWidth: Int,
  val photoHeight: Int,
  val minFps: Double,
  val maxFps: Double,
  val minISO: Double,
  val maxISO: Double,
  val fieldOfView: Double,
  val maxZoom: Double,
  val videoStabilizationModes: Array<VideoStabilizationMode>,
  val autoFocusSystem: AutoFocusSystem,
  val supportsVideoHDR: Boolean,
  val supportsPhotoHDR: Boolean,
  val pixelFormats: Array<PixelFormat>,
  val supportsDepthCapture: Boolean
) {
  val photoSize: Size
    get() = Size(photoWidth, photoHeight)
  val videoSize: Size
    get() = Size(videoWidth, videoHeight)

  companion object {
    fun fromJSValue(value: ReadableMap): CameraDeviceFormat {
      val modes = value.getArray("videoStabilizationModes") ?: throw InvalidTypeScriptUnionError("format", value.toString())
      val videoStabilizationModes = modes.toArrayList().map { VideoStabilizationMode.fromUnionValue(it as String) }

      val formats = value.getArray("pixelFormats") ?: throw InvalidTypeScriptUnionError("format", value.toString())
      val pixelFormats = formats.toArrayList().map { PixelFormat.fromUnionValue(it as String) }

      val autoFocusSystem = AutoFocusSystem.fromUnionValue(value.getString("autoFocusSystem"))

      return CameraDeviceFormat(
        value.getInt("videoWidth"),
        value.getInt("videoHeight"),
        value.getInt("photoWidth"),
        value.getInt("photoHeight"),
        value.getDouble("minFps"),
        value.getDouble("maxFps"),
        value.getDouble("minISO"),
        value.getDouble("maxISO"),
        value.getDouble("fieldOfView"),
        value.getDouble("maxZoom"),
        videoStabilizationModes.toTypedArray(),
        autoFocusSystem,
        value.getBoolean("supportsVideoHDR"),
        value.getBoolean("supportsPhotoHDR"),
        pixelFormats.toTypedArray(),
        value.getBoolean("supportsDepthCapture")
      )
    }
  }

  override fun equals(other: Any?): Boolean {
    if (this === other) return true
    if (javaClass != other?.javaClass) return false

    other as CameraDeviceFormat

    if (videoWidth != other.videoWidth) return false
    if (videoHeight != other.videoHeight) return false
    if (photoWidth != other.photoWidth) return false
    if (photoHeight != other.photoHeight) return false
    if (minFps != other.minFps) return false
    if (maxFps != other.maxFps) return false
    if (minISO != other.minISO) return false
    if (maxISO != other.maxISO) return false
    if (fieldOfView != other.fieldOfView) return false
    if (maxZoom != other.maxZoom) return false
    if (!videoStabilizationModes.contentEquals(other.videoStabilizationModes)) return false
    if (autoFocusSystem != other.autoFocusSystem) return false
    if (supportsVideoHDR != other.supportsVideoHDR) return false
    if (supportsPhotoHDR != other.supportsPhotoHDR) return false
    if (!pixelFormats.contentEquals(other.pixelFormats)) return false
    if (supportsDepthCapture != other.supportsDepthCapture) return false

    return true
  }

  override fun hashCode(): Int {
    var result = videoWidth
    result = 31 * result + videoHeight
    result = 31 * result + photoWidth
    result = 31 * result + photoHeight
    result = 31 * result + minFps.hashCode()
    result = 31 * result + maxFps.hashCode()
    result = 31 * result + minISO.hashCode()
    result = 31 * result + maxISO.hashCode()
    result = 31 * result + fieldOfView.hashCode()
    result = 31 * result + maxZoom.hashCode()
    result = 31 * result + videoStabilizationModes.contentHashCode()
    result = 31 * result + autoFocusSystem.hashCode()
    result = 31 * result + supportsVideoHDR.hashCode()
    result = 31 * result + supportsPhotoHDR.hashCode()
    result = 31 * result + pixelFormats.contentHashCode()
    result = 31 * result + supportsDepthCapture.hashCode()
    return result
  }
}

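A format coming over the bridge is parsed into this data class; because it holds arrays, equals/hashCode are written by hand with contentEquals/contentHashCode so configuration diffing compares by value. A minimal sketch, assuming jsFormat is the ReadableMap passed from JS (e.g. the format prop):

// Hedged sketch - illustrative only; throws InvalidTypeScriptUnionError on bad union values.
val format = CameraDeviceFormat.fromJSValue(jsFormat)
Log.i(TAG, "Video: ${format.videoSize}, Photo: ${format.photoSize}, AF: ${format.autoFocusSystem}")
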
@@ -33,13 +33,13 @@ enum class Orientation(override val unionValue: String) : JSUnionValue {
  }

  companion object : JSUnionValue.Companion<Orientation> {
    override fun fromUnionValue(unionValue: String?): Orientation? =
    override fun fromUnionValue(unionValue: String?): Orientation =
      when (unionValue) {
        "portrait" -> PORTRAIT
        "landscape-right" -> LANDSCAPE_RIGHT
        "portrait-upside-down" -> PORTRAIT_UPSIDE_DOWN
        "landscape-left" -> LANDSCAPE_LEFT
        else -> null
        else -> PORTRAIT
      }

    fun fromRotationDegrees(rotationDegrees: Int): Orientation =

@@ -1,6 +1,7 @@
package com.mrousavy.camera.types

import android.graphics.ImageFormat
import com.mrousavy.camera.core.InvalidTypeScriptUnionError
import com.mrousavy.camera.core.PixelFormatNotSupportedError

enum class PixelFormat(override val unionValue: String) : JSUnionValue {
@@ -24,13 +25,13 @@ enum class PixelFormat(override val unionValue: String) : JSUnionValue {
        else -> UNKNOWN
      }

    override fun fromUnionValue(unionValue: String?): PixelFormat? =
    override fun fromUnionValue(unionValue: String?): PixelFormat =
      when (unionValue) {
        "yuv" -> YUV
        "rgb" -> RGB
        "native" -> NATIVE
        "unknown" -> UNKNOWN
        else -> null
        else -> throw InvalidTypeScriptUnionError("pixelFormat", unionValue)
      }
  }
}

@@ -5,6 +5,7 @@ import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_
import android.hardware.camera2.CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_PREVIEW_STABILIZATION
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_OFF
import android.hardware.camera2.CameraMetadata.LENS_OPTICAL_STABILIZATION_MODE_ON
import com.mrousavy.camera.core.InvalidTypeScriptUnionError

enum class VideoStabilizationMode(override val unionValue: String) : JSUnionValue {
  OFF("off"),
@@ -28,13 +29,13 @@ enum class VideoStabilizationMode(override val unionValue: String) : JSUnionValu
  }

  companion object : JSUnionValue.Companion<VideoStabilizationMode> {
    override fun fromUnionValue(unionValue: String?): VideoStabilizationMode? =
    override fun fromUnionValue(unionValue: String?): VideoStabilizationMode =
      when (unionValue) {
        "off" -> OFF
        "standard" -> STANDARD
        "cinematic" -> CINEMATIC
        "cinematic-extended" -> CINEMATIC_EXTENDED
        else -> null
        else -> throw InvalidTypeScriptUnionError("videoStabilizationMode", unionValue)
      }

    fun fromDigitalVideoStabilizationMode(stabiliazionMode: Int): VideoStabilizationMode =

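Like PixelFormat above, the parser is now non-nullable and rejects unknown strings instead of returning null. A tiny sketch of the new behaviour:

// Hedged sketch - illustrative only.
val mode = VideoStabilizationMode.fromUnionValue("cinematic") // CINEMATIC
// VideoStabilizationMode.fromUnionValue("ultra")             // would throw InvalidTypeScriptUnionError
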
@@ -507,7 +507,7 @@ PODS:
    - libwebp (~> 1.0)
    - SDWebImage/Core (~> 5.10)
  - SocketRocket (0.6.1)
  - VisionCamera (3.4.0):
  - VisionCamera (3.5.1):
    - React
    - React-callinvoker
    - React-Core
@@ -747,7 +747,7 @@ SPEC CHECKSUMS:
  SDWebImage: a7f831e1a65eb5e285e3fb046a23fcfbf08e696d
  SDWebImageWebPCoder: 908b83b6adda48effe7667cd2b7f78c897e5111d
  SocketRocket: f32cd54efbe0f095c4d7594881e52619cfe80b17
  VisionCamera: eead9df29ac5935d5685b5ecaea3ae8b6da84bff
  VisionCamera: 2f5f9841150719f73b9efb9e0c8fa48fe8556f2d
  Yoga: 8796b55dba14d7004f980b54bcc9833ee45b28ce

PODFILE CHECKSUM: 27f53791141a3303d814e09b55770336416ff4eb