feat: Create C++/OpenGL-based Video Pipeline for more efficient Recording and Frame Processing (#1721)
* Create `VideoPipeline` C++
* Remove folly C++ dependency
* Create `VideoPipeline` HybridClass
* Set up OpenGL
* Add outputs
* Update VideoPipeline.kt
* Bump `minSdkVersion` to `26`
* Create `VideoPipelineOutput`
* Create output funcs
* Set output pipelines
* Add FP/Recording on Output change
* Update VideoPipeline.cpp
* Create `PassThroughShader`
* Try to draw? I have honestly no idea
* fix: Fix `setFrameProcessor` nameclash
* fix: Fix `high-res-sizes` being null
* Add preview output
* Create `OpenGLContext.cpp`
* Make screen red
* This _should_ work (MESSY)
* FINALLY RENDER TEXTURE
* Rotate
* Mirror
* Clean up a bit
* Add `getWidth()`/`getHeight()`
* Cleanup
* fix: Use uniforms instead of attributes
* Draw with passed rotation/mirror mode
* feat: Use SurfaceTexture's transformMatrix in OpenGL pipeline (#1727)
* feat: Use Transform Matrix from SurfaceTexture
* Rename
* feat: Fix OpenGL Shader
* Update VideoPipeline.kt
* Measure elapsed time
* fix: Fix low resolution
* Render to offscreen
* Render to every context
* Release `SurfaceTexture` on close
* Use one OpenGL context to render to multiple EGLSurfaces
* Clean up a bit
* fix: Fix recording pipeline not triggering
* fix: Synchronize close to prevent nulls
* Update OpenGLRenderer.cpp
* fix: Hardcode Android recorder size
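Before the diff, a minimal sketch of how the new pieces fit together (not part of the commit — the `wireVideoPipeline` helper and the 1920×1080 size are illustrative; `VideoPipeline`, `RecordingSession` and `FrameProcessor` are the real types introduced or changed below):

```kotlin
import android.graphics.ImageFormat
import android.hardware.camera2.CaptureRequest
import android.view.Surface
import com.mrousavy.camera.frameprocessor.FrameProcessor
import com.mrousavy.camera.utils.RecordingSession
import com.mrousavy.camera.utils.VideoPipeline

// One OpenGL-backed pipeline replaces the old per-frame ImageReader/ImageWriter copies.
fun wireVideoPipeline(request: CaptureRequest.Builder,
                      frameProcessor: FrameProcessor?,
                      recording: RecordingSession?,
                      previewSurface: Surface?): VideoPipeline {
  val pipeline = VideoPipeline(1920, 1080, ImageFormat.PRIVATE)

  // The Camera2 capture session renders into the pipeline's input SurfaceTexture...
  request.addTarget(pipeline.surface)

  // ...and the pipeline re-renders each frame into up to three outputs.
  // Passing null removes an output channel again.
  pipeline.setFrameProcessorOutput(frameProcessor)
  pipeline.setRecordingSessionOutput(recording)
  pipeline.setPreviewOutput(previewSurface)
  return pipeline
}
```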
@@ -23,7 +23,6 @@ import com.mrousavy.camera.extensions.createPhotoCaptureRequest
 import com.mrousavy.camera.extensions.openCamera
 import com.mrousavy.camera.extensions.tryClose
 import com.mrousavy.camera.extensions.zoomed
-import com.mrousavy.camera.frameprocessor.Frame
 import com.mrousavy.camera.frameprocessor.FrameProcessor
 import com.mrousavy.camera.parsers.Flash
 import com.mrousavy.camera.parsers.Orientation
@@ -88,8 +87,17 @@ class CameraSession(private val context: Context,
   private val mutex = Mutex()
   private var isRunning = false
   private var enableTorch = false
+  // Video Outputs
   private var recording: RecordingSession? = null
-  private var frameProcessor: FrameProcessor? = null
+    set(value) {
+      field = value
+      updateVideoOutputs()
+    }
+  var frameProcessor: FrameProcessor? = null
+    set(value) {
+      field = value
+      updateVideoOutputs()
+    }
 
   override val coroutineContext: CoroutineContext = CameraQueues.cameraQueue.coroutineDispatcher
 
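Replacing the old `setFrameProcessor()` function with a property here (see the `fix: Fix setFrameProcessor nameclash` bullet) routes every assignment of `recording` or `frameProcessor` through `updateVideoOutputs()`, defined in a later hunk. A standalone sketch of the same observable-setter idiom, with hypothetical names that are not VisionCamera APIs:

```kotlin
// `OutputsHolder`, `sink` and `rewire()` are illustrative stand-ins.
class OutputsHolder {
  var sink: ((frame: Any) -> Unit)? = null
    set(value) {
      field = value
      rewire() // runs on every assignment, like updateVideoOutputs()
    }

  private fun rewire() {
    println(if (sink != null) "output attached" else "output removed")
  }
}
```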
@@ -130,8 +138,14 @@ class CameraSession(private val context: Context,
       Log.i(TAG, "Nothing changed in configuration, canceling..")
     }
 
-    this.cameraId = cameraId
+    // 1. Close previous outputs
+    this.outputs?.close()
+    // 2. Assign new outputs
     this.outputs = outputs
+    // 3. Update with existing render targets (surfaces)
+    updateVideoOutputs()
+
+    this.cameraId = cameraId
     launch {
       startRunning()
     }
@@ -183,8 +197,12 @@ class CameraSession(private val context: Context,
     }
   }
 
-  fun setFrameProcessor(frameProcessor: FrameProcessor?) {
-    this.frameProcessor = frameProcessor
+  private fun updateVideoOutputs() {
+    val videoPipeline = outputs?.videoOutput?.videoPipeline ?: return
+    val previewOutput = outputs?.previewOutput
+    videoPipeline.setRecordingSessionOutput(this.recording)
+    videoPipeline.setFrameProcessorOutput(this.frameProcessor)
+    videoPipeline.setPreviewOutput(previewOutput?.surface)
   }
 
   suspend fun takePhoto(qualityPrioritization: QualityPrioritization,
@@ -229,20 +247,6 @@ class CameraSession(private val context: Context,
     photoOutputSynchronizer.set(image.timestamp, image)
   }
 
-  override fun onVideoFrameCaptured(image: Image) {
-    // TODO: Correctly get orientation and everything
-    val frame = Frame(image, System.currentTimeMillis(), Orientation.PORTRAIT, false)
-    frame.incrementRefCount()
-
-    // Call (Skia-) Frame Processor
-    frameProcessor?.call(frame)
-
-    // Write Image to the Recording
-    recording?.appendImage(image)
-
-    frame.decrementRefCount()
-  }
-
   suspend fun startRecording(enableAudio: Boolean,
                              codec: VideoCodec,
                              fileType: VideoFileType,
@@ -253,7 +257,7 @@ class CameraSession(private val context: Context,
     val outputs = outputs ?: throw CameraNotReadyError()
     val videoOutput = outputs.videoOutput ?: throw VideoNotEnabledError()
 
-    val recording = RecordingSession(context, enableAudio, videoOutput.size, fps, codec, orientation, fileType, callback, onError)
+    val recording = RecordingSession(context, videoOutput.size, enableAudio, fps, codec, orientation, fileType, callback, onError)
     recording.start()
     this.recording = recording
   }
@@ -497,7 +501,8 @@ class CameraSession(private val context: Context,
     val captureRequest = camera.createCaptureRequest(template)
     outputs.previewOutput?.let { output ->
       Log.i(TAG, "Adding output surface ${output.outputType}..")
-      captureRequest.addTarget(output.surface)
+      // TODO: Add here again?
+      // captureRequest.addTarget(output.surface)
     }
     outputs.videoOutput?.let { output ->
       Log.i(TAG, "Adding output surface ${output.outputType}..")
@@ -96,7 +96,7 @@ class CameraView(context: Context) : FrameLayout(context) {
   internal var frameProcessor: FrameProcessor? = null
     set(value) {
       field = value
-      cameraSession.setFrameProcessor(frameProcessor)
+      cameraSession.frameProcessor = frameProcessor
     }
 
   private val inputOrientation: Orientation
@@ -180,11 +180,6 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
 
   @ReactMethod
   fun requestCameraPermission(promise: Promise) {
-    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {
-      // API 21 and below always grants permission on app install
-      return promise.resolve(PermissionStatus.GRANTED.unionValue)
-    }
-
     val activity = reactApplicationContext.currentActivity
     if (activity is PermissionAwareActivity) {
       val currentRequestCode = RequestCode++
@@ -205,11 +200,6 @@ class CameraViewModule(reactContext: ReactApplicationContext): ReactContextBaseJ
 
   @ReactMethod
   fun requestMicrophonePermission(promise: Promise) {
-    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {
-      // API 21 and below always grants permission on app install
-      return promise.resolve(PermissionStatus.GRANTED.unionValue)
-    }
-
     val activity = reactApplicationContext.currentActivity
     if (activity is PermissionAwareActivity) {
       val currentRequestCode = RequestCode++
@@ -63,10 +63,6 @@ fun CameraCharacteristics.getVideoSizes(cameraId: String, format: Int): List<Siz
 fun CameraCharacteristics.getPhotoSizes(format: Int): List<Size> {
   val config = this.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)!!
   val sizes = config.getOutputSizes(format) ?: emptyArray()
-  val highResSizes = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
-    config.getHighResolutionOutputSizes(format)
-  } else {
-    null
-  } ?: emptyArray()
+  val highResSizes = config.getHighResolutionOutputSizes(format) ?: emptyArray()
   return sizes.plus(highResSizes).toList()
 }
@@ -8,7 +8,6 @@ import android.hardware.camera2.params.OutputConfiguration
 import android.hardware.camera2.params.SessionConfiguration
 import android.os.Build
 import android.util.Log
-import android.view.Surface
 import androidx.annotation.RequiresApi
 import com.mrousavy.camera.CameraQueues
 import com.mrousavy.camera.CameraSessionCannotBeConfiguredError
@@ -63,47 +62,35 @@ suspend fun CameraDevice.createCaptureSession(cameraManager: CameraManager,
     }
   }
 
-  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
-    // API >= 24
-    val outputConfigurations = arrayListOf<OutputConfiguration>()
-    outputs.previewOutput?.let { output ->
-      outputConfigurations.add(output.toOutputConfiguration(characteristics))
-    }
-    outputs.photoOutput?.let { output ->
-      outputConfigurations.add(output.toOutputConfiguration(characteristics))
-    }
-    outputs.videoOutput?.let { output ->
-      outputConfigurations.add(output.toOutputConfiguration(characteristics))
-    }
-    if (outputs.enableHdr == true && Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
-      val supportedProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
-      val hdrProfile = supportedProfiles?.bestProfile ?: supportedProfiles?.supportedProfiles?.firstOrNull()
-      if (hdrProfile != null) {
-        Log.i(TAG, "Camera $id: Using HDR Profile $hdrProfile...")
-        outputConfigurations.forEach { it.dynamicRangeProfile = hdrProfile }
-      } else {
-        Log.w(TAG, "Camera $id: HDR was enabled, but the device does not support any matching HDR profile!")
-      }
-    }
-
-    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
-      // API >=28
-      Log.i(TAG, "Using new API (>=28)")
-      val config = SessionConfiguration(sessionType.toSessionType(), outputConfigurations, queue.executor, callback)
-      this.createCaptureSession(config)
-    } else {
-      // API >=24
-      Log.i(TAG, "Using legacy API (<28)")
-      this.createCaptureSessionByOutputConfigurations(outputConfigurations, callback, queue.handler)
-    }
-  } else {
-    // API <24
-    Log.i(TAG, "Using legacy API (<24)")
-    val surfaces = arrayListOf<Surface>()
-    outputs.previewOutput?.let { surfaces.add(it.surface) }
-    outputs.photoOutput?.let { surfaces.add(it.surface) }
-    outputs.videoOutput?.let { surfaces.add(it.surface) }
-    this.createCaptureSession(surfaces, callback, queue.handler)
-  }
+  val outputConfigurations = arrayListOf<OutputConfiguration>()
+  outputs.previewOutput?.let { output ->
+    // TODO: add here again?
+    // outputConfigurations.add(output.toOutputConfiguration(characteristics))
+  }
+  outputs.photoOutput?.let { output ->
+    outputConfigurations.add(output.toOutputConfiguration(characteristics))
+  }
+  outputs.videoOutput?.let { output ->
+    outputConfigurations.add(output.toOutputConfiguration(characteristics))
+  }
+  if (outputs.enableHdr == true && Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
+    val supportedProfiles = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES)
+    val hdrProfile = supportedProfiles?.bestProfile ?: supportedProfiles?.supportedProfiles?.firstOrNull()
+    if (hdrProfile != null) {
+      Log.i(TAG, "Camera $id: Using HDR Profile $hdrProfile...")
+      outputConfigurations.forEach { it.dynamicRangeProfile = hdrProfile }
+    } else {
+      Log.w(TAG, "Camera $id: HDR was enabled, but the device does not support any matching HDR profile!")
+    }
+  }
+
+  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
+    Log.i(TAG, "Using new API (>=28)")
+    val config = SessionConfiguration(sessionType.toSessionType(), outputConfigurations, queue.executor, callback)
+    this.createCaptureSession(config)
+  } else {
+    Log.i(TAG, "Using legacy API (<28)")
+    this.createCaptureSessionByOutputConfigurations(outputConfigurations, callback, queue.handler)
+  }
 }
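With `minSdkVersion` now 26, the removed `API <24` branch (raw `Surface` list) was dead code, so session creation collapses to two paths. A condensed, self-contained sketch of the resulting control flow (simplified signature, not the library's actual helper):

```kotlin
import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CameraDevice
import android.hardware.camera2.params.OutputConfiguration
import android.hardware.camera2.params.SessionConfiguration
import android.os.Build
import android.os.Handler
import java.util.concurrent.Executor

// Two branches remain: SessionConfiguration on API 28+, OutputConfigurations below.
@Suppress("DEPRECATION")
fun CameraDevice.createSessionCompat(outputs: List<OutputConfiguration>,
                                     sessionType: Int,
                                     executor: Executor,
                                     handler: Handler,
                                     callback: CameraCaptureSession.StateCallback) {
  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
    createCaptureSession(SessionConfiguration(sessionType, outputs, executor, callback))
  } else {
    createCaptureSessionByOutputConfigurations(outputs, callback, handler)
  }
}
```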
@@ -1,7 +1,6 @@
 package com.mrousavy.camera.utils
 
 import android.content.Context
-import android.media.Image
 import android.media.ImageWriter
 import android.media.MediaCodec
 import android.media.MediaRecorder
@@ -13,12 +12,11 @@ import com.mrousavy.camera.RecorderError
 import com.mrousavy.camera.parsers.Orientation
 import com.mrousavy.camera.parsers.VideoCodec
 import com.mrousavy.camera.parsers.VideoFileType
-import com.mrousavy.camera.utils.outputs.CameraOutputs
 import java.io.File
 
 class RecordingSession(context: Context,
+                       val size: Size,
                        private val enableAudio: Boolean,
-                       private val videoSize: Size,
                        private val fps: Int? = null,
                        private val codec: VideoCodec = VideoCodec.H264,
                        private val orientation: Orientation,
@@ -40,14 +38,9 @@ class RecordingSession(context: Context,
   private val outputFile: File
   private var startTime: Long? = null
   private var imageWriter: ImageWriter? = null
-  val surface: Surface
+  val surface: Surface = MediaCodec.createPersistentInputSurface()
 
   init {
-    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {
-      throw Error("Video Recording is only supported on Devices running Android version 23 (M) or newer.")
-    }
-
-    surface = MediaCodec.createPersistentInputSurface()
-
     outputFile = File.createTempFile("mrousavy", fileType.toExtension(), context.cacheDir)
 
@@ -61,7 +54,7 @@ class RecordingSession(context: Context,
     recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4)
     recorder.setOutputFile(outputFile.absolutePath)
     recorder.setVideoEncodingBitRate(VIDEO_BIT_RATE)
-    recorder.setVideoSize(videoSize.width, videoSize.height)
+    recorder.setVideoSize(size.height, size.width)
     if (fps != null) recorder.setVideoFrameRate(fps)
 
     Log.i(TAG, "Using $codec Video Codec..")
@@ -74,7 +67,7 @@ class RecordingSession(context: Context,
       recorder.setAudioChannels(AUDIO_CHANNELS)
     }
     recorder.setInputSurface(surface)
-    recorder.setOrientationHint(orientation.toDegrees())
+    //recorder.setOrientationHint(orientation.toDegrees())
 
     recorder.setOnErrorListener { _, what, extra ->
       Log.e(TAG, "MediaRecorder Error: $what ($extra)")
@@ -109,10 +102,8 @@ class RecordingSession(context: Context,
       recorder.stop()
       recorder.release()
 
-      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
-        imageWriter?.close()
-        imageWriter = null
-      }
+      imageWriter?.close()
+      imageWriter = null
     } catch (e: Error) {
       Log.e(TAG, "Failed to stop MediaRecorder!", e)
     }
@@ -125,9 +116,6 @@ class RecordingSession(context: Context,
 
   fun pause() {
     synchronized(this) {
-      if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
-        throw Error("Pausing a recording is only supported on Devices running Android version 24 (N) or newer.")
-      }
       Log.i(TAG, "Pausing Recording Session..")
      recorder.pause()
     }
@@ -135,32 +123,13 @@ class RecordingSession(context: Context,
 
   fun resume() {
     synchronized(this) {
-      if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
-        throw Error("Resuming a recording is only supported on Devices running Android version 24 (N) or newer.")
-      }
       Log.i(TAG, "Resuming Recording Session..")
       recorder.resume()
     }
   }
 
-  fun appendImage(image: Image) {
-    synchronized(this) {
-      if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {
-        throw Error("Video Recording is only supported on Devices running Android version 23 (M) or newer.")
-      }
-
-      // TODO: Correctly mirror/flip Image in OpenGL pipeline, otherwise flipping camera while recording results in inverted frames
-
-      if (imageWriter == null) {
-        imageWriter = ImageWriter.newInstance(surface, CameraOutputs.VIDEO_OUTPUT_BUFFER_SIZE)
-      }
-      image.timestamp = System.nanoTime()
-      imageWriter!!.queueInputImage(image)
-    }
-  }
-
   override fun toString(): String {
     val audio = if (enableAudio) "with audio" else "without audio"
-    return "${videoSize.width} x ${videoSize.height} @ $fps FPS $codec $fileType $orientation RecordingSession ($audio)"
+    return "${size.width} x ${size.height} @ $fps FPS $codec $fileType $orientation RecordingSession ($audio)"
   }
 }
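The recorder side of the pipeline is plain `MediaRecorder` fed through a persistent input surface: created before `prepare()`, rendered into by OpenGL afterwards. A minimal standalone sketch of that setup, with illustrative bit rate, size and file values (the real session also wires audio, FPS and error callbacks):

```kotlin
import android.media.MediaCodec
import android.media.MediaRecorder
import android.view.Surface
import java.io.File

// Sketch of the MediaRecorder + persistent-surface setup used above.
fun createRecorder(outputFile: File): Pair<MediaRecorder, Surface> {
  val surface = MediaCodec.createPersistentInputSurface()
  val recorder = MediaRecorder()
  recorder.setVideoSource(MediaRecorder.VideoSource.SURFACE)
  recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4)
  recorder.setOutputFile(outputFile.absolutePath)
  recorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264)
  recorder.setVideoEncodingBitRate(10_000_000)
  recorder.setVideoSize(1080, 1920)
  recorder.setInputSurface(surface) // must be a persistent surface, set before prepare()
  recorder.prepare()
  return recorder to surface
}
```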
android/src/main/java/com/mrousavy/camera/utils/VideoPipeline.kt (new file, 185 lines)
@@ -0,0 +1,185 @@
package com.mrousavy.camera.utils

import android.graphics.ImageFormat
import android.graphics.SurfaceTexture
import android.media.ImageReader
import android.media.ImageWriter
import android.media.MediaRecorder
import android.util.Log
import android.view.Surface
import com.facebook.jni.HybridData
import com.mrousavy.camera.frameprocessor.Frame
import com.mrousavy.camera.frameprocessor.FrameProcessor
import com.mrousavy.camera.parsers.Orientation
import java.io.Closeable

/**
 * An OpenGL pipeline for streaming Camera Frames to one or more outputs.
 * Currently, [VideoPipeline] can stream to a [FrameProcessor] and a [MediaRecorder].
 *
 * @param [width] The width of the Frames to stream (> 0)
 * @param [height] The height of the Frames to stream (> 0)
 * @param [format] The format of the Frames to stream. ([ImageFormat.PRIVATE], [ImageFormat.YUV_420_888] or [ImageFormat.JPEG])
 */
@Suppress("KotlinJniMissingFunction")
class VideoPipeline(val width: Int,
                    val height: Int,
                    val format: Int = ImageFormat.PRIVATE): SurfaceTexture.OnFrameAvailableListener, Closeable {
  companion object {
    private const val MAX_IMAGES = 5
    private const val TAG = "VideoPipeline"
  }

  private val mHybridData: HybridData
  private var openGLTextureId: Int? = null
  private var transformMatrix = FloatArray(16)
  private var isActive = true

  // Output 1
  private var frameProcessor: FrameProcessor? = null
  private var imageReader: ImageReader? = null

  // Output 2
  private var recordingSession: RecordingSession? = null

  // Output 3
  private var previewSurface: Surface? = null

  // Input
  private val surfaceTexture: SurfaceTexture
  val surface: Surface

  init {
    mHybridData = initHybrid(width, height)
    surfaceTexture = SurfaceTexture(false)
    surfaceTexture.setDefaultBufferSize(width, height)
    surfaceTexture.setOnFrameAvailableListener(this)
    surface = Surface(surfaceTexture)
  }

  override fun close() {
    synchronized(this) {
      isActive = false
      imageReader?.close()
      imageReader = null
      frameProcessor = null
      recordingSession = null
      surfaceTexture.release()
      mHybridData.resetNative()
    }
  }

  override fun onFrameAvailable(surfaceTexture: SurfaceTexture) {
    synchronized(this) {
      if (!isActive) return@synchronized

      // 1. Attach Surface to OpenGL context
      if (openGLTextureId == null) {
        openGLTextureId = getInputTextureId()
        surfaceTexture.attachToGLContext(openGLTextureId!!)
        Log.i(TAG, "Attached Texture to Context $openGLTextureId")
      }

      // 2. Prepare the OpenGL context (eglMakeCurrent)
      onBeforeFrame()

      // 3. Update the OpenGL texture
      surfaceTexture.updateTexImage()

      // 4. Get the transform matrix from the SurfaceTexture (rotations/scales applied by Camera)
      surfaceTexture.getTransformMatrix(transformMatrix)

      // 5. Draw it with applied rotation/mirroring
      onFrame(transformMatrix)
    }
  }

  private fun getImageReader(): ImageReader {
    val imageReader = ImageReader.newInstance(width, height, format, MAX_IMAGES)
    imageReader.setOnImageAvailableListener({ reader ->
      Log.i("VideoPipeline", "ImageReader::onImageAvailable!")
      val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener

      // TODO: Get correct orientation and isMirrored
      val frame = Frame(image, image.timestamp, Orientation.PORTRAIT, false)
      frame.incrementRefCount()
      frameProcessor?.call(frame)
      frame.decrementRefCount()
    }, null)
    return imageReader
  }

  /**
   * Configures the Pipeline to also call the given [FrameProcessor].
   * * If the [frameProcessor] is `null`, this output channel will be removed.
   * * If the [frameProcessor] is not `null`, the [VideoPipeline] will create Frames
   * using an [ImageWriter] and call the [FrameProcessor] with those Frames.
   */
  fun setFrameProcessorOutput(frameProcessor: FrameProcessor?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height FrameProcessor Output...")
      this.frameProcessor = frameProcessor

      if (frameProcessor != null) {
        if (this.imageReader == null) {
          // 1. Create new ImageReader that just calls the Frame Processor
          this.imageReader = getImageReader()
        }

        // 2. Configure OpenGL pipeline to stream Frames into the ImageReader's surface
        setFrameProcessorOutputSurface(imageReader!!.surface)
      } else {
        // 1. Configure OpenGL pipeline to stop streaming Frames into the ImageReader's surface
        removeFrameProcessorOutputSurface()

        // 2. Close the ImageReader
        this.imageReader?.close()
        this.imageReader = null
      }
    }
  }

  /**
   * Configures the Pipeline to also write Frames to a Surface from a [MediaRecorder].
   * * If the [surface] is `null`, this output channel will be removed.
   * * If the [surface] is not `null`, the [VideoPipeline] will write Frames to this Surface.
   */
  fun setRecordingSessionOutput(recordingSession: RecordingSession?) {
    synchronized(this) {
      Log.i(TAG, "Setting $width x $height RecordingSession Output...")
      if (recordingSession != null) {
        // Configure OpenGL pipeline to stream Frames into the Recording Session's surface
        setRecordingSessionOutputSurface(recordingSession.surface)
        this.recordingSession = recordingSession
      } else {
        // Configure OpenGL pipeline to stop streaming Frames into the Recording Session's surface
        removeRecordingSessionOutputSurface()
        this.recordingSession = null
      }
    }
  }

  fun setPreviewOutput(surface: Surface?) {
    synchronized(this) {
      Log.i(TAG, "Setting Preview Output...")
      if (surface != null) {
        setPreviewOutputSurface(surface)
        this.previewSurface = surface
      } else {
        removePreviewOutputSurface()
        this.previewSurface = null
      }
    }
  }

  private external fun getInputTextureId(): Int
  private external fun onBeforeFrame()
  private external fun onFrame(transformMatrix: FloatArray)
  private external fun setFrameProcessorOutputSurface(surface: Any)
  private external fun removeFrameProcessorOutputSurface()
  private external fun setRecordingSessionOutputSurface(surface: Any)
  private external fun removeRecordingSessionOutputSurface()
  private external fun setPreviewOutputSurface(surface: Any)
  private external fun removePreviewOutputSurface()
  private external fun initHybrid(width: Int, height: Int): HybridData
}
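Two details worth noting in this file: `SurfaceTexture(false)` uses the `SurfaceTexture(singleBufferMode)` constructor added in API 26 (one reason for the `minSdkVersion` bump), which creates the texture detached from any GL context so the native OpenGL context can claim it later via `attachToGLContext()`. And Output 1 is plain `ImageReader` plumbing: the GL pipeline renders into the reader's surface, and each drawn frame arrives as an `Image`. A stripped-down sketch of that mechanism without the ref-counted `Frame` wrapper (names are illustrative):

```kotlin
import android.graphics.ImageFormat
import android.media.Image
import android.media.ImageReader

// Frames rendered into `reader.surface` arrive on this listener, one Image per GL draw.
// VisionCamera wraps the Image in a ref-counted Frame instead of closing it directly.
fun createFrameSink(width: Int, height: Int, onFrame: (Image) -> Unit): ImageReader {
  val reader = ImageReader.newInstance(width, height, ImageFormat.PRIVATE, /* maxImages */ 5)
  reader.setOnImageAvailableListener({ r ->
    val image = r.acquireLatestImage() ?: return@setOnImageAvailableListener
    try {
      onFrame(image) // process while the Image is open
    } finally {
      image.close() // release the buffer back to the reader
    }
  }, null)
  return reader
}
```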
@@ -1,11 +1,9 @@
 package com.mrousavy.camera.utils.outputs
 
 import android.graphics.ImageFormat
-import android.hardware.HardwareBuffer
 import android.hardware.camera2.CameraManager
 import android.media.Image
 import android.media.ImageReader
-import android.os.Build
 import android.util.Log
 import android.util.Size
 import android.view.Surface
@@ -14,6 +12,7 @@ import com.mrousavy.camera.extensions.closestToOrMax
 import com.mrousavy.camera.extensions.getPhotoSizes
 import com.mrousavy.camera.extensions.getPreviewSize
 import com.mrousavy.camera.extensions.getVideoSizes
+import com.mrousavy.camera.utils.VideoPipeline
 import java.io.Closeable
 
 class CameraOutputs(val cameraId: String,
@@ -25,7 +24,6 @@ class CameraOutputs(val cameraId: String,
                     val callback: Callback): Closeable {
   companion object {
     private const val TAG = "CameraOutputs"
-    const val VIDEO_OUTPUT_BUFFER_SIZE = 3
     const val PHOTO_OUTPUT_BUFFER_SIZE = 3
   }
 
@@ -39,14 +37,13 @@ class CameraOutputs(val cameraId: String,
 
   interface Callback {
     fun onPhotoCaptured(image: Image)
-    fun onVideoFrameCaptured(image: Image)
   }
 
   var previewOutput: SurfaceOutput? = null
     private set
   var photoOutput: ImageReaderOutput? = null
     private set
-  var videoOutput: SurfaceOutput? = null
+  var videoOutput: VideoPipelineOutput? = null
     private set
 
   val size: Int
@@ -118,23 +115,11 @@ class CameraOutputs(val cameraId: String,
 
     // Video output: High resolution repeating images (startRecording() or useFrameProcessor())
     if (video != null) {
-      if (Build.VERSION.SDK_INT < Build.VERSION_CODES.Q) throw Error("Video Recordings and/or Frame Processors are only available on API 29 and above!")
-
       val size = characteristics.getVideoSizes(cameraId, video.format).closestToOrMax(video.targetSize)
-
-      val flags = HardwareBuffer.USAGE_GPU_SAMPLED_IMAGE or HardwareBuffer.USAGE_VIDEO_ENCODE
-      val imageReader = ImageReader.newInstance(size.width, size.height, video.format, VIDEO_OUTPUT_BUFFER_SIZE, flags)
-      imageReader.setOnImageAvailableListener({ reader ->
-        try {
-          val image = reader.acquireNextImage() ?: return@setOnImageAvailableListener
-          callback.onVideoFrameCaptured(image)
-        } catch (e: IllegalStateException) {
-          Log.e(TAG, "Failed to acquire a new Image, dropping a Frame.. The Frame Processor cannot keep up with the Camera's FPS!", e)
-        }
-      }, CameraQueues.videoQueue.handler)
+      val videoPipeline = VideoPipeline(size.width, size.height, video.format)
 
       Log.i(TAG, "Adding ${size.width}x${size.height} video output. (Format: ${video.format})")
-      videoOutput = ImageReaderOutput(imageReader, SurfaceOutput.OutputType.VIDEO)
+      videoOutput = VideoPipelineOutput(videoPipeline, SurfaceOutput.OutputType.VIDEO)
     }
 
     Log.i(TAG, "Prepared $size Outputs for Camera $cameraId!")
@@ -35,7 +35,6 @@ open class SurfaceOutput(val surface: Surface,
     }
   }
 
-  @RequiresApi(Build.VERSION_CODES.N)
   fun toOutputConfiguration(characteristics: CameraCharacteristics): OutputConfiguration {
     val result = OutputConfiguration(surface)
     if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
@@ -0,0 +1,22 @@
package com.mrousavy.camera.utils.outputs

import android.util.Log
import android.util.Size
import com.mrousavy.camera.utils.VideoPipeline
import java.io.Closeable

/**
 * A [SurfaceOutput] that uses a [VideoPipeline] as its surface.
 */
class VideoPipelineOutput(val videoPipeline: VideoPipeline,
                          outputType: OutputType,
                          dynamicRangeProfile: Long? = null): Closeable, SurfaceOutput(videoPipeline.surface, Size(videoPipeline.width, videoPipeline.height), outputType, dynamicRangeProfile) {
  override fun close() {
    Log.i(TAG, "Closing ${videoPipeline.width}x${videoPipeline.height} Video Pipeline..")
    videoPipeline.close()
  }

  override fun toString(): String {
    return "$outputType (${videoPipeline.width} x ${videoPipeline.height} in format #${videoPipeline.format})"
  }
}