//
// FrameHostObject.m
// VisionCamera
//
// Created by Marc Rousavy on 22.03.21.
// Copyright © 2021 mrousavy. All rights reserved.
//

#import "FrameHostObject.h"
#import <Foundation/Foundation.h>
#import <jsi/jsi.h>
#import "WKTJsiHostObject.h"
#import "../../cpp/JSITypedArray.h"

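// Usage sketch (assumption: this HostObject is what a JS frame-processor worklet receives as `frame`,
// as in VisionCamera's `useFrameProcessor` hook):
//
//   const frameProcessor = useFrameProcessor((frame) => {
//     'worklet'
//     console.log(`${frame.width} x ${frame.height} ${frame.pixelFormat} frame`)
//   }, [])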
std::vector<jsi::PropNameID> FrameHostObject::getPropertyNames(jsi::Runtime& rt) {
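  // The property keys a JS worklet can see on a Frame HostObject (roughly what Object.keys(frame) would report).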
  std::vector<jsi::PropNameID> result;
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("width")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("height")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("bytesPerRow")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("planesCount")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("orientation")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("isMirrored")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("timestamp")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("isDrawable")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("pixelFormat")));
  // Conversion
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("toString")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("toArrayBuffer")));
  // Ref Management
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("isValid")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("incrementRefCount")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("decrementRefCount")));
  return result;
}

jsi::Value FrameHostObject::get(jsi::Runtime& runtime, const jsi::PropNameID& propName) {
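  // Properties are resolved lazily by name: conversion and ref-counting helpers come back as
  // jsi::Functions, plain attributes as jsi::Values.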
  auto name = propName.utf8(runtime);

  if (name == "toString") {
    auto toString = JSI_HOST_FUNCTION_LAMBDA {
      if (this->frame == nil) {
        return jsi::String::createFromUtf8(runtime, "[closed frame]");
      }
      auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
      auto width = CVPixelBufferGetWidth(imageBuffer);
      auto height = CVPixelBufferGetHeight(imageBuffer);
      NSMutableString* string = [NSMutableString stringWithFormat:@"%lu x %lu Frame", width, height];
      return jsi::String::createFromUtf8(runtime, string.UTF8String);
    };
    return jsi::Function::createFromHostFunction(runtime, jsi::PropNameID::forUtf8(runtime, "toString"), 0, toString);
  }
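
  // Manual ref management: incrementRefCount()/decrementRefCount() let a caller keep the underlying
  // CMSampleBuffer alive beyond the synchronous frame callback (e.g. while an async worklet such as
  // VisionCamera's runAsync is still using the Frame).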
  if (name == "incrementRefCount") {
    auto incrementRefCount = JSI_HOST_FUNCTION_LAMBDA {
      // Increment retain count by one so ARC doesn't destroy the Frame Buffer.
      CFRetain(frame.buffer);
      return jsi::Value::undefined();
    };
    return jsi::Function::createFromHostFunction(runtime,
                                                 jsi::PropNameID::forUtf8(runtime, "incrementRefCount"),
                                                 0,
                                                 incrementRefCount);
  }

  if (name == "decrementRefCount") {
    auto decrementRefCount = JSI_HOST_FUNCTION_LAMBDA {
      // Decrement retain count by one. If the retain count is zero, ARC will destroy the Frame Buffer.
      CFRelease(frame.buffer);
      return jsi::Value::undefined();
    };
    return jsi::Function::createFromHostFunction(runtime,
                                                 jsi::PropNameID::forUtf8(runtime, "decrementRefCount"),
                                                 0,
                                                 decrementRefCount);
  }

if (name == "toArrayBuffer") {
|
|
|
|
auto toArrayBuffer = JSI_HOST_FUNCTION_LAMBDA {
|
|
|
|
auto pixelBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
|
|
|
|
auto bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
|
|
|
|
auto height = CVPixelBufferGetHeight(pixelBuffer);
|
2023-08-24 08:37:20 -06:00
|
|
|
|
2023-02-21 07:00:48 -07:00
|
|
|
auto arraySize = bytesPerRow * height;
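      // Cache a single Uint8ClampedArray on the JS global under ARRAYBUFFER_CACHE_PROP_NAME so that
      // toArrayBuffer() does not allocate a fresh buffer on every frame; it is only re-created when
      // the required size changes.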
      static constexpr auto ARRAYBUFFER_CACHE_PROP_NAME = "__frameArrayBufferCache";
      if (!runtime.global().hasProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME)) {
        vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray> arrayBuffer(runtime, arraySize);
        runtime.global().setProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME, arrayBuffer);
      }

      auto arrayBufferCache = runtime.global().getPropertyAsObject(runtime, ARRAYBUFFER_CACHE_PROP_NAME);
      auto arrayBuffer = vision::getTypedArray(runtime, arrayBufferCache).get<vision::TypedArrayKind::Uint8ClampedArray>(runtime);
      if (arrayBuffer.size(runtime) != arraySize) {
        arrayBuffer = vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray>(runtime, arraySize);
        runtime.global().setProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME, arrayBuffer);
      }

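      // Lock the pixel buffer for read-only CPU access while its bytes are copied into the cached array.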
      CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
      auto buffer = (uint8_t*) CVPixelBufferGetBaseAddress(pixelBuffer);
      arrayBuffer.updateUnsafe(runtime, buffer, arraySize);
      CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

      return arrayBuffer;
    };
    return jsi::Function::createFromHostFunction(runtime, jsi::PropNameID::forUtf8(runtime, "toArrayBuffer"), 0, toArrayBuffer);
  }

  if (name == "isDrawable") {
    return jsi::Value(false);
  }

  if (name == "isValid") {
    auto isValid = frame != nil && frame.buffer != nil && CFGetRetainCount(frame.buffer) > 0 && CMSampleBufferIsValid(frame.buffer);
    return jsi::Value(isValid);
  }

if (name == "width") {
|
2021-06-09 02:57:05 -06:00
|
|
|
auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
|
2021-05-06 06:11:55 -06:00
|
|
|
auto width = CVPixelBufferGetWidth(imageBuffer);
|
|
|
|
return jsi::Value((double) width);
|
|
|
|
}
|
|
|
|
if (name == "height") {
|
2021-06-09 02:57:05 -06:00
|
|
|
auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
|
2021-05-06 06:11:55 -06:00
|
|
|
auto height = CVPixelBufferGetHeight(imageBuffer);
|
|
|
|
return jsi::Value((double) height);
|
|
|
|
}
|
2023-02-21 07:00:48 -07:00
|
|
|
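  // Map UIImageOrientation to the orientation strings exposed to JS; mirroring is reported separately
  // via `isMirrored`.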
if (name == "orientation") {
|
|
|
|
switch (frame.orientation) {
|
|
|
|
case UIImageOrientationUp:
|
|
|
|
case UIImageOrientationUpMirrored:
|
|
|
|
return jsi::String::createFromUtf8(runtime, "portrait");
|
|
|
|
case UIImageOrientationDown:
|
|
|
|
case UIImageOrientationDownMirrored:
|
2023-08-21 04:50:14 -06:00
|
|
|
return jsi::String::createFromUtf8(runtime, "portrait-upside-down");
|
2023-02-21 07:00:48 -07:00
|
|
|
case UIImageOrientationLeft:
|
|
|
|
case UIImageOrientationLeftMirrored:
|
2023-08-21 04:50:14 -06:00
|
|
|
return jsi::String::createFromUtf8(runtime, "landscape-left");
|
2023-02-21 07:00:48 -07:00
|
|
|
case UIImageOrientationRight:
|
|
|
|
case UIImageOrientationRightMirrored:
|
2023-08-21 04:50:14 -06:00
|
|
|
return jsi::String::createFromUtf8(runtime, "landscape-right");
|
2023-02-21 07:00:48 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (name == "isMirrored") {
|
|
|
|
switch (frame.orientation) {
|
|
|
|
case UIImageOrientationUp:
|
|
|
|
case UIImageOrientationDown:
|
|
|
|
case UIImageOrientationLeft:
|
|
|
|
case UIImageOrientationRight:
|
|
|
|
return jsi::Value(false);
|
|
|
|
case UIImageOrientationDownMirrored:
|
|
|
|
case UIImageOrientationUpMirrored:
|
|
|
|
case UIImageOrientationLeftMirrored:
|
|
|
|
case UIImageOrientationRightMirrored:
|
|
|
|
return jsi::Value(true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
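  // The presentation timestamp is exposed to JS in milliseconds.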
if (name == "timestamp") {
|
|
|
|
auto timestamp = CMSampleBufferGetPresentationTimeStamp(frame.buffer);
|
|
|
|
auto seconds = static_cast<double>(CMTimeGetSeconds(timestamp));
|
|
|
|
return jsi::Value(seconds * 1000.0);
|
|
|
|
}
|
2023-08-21 04:50:14 -06:00
|
|
|
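  // Collapse the CoreVideo pixel-format subtype into the coarse "rgb" / "yuv" / "unknown" buckets JS sees.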
if (name == "pixelFormat") {
|
|
|
|
auto format = CMSampleBufferGetFormatDescription(frame.buffer);
|
|
|
|
auto mediaType = CMFormatDescriptionGetMediaSubType(format);
|
|
|
|
switch (mediaType) {
|
|
|
|
case kCVPixelFormatType_32BGRA:
|
|
|
|
return jsi::String::createFromUtf8(runtime, "rgb");
|
|
|
|
case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
|
|
|
|
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
|
|
|
|
return jsi::String::createFromUtf8(runtime, "yuv");
|
|
|
|
default:
|
|
|
|
return jsi::String::createFromUtf8(runtime, "unknown");
|
|
|
|
}
|
|
|
|
}
|
2021-05-06 06:11:55 -06:00
|
|
|
if (name == "bytesPerRow") {
|
2021-06-09 02:57:05 -06:00
|
|
|
auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
|
2021-12-31 08:59:05 -07:00
|
|
|
auto bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
|
2021-05-06 06:11:55 -06:00
|
|
|
return jsi::Value((double) bytesPerRow);
|
|
|
|
}
|
|
|
|
if (name == "planesCount") {
|
2021-06-09 02:57:05 -06:00
|
|
|
auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
|
2021-05-06 06:11:55 -06:00
|
|
|
auto planesCount = CVPixelBufferGetPlaneCount(imageBuffer);
|
|
|
|
return jsi::Value((double) planesCount);
|
|
|
|
}

  // fallback to base implementation
  return HostObject::get(runtime, propName);
}