//
// FrameHostObject.m
// VisionCamera
//
// Created by Marc Rousavy on 22.03.21.
// Copyright © 2021 mrousavy. All rights reserved.
//

#import "FrameHostObject.h"
|
|
|
|
#import <Foundation/Foundation.h>
|
|
|
|
#import <jsi/jsi.h>
|
2023-02-21 07:00:48 -07:00
|
|
|
#import "WKTJsiHostObject.h"
|
|
|
|
|
|
|
|
#import "SkCanvas.h"
|
|
|
|
#import "../Skia Render Layer/SkImageHelpers.h"
|
|
|
|
#import "../../cpp/JSITypedArray.h"
|
2021-05-06 06:11:55 -06:00
|
|
|
|
|
|
|
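// Enumerates every property name this Frame exposes to JS, including the
// attached Skia Canvas' properties when a Canvas is set.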
std::vector<jsi::PropNameID> FrameHostObject::getPropertyNames(jsi::Runtime& rt) {
  std::vector<jsi::PropNameID> result;
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("width")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("height")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("bytesPerRow")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("planesCount")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("orientation")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("isMirrored")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("timestamp")));
  // Conversion
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("toString")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("toArrayBuffer")));
  // Ref Management
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("isValid")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("incrementRefCount")));
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("decrementRefCount")));
  // Skia
  result.push_back(jsi::PropNameID::forUtf8(rt, std::string("render")));

  if (canvas != nullptr) {
    auto canvasPropNames = canvas->getPropertyNames(rt);
    for (auto& prop : canvasPropNames) {
      result.push_back(std::move(prop));
    }
  }

  return result;
}

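// Returns a rect of the given size, centered within `rect`.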
SkRect inscribe(SkSize size, SkRect rect) {
  auto halfWidthDelta = (rect.width() - size.width()) / 2.0;
  auto halfHeightDelta = (rect.height() - size.height()) / 2.0;
  return SkRect::MakeXYWH(rect.x() + halfWidthDelta,
                          rect.y() + halfHeightDelta,
                          size.width(),
                          size.height());
}

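// Resolves a property access on the Frame from JS. Unknown names fall through to the
// attached Skia Canvas (if any), and finally to the base HostObject implementation.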
jsi::Value FrameHostObject::get(jsi::Runtime& runtime, const jsi::PropNameID& propName) {
  auto name = propName.utf8(runtime);

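  // toString(): short description of the Frame, e.g. "3840 x 2160 Frame".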
  if (name == "toString") {
    auto toString = JSI_HOST_FUNCTION_LAMBDA {
      if (this->frame == nil) {
        return jsi::String::createFromUtf8(runtime, "[closed frame]");
      }

      auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
      auto width = CVPixelBufferGetWidth(imageBuffer);
      auto height = CVPixelBufferGetHeight(imageBuffer);

      NSMutableString* string = [NSMutableString stringWithFormat:@"%lu x %lu Frame", width, height];
      return jsi::String::createFromUtf8(runtime, string.UTF8String);
    };
    return jsi::Function::createFromHostFunction(runtime, jsi::PropNameID::forUtf8(runtime, "toString"), 0, toString);
  }
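  // incrementRefCount(): manually retains the underlying buffer so the Frame can outlive
  // the synchronous Frame Processor call (e.g. while an async worklet is still using it).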
  if (name == "incrementRefCount") {
    auto incrementRefCount = JSI_HOST_FUNCTION_LAMBDA {
      // Increment retain count by one so ARC doesn't destroy the Frame Buffer.
      CFRetain(frame.buffer);
      return jsi::Value::undefined();
    };
    return jsi::Function::createFromHostFunction(runtime,
                                                 jsi::PropNameID::forUtf8(runtime, "incrementRefCount"),
                                                 0,
                                                 incrementRefCount);
  }
  if (name == "decrementRefCount") {
    auto decrementRefCount = JSI_HOST_FUNCTION_LAMBDA {
      // Decrement retain count by one. If the retain count reaches zero, ARC will destroy the Frame Buffer.
      CFRelease(frame.buffer);
      return jsi::Value::undefined();
    };
    return jsi::Function::createFromHostFunction(runtime,
                                                 jsi::PropNameID::forUtf8(runtime, "decrementRefCount"),
                                                 0,
                                                 decrementRefCount);
  }
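  // render(): draws this Frame onto the attached Skia Canvas, optionally through a given Paint (shader).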
  if (name == "render") {
    auto render = JSI_HOST_FUNCTION_LAMBDA {
      if (canvas == nullptr) {
        throw jsi::JSError(runtime, "Trying to render a Frame without a Skia Canvas! Did you install Skia?");
      }

      // convert CMSampleBuffer to SkImage
      auto context = canvas->getCanvas()->recordingContext();
      auto image = SkImageHelpers::convertCMSampleBufferToSkImage(context, frame.buffer);

      // draw SkImage
      if (count > 0) {
        // ..with paint/shader
        auto paintHostObject = arguments[0].asObject(runtime).asHostObject<RNSkia::JsiSkPaint>(runtime);
        auto paint = paintHostObject->getObject();
        canvas->getCanvas()->drawImage(image, 0, 0, SkSamplingOptions(), paint.get());
      } else {
        // ..without paint/shader
        canvas->getCanvas()->drawImage(image, 0, 0);
      }

      return jsi::Value::undefined();
    };
    return jsi::Function::createFromHostFunction(runtime, jsi::PropNameID::forUtf8(runtime, "render"), 1, render);
  }
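  // toArrayBuffer(): copies the Frame's pixel data into a Uint8ClampedArray. The TypedArray is
  // cached on the JS global object and only re-allocated when the required size changes.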
  if (name == "toArrayBuffer") {
    auto toArrayBuffer = JSI_HOST_FUNCTION_LAMBDA {
      auto pixelBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
      auto bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
      auto height = CVPixelBufferGetHeight(pixelBuffer);
      auto buffer = (uint8_t*) CVPixelBufferGetBaseAddress(pixelBuffer);
      auto arraySize = bytesPerRow * height;

      static constexpr auto ARRAYBUFFER_CACHE_PROP_NAME = "__frameArrayBufferCache";
      if (!runtime.global().hasProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME)) {
        vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray> arrayBuffer(runtime, arraySize);
        runtime.global().setProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME, arrayBuffer);
      }

      auto arrayBufferCache = runtime.global().getPropertyAsObject(runtime, ARRAYBUFFER_CACHE_PROP_NAME);
      auto arrayBuffer = vision::getTypedArray(runtime, arrayBufferCache).get<vision::TypedArrayKind::Uint8ClampedArray>(runtime);

      if (arrayBuffer.size(runtime) != arraySize) {
        arrayBuffer = vision::TypedArray<vision::TypedArrayKind::Uint8ClampedArray>(runtime, arraySize);
        runtime.global().setProperty(runtime, ARRAYBUFFER_CACHE_PROP_NAME, arrayBuffer);
      }

      arrayBuffer.updateUnsafe(runtime, buffer, arraySize);

      return arrayBuffer;
    };
    return jsi::Function::createFromHostFunction(runtime, jsi::PropNameID::forUtf8(runtime, "toArrayBuffer"), 0, toArrayBuffer);
  }
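  // isValid: whether the Frame's buffer is still retained and valid (i.e. hasn't been released yet).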
  if (name == "isValid") {
    auto isValid = frame != nil && frame.buffer != nil && CFGetRetainCount(frame.buffer) > 0 && CMSampleBufferIsValid(frame.buffer);
    return jsi::Value(isValid);
  }
  if (name == "width") {
    auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
    auto width = CVPixelBufferGetWidth(imageBuffer);
    return jsi::Value((double) width);
  }
  if (name == "height") {
    auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
    auto height = CVPixelBufferGetHeight(imageBuffer);
    return jsi::Value((double) height);
  }
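  // orientation: maps the native UIImageOrientation to the JS orientation string.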
  if (name == "orientation") {
    switch (frame.orientation) {
      case UIImageOrientationUp:
      case UIImageOrientationUpMirrored:
        return jsi::String::createFromUtf8(runtime, "portrait");
      case UIImageOrientationDown:
      case UIImageOrientationDownMirrored:
        return jsi::String::createFromUtf8(runtime, "portraitUpsideDown");
      case UIImageOrientationLeft:
      case UIImageOrientationLeftMirrored:
        return jsi::String::createFromUtf8(runtime, "landscapeLeft");
      case UIImageOrientationRight:
      case UIImageOrientationRightMirrored:
        return jsi::String::createFromUtf8(runtime, "landscapeRight");
    }
  }
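  // isMirrored: true if the Frame uses one of the mirrored UIImageOrientation cases.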
  if (name == "isMirrored") {
    switch (frame.orientation) {
      case UIImageOrientationUp:
      case UIImageOrientationDown:
      case UIImageOrientationLeft:
      case UIImageOrientationRight:
        return jsi::Value(false);
      case UIImageOrientationDownMirrored:
      case UIImageOrientationUpMirrored:
      case UIImageOrientationLeftMirrored:
      case UIImageOrientationRightMirrored:
        return jsi::Value(true);
    }
  }
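  // timestamp: the buffer's presentation timestamp, converted from seconds to milliseconds.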
  if (name == "timestamp") {
    auto timestamp = CMSampleBufferGetPresentationTimeStamp(frame.buffer);
    auto seconds = static_cast<double>(CMTimeGetSeconds(timestamp));
    return jsi::Value(seconds * 1000.0);
  }
  if (name == "bytesPerRow") {
    auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
    auto bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    return jsi::Value((double) bytesPerRow);
  }
  if (name == "planesCount") {
    auto imageBuffer = CMSampleBufferGetImageBuffer(frame.buffer);
    auto planesCount = CVPixelBufferGetPlaneCount(imageBuffer);
    return jsi::Value((double) planesCount);
  }

  if (canvas != nullptr) {
    // If we have a Canvas, try to access the property on it.
    return canvas->get(runtime, propName);
  }

  // fallback to the base implementation
  return HostObject::get(runtime, propName);
}