Add quarter of frame mode for flatbuffers
awawa-dev committed Sep 23, 2024
1 parent 8f48dce · commit 6d37f16
Showing 10 changed files with 69 additions and 48 deletions.
1 change: 1 addition & 0 deletions include/flatbuffers/server/FlatBuffersServer.h
@@ -71,4 +71,5 @@ private slots:
QString _userLutFile;
PixelFormat _currentLutPixelFormat;
int _flatbufferToneMappingMode;
bool _quarterOfFrameMode;
};
2 changes: 1 addition & 1 deletion include/utils/FrameDecoder.h
@@ -17,7 +17,7 @@ class FrameDecoder
const PixelFormat pixelFormat, const uint8_t* lutBuffer, Image<ColorRgb>& outputImage);

static void processQImage(
const uint8_t* data, int width, int height, int lineLength,
const uint8_t* data, const uint8_t* dataUV, int width, int height, int lineLength,
const PixelFormat pixelFormat, const uint8_t* lutBuffer, Image<ColorRgb>& outputImage);

static void processSystemImageBGRA(Image<ColorRgb>& image, int targetSizeX, int targetSizeY,
19 changes: 6 additions & 13 deletions sources/base/schema/schema-flatbufServer.json
@@ -42,21 +42,14 @@
"default" : false,
"propertyOrder" : 4
},
"hdrToneMappingMode" :
"quarterOfFrameMode" :
{
"type" : "integer",
"title" : "edt_conf_fbs_hdrToneMappingMode_title",
"append" : "edt_append_mode",
"enum" : [1, 2],
"default" : 1,
"type" : "boolean",
"format": "checkbox",
"required" : true,
"propertyOrder" : 5,
"options": {
"enum_titles": ["Fullscreen", "Light (border only)"],
"dependencies": {
"hdrToneMapping": true
}
}
"title" : "flatbuffers_nv12_quarter_of_frame_title",
"default" : false,
"propertyOrder" : 5
}
},
"additionalProperties" : false
45 changes: 33 additions & 12 deletions sources/flatbuffers/server/FlatBuffersServer.cpp
@@ -33,6 +33,7 @@ FlatBuffersServer::FlatBuffersServer(std::shared_ptr<NetOrigin> netOrigin, const
, _userLutFile("")
, _currentLutPixelFormat(PixelFormat::RGB24)
, _flatbufferToneMappingMode(0)
, _quarterOfFrameMode(false)
{
connect(GlobalSignals::getInstance(), &GlobalSignals::SignalSetLut, this, &FlatBuffersServer::signalSetLutHandler, Qt::BlockingQueuedConnection);
}
@@ -70,6 +71,8 @@ void FlatBuffersServer::signalRequestSourceHandler(hyperhdr::Components componen
{
_hdrToneMappingEnabled = (listen) ? _flatbufferToneMappingMode : 0;

Info(_log, "Tone mapping: %i", _hdrToneMappingEnabled);

if (_hdrToneMappingEnabled || _currentLutPixelFormat == PixelFormat::YUYV)
loadLutFile();
else
@@ -113,14 +116,19 @@ void FlatBuffersServer::handleSettingsUpdate(settings::type type, const QJsonDoc
}

// HDR tone mapping
_flatbufferToneMappingMode = obj["hdrToneMapping"].toBool(false) ? obj["hdrToneMappingMode"].toInt(1) : 0;
_flatbufferToneMappingMode = obj["hdrToneMapping"].toBool(false) ? 1 : 0;

signalRequestSourceHandler(hyperhdr::Components::COMP_HDR, -1, _flatbufferToneMappingMode);

// new timeout just for new connections
_timeout = obj["timeout"].toInt(5000);
// enable check
obj["enable"].toBool(true) ? startServer() : stopServer();

_quarterOfFrameMode = obj["quarterOfFrameMode"].toBool(false);

Info(_log, "Tone mapping: %i", _flatbufferToneMappingMode);
Info(_log, "NV12 quarter of frame mode: %i", _quarterOfFrameMode);
}
}

@@ -294,15 +302,20 @@ void FlatBuffersServer::handlerImageReceived(int priority, FlatBuffersParser::Fl

if (flatImage->format == FlatBuffersParser::FLATBUFFERS_IMAGE_FORMAT::RGB)
{
if (_currentLutPixelFormat != PixelFormat::RGB24 && _hdrToneMappingEnabled)
if (_currentLutPixelFormat != PixelFormat::RGB24)
{
_currentLutPixelFormat = PixelFormat::RGB24;
loadLutFile();
_currentLutPixelFormat = PixelFormat::RGB24;
if (_hdrToneMappingEnabled)
{
loadLutFile();
}

Debug(_log, "Received first RGB frame. Image size: %i (%i x %i)", flatImage->size, flatImage->width, flatImage->height);
}

if (flatImage->size != flatImage->width * flatImage->height * 3)
if (flatImage->size != flatImage->width * flatImage->height * 3 || flatImage->size == 0)
{
Error(_log, "The RGB image data size does not match the width and height");
Error(_log, "The RGB image data size does not match the width and height or it's empty. Image size: %i (%i x %i)", flatImage->size, flatImage->width, flatImage->height);
}
else
{
Expand Down Expand Up @@ -332,9 +345,9 @@ void FlatBuffersServer::handlerImageReceived(int priority, FlatBuffersParser::Fl
{
Error(_log, "The LUT file is not loaded");
}
else if (flatImage->size != ((flatImage->width * flatImage->height * 3) / 2))
else if (flatImage->size != ((flatImage->width * flatImage->height * 3) / 2) || flatImage->size == 0)
{
Error(_log, "The NV12 image data size (%i) does not match the width and height (%i)", flatImage->size, ((flatImage->width * flatImage->height * 3) / 2));
Error(_log, "The NV12 image data size (%i) does not match the width and height (%i) or it's empty", flatImage->size, ((flatImage->width * flatImage->height * 3) / 2));
}
else if ((flatImage->firstPlane.stride != flatImage->secondPlane.stride) ||
(flatImage->firstPlane.stride != 0 && flatImage->firstPlane.stride != flatImage->width))
@@ -345,10 +358,18 @@ void FlatBuffersServer::handlerImageReceived(int priority, FlatBuffersParser::Fl
else
{
Image<ColorRgb> image(flatImage->width, flatImage->height);

FrameDecoder::processImage(
0, 0, 0, 0,
flatImage->firstPlane.data, flatImage->secondPlane.data, flatImage->width, flatImage->height, flatImage->width, PixelFormat::NV12, _lut.data(), image);

if (_quarterOfFrameMode)
{
FrameDecoder::processQImage(
flatImage->firstPlane.data, flatImage->secondPlane.data, flatImage->width, flatImage->height, flatImage->width, PixelFormat::NV12, _lut.data(), image);
}
else
{
FrameDecoder::processImage(
0, 0, 0, 0,
flatImage->firstPlane.data, flatImage->secondPlane.data, flatImage->width, flatImage->height, flatImage->width, PixelFormat::NV12, _lut.data(), image);
}
emit GlobalSignals::getInstance()->SignalSetGlobalImage(priority, image, timeout_ms, origin, clientDescription);
}
}
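
For context on the checks above: an NV12 frame is a full-resolution Y (luma) plane immediately followed by a half-height plane of interleaved U/V bytes, which is why a valid buffer must be exactly width * height * 3 / 2 bytes. The flatbuffers path can carry the two planes separately (firstPlane/secondPlane), while a single packed buffer simply places the UV plane right after the Y plane. A minimal sketch of that split, using hypothetical helper names that are not part of this commit:

#include <cstddef>
#include <cstdint>

struct Nv12Planes
{
    const uint8_t* y;   // width * height bytes, one luma sample per pixel
    const uint8_t* uv;  // width * height / 2 bytes, one interleaved U/V pair per 2x2 block
};

// Returns false when the buffer cannot be a packed NV12 frame of the given dimensions.
inline bool splitNv12(const uint8_t* data, size_t size, int width, int height, Nv12Planes& out)
{
    const size_t ySize = static_cast<size_t>(width) * height;
    const size_t uvSize = ySize / 2;
    if (data == nullptr || size != ySize + uvSize)  // same identity as size == w * h * 3 / 2
        return false;
    out.y = data;
    out.uv = data + ySize;  // the UV plane directly follows the Y plane
    return true;
}
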
2 changes: 1 addition & 1 deletion sources/grabber/linux/v4l2/V4L2Worker.cpp
@@ -201,7 +201,7 @@ void V4L2Worker::runMe()
{
Image<ColorRgb> image(_width >> 1, _height >> 1);
FrameDecoder::processQImage(
_sharedData, _width, _height, _lineLength, _pixelFormat, _lutBuffer, image);
_sharedData, nullptr, _width, _height, _lineLength, _pixelFormat, _lutBuffer, image);

image.setBufferCacheSize();
if (!_directAccess)
2 changes: 1 addition & 1 deletion sources/grabber/osx/AVF/AVFWorker.cpp
@@ -193,7 +193,7 @@ void AVFWorker::runMe()
{
Image<ColorRgb> image(_width >> 1, _height >> 1);
FrameDecoder::processQImage(
_localBuffer.data(), _width, _height, _lineLength, _pixelFormat, _lutBuffer, image);
_localBuffer.data(), nullptr, _width, _height, _lineLength, _pixelFormat, _lutBuffer, image);

image.setBufferCacheSize();
if (!_directAccess)
2 changes: 1 addition & 1 deletion sources/grabber/windows/MF/MFWorker.cpp
@@ -198,7 +198,7 @@ void MFWorker::runMe()
{
Image<ColorRgb> image(_width >> 1, _height >> 1);
FrameDecoder::processQImage(
_localBuffer.data(), _width, _height, _lineLength, _pixelFormat, _lutBuffer, image);
_localBuffer.data(), nullptr, _width, _height, _lineLength, _pixelFormat, _lutBuffer, image);

image.setBufferCacheSize();
if (!_directAccess)
6 changes: 3 additions & 3 deletions sources/utils/FrameDecoder.cpp
@@ -277,7 +277,7 @@ void FrameDecoder::processImage(
}

void FrameDecoder::processQImage(
const uint8_t* data, int width, int height, int lineLength,
const uint8_t* data, const uint8_t* dataUV, int width, int height, int lineLength,
const PixelFormat pixelFormat, const uint8_t* lutBuffer, Image<ColorRgb>& outputImage)
{
uint32_t ind_lutd;
@@ -429,13 +429,13 @@ void FrameDecoder::processQImage(

if (pixelFormat == PixelFormat::NV12)
{
int deltaU = lineLength * height;
uint8_t* deltaUV = (dataUV != nullptr) ? (uint8_t*)dataUV : (uint8_t*)data + lineLength * height;
for (int yDest = 0, ySource = 0; yDest < outputHeight; ySource += 2, ++yDest)
{
uint8_t* currentDest = destMemory + ((uint64_t)destLineSize) * yDest;
uint8_t* endDest = currentDest + destLineSize;
uint8_t* currentSource = (uint8_t*)data + (((uint64_t)lineLength * ySource));
uint8_t* currentSourceU = (uint8_t*)data + deltaU + (((uint64_t)ySource / 2) * lineLength);
uint8_t* currentSourceU = deltaUV + (((uint64_t)ySource / 2) * lineLength);

while (currentDest < endDest)
{
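
The new dataUV parameter lets FlatBuffersServer pass the UV plane explicitly, while the V4L2/AVF/MF grabbers pass nullptr and the plane is still derived from the packed buffer at data + lineLength * height, as the change above shows. Conceptually, the quarter-of-frame decode reads one Y sample per 2x2 block together with that block's single U/V pair, so the output shrinks to half width and half height without discarding chroma. A simplified sketch of that sampling pattern (not the project's LUT-based loop; yuvToRgb here is plain BT.601 integer math added only to keep the example self-contained):

#include <algorithm>
#include <cstddef>
#include <cstdint>

struct Rgb { uint8_t r, g, b; };

// BT.601 full-range integer conversion, used only so the sketch compiles on its own.
static Rgb yuvToRgb(int y, int u, int v)
{
    auto clamp = [](int x) { return static_cast<uint8_t>(std::min(255, std::max(0, x))); };
    const int d = u - 128, e = v - 128;
    return { clamp(y + ((359 * e) >> 8)),
             clamp(y - ((88 * d + 183 * e) >> 8)),
             clamp(y + ((454 * d) >> 8)) };
}

// Each 2x2 luma block contributes one Y sample; its shared U/V pair is used as-is,
// so chroma information is preserved while the output is width/2 x height/2.
static void quarterFrameNv12(const uint8_t* yPlane, const uint8_t* uvPlane,
    int width, int height, int stride, Rgb* out)
{
    const int outW = width >> 1;
    const int outH = height >> 1;
    for (int oy = 0; oy < outH; ++oy)
    {
        const uint8_t* yRow = yPlane + static_cast<size_t>(2 * oy) * stride;   // source luma row 2*oy
        const uint8_t* uvRow = uvPlane + static_cast<size_t>(oy) * stride;     // matching UV row
        for (int ox = 0; ox < outW; ++ox)
            out[static_cast<size_t>(oy) * outW + ox] =
                yuvToRgb(yRow[2 * ox], uvRow[2 * ox], uvRow[2 * ox + 1]);
    }
}
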
4 changes: 3 additions & 1 deletion www/i18n/en.json
@@ -1248,5 +1248,7 @@
"option_calibration_video" : "Calibration using a test video played by your favorite video player.<br/>We calibrate LUT taking into account the grabber, player and your TV.",
"option_calibration_classic" : "Calibration using Windows with HDR mode enabled and a web browser.<br/>We calibrate LUT taking into account the grabber and your TV.",
"video_calibration_overview" : "<b>1</b> You need to set the video format of your grabber to MJPEG/YUYV/NV12. Other formats are not supported.<br/><br/><b>2</b> If you calibrate using Flatbuffers, you need to enable tone mapping in its settings. Only the NV12 video format is supported.</br><br/><b>3</b> You can download test files here: <a href='https://github.com/awawa-dev/awawa-dev.github.io/tree/master/calibration'>link</a>. In your player, start playing the test file. You should see it in the HyperHDR video preview. The test screen must take up the entire screen and no extraneous elements, such as the player menu, can be visible.</br><br/><b>4</b> For calibration, you should choose a file with 'hdr' in the name unless your system or player automatically uses SDR to HDR tone mapping. In that case, to adapt to such a scenario, choose a file with 'sdr' in the name.</br><br/><b>5</b> The YUV420 format provides the greatest compatibility with average quality and is the most common. The YUV444 format provides the best quality but it is rare to find materials encoded in this form.",
"chk_calibration_debug" : "Debug"
"chk_calibration_debug" : "Debug",
"flatbuffers_nv12_quarter_of_frame_title": "Quarter of frame for NV12",
"flatbuffers_nv12_quarter_of_frame_expl": "The NV12 codec contains four times more information about brightness than about color. This option allows you to reduce CPU load by reducing the height and width of the video frame by 2 without losing color information."
}
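
To make the trade-off described in flatbuffers_nv12_quarter_of_frame_expl concrete, here is a small worked example with illustrative numbers for a 1920x1080 source (the figures are an assumption for illustration, not taken from this commit):

#include <cstdio>

int main()
{
    const long long w = 1920, h = 1080;
    const long long lumaSamples = w * h;              // 2,073,600 Y samples in the frame
    const long long chromaPairs = (w / 2) * (h / 2);  // 518,400 interleaved U/V pairs
    const long long bufferBytes = w * h * 3 / 2;      // 3,110,400 bytes in a packed NV12 buffer
    // Quarter mode reads one Y sample per 2x2 block and every U/V pair,
    // so the 960x540 output touches four times fewer luma samples
    // while no chroma information is discarded.
    std::printf("output %lldx%lld, luma reads %lld -> %lld, chroma pairs %lld, buffer %lld bytes\n",
        w / 2, h / 2, lumaSamples, chromaPairs, chromaPairs, bufferBytes);
    return 0;
}
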
34 changes: 19 additions & 15 deletions www/js/grabber_benchmark.js
@@ -226,7 +226,7 @@ $(document).ready( function(){
{
internalLatency = ((new Date()).getTime() - internalLatency)/indexer;

ctx.fillStyle = "yellow";
ctx.fillStyle = "#FFFF00";
ctx.fillRect(0, 0, canvas.width, canvas.height);

await sleep(1000);
@@ -247,7 +247,7 @@

whiteTimer = (new Date()).getTime();

ctx.fillStyle = "white";
ctx.fillStyle = "#FFFFFF";
ctx.fillRect(0, 0, canvas.width, canvas.height);

requestBenchmark(mode, ++whiteIndexer);
@@ -259,7 +259,7 @@

redTimer = (new Date()).getTime();

ctx.fillStyle = "red";
ctx.fillStyle = "#FF0000";
ctx.fillRect(0, 0, canvas.width, canvas.height);

requestBenchmark(mode, ++redIndexer);
@@ -271,7 +271,7 @@

greenTimer = (new Date()).getTime();

ctx.fillStyle = "green";
ctx.fillStyle = "#00FF00";
ctx.fillRect(0, 0, canvas.width, canvas.height);

requestBenchmark(mode, ++greenIndexer);
@@ -283,7 +283,7 @@

blueTimer = (new Date()).getTime();

ctx.fillStyle = "blue";
ctx.fillStyle = "#0000FF";
ctx.fillRect(0, 0, canvas.width, canvas.height);

requestBenchmark(mode, ++blueIndexer);
@@ -295,7 +295,7 @@

blackTimer = (new Date()).getTime();

ctx.fillStyle = "black";
ctx.fillStyle = "#000000";
ctx.fillRect(0, 0, canvas.width, canvas.height);

requestBenchmark(mode, ++blackIndexer);
@@ -312,15 +312,19 @@
totalDelay = ((new Date()).getTime() - totalDelay);

let finalDelay = Math.max(( totalDelay / total ) - internalLatency, 0);
$("#logmessages").append("<code class='db_info'>"+$.i18n("dashboard_current_video_device")+": "+window.serverInfo.grabberstate.device+"</code><br/>");
$("#logmessages").append("<code class='db_info'>"+$.i18n("dashboard_current_video_mode")+": "+window.serverInfo.grabberstate.videoMode+"</code><br/>");

var mode1 = window.serverInfo.grabberstate.videoMode.split(' ');
if (mode1.length == 2)

if (window.serverInfo != null && window.serverInfo.grabberstate != null && window.serverInfo.grabberstate.device != null && window.serverInfo.grabberstate.videoMode != null)
{
var mode2 = mode1[0].split('x');
if (mode2.length == 3 && Number(mode2[2]) > 0)
$("#logmessages").append("<code class='db_info'>"+$.i18n("benchmark_exp_delay")+": "+(1000/Number(mode2[2])).toFixed(2)+"ms</code><br/>");
$("#logmessages").append("<code class='db_info'>"+$.i18n("dashboard_current_video_device")+": "+window.serverInfo.grabberstate.device+"</code><br/>");
$("#logmessages").append("<code class='db_info'>"+$.i18n("dashboard_current_video_mode")+": "+window.serverInfo.grabberstate.videoMode+"</code><br/>");

var mode1 = window.serverInfo.grabberstate.videoMode.split(' ');
if (mode1.length == 2)
{
var mode2 = mode1[0].split('x');
if (mode2.length == 3 && Number(mode2[2]) > 0)
$("#logmessages").append("<code class='db_info'>"+$.i18n("benchmark_exp_delay")+": "+(1000/Number(mode2[2])).toFixed(2)+"ms</code><br/>");
}
}

$("#logmessages").append("<code class='db_info'>"+$.i18n("benchmark_av_delay")+": "+Number((finalDelay).toFixed(2))+"ms</code><br/>");
@@ -334,4 +338,4 @@ $(document).ready( function(){
}, 800);
};

});
});
