diff --git a/configure.ac b/configure.ac
index 229f71f6..97506b33 100644
--- a/configure.ac
+++ b/configure.ac
@@ -10,7 +10,7 @@
 
 # Initialize autoconf.
 AC_PREREQ([2.69])
-AC_INIT([RidgeRun inference library],[0.4.1],[https://github.com/RidgeRun/r2inference/issues],[r2inference])
+AC_INIT([RidgeRun inference library],[0.4.2],[https://github.com/RidgeRun/r2inference/issues],[r2inference])
 
 # Initialize our build utils
 RR_INIT
diff --git a/r2i/tflite/engine.cc b/r2i/tflite/engine.cc
index 3bdd99eb..65cce27c 100644
--- a/r2i/tflite/engine.cc
+++ b/r2i/tflite/engine.cc
@@ -136,6 +136,17 @@ const int Engine::GetAllowFP16 () {
   return this->allow_fp16;
 }
 
+int64_t Engine::GetRequiredBufferSize (TfLiteIntArray *dims) {
+  int64_t size = 1;
+
+  /* For each dimension, multiply the amount of entries */
+  for (int dim = 0; dim < dims->size; ++dim) {
+    size *= dims->data[dim];
+  }
+
+  return size;
+}
+
 std::shared_ptr<r2i::IPrediction> Engine::Predict (std::shared_ptr<r2i::IFrame>
     in_frame, r2i::RuntimeError &error) {
   ImageFormat in_format;
@@ -201,7 +212,7 @@ std::shared_ptr<r2i::IPrediction> Engine::Predict (std::shared_ptr<r2i::IFrame>
 
   int output = this->interpreter->outputs()[0];
   TfLiteIntArray *output_dims = this->interpreter->tensor(output)->dims;
-  auto output_size = output_dims->data[output_dims->size - 1] * sizeof(float);
+  auto output_size = GetRequiredBufferSize(output_dims) * sizeof(float);
   auto *tensor_data = this->interpreter->typed_output_tensor<float>(0);
   prediction->SetTensorValues(tensor_data, output_size);
 
diff --git a/r2i/tflite/engine.h b/r2i/tflite/engine.h
index 3fe5fa77..010e5204 100644
--- a/r2i/tflite/engine.h
+++ b/r2i/tflite/engine.h
@@ -37,6 +37,7 @@ class Engine : public IEngine {
  const int GetNumberOfThreads ();
  RuntimeError SetAllowFP16 (int allow_fp16);
  const int GetAllowFP16 ();
+  int64_t GetRequiredBufferSize (TfLiteIntArray *dims);
 
  ~Engine ();