
Commit 275becb

Merge branch 'ci/sync_gh_tflite-micro' into 'master'
Sync esp-tflite-micro from github - 690793

See merge request app-frameworks/esp-tflite-micro!143
vikramdattu committed Apr 15, 2024
2 parents 6a91f2c + 7326c15 commit 275becb
Showing 12 changed files with 57 additions and 26 deletions.
1 change: 1 addition & 0 deletions .gitlab-ci.yml
@@ -20,6 +20,7 @@ variables:
pre-commit-mr:
stage: pre-check
image: "$CI_DOCKER_REGISTRY/pre-commit-codecheck:1"
+  allow_failure: true
before_script:
- echo "Skip common before script"
script:
2 changes: 1 addition & 1 deletion signal/micro/kernels/irfft.cc
@@ -227,4 +227,4 @@ TFLMRegistration* Register_IRFFT_INT32() {
}

} // namespace tflm_signal
-} // namespace tflite
\ No newline at end of file
+} // namespace tflite
16 changes: 9 additions & 7 deletions signal/micro/kernels/rfft.cc
@@ -48,6 +48,7 @@ struct TfLiteAudioFrontendRfftParams {
int32_t output_length;
TfLiteType fft_type;
T* work_area;
+  int scratch_buffer_index;
int8_t* state;
};

@@ -65,9 +66,6 @@ void* RfftInit(TfLiteContext* context, const char* buffer, size_t length) {
params->fft_length = fbw.ElementAsInt32(kFftLengthIndex);
params->fft_type = typeToTfLiteType<T>();

-  params->work_area = static_cast<T*>(context->AllocatePersistentBuffer(
-      context, params->fft_length * sizeof(T)));
-
size_t state_size = (*get_needed_memory_func)(params->fft_length);
params->state = static_cast<int8_t*>(
context->AllocatePersistentBuffer(context, state_size * sizeof(int8_t)));
@@ -103,6 +101,8 @@ TfLiteStatus RfftPrepare(TfLiteContext* context, TfLiteNode* node) {
params->output_length =
output_shape.Dims(output_shape.DimensionsCount() - 1) / 2;

+  context->RequestScratchBufferInArena(context, params->fft_length * sizeof(T),
+                                       &params->scratch_buffer_index);
micro_context->DeallocateTempTfLiteTensor(input);
micro_context->DeallocateTempTfLiteTensor(output);
return kTfLiteOk;
@@ -122,15 +122,17 @@ TfLiteStatus RfftEval(TfLiteContext* context, TfLiteNode* node) {
tflite::micro::GetEvalOutput(context, node, kOutputTensor);
Complex<T>* output_data = tflite::micro::GetTensorData<Complex<T>>(output);

+  T* work_area = static_cast<T*>(
+      context->GetScratchBuffer(context, params->scratch_buffer_index));
+
for (int input_idx = 0, output_idx = 0; input_idx < params->input_size;
input_idx += params->input_length, output_idx += params->output_length) {
-    memcpy(params->work_area, &input_data[input_idx],
-           sizeof(T) * params->input_length);
+    memcpy(work_area, &input_data[input_idx], sizeof(T) * params->input_length);
// Zero pad input to FFT length
-    memset(&params->work_area[params->input_length], 0,
+    memset(&work_area[params->input_length], 0,
sizeof(T) * (params->fft_length - params->input_length));

-    (*apply_func)(params->state, params->work_area, &output_data[output_idx]);
+    (*apply_func)(params->state, work_area, &output_data[output_idx]);
}
return kTfLiteOk;
}
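The rfft.cc change above moves the FFT work area from a persistent allocation (alive for the model's lifetime) to an arena scratch buffer that the memory planner can reuse across operators: the buffer is requested by index in Prepare and the pointer is re-fetched on every Eval call, because scratch pointers are only valid during Eval. A minimal sketch of that lifecycle, assuming the TFLM headers are available; ExampleOpData, ExamplePrepare, ExampleEval, and kWorkBytes are illustrative names, not part of this commit:

#include "tensorflow/lite/core/c/common.h"

struct ExampleOpData {
  int scratch_buffer_index;  // assigned by the runtime in Prepare
};

TfLiteStatus ExamplePrepare(TfLiteContext* context, TfLiteNode* node) {
  auto* data = static_cast<ExampleOpData*>(node->user_data);
  constexpr size_t kWorkBytes = 1024;  // illustrative size
  // Reserve arena space; the planner may overlap it with other ops' scratch.
  TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
      context, kWorkBytes, &data->scratch_buffer_index));
  return kTfLiteOk;
}

TfLiteStatus ExampleEval(TfLiteContext* context, TfLiteNode* node) {
  auto* data = static_cast<ExampleOpData*>(node->user_data);
  // Only valid for the duration of this Eval call; re-fetch every invocation.
  void* work_area =
      context->GetScratchBuffer(context, data->scratch_buffer_index);
  TF_LITE_ENSURE(context, work_area != nullptr);
  // ... use work_area as temporary storage ...
  return kTfLiteOk;
}

Unlike AllocatePersistentBuffer, the scratch region does not count against the arena for the whole model lifetime, which is the point of the migration.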
3 changes: 3 additions & 0 deletions tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -1017,6 +1017,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
case TensorType_FLOAT16:
*type = kTfLiteFloat16;
return kTfLiteOk;
+    case TensorType_BFLOAT16:
+      *type = kTfLiteBFloat16;
+      return kTfLiteOk;
case TensorType_FLOAT32:
*type = kTfLiteFloat32;
return kTfLiteOk;
1 change: 1 addition & 0 deletions tensorflow/lite/core/c/c_api_types.h
@@ -133,6 +133,7 @@ typedef enum {
kTfLiteUInt32 = 16,
kTfLiteUInt16 = 17,
kTfLiteInt4 = 18,
+  kTfLiteBFloat16 = 19,
} TfLiteType;

/// Legacy. Will be deprecated in favor of `TfLiteAffineQuantization`.
2 changes: 2 additions & 0 deletions tensorflow/lite/core/c/common.cc
@@ -370,6 +370,8 @@ const char* TfLiteTypeGetName(TfLiteType type) {
return "STRING";
case kTfLiteFloat16:
return "FLOAT16";
+    case kTfLiteBFloat16:
+      return "BFLOAT16";
case kTfLiteFloat64:
return "FLOAT64";
case kTfLiteResource:
35 changes: 24 additions & 11 deletions tensorflow/lite/core/c/common.h
@@ -296,6 +296,13 @@ typedef struct TfLiteFloat16 {
uint16_t data;
} TfLiteFloat16;

+/// bfloat16 data type compatible with the Google Brain definition.
+/// https://cloud.google.com/tpu/docs/bfloat16.
+/// This provides 1 bit of sign, 8 bits of exponent, and 7 bits of mantissa.
+typedef struct TfLiteBFloat16 {
+  uint16_t data;
+} TfLiteBFloat16;
+
/// Return the name of a given type, for error reporting purposes.
const char* TfLiteTypeGetName(TfLiteType type);

@@ -1007,11 +1014,17 @@ typedef struct TfLiteContext {
int subgraph_index);
} TfLiteContext;

-/// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration`
+/// `TfLiteOperator` is an external version of `TfLiteRegistration`
/// for C API which doesn't use internal types (such as `TfLiteContext`) but
/// only uses stable API types (such as `TfLiteOpaqueContext`). The purpose of
/// each field is the exactly the same as with `TfLiteRegistration`.
-typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal;
+typedef struct TfLiteOperator TfLiteOperator;
+
+#ifndef DOXYGEN_SKIP
+// For backwards compatibility.
+// Deprecated. Use TfLiteOperator instead.
+typedef TfLiteOperator TfLiteRegistrationExternal;
+#endif

/// The valid values of the `inplace_operator` field in `TfLiteRegistration`.
/// This allow an op to signal to the runtime that the same data pointer
@@ -1078,7 +1091,7 @@ static const int kTfLiteMaxSharableOpInputs = 3;
/// It is a struct containing "methods" (C function pointers) that will be
/// invoked by the TF Lite runtime to evaluate instances of the operation.
///
-/// See also `TfLiteRegistrationExternal` which is a more ABI-stable equivalent.
+/// See also `TfLiteOperator` which is a more ABI-stable equivalent.
typedef struct TfLiteRegistration {
/// Initializes the op from serialized data.
/// Called only *once* for the lifetime of the op, so any one-time allocations
@@ -1149,12 +1162,12 @@ typedef struct TfLiteRegistration {
/// properly.
int version;

-  /// The external version of `TfLiteRegistration`. Since we can't use internal
-  /// types (such as `TfLiteContext`) for C API to maintain ABI stability.
-  /// C API user will provide `TfLiteRegistrationExternal` to implement custom
-  /// ops. We keep it inside of `TfLiteRegistration` and use it to route
-  /// callbacks properly.
-  TfLiteRegistrationExternal* registration_external;
+  /// The external (i.e. ABI-stable) version of `TfLiteRegistration`.
+  /// Since we can't use internal types (such as `TfLiteContext`) for C API to
+  /// maintain ABI stability. C API user will provide `TfLiteOperator` to
+  /// implement custom ops. We keep it inside of `TfLiteRegistration` and use
+  /// it to route callbacks properly.
+  TfLiteOperator* registration_external;

/// Retrieves asynchronous kernel.
///
@@ -1194,7 +1207,7 @@ typedef struct TfLiteRegistration_V3 {
int32_t builtin_code;
const char* custom_name;
int version;
-  TfLiteRegistrationExternal* registration_external;
+  TfLiteOperator* registration_external;
struct TfLiteAsyncKernel* (*async_kernel)(TfLiteContext* context,
TfLiteNode* node);
} TfLiteRegistration_V3;
@@ -1220,7 +1233,7 @@ typedef struct TfLiteRegistration_V2 {
int32_t builtin_code;
const char* custom_name;
int version;
-  TfLiteRegistrationExternal* registration_external;
+  TfLiteOperator* registration_external;
} TfLiteRegistration_V2;

/// \private
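The new TfLiteBFloat16 struct stores the value as a raw uint16_t: bfloat16 is exactly the upper half of an IEEE-754 float32 bit pattern, which also explains why memory_helpers.cc below sizes it as sizeof(int16_t). A self-contained conversion sketch using simple truncation (production converters usually round to nearest-even instead); BF16, FloatToBF16, and BF16ToFloat are illustrative names, not part of this commit:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Mirrors the layout of the new TfLiteBFloat16: 1 sign bit, 8 exponent
// bits, 7 mantissa bits, stored in a uint16_t.
struct BF16 { uint16_t data; };

// Truncating conversion: keep the high 16 bits of the float32 pattern.
BF16 FloatToBF16(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return BF16{static_cast<uint16_t>(bits >> 16)};
}

// Widening conversion is exact: zero-fill the low 16 bits.
float BF16ToFloat(BF16 b) {
  uint32_t bits = static_cast<uint32_t>(b.data) << 16;
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}

int main() {
  float x = 3.14159f;
  std::printf("%f -> %f\n", x, BF16ToFloat(FloatToBF16(x)));  // ~3.140625
  return 0;
}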
2 changes: 1 addition & 1 deletion tensorflow/lite/micro/kernels/if.cc
@@ -67,7 +67,7 @@ TfLiteStatus IfPrepare(TfLiteContext* context, TfLiteNode* node) {
// passed to the branch subgraphs. Therefore, the number of subgraph inputs
// will be the number of node inputs - 1.
size_t num_inputs = node->inputs->size - 1;
-  size_t num_outputs = node->outputs->size;
+  size_t num_outputs = NumOutputs(node);

MicroGraph& graph_info = micro_context->graph();

1 change: 1 addition & 0 deletions tensorflow/lite/micro/kernels/kernel_util.cc
@@ -252,6 +252,7 @@ TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context,
TfLiteNode* node,
MicroGraph* graph_info,
int subgraph_idx) {
+  if (graph_info->NumSubgraphOutputs(subgraph_idx) == 0) return kTfLiteOk;
TF_LITE_ENSURE(context, static_cast<size_t>(node->outputs->size) ==
graph_info->NumSubgraphOutputs(subgraph_idx));
for (int i = 0; i < node->outputs->size; i++) {
3 changes: 3 additions & 0 deletions tensorflow/lite/micro/memory_helpers.cc
@@ -50,6 +50,9 @@ TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) {
case kTfLiteFloat16:
*size = sizeof(int16_t);
break;
+    case kTfLiteBFloat16:
+      *size = sizeof(int16_t);
+      break;
case kTfLiteFloat32:
*size = sizeof(float);
break;
4 changes: 3 additions & 1 deletion tensorflow/lite/micro/micro_interpreter_graph.cc
@@ -259,7 +259,9 @@ TfLiteEvalTensor* MicroInterpreterGraph::GetSubgraphInput(int subgraph_idx,
}

size_t MicroInterpreterGraph::NumSubgraphOutputs(int subgraph_idx) {
-  return model_->subgraphs()->Get(subgraph_idx)->outputs()->size();
+  return model_->subgraphs()->Get(subgraph_idx)->outputs() == nullptr
+             ? 0
+             : model_->subgraphs()->Get(subgraph_idx)->outputs()->size();
}

TfLiteEvalTensor* MicroInterpreterGraph::GetSubgraphOutput(int subgraph_idx,
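The nullptr check matters because generated FlatBuffers accessors return nullptr for an absent vector field rather than a pointer to an empty vector, so the old outputs()->size() would dereference null for a subgraph with no outputs — the same situation the new early return in kernel_util.cc handles. The idiom as one generic helper, for illustration only; SafeVectorSize is not a TFLM API:

#include <cstddef>

#include "flatbuffers/flatbuffers.h"

// Null-safe element count for an optional FlatBuffers vector field:
// absent fields come back as nullptr, never as an empty vector.
template <typename T>
size_t SafeVectorSize(const flatbuffers::Vector<T>* v) {
  return v == nullptr ? 0 : v->size();
}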
13 changes: 8 additions & 5 deletions tensorflow/lite/schema/schema_generated.h
@@ -686,11 +686,12 @@ enum TensorType : int8_t {
TensorType_UINT32 = 15,
TensorType_UINT16 = 16,
TensorType_INT4 = 17,
+  TensorType_BFLOAT16 = 18,
TensorType_MIN = TensorType_FLOAT32,
-  TensorType_MAX = TensorType_INT4
+  TensorType_MAX = TensorType_BFLOAT16
};

-inline const TensorType (&EnumValuesTensorType())[18] {
+inline const TensorType (&EnumValuesTensorType())[19] {
static const TensorType values[] = {
TensorType_FLOAT32,
TensorType_FLOAT16,
@@ -709,13 +710,14 @@ inline const TensorType (&EnumValuesTensorType())[18] {
TensorType_VARIANT,
TensorType_UINT32,
TensorType_UINT16,
-    TensorType_INT4
+    TensorType_INT4,
+    TensorType_BFLOAT16
};
return values;
}

inline const char * const *EnumNamesTensorType() {
-  static const char * const names[19] = {
+  static const char * const names[20] = {
"FLOAT32",
"FLOAT16",
"INT32",
@@ -734,13 +736,14 @@ inline const char * const *EnumNamesTensorType() {
"UINT32",
"UINT16",
"INT4",
+    "BFLOAT16",
nullptr
};
return names;
}

inline const char *EnumNameTensorType(TensorType e) {
-  if (::flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_INT4)) return "";
+  if (::flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_BFLOAT16)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesTensorType()[index];
}
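Taken together, the schema enum, the kTfLiteBFloat16 tag, and the two name tables keep the new type's plumbing consistent end to end. A small sanity-check sketch, assuming the updated headers above are on the include path:

#include <cassert>
#include <cstring>

#include "tensorflow/lite/core/c/common.h"            // TfLiteTypeGetName
#include "tensorflow/lite/schema/schema_generated.h"  // EnumNameTensorType

int main() {
  // Both lookup paths agree on the new type's name.
  assert(std::strcmp(tflite::EnumNameTensorType(tflite::TensorType_BFLOAT16),
                     "BFLOAT16") == 0);
  assert(std::strcmp(TfLiteTypeGetName(kTfLiteBFloat16), "BFLOAT16") == 0);
  return 0;
}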
