Commit
Merge branch 'ci/sync_gh_tflite-micro' into 'master'
Sync esp-tflite-micro from github - 638261

See merge request app-frameworks/esp-tflite-micro!134
vikramdattu committed Jan 15, 2024
2 parents bebe1a8 + 0a9143d commit 5be29f4
Showing 12 changed files with 72 additions and 395 deletions.
91 changes: 3 additions & 88 deletions tensorflow/lite/micro/kernels/batch_to_space_nd.cc
@@ -1,4 +1,4 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,10 +15,7 @@ limitations under the License.

#include "tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h"

#include <algorithm>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
@@ -41,68 +38,6 @@ constexpr int kOutputTensor = 0;
const int kInputOutputMinDimensionNum = 3;
const int kInputOutputMaxDimensionNum = 4;

TfLiteStatus ReshapeOutputTensor(TfLiteContext* context, const TfLiteNode* node,
const TfLiteTensor* input,
const TfLiteTensor* block_shape,
const TfLiteTensor* crops,
TfLiteTensor* output) {
TF_LITE_ENSURE(context, IsConstantOrPersistentTensor(block_shape));
TF_LITE_ENSURE(context, IsConstantOrPersistentTensor(crops));
const int32_t* block_shape_data = GetTensorData<int32_t>(block_shape);
const int32_t* crops_data = GetTensorData<int32_t>(crops);

TfLiteIntArray* input_dims = input->dims;
int spatial_dims_num = input_dims->size - 2;
// Block_shape should be a 1D tensor with dimension [spatial_dims_num].
TF_LITE_ENSURE_EQ(context, NumDimensions(block_shape), 1);
TF_LITE_ENSURE_EQ(context, block_shape->dims->data[0], spatial_dims_num);
// Crops should be a 2D tensor with dimension [spatial_dims_num, 2].
TF_LITE_ENSURE_EQ(context, NumDimensions(crops), 2);
TF_LITE_ENSURE_EQ(context, crops->dims->data[0], spatial_dims_num);
TF_LITE_ENSURE_EQ(context, crops->dims->data[1], 2);

for (int i = 0; i < spatial_dims_num * 2; ++i) {
TF_LITE_ENSURE(context, crops_data[i] >= 0);
}

// copy from input tensor as per TfLite code
TF_LITE_ENSURE_EQ(context, input_dims->size, output->dims->size);
RuntimeShape output_shape = GetTensorShape(input);
// keep a copy of the output tensor shape for later comparison
RuntimeShape old_output_shape = GetTensorShape(output);

int output_batch_size = input_dims->data[0];
for (int dim = 0; dim < spatial_dims_num; ++dim) {
// Number of batch must be multiple of (block_shape[dim]).
TF_LITE_ENSURE(context, block_shape_data[dim] != 0);
TF_LITE_ENSURE_EQ(context, output_batch_size % block_shape_data[dim], 0);
output_batch_size = output_batch_size / block_shape_data[dim];
output_shape.SetDim(dim + 1,
input_dims->data[dim + 1] * block_shape_data[dim] -
crops_data[dim * 2] - crops_data[dim * 2 + 1]);
}
output_shape.SetDim(0, output_batch_size);
output_shape.SetDim(input_dims->size - 1,
input_dims->data[input_dims->size - 1]);

// check if need to relocate output tensor dims
if (output_shape == old_output_shape) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context,
output_shape.FlatSize() <= old_output_shape.FlatSize());

// set the output tensor dims from output_shape
TfLiteEvalTensor* output_eval =
tflite::micro::GetEvalOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_STATUS(tflite::micro::CreateWritableTensorDimsWithCopy(
context, output, output_eval));
std::copy_n(output_shape.DimsData(), output_shape.DimensionsCount(),
output->dims->data);

return kTfLiteOk;
}

TfLiteStatus BatchToSpaceNDPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
@@ -111,40 +46,20 @@ TfLiteStatus BatchToSpaceNDPrepare(TfLiteContext* context, TfLiteNode* node) {

TfLiteTensor* input =
micro_context->AllocateTempInputTensor(node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
TfLiteTensor* block_shape =
micro_context->AllocateTempInputTensor(node, kBlockShapeTensor);
TF_LITE_ENSURE(context, block_shape != nullptr);
TfLiteTensor* crops =
micro_context->AllocateTempInputTensor(node, kCropsTensor);
TF_LITE_ENSURE(context, crops != nullptr);
TfLiteTensor* output =
micro_context->AllocateTempOutputTensor(node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
TF_LITE_ENSURE(context, input != nullptr && output != nullptr);

TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum);
TF_LITE_ENSURE(context, NumDimensions(output) >= kInputOutputMinDimensionNum);
TF_LITE_ENSURE(context, NumDimensions(input) <= kInputOutputMaxDimensionNum);
TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE(context,
input->type == kTfLiteFloat32 || input->type == kTfLiteInt8);

if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE(context, input->params.scale == output->params.scale);
TF_LITE_ENSURE(context,
input->params.zero_point == output->params.zero_point);
}

TfLiteStatus status =
ReshapeOutputTensor(context, node, input, block_shape, crops, output);

micro_context->DeallocateTempTfLiteTensor(input);
micro_context->DeallocateTempTfLiteTensor(block_shape);
micro_context->DeallocateTempTfLiteTensor(crops);
micro_context->DeallocateTempTfLiteTensor(output);

return status;
return kTfLiteOk;
}

TfLiteStatus BatchToSpaceNDEval(TfLiteContext* context, TfLiteNode* node) {
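
For reference, the ReshapeOutputTensor helper deleted above computed the BATCH_TO_SPACE_ND output shape from the input dims, block_shape and crops tensors before relocating the output dims. The snippet below is a minimal standalone sketch of that arithmetic only; the function name, the use of std::vector and the example values are illustrative and not part of the kernel.

    // Sketch of the output-shape arithmetic performed by the removed
    // ReshapeOutputTensor (illustrative, not the kernel code itself).
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    std::vector<int32_t> BatchToSpaceNdOutputShape(
        const std::vector<int32_t>& input_dims,   // e.g. NHWC {4, 2, 2, 1}
        const std::vector<int32_t>& block_shape,  // one entry per spatial dim
        const std::vector<int32_t>& crops) {      // {begin, end} per spatial dim
      const int spatial_dims_num = static_cast<int>(input_dims.size()) - 2;
      std::vector<int32_t> output_dims(input_dims.size());
      int32_t output_batch = input_dims[0];
      for (int dim = 0; dim < spatial_dims_num; ++dim) {
        // The batch count must be divisible by every block_shape entry.
        output_batch /= block_shape[dim];
        output_dims[dim + 1] = input_dims[dim + 1] * block_shape[dim] -
                               crops[dim * 2] - crops[dim * 2 + 1];
      }
      output_dims[0] = output_batch;
      output_dims.back() = input_dims.back();  // channel dim is unchanged
      return output_dims;
    }

    int main() {
      // {4, 2, 2, 1}, block_shape {2, 2}, zero crops -> {1, 4, 4, 1}.
      for (int32_t d :
           BatchToSpaceNdOutputShape({4, 2, 2, 1}, {2, 2}, {0, 0, 0, 0}))
        std::printf("%d ", d);
      std::printf("\n");
      return 0;
    }

With the helper removed, BatchToSpaceNDPrepare no longer resizes the output tensor; the output is expected to already carry these dims from the model.
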
14 changes: 2 additions & 12 deletions tensorflow/lite/micro/kernels/conv.h
@@ -70,20 +70,10 @@ ConvParams ConvParamsQuantized(const TfLiteConvParams& params,
TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node,
const TfLiteConvParams& params, int width,
int height, int filter_width,
int filter_height, int* out_width,
int* out_height, const TfLiteType data_type,
int filter_height, int out_width,
int out_height, const TfLiteType data_type,
OpDataConv* data);

// When this method is called, the output tensor shape is computed and
// relocated to persistent arena memory.
// The height and width parameters should be the computed results from
// CalculateOpDataConv.
TfLiteStatus ConvReshapeOutputTensor(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input,
const TfLiteTensor* filter,
TfLiteTensor* output, int height,
int width);

void* ConvInit(TfLiteContext* context, const char* buffer, size_t length);

TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node);
49 changes: 6 additions & 43 deletions tensorflow/lite/micro/kernels/conv_common.cc
@@ -79,8 +79,8 @@ void* ConvInit(TfLiteContext* context, const char* buffer, size_t length) {
TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node,
const TfLiteConvParams& params, int width,
int height, int filter_width,
int filter_height, int* out_width,
int* out_height, const TfLiteType data_type,
int filter_height, int out_width,
int out_height, const TfLiteType data_type,
OpDataConv* data) {
bool has_bias = node->inputs->size == 3;
// Check number of inputs/outputs
@@ -92,7 +92,7 @@ TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node,
data->padding = ComputePaddingHeightWidth(
params.stride_height, params.stride_width, params.dilation_height_factor,
params.dilation_width_factor, height, width, filter_height, filter_width,
padding, out_height, out_width);
padding, &out_height, &out_width);

MicroContext* micro_context = GetMicroContext(context);

@@ -135,28 +135,6 @@ TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node,
return kTfLiteOk;
}

TfLiteStatus ConvReshapeOutputTensor(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input,
const TfLiteTensor* filter,
TfLiteTensor* output, int height,
int width) {
const int filter_output_channels = filter->dims->data[0];
const int batches = input->dims->data[0];

// relocate output tensor dims so they can be updated
TfLiteEvalTensor* output_eval =
tflite::micro::GetEvalOutput(context, node, kConvOutputTensor);
TF_LITE_ENSURE_STATUS(tflite::micro::CreateWritableTensorDimsWithCopy(
context, output, output_eval));

output->dims->data[0] = batches;
output->dims->data[1] = height;
output->dims->data[2] = width;
output->dims->data[3] = filter_output_channels;

return kTfLiteOk;
}

TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) {
TFLITE_DCHECK(node->user_data != nullptr);
TFLITE_DCHECK(node->builtin_data != nullptr);
@@ -185,23 +163,12 @@ TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) {
(filter->type == kTfLiteInt4 || filter->type == kTfLiteInt8)),
"Hybrid models are not supported on TFLite Micro.");

// Check dimensionality of input, filter, output
TF_LITE_ENSURE_EQ(context, input->dims->size, 4);
TF_LITE_ENSURE_EQ(context, filter->dims->size, 4);
TF_LITE_ENSURE_EQ(context, output->dims->size, 4);

// Check input channels matching filter
const int input_channels = input->dims->data[3];
const int filter_input_channels = filter->dims->data[3];
TF_LITE_ENSURE(context, filter_input_channels > 0);
TF_LITE_ENSURE_EQ(context, input_channels % filter_input_channels, 0);

const int input_width = input->dims->data[2];
const int input_height = input->dims->data[1];
const int filter_width = filter->dims->data[2];
const int filter_height = filter->dims->data[1];
int output_width = 0;
int output_height = 0;
const int output_width = output->dims->data[2];
const int output_height = output->dims->data[1];

// Dynamically allocate per-channel quantization parameters.
const int num_channels = filter->dims->data[kConvQuantizedDimension];
@@ -231,11 +198,7 @@ TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) {

TF_LITE_ENSURE_STATUS(CalculateOpDataConv(
context, node, params, input_width, input_height, filter_width,
filter_height, &output_width, &output_height, input->type, data));

// compute output tensor shape and relocate shape data
TF_LITE_ENSURE_STATUS(ConvReshapeOutputTensor(
context, node, input, filter, output, output_height, output_width));
filter_height, output_width, output_height, input->type, data));

if (filter->type == kTfLiteInt4) {
int filter_size =
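
With ConvReshapeOutputTensor gone, ConvPrepare reads output_width and output_height from the output tensor's existing dims and passes them to CalculateOpDataConv by value, so the model is expected to already declare a correctly sized output. The helper below is an illustrative stand-in for how that expected size relates to padding mode, filter size, stride and dilation, following the standard TFLite out-size formula; the function name and example values are assumptions, not part of this commit.

    // Illustrative sketch: the conv output size a model is expected to declare.
    #include <cstdio>

    enum class Padding { kSame, kValid };

    int ExpectedConvOutSize(Padding padding, int image_size, int filter_size,
                            int stride, int dilation) {
      const int effective_filter_size = (filter_size - 1) * dilation + 1;
      switch (padding) {
        case Padding::kSame:
          return (image_size + stride - 1) / stride;
        case Padding::kValid:
          return (image_size + stride - effective_filter_size) / stride;
      }
      return 0;
    }

    int main() {
      // 32x32 input, 3x3 filter, stride 1, no dilation: SAME -> 32, VALID -> 30.
      std::printf("SAME: %d, VALID: %d\n",
                  ExpectedConvOutSize(Padding::kSame, 32, 3, 1, 1),
                  ExpectedConvOutSize(Padding::kValid, 32, 3, 1, 1));
      return 0;
    }
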
10 changes: 1 addition & 9 deletions tensorflow/lite/micro/kernels/depthwise_conv.h
@@ -44,17 +44,9 @@ DepthwiseParams DepthwiseConvParamsQuantized(
TfLiteStatus CalculateOpDataDepthwiseConv(
TfLiteContext* context, TfLiteNode* node,
const TfLiteDepthwiseConvParams& params, int width, int height,
int filter_width, int filter_height, int* out_width, int* out_height,
int filter_width, int filter_height, int out_width, int out_height,
const TfLiteType data_type, OpDataConv* data);

// When this method is called, the output tensor shape is computed and
// relocated to persistent arena memory.
// The height and width parameters should be the computed results from
// CalculateOpDataConv.
TfLiteStatus DepthwiseConvReshapeOutputTensor(
TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* input,
const TfLiteTensor* filter, TfLiteTensor* output, int height, int width);

TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node);

// This is the most generic TFLMRegistration. The actual supported types
52 changes: 6 additions & 46 deletions tensorflow/lite/micro/kernels/depthwise_conv_common.cc
@@ -1,4 +1,4 @@
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -80,7 +80,7 @@ DepthwiseParams DepthwiseConvParamsQuantized(
TfLiteStatus CalculateOpDataDepthwiseConv(
TfLiteContext* context, TfLiteNode* node,
const TfLiteDepthwiseConvParams& params, int width, int height,
int filter_width, int filter_height, int* out_width, int* out_height,
int filter_width, int filter_height, int out_width, int out_height,
const TfLiteType data_type, OpDataConv* data) {
bool has_bias = node->inputs->size == 3;
// Check number of inputs/outputs
@@ -92,7 +92,7 @@ TfLiteStatus CalculateOpDataDepthwiseConv(
data->padding = ComputePaddingHeightWidth(
params.stride_height, params.stride_width, params.dilation_height_factor,
params.dilation_width_factor, height, width, filter_height, filter_width,
padding, out_height, out_width);
padding, &out_height, &out_width);

MicroContext* micro_context = GetMicroContext(context);

@@ -133,26 +133,6 @@ TfLiteStatus CalculateOpDataDepthwiseConv(
return kTfLiteOk;
}

TfLiteStatus DepthwiseConvReshapeOutputTensor(
TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* input,
const TfLiteTensor* filter, TfLiteTensor* output, int height, int width) {
const int filter_output_channels = filter->dims->data[3];
const int batches = input->dims->data[0];

// relocate output tensor dims so they can be updated
TfLiteEvalTensor* output_eval =
tflite::micro::GetEvalOutput(context, node, kConvOutputTensor);
TF_LITE_ENSURE_STATUS(tflite::micro::CreateWritableTensorDimsWithCopy(
context, output, output_eval));

output->dims->data[0] = batches;
output->dims->data[1] = height;
output->dims->data[2] = width;
output->dims->data[3] = filter_output_channels;

return kTfLiteOk;
}

TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) {
TFLITE_DCHECK(node->user_data != nullptr);
TFLITE_DCHECK(node->builtin_data != nullptr);
@@ -172,28 +152,12 @@ TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) {
micro_context->AllocateTempInputTensor(node, kDepthwiseConvWeightsTensor);
TF_LITE_ENSURE(context, filter != nullptr);

// Check dimensionality of input, filter, output
TF_LITE_ENSURE_EQ(context, input->dims->size, 4);
TF_LITE_ENSURE_EQ(context, filter->dims->size, 4);
TF_LITE_ENSURE_EQ(context, output->dims->size, 4);
TF_LITE_ENSURE(context, params.dilation_height_factor > 0);
TF_LITE_ENSURE(context, params.dilation_width_factor > 0);

// Filter in DepthwiseConv is expected to be [1, height, width, channels].
TF_LITE_ENSURE_EQ(context, filter->dims->data[0], 1);

// Check input channels matching filter
const int num_filter_channels = filter->dims->data[3];
const int num_input_channels = input->dims->data[3];
TF_LITE_ENSURE(context, num_input_channels != 0);
TF_LITE_ENSURE_EQ(context, num_filter_channels % num_input_channels, 0);

const int input_width = input->dims->data[2];
const int input_height = input->dims->data[1];
const int filter_width = filter->dims->data[2];
const int filter_height = filter->dims->data[1];
int output_width = 0;
int output_height = 0;
const int output_width = output->dims->data[2];
const int output_height = output->dims->data[1];

// Dynamically allocate per-channel quantization parameters.
const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
@@ -243,11 +207,7 @@ TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) {

TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv(
context, node, params, input_width, input_height, filter_width,
filter_height, &output_width, &output_height, input->type, data));

// compute output tensor shape and relocate shape data
TF_LITE_ENSURE_STATUS(DepthwiseConvReshapeOutputTensor(
context, node, input, filter, output, output_height, output_width));
filter_height, output_width, output_height, input->type, data));

micro_context->DeallocateTempTfLiteTensor(output);
micro_context->DeallocateTempTfLiteTensor(input);
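
A note on the dimensionality checks dropped from DepthwiseConvPrepare above: a DEPTHWISE_CONV_2D filter is laid out as [1, filter_height, filter_width, output_channels], where output_channels is input_channels times the depth multiplier, which is what the removed num_filter_channels % num_input_channels check enforced. Below is a small standalone sketch of that relationship; the helper name and values are hypothetical.

    // Illustrative sketch of the channel relationship the removed check enforced.
    #include <cassert>
    #include <cstdio>

    // Depth multiplier implied by a depthwise filter of shape
    // [1, h, w, filter_output_channels] applied to input_channels channels.
    int DepthMultiplier(int filter_output_channels, int input_channels) {
      assert(input_channels != 0);
      assert(filter_output_channels % input_channels == 0);  // the dropped Prepare check
      return filter_output_channels / input_channels;
    }

    int main() {
      // A filter with 6 output channels over a 3-channel input implies multiplier 2.
      std::printf("depth_multiplier = %d\n", DepthMultiplier(6, 3));
      return 0;
    }
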