Add int16 support to PACK/UNPACK #2737

Open
wants to merge 3 commits into base: main
4 changes: 4 additions & 0 deletions tensorflow/lite/micro/kernels/pack.cc
@@ -85,6 +85,10 @@ TfLiteStatus PackEval(TfLiteContext* context, TfLiteNode* node) {
return PackImpl<int8_t>(context, node, output, data->values_count,
data->axis);
}
case kTfLiteInt16: {
return PackImpl<int16_t>(context, node, output, data->values_count,
data->axis);
}
case kTfLiteInt32: {
return PackImpl<int32_t>(context, node, output, data->values_count,
data->axis);
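
The new kTfLiteInt16 case mirrors the existing int8/int32 branches: PackEval dispatches on the element type and every branch forwards to the same templated PackImpl, so int16 support only needs one more instantiation. The sketch below illustrates why that works; PackAlongAxis is a hypothetical stand-in written for this note, not the kernel's actual PackImpl, and it assumes the simple two-level (outer x inner) view of the pack copy pattern.

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in: pack several tensors of (outer * inner) elements
// along a new axis by copying inner-sized slices in round-robin order. The
// copy is type-agnostic, which is why the kernel can reuse one template for
// int8_t, int16_t, and int32_t alike.
template <typename T>
std::vector<T> PackAlongAxis(const std::vector<const T*>& inputs, int outer,
                             int inner) {
  std::vector<T> output(outer * inputs.size() * inner);
  T* out = output.data();
  for (int o = 0; o < outer; ++o) {
    for (const T* input : inputs) {
      std::memcpy(out, input + o * inner, inner * sizeof(T));
      out += inner;
    }
  }
  return output;
}

int main() {
  // Mirrors the new PackQuantizedInt16MultiDimensions test: two 2x3 inputs
  // packed on axis 1 give {1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12}.
  const int16_t a[] = {1, 2, 3, 4, 5, 6};
  const int16_t b[] = {7, 8, 9, 10, 11, 12};
  std::vector<int16_t> packed =
      PackAlongAxis<int16_t>({a, b}, /*outer=*/2, /*inner=*/3);
  return packed[3] == 7 ? 0 : 1;
}
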
68 changes: 25 additions & 43 deletions tensorflow/lite/micro/kernels/pack_test.cc
@@ -110,44 +110,11 @@ void TestPackThreeInputsFloat(int* input1_dims_data, const float* input1_data,
1e-5f, output_data);
}

void TestPackTwoInputsQuantized(
int* input1_dims_data, const int8_t* input1_data, int* input2_dims_data,
const int8_t* input2_data, int axis, int* output_dims_data,
const int8_t* expected_output_data, int8_t* output_data) {
TfLiteIntArray* input1_dims = IntArrayFromInts(input1_dims_data);
TfLiteIntArray* input2_dims = IntArrayFromInts(input2_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
const int output_dims_count = ElementCount(*output_dims);

constexpr int input_size = 2;
constexpr int output_size = 1;
constexpr int tensors_size = input_size + output_size;
TfLiteTensor tensors[tensors_size] = {
// CreateQuantizedTensor needs scale/zero_point values as input, but these
// values don't matter as to the functionality of PACK, so just set as 1.0
// and 128.
CreateQuantizedTensor(input1_data, input1_dims, 1.0, 128),
CreateQuantizedTensor(input2_data, input2_dims, 1.0, 128),
CreateQuantizedTensor(output_data, output_dims, 1.0, 128)};

TfLitePackParams builtin_data = {
.values_count = 2,
.axis = axis,
};
int inputs_array_data[] = {2, 0, 1};
TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
int outputs_array_data[] = {1, 2};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);

ValidatePackGoldens(tensors, tensors_size, builtin_data, inputs_array,
outputs_array, expected_output_data, output_dims_count,
1e-5f, output_data);
}

void TestPackTwoInputsQuantized32(
int* input1_dims_data, const int32_t* input1_data, int* input2_dims_data,
const int32_t* input2_data, int axis, int* output_dims_data,
const int32_t* expected_output_data, int32_t* output_data) {
template <typename T>
void TestPackTwoInputsQuantized(int* input1_dims_data, const T* input1_data,
int* input2_dims_data, const T* input2_data,
int axis, int* output_dims_data,
const T* expected_output_data, T* output_data) {
TfLiteIntArray* input1_dims = IntArrayFromInts(input1_dims_data);
TfLiteIntArray* input2_dims = IntArrayFromInts(input2_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
@@ -227,7 +194,7 @@ TF_LITE_MICRO_TEST(PackFloatThreeInputsNegativeAxis) {
input3_values, axis, output_shape, golden, output_data);
}

TF_LITE_MICRO_TEST(PackFloatMultilDimensions) {
TF_LITE_MICRO_TEST(PackFloatMultiDimensions) {
int input_shape[] = {2, 2, 3};
int output_shape[] = {3, 2, 2, 3};
const float input1_values[] = {1, 2, 3, 4, 5, 6};
@@ -242,7 +209,7 @@ TF_LITE_MICRO_TEST(PackFloatMultilDimensions) {
output_shape, golden, output_data);
}

TF_LITE_MICRO_TEST(PackQuantizedMultilDimensions) {
TF_LITE_MICRO_TEST(PackQuantizedInt8MultiDimensions) {
int input_shape[] = {2, 2, 3};
int output_shape[] = {3, 2, 2, 3};
const int8_t input1_values[] = {1, 2, 3, 4, 5, 6};
@@ -252,12 +219,27 @@ TF_LITE_MICRO_TEST(PackQuantizedMultilDimensions) {
constexpr int output_dims_count = 12;
int8_t output_data[output_dims_count];

tflite::testing::TestPackTwoInputsQuantized(
tflite::testing::TestPackTwoInputsQuantized<int8_t>(
input_shape, input1_values, input_shape, input2_values, axis,
output_shape, golden, output_data);
}

TF_LITE_MICRO_TEST(PackQuantizedInt16MultiDimensions) {
int input_shape[] = {2, 2, 3};
int output_shape[] = {3, 2, 2, 3};
const int16_t input1_values[] = {1, 2, 3, 4, 5, 6};
const int16_t input2_values[] = {7, 8, 9, 10, 11, 12};
const int16_t golden[] = {1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12};
const int axis = 1;
constexpr int output_dims_count = 12;
int16_t output_data[output_dims_count];

tflite::testing::TestPackTwoInputsQuantized<int16_t>(
input_shape, input1_values, input_shape, input2_values, axis,
output_shape, golden, output_data);
}

TF_LITE_MICRO_TEST(PackQuantized32MultilDimensions) {
TF_LITE_MICRO_TEST(PackQuantizedInt32MultiDimensions) {
int input_shape[] = {2, 2, 3};
int output_shape[] = {3, 2, 2, 3};
const int32_t input1_values[] = {1, 2, 3, 4, 5, 6};
@@ -267,7 +249,7 @@ TF_LITE_MICRO_TEST(PackQuantized32MultilDimensions) {
constexpr int output_dims_count = 12;
int32_t output_data[output_dims_count];

tflite::testing::TestPackTwoInputsQuantized32(
tflite::testing::TestPackTwoInputsQuantized<int32_t>(
input_shape, input1_values, input_shape, input2_values, axis,
output_shape, golden, output_data);
}
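
The test refactor follows the same idea: the former int8_t-only TestPackTwoInputsQuantized and the separate TestPackTwoInputsQuantized32 collapse into a single function template, and the new int16 test is just another explicit instantiation at the call site. A tiny illustrative sketch of that consolidation pattern follows, assuming nothing about the TFLM test harness (AllEqual is a hypothetical helper, not part of it).

#include <cstdint>

// Hypothetical helper: one template replaces a family of per-type overloads,
// so adding int16_t coverage needs a new call site rather than a new helper.
template <typename T>
bool AllEqual(const T* actual, const T* expected, int count) {
  for (int i = 0; i < count; ++i) {
    if (actual[i] != expected[i]) return false;
  }
  return true;
}

int main() {
  const int16_t golden[] = {1, 2, 3, 7, 8, 9};
  const int16_t output[] = {1, 2, 3, 7, 8, 9};
  // Explicit template argument at the call site, as in
  // TestPackTwoInputsQuantized<int16_t>(...).
  return AllEqual<int16_t>(output, golden, 6) ? 0 : 1;
}
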
3 changes: 3 additions & 0 deletions tensorflow/lite/micro/kernels/unpack.cc
@@ -86,6 +86,9 @@ TfLiteStatus UnpackEval(TfLiteContext* context, TfLiteNode* node) {
case kTfLiteInt32: {
return UnpackImpl<int32_t>(context, node, input, data->num, data->axis);
}
case kTfLiteInt16: {
return UnpackImpl<int16_t>(context, node, input, data->num, data->axis);
}
case kTfLiteInt8: {
return UnpackImpl<int8_t>(context, node, input, data->num, data->axis);
}
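
unpack.cc gets the matching change: a kTfLiteInt16 case that forwards to the templated UnpackImpl, which splits the input into num outputs along the chosen axis. The sketch below shows that inverse slicing for the axis-0 case and reproduces the golden values of the new int16 unpack test; UnpackAxis0 is a hypothetical stand-in written for this note, not the real UnpackImpl.

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in: split an input of shape (num, inner) along axis 0
// into num outputs of inner elements each. As with packing, the per-slice
// copy does not depend on the element type.
template <typename T>
std::vector<std::vector<T>> UnpackAxis0(const T* input, int num, int inner) {
  std::vector<std::vector<T>> outputs(num, std::vector<T>(inner));
  for (int i = 0; i < num; ++i) {
    std::memcpy(outputs[i].data(), input + i * inner, inner * sizeof(T));
  }
  return outputs;
}

int main() {
  // Mirrors the new UnpackQuantizedInt16ThreeOutputs test: a 3x2 input
  // unpacked on axis 0 yields {1, 2}, {3, 4}, {5, 6}.
  const int16_t input[] = {1, 2, 3, 4, 5, 6};
  auto outputs = UnpackAxis0<int16_t>(input, /*num=*/3, /*inner=*/2);
  return (outputs[1][0] == 3 && outputs[2][1] == 6) ? 0 : 1;
}
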
38 changes: 30 additions & 8 deletions tensorflow/lite/micro/kernels/unpack_test.cc
@@ -132,12 +132,13 @@ void TestUnpackOneOutputFloat(int* input_dims_data, const float* input_data,
}
}

void TestUnpackThreeOutputsQuantized32(
int* input_dims_data, const int32_t* input_data, int axis,
int* output1_dims_data, const int32_t* expected_output1_data,
int* output2_dims_data, const int32_t* expected_output2_data,
int* output3_dims_data, const int32_t* expected_output3_data,
int32_t* output1_data, int32_t* output2_data, int32_t* output3_data) {
template <typename T>
void TestUnpackThreeOutputsQuantized(
int* input_dims_data, const T* input_data, int axis, int* output1_dims_data,
const T* expected_output1_data, int* output2_dims_data,
const T* expected_output2_data, int* output3_dims_data,
const T* expected_output3_data, T* output1_data, T* output2_data,
T* output3_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output1_dims = IntArrayFromInts(output1_dims_data);
TfLiteIntArray* output2_dims = IntArrayFromInts(output2_dims_data);
@@ -257,7 +258,28 @@ TF_LITE_MICRO_TEST(UnpackFloatOneOutput) {
output_shape, golden, output_data);
}

TF_LITE_MICRO_TEST(UnpackQuantized32ThreeOutputs) {
TF_LITE_MICRO_TEST(UnpackQuantizedInt16ThreeOutputs) {
int input_shape[] = {2, 3, 2};
const int16_t input_values[] = {1, 2, 3, 4, 5, 6};
int output1_shape[] = {1, 2};
const int16_t output1_golden[] = {1, 2};
int output2_shape[] = {1, 2};
const int16_t output2_golden[] = {3, 4};
int output3_shape[] = {1, 2};
const int16_t output3_golden[] = {5, 6};
constexpr int output1_dims_count = 2;
constexpr int output2_dims_count = 2;
constexpr int output3_dims_count = 2;
int16_t output1_data[output1_dims_count];
int16_t output2_data[output2_dims_count];
int16_t output3_data[output3_dims_count];
tflite::testing::TestUnpackThreeOutputsQuantized<int16_t>(
input_shape, input_values, 0, output1_shape, output1_golden,
output2_shape, output2_golden, output3_shape, output3_golden,
output1_data, output2_data, output3_data);
}

TF_LITE_MICRO_TEST(UnpackQuantizedInt32ThreeOutputs) {
int input_shape[] = {2, 3, 2};
const int32_t input_values[] = {1, 2, 3, 4, 5, 6};
int output1_shape[] = {1, 2};
@@ -272,7 +294,7 @@ TF_LITE_MICRO_TEST(UnpackQuantized32ThreeOutputs) {
int32_t output1_data[output1_dims_count];
int32_t output2_data[output2_dims_count];
int32_t output3_data[output3_dims_count];
tflite::testing::TestUnpackThreeOutputsQuantized32(
tflite::testing::TestUnpackThreeOutputsQuantized<int32_t>(
input_shape, input_values, 0, output1_shape, output1_golden,
output2_shape, output2_golden, output3_shape, output3_golden,
output1_data, output2_data, output3_data);