apps/microtvm/ethosu/src/demo_bare_metal.c (1 addition, 1 deletion)
@@ -40,7 +40,7 @@ int main(int argc, char** argv) {
 
   printf("Running inference\n");
   struct tvmgen_default_outputs outputs = {
-      .output = output,
+      .MobilenetV2_Predictions_Reshape_11 = output,
   };
   struct tvmgen_default_inputs inputs = {
       .tfl_quantize = input,

apps/microtvm/ethosu/src/demo_freertos.c (1 addition, 1 deletion)
@@ -106,7 +106,7 @@ static void prvInferenceTask(void* pvParameters) {
       .tfl_quantize = pucReceivedData,
   };
   struct tvmgen_default_outputs xOutputs = {
-      .output = output,
+      .MobilenetV2_Predictions_Reshape_11 = output,
   };
   struct ethosu_driver* xDriver = ethosu_reserve_driver();
   struct tvmgen_default_devices xDevices = {

apps/microtvm/zephyr/template_project/src/aot_demo/main.c (1 addition, 1 deletion)
@@ -178,7 +178,7 @@ void TVMInfer() {
       .input_1 = input_data,
   };
   struct tvmgen_default_outputs outputs = {
-      .output = output_data,
+      .Identity = output_data,
   };
 
   StackMemoryManager_Init(&app_workspace, g_aot_memory, WORKSPACE_SIZE);

apps/microtvm/zephyr_cmsisnn/src/main.c (1 addition, 1 deletion)
@@ -62,7 +62,7 @@ void main(void) {
   StackMemoryManager_Init(&app_workspace, g_crt_workspace, TVMGEN_DEFAULT_WORKSPACE_SIZE);
 
   struct tvmgen_default_inputs inputs = {.input = input_storage};
-  struct tvmgen_default_outputs outputs = {.output = output_storage};
+  struct tvmgen_default_outputs outputs = {.Identity = output_storage};
 
   if (tvmgen_default_run(&inputs, &outputs) != 0) {
     printk("Model run failed\n");

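In all four demo applications the fixed `.output` field of `struct tvmgen_default_outputs` is replaced by a field named after the model's own output tensor, sanitized into a valid C identifier. A sketch of how to look up the expected field name for a given .tflite model (the model path is illustrative, and it assumes `sanitize_name` maps characters that are not legal in a C identifier to underscores):

```python
import tensorflow as tf

from tvm.relay.backend.name_transforms import sanitize_name

# Illustrative path; any .tflite file works.
interpreter = tf.lite.Interpreter(model_path="mobilenet_v2.tflite")
for detail in interpreter.get_output_details():
    # e.g. "MobilenetV2/Predictions/Reshape_11" -> "MobilenetV2_Predictions_Reshape_11"
    print(detail["name"], "->", sanitize_name(detail["name"]))
```
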
python/tvm/micro/model_library_format.py (7 additions, 3 deletions)
@@ -270,9 +270,13 @@ def _get_inputs_and_outputs_from_module(mod):
     main_func = _get_main_relay_func(mod)
     inputs = [argument.name_hint for argument in main_func.params]
 
-    outputs = ["output"]
-    if isinstance(main_func.ret_type, TupleType):
-        outputs = _convert_tuple_to_outputs(main_func.ret_type)
+    if "output_tensor_names" in main_func.attrs:
+        outputs = main_func.attrs["output_tensor_names"]
+    else:
+        if isinstance(main_func.ret_type, TupleType):
+            outputs = _convert_tuple_to_outputs(main_func.ret_type)
+        else:
+            outputs = ["output"]
 
     return inputs, outputs
 

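Spelled out as a standalone helper, the new branch resolves output names in this order: a frontend-supplied `output_tensor_names` attribute wins, a tuple return type is expanded to one generated name per field, and a single tensor keeps the old default. A sketch only, not the actual TVM helper (`TupleType` and `_convert_tuple_to_outputs` are the names already used in this module):

```python
from tvm.ir import TupleType
from tvm.micro.model_library_format import _convert_tuple_to_outputs


def resolve_output_names(main_func):
    """Sketch of the branch above, not the real helper."""
    if main_func.attrs and "output_tensor_names" in main_func.attrs:
        # Names attached by a frontend, e.g. the TFLite importer below.
        return list(main_func.attrs["output_tensor_names"])
    if isinstance(main_func.ret_type, TupleType):
        # One generated name per tuple field.
        return _convert_tuple_to_outputs(main_func.ret_type)
    return ["output"]  # previous single-output default
```
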
python/tvm/relay/frontend/tflite.py (11 additions, 1 deletion)
@@ -30,6 +30,7 @@
 from .. import function as _function
 from .. import op as _op
 from .. import qnn as _qnn
+from ..backend.name_transforms import sanitize_name
 from .common import ExprTable
 from .common import infer_shape as _infer_shape
 from .common import to_int_list
@@ -3769,6 +3770,15 @@ def from_tflite(model, shape_dict=None, dtype_dict=None, op_converter=OperatorCo
     params = {k: _nd.array(np.array(v)) for k, v in exp_tab.params.items()}
     outputs = [exp_tab.get_expr(get_tensor_name(subgraph, i)) for i in model_outputs]
     outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
-    func = _function.Function(analysis.free_vars(outputs), outputs)
+    attrs = tvm.ir.make_node(
+        "DictAttrs",
+        **{
+            "output_tensor_names": [
+                sanitize_name(get_tensor_name(subgraph, model_output))
+                for model_output in model_outputs
+            ]
+        },
+    )
+    func = _function.Function(analysis.free_vars(outputs), outputs, attrs=attrs)
     mod = IRModule.from_expr(func)
     return mod, params

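After import, the sanitized names travel with the module as an `output_tensor_names` attribute on `main`, which is what `model_library_format.py` above and the AOT codegen below read back. A minimal sketch of inspecting the attribute (the model objects and the printed value are illustrative):

```python
from tvm import relay

# tflite_model, shape_dict and dtype_dict prepared as usual for from_tflite.
mod, params = relay.frontend.from_tflite(tflite_model, shape_dict, dtype_dict)
print(mod["main"].attrs["output_tensor_names"])  # e.g. ["Identity"]
```
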
src/relay/backend/aot_executor_codegen.cc (21 additions, 1 deletion)
@@ -957,7 +957,27 @@ class AOTExecutorCodegen : public MixedModeVisitor {
         Array<tir::Var>(tir_main_func->params.begin(),
                         tir_main_func->params.begin() + tir_main_func->params.size() -
                             return_sid_.size() - pool_vars.size() - devices.size());
-    ret.metadata = ExecutorCodegenMetadata(inputs, pool_vars, devices, return_sid_.size(),
+
+    std::vector<String> output_var_names;
+    if (auto opt = func->GetAttr<Array<String>>("output_tensor_names")) {
+      Array<String> output_tensor_names = opt.value();
+      for (size_t i = 0; i < output_tensor_names.size(); ++i) {
+        output_var_names.push_back(output_tensor_names[i]);
+      }
+    }
+
+    // If output names have not been specified then generate default output names
+    if (output_var_names.size() == 0) {
+      if (return_sid_.size() == 1) {
+        output_var_names.push_back(String("output"));
+      } else {
+        for (size_t i = 0; i < return_sid_.size(); ++i) {
+          output_var_names.push_back(String("output" + std::to_string(i)));
+        }
+      }
+    }
+
+    ret.metadata = ExecutorCodegenMetadata(inputs, pool_vars, devices, output_var_names,
                                            runtime::kTvmExecutorAot, mod_name, interface_api,
                                            use_unpacked_api_, pool_var_info);
     return ret;

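When no `output_tensor_names` attribute is present, the fallback above reproduces the old naming scheme. In Python terms it behaves roughly as follows (a sketch of the behaviour, not code from the tree):

```python
def default_output_names(num_outputs):
    """Mirror of the C++ fallback: "output" for a single result,
    "output0", "output1", ... when there are several."""
    if num_outputs == 1:
        return ["output"]
    return ["output" + str(i) for i in range(num_outputs)]


assert default_output_names(1) == ["output"]
assert default_output_names(3) == ["output0", "output1", "output2"]
```
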
src/relay/backend/utils.cc (2 additions, 2 deletions)
@@ -179,14 +179,14 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
     });
 
 ExecutorCodegenMetadata::ExecutorCodegenMetadata(
-    Array<tir::Var> inputs, Array<tir::Var> pools, Array<String> devices, Integer num_outputs,
+    Array<tir::Var> inputs, Array<tir::Var> pools, Array<String> devices, Array<String> outputs,
     String executor, String mod_name, String interface_api, bool unpacked_api,
     Map<tir::Var, tir::usmp::AllocatedPoolInfo> pool_inputs) {
   auto n = make_object<ExecutorCodegenMetadataNode>();
   n->inputs = inputs;
   n->pools = pools;
   n->devices = devices;
-  n->num_outputs = num_outputs;
+  n->outputs = outputs;
   n->executor = executor;
   n->interface_api = interface_api;
   n->unpacked_api = unpacked_api;

src/relay/backend/utils.h (4 additions, 4 deletions)
@@ -61,10 +61,10 @@ class ExecutorCodegenMetadataNode : public Object {
  public:
   /*! \brief input information for the main function */
   Array<tir::Var> inputs;
+  /*! \brief output information for the main function */
+  Array<String> outputs;
   /*! \brief pool information for the main function */
   Array<tir::Var> pools;
-  /*! \brief number of outputs of the main function */
-  Integer num_outputs = 1;
   /*! \brief device contexts information for the main function */
   Array<String> devices;
   /*! \brief the executor to be used to run the model */
@@ -81,7 +81,7 @@ class ExecutorCodegenMetadataNode : public Object {
   void VisitAttrs(tvm::AttrVisitor* v) {
     v->Visit("inputs", &inputs);
     v->Visit("pools", &pools);
-    v->Visit("num_outputs", &num_outputs);
+    v->Visit("outputs", &outputs);
     v->Visit("devices", &devices);
     v->Visit("executor", &executor);
     v->Visit("unpacked_api", &unpacked_api);
@@ -98,7 +98,7 @@ class ExecutorCodegenMetadataNode : public Object {
 class ExecutorCodegenMetadata : public ObjectRef {
  public:
   TVM_DLL ExecutorCodegenMetadata(Array<tir::Var> inputs, Array<tir::Var> pools,
-                                  Array<String> devices, Integer num_outputs, String executor,
+                                  Array<String> devices, Array<String> outputs, String executor,
                                   String mod_name, String interface_api = "packed",
                                   bool unpacked_api = false,
                                   Map<tir::Var, tir::usmp::AllocatedPoolInfo> pool_inputs =

src/target/source/source_module.cc (9 additions, 11 deletions)
@@ -273,7 +273,7 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
       }
       call_args_ss << " " << input_var->name_hint << ",";
     }
-    for (int i = 0; i < metadata_->num_outputs->value; ++i) {
+    for (unsigned int i = 0; i < metadata_->outputs.size(); ++i) {
       call_args_ss << "void* output" << i << ",";
     }
     for (const tir::Var& pool_var : metadata_->pools) {
@@ -300,7 +300,7 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
     for (unsigned int i = 0; i < metadata_->inputs.size(); ++i) {
       call_args_ss << "((DLTensor*)(((TVMValue*)args)[" << i << "].v_handle))[0].data,";
     }
-    for (int i = 0; i < metadata_->num_outputs->value; ++i) {
+    for (unsigned int i = 0; i < metadata_->outputs.size(); ++i) {
       int j = metadata_->inputs.size() + i;
       call_args_ss << "((DLTensor*)(((TVMValue*)args)[" << j << "].v_handle))[0].data,";
@@ -328,7 +328,7 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
       entrypoint_arg_count++;
       run_func_arg_count++;
     }
-    for (int i = 0; i < metadata_->num_outputs->value; i++) {
+    for (unsigned int i = 0; i < metadata_->outputs.size(); i++) {
      run_func_to_entry_point_args[run_func_arg_count] = Integer(entrypoint_arg_count);
       entrypoint_arg_count++;
       run_func_arg_count++;
@@ -356,7 +356,7 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
 
     // We are creating a copy of the set of pointers
     size_t number_of_io_tensors =
-        metadata_->inputs.size() + metadata_->num_outputs->value + metadata_->pools.size();
+        metadata_->inputs.size() + metadata_->outputs.size() + metadata_->pools.size();
     code_ << "TVMValue tensors[" << number_of_io_tensors << "];\n";
 
     std::unordered_map<int, ObjectRef> run_func_to_entry_point_args =
@@ -395,7 +395,7 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
       }
       call_args_ss << " " << relay::backend::SanitizeName(input_var->name_hint) << ",";
     }
-    for (int i = 0; i < metadata_->num_outputs->value; ++i) {
+    for (unsigned int i = 0; i < metadata_->outputs.size(); ++i) {
       call_args_ss << "void* output" << i << ",";
     }
     for (const tir::Var& pool_var : metadata_->pools) {
@@ -449,13 +449,11 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode {
     for (const auto& input : metadata_->inputs) {
       call_args_ss << "inputs->" << relay::backend::SanitizeName(input->name_hint) << ",";
     }
-    if (metadata_->num_outputs->value == 1) {
-      call_args_ss << "outputs->output,";
-    } else {
-      for (int i = 0; i < metadata_->num_outputs->value; ++i) {
-        call_args_ss << "outputs->output" << i << ",";
-      }
+    for (const auto& output : metadata_->outputs) {
+      call_args_ss << "outputs->" << relay::backend::SanitizeName(output);
+      call_args_ss << ",";
     }
 
     for (const tir::Var& pool_var : metadata_->pools) {
       String pool_name = metadata_->pool_inputs.value()[pool_var]->pool_info->pool_name;
       if (IsInternalWorkspaceBuffer(pool_var)) {

tests/micro/zephyr/test_utils.py (1 addition, 1 deletion)
@@ -210,7 +210,7 @@ def generate_project(
             model_files_path, arcname=os.path.relpath(model_files_path, tar_temp_dir)
         )
         header_path = generate_c_interface_header(
-            lowered.libmod_name, ["input_1"], ["output"], [], [], 0, model_files_path
+            lowered.libmod_name, ["input_1"], ["Identity"], [], [], 0, model_files_path
         )
         tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))
 

tests/python/contrib/test_ethosu/infra.py (4 additions, 3 deletions)
@@ -306,9 +306,10 @@ def generate_ref_data_tflite(model):
         interpreter.set_tensor(index, value)
     interpreter.invoke()
 
-    expected_output_data = [
-        interpreter.get_tensor(output_detail["index"]) for output_detail in output_details
-    ]
+    expected_output_data = {
+        output_detail["name"]: interpreter.get_tensor(output_detail["index"])
+        for output_detail in output_details
+    }
 
     return input_data, expected_output_data
 

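With this change the reference data is keyed by output tensor name instead of by position, so callers can pair each reference value with the matching named output. A usage sketch (`model` is any TFLite model object the helper already accepts; the loop body is illustrative):

```python
input_data, expected_output_data = generate_ref_data_tflite(model)

# expected_output_data is now {tensor_name: reference array} rather than a list.
for name, reference in expected_output_data.items():
    print(name, reference.shape, reference.dtype)
```
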
tests/python/contrib/test_ethosu/test_codegen.py (3 additions, 3 deletions)
@@ -754,7 +754,7 @@ def rounding_right_shift(lhs, rhs):
         "ifm": lhs,
         "ifm2": rhs,
     }
-    output_data = generate_output_data(input_data)
+    output_data = {"output": generate_output_data(input_data)[0]}
     ethosu_mod = _create_ethosu_partition(cpu_mod)
 
     _compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@@ -781,7 +781,7 @@ def generate_output_data(input_data):
 
     cpu_mod = create_model()
     input_data = {"ifm": np.random.randint(-120, high=120, size=ifm_shape, dtype="int8")}
-    output_data = generate_output_data(input_data)
+    output_data = {"output": generate_output_data(input_data)[0]}
     ethosu_mod = _create_ethosu_partition(cpu_mod)
 
     _compare_ethosu_with_reference(
@@ -910,7 +910,7 @@ def clz_comp(n):
 
     cpu_mod = create_model()
     input_data = {"ifm": np.random.randint(-500000, high=500000, size=ifm_shape, dtype="int32")}
-    output_data = generate_output_data(input_data)
+    output_data = {"output": generate_output_data(input_data)[0]}
     ethosu_mod = _create_ethosu_partition(cpu_mod)
 
     _compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)

tests/python/contrib/test_ethosu/test_lookup_table.py (1 addition, 1 deletion)
@@ -154,7 +154,7 @@ def test_random_lut(accel_type):
     compiled_models = infra.build_source(
         mod,
         {"ifm": in_data},
-        out_data,
+        {"output": out_data},
         accel_type,
     )
 