Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 6 additions & 4 deletions onnxruntime/core/providers/coreml/builders/helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,17 +13,19 @@
namespace onnxruntime {
namespace coreml {

Status GetShape(const NodeArg& node_arg, std::vector<int64_t>& shape) {
const auto& input_name = node_arg.Name();
bool GetShape(const NodeArg& node_arg, std::vector<int64_t>& shape, const logging::Logger& logger) {
const auto* shape_proto = node_arg.Shape();
ORT_RETURN_IF_NOT(shape_proto, "shape_proto cannot be null for input: ", input_name);
if (!shape_proto) {
LOGS(logger, WARNING) << "NodeArg [" << node_arg.Name() << "] has no shape info";
return false;
}

// We already checked the shape has no dynamic dimension
for (const auto& dim : shape_proto->dim()) {
shape.push_back(dim.dim_value());
}

return Status::OK();
return true;
}

// TODO, move this to shared_library
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/coreml/builders/helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ class Logger;

namespace coreml {

// Reads the static shape of `node_arg` into `shape`.
// Returns false (logging a warning via `logger`) when no shape info is available.
bool GetShape(const NodeArg& node_arg, std::vector<int64_t>& shape, const logging::Logger& logger);

// TODO, move this to shared_library
bool GetType(const NodeArg& node_arg, int32_t& type, const logging::Logger& logger);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ Status ActivationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
*layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();

model_builder.AddLayer(layer.release());
model_builder.AddLayer(std::move(layer));
return Status::OK();
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/common.h"
#include "core/providers/shared/utils/utils.h"
#include "core/providers/coreml/builders/helper.h"
#include "core/providers/coreml/builders/model_builder.h"
#include "core/providers/coreml/builders/op_builder_factory.h"

#include "base_op_builder.h"
#include "builder_utils.h"

namespace onnxruntime {
namespace coreml {

// Translates ONNX BatchNormalization nodes (opset 7+) into CoreML batchnorm layers.
class BatchNormalizationOpBuilder : public BaseOpBuilder {
  // Add operator related
 public:
  // Marks the constant scale/B/mean/var inputs so the model builder does not
  // add them to the CoreML model as standalone tensors (they are embedded
  // directly in the batchnorm layer's weights instead).
  void AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const override;

 private:
  // Emits the CoreML batchnorm layer for `node` into `model_builder`.
  Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                               const logging::Logger& logger) const override ORT_MUST_USE_RESULT;

  // Operator support related
 private:
  // Returns true when this node can be handled: single output, 4d input shape,
  // spatial BN, and all statistics supplied as constant initializers.
  bool IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
                         const logging::Logger& logger) const override;

  // BatchNormalization opset 6- has unsupported attributes
  int GetMinSupportedOpSet(const Node& /* node */) const override { return 7; }
};

// Add operator related

// Tells the model builder not to emit the constant statistic inputs of
// BatchNormalization as standalone tensors; AddToModelBuilderImpl embeds
// them into the CoreML batchnorm layer's weights instead.
void BatchNormalizationOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
  // skip everything except input0 for BatchNormalization
  const auto& input_defs = node.InputDefs();
  // Inputs 1 through 4 are scale, B, mean and var respectively.
  for (size_t idx = 1; idx <= 4; ++idx) {
    model_builder.AddInitializerToSkip(input_defs[idx]->Name());
  }
}

// Builds the CoreML batchnorm layer for an ONNX BatchNormalization node.
// All of scale/B/mean/var are constant initializers (guaranteed by
// IsOpSupportedImpl) and are copied in as the layer's weights.
Status BatchNormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                                          const Node& node,
                                                          const logging::Logger& /* logger */) const {
  auto nn_layer = CreateNNLayer(node);

  const auto& input_defs = node.InputDefs();
  const auto& initializer_map(model_builder.GetInitializerTensors());
  NodeAttrHelper attr_helper(node);

  // Inputs 1-4 hold the constant scale/B/mean/var tensors.
  const auto& scale_tensor = *initializer_map.at(input_defs[1]->Name());
  const auto& bias_tensor = *initializer_map.at(input_defs[2]->Name());
  const auto& mean_tensor = *initializer_map.at(input_defs[3]->Name());
  const auto& var_tensor = *initializer_map.at(input_defs[4]->Name());

  // Channel count comes from the 1-D scale tensor; epsilon falls back to
  // 1e-5 when the attribute is absent.
  const auto channels = scale_tensor.dims()[0];
  const auto eps = attr_helper.Get("epsilon", 1e-5f);

  auto* batch_norm_params = nn_layer->mutable_batchnorm();
  batch_norm_params->set_channels(channels);
  batch_norm_params->set_epsilon(eps);
  // Statistics are precomputed constants; this is not instance normalization.
  batch_norm_params->set_computemeanvar(false);
  batch_norm_params->set_instancenormalization(false);

  CreateCoreMLWeight(*batch_norm_params->mutable_gamma(), scale_tensor);     // scale
  CreateCoreMLWeight(*batch_norm_params->mutable_beta(), bias_tensor);       // B
  CreateCoreMLWeight(*batch_norm_params->mutable_mean(), mean_tensor);       // mean
  CreateCoreMLWeight(*batch_norm_params->mutable_variance(), var_tensor);    // var

  *nn_layer->mutable_input()->Add() = input_defs[0]->Name();
  *nn_layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();

  model_builder.AddLayer(std::move(nn_layer));
  return Status::OK();
}

// Operator support related

// Checks whether this BatchNormalization node can be handled by the CoreML EP:
// exactly one output (inference mode), a static 4d input shape, spatial BN,
// and scale/B/mean/var all supplied as constant initializers.
// NOTE(review): the pasted diff had a review-comment thread interleaved inside
// this function body; this is the reconstructed contiguous function.
bool BatchNormalizationOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
                                                    const logging::Logger& logger) const {
  // Extra outputs (running mean/var) indicate a training-mode export.
  if (node.OutputDefs().size() != 1) {
    LOGS(logger, VERBOSE) << "Your onnx model may be in training mode, please export "
                             "it in test mode.";
    return false;
  }

  const auto& input_defs = node.InputDefs();
  std::vector<int64_t> input_shape;
  if (!GetShape(*input_defs[0], input_shape, logger))
    return false;

  const auto input_size = input_shape.size();
  // TODO, support 1d batch normalization (input is 3d)
  // To map 1d input {N,C,H} to 2d {N,C,H,1} first and then squeeze back after
  if (input_size != 4) {
    LOGS(logger, VERBOSE) << "BN only support 4d shape for now, input is "
                          << input_size << "d shape";
    return false;
  }

  // `spatial` != 1 (opset 8-) means per-element statistics, which CoreML
  // batchnorm does not express.
  NodeAttrHelper helper(node);
  const auto spatial = helper.Get("spatial", 1);
  if (spatial != 1) {
    LOGS(logger, VERBOSE) << "Non-spatial BN is not supported";
    return false;
  }

  // All statistics must be constants so they can be baked into layer weights.
  const auto& scale_name = input_defs[1]->Name();
  const auto& b_name = input_defs[2]->Name();
  const auto& mean_name = input_defs[3]->Name();
  const auto& var_name = input_defs[4]->Name();
  if (!Contains(initializers, scale_name)) {
    LOGS(logger, VERBOSE) << "Scale of BN must be a constant initializer";
    return false;
  }
  if (!Contains(initializers, b_name)) {
    LOGS(logger, VERBOSE) << "B of BN must be a constant initializer";
    return false;
  }
  if (!Contains(initializers, mean_name)) {
    LOGS(logger, VERBOSE) << "Mean of BN must be a constant initializer";
    return false;
  }
  if (!Contains(initializers, var_name)) {
    LOGS(logger, VERBOSE) << "Var of BN must be a constant initializer";
    return false;
  }

  return true;
}

// Registers a BatchNormalizationOpBuilder instance for `op_type`.
// `op_registrations.builders` owns the builder; the map holds a raw pointer.
void CreateBatchNormalizationOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations) {
  auto builder = onnxruntime::make_unique<BatchNormalizationOpBuilder>();
  op_registrations.op_builder_map.emplace(op_type, builder.get());
  op_registrations.builders.push_back(std::move(builder));
}

} // namespace coreml
} // namespace onnxruntime
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
*layer->mutable_input()->Add() = input_defs[1]->Name();
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();

model_builder.AddLayer(layer.release());
model_builder.AddLayer(std::move(layer));
return Status::OK();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ void ConvOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Nod
}

Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
const logging::Logger& /* logger */) const {
const logging::Logger& logger) const {
std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = CreateNNLayer(node);

const auto input_defs = node.InputDefs();
Expand Down Expand Up @@ -72,7 +72,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
// Usually using autopadding is more efficient than using explicit padding
// Try to see if we can map explicit padding to auto padding
std::vector<int64_t> input_shape;
ORT_RETURN_IF_ERROR(GetShape(*input_defs[0], input_shape));
ORT_RETURN_IF_NOT(GetShape(*input_defs[0], input_shape, logger), "Cannot get shape");
AutoPadType auto_pad_type;
ORT_RETURN_IF_ERROR(HandleAutoPad(input_shape, weight_shape[2], weight_shape[3],
onnx_pads, strides, dilations,
Expand Down Expand Up @@ -110,7 +110,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
*layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();

model_builder.AddLayer(layer.release());
model_builder.AddLayer(std::move(layer));
return Status::OK();
}

Expand Down
94 changes: 94 additions & 0 deletions onnxruntime/core/providers/coreml/builders/impl/pool_op_builder.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/shared/utils/utils.h"
#include "core/providers/coreml/builders/helper.h"
#include "core/providers/coreml/builders/model_builder.h"
#include "core/providers/coreml/builders/op_builder_factory.h"

#include "base_op_builder.h"

namespace onnxruntime {
namespace coreml {

// Translates ONNX global pooling nodes (GlobalAveragePool / GlobalMaxPool)
// into CoreML pooling layers.
class PoolOpBuilder : public BaseOpBuilder {
  // Add operator related
 private:
  // Emits the CoreML global pooling layer for `node` into `model_builder`.
  Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                               const logging::Logger& logger) const override ORT_MUST_USE_RESULT;

  // Operator support related
 private:
  // Returns true when the node is a global pool with a rank-4 input shape.
  bool IsOpSupportedImpl(const InitializedTensorSet& initializers, const Node& node,
                         const logging::Logger& logger) const override;
};

// Add operator related

// Builds a CoreML pooling layer in global mode for a GlobalAveragePool or
// GlobalMaxPool node (the only pool variants IsOpSupportedImpl accepts).
Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                            const Node& node,
                                            const logging::Logger& /* logger */) const {
  auto nn_layer = CreateNNLayer(node);

  const auto& op_type = node.OpType();
  auto* pool_params = nn_layer->mutable_pooling();

  // We only support global pool now
  pool_params->set_globalpooling(true);
  // Select VALID padding semantics (touching the oneof is what selects it).
  pool_params->mutable_valid();

  if (op_type == "GlobalAveragePool") {
    pool_params->set_type(COREML_SPEC::PoolingLayerParams_PoolingType_AVERAGE);
  } else if (op_type == "GlobalMaxPool") {
    pool_params->set_type(COREML_SPEC::PoolingLayerParams_PoolingType_MAX);
  }

  *nn_layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
  *nn_layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();

  model_builder.AddLayer(std::move(nn_layer));
  return Status::OK();
}

// Operator support related
// Accepts only GlobalAveragePool / GlobalMaxPool nodes whose input has a
// known rank-4 shape; logs (VERBOSE) the reason for every rejection.
bool PoolOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& /* initializers */, const Node& node,
                                      const logging::Logger& logger) const {
  const auto& op_type = node.OpType();
  const bool is_global_pool = (op_type == "GlobalAveragePool") || (op_type == "GlobalMaxPool");
  if (!is_global_pool) {
    LOGS(logger, VERBOSE) << "[" << op_type << "] is not supported";
    return false;
  }

  const auto* input_def = node.InputDefs()[0];
  std::vector<int64_t> input_shape;
  if (!GetShape(*input_def, input_shape, logger))
    return false;

  const auto rank = input_shape.size();
  if (rank != 4) {
    LOGS(logger, VERBOSE)
        << op_type << " only supports rank-4 tensor, input ["
        << input_def->Name() << "] has actual dim count " << rank;
    return false;
  }

  return true;
}

// Registers one shared PoolOpBuilder instance for every pool op type it
// implements. Called once per op type; returns early when `op_type` is
// already registered so the shared instance is created only once.
void CreatePoolOpBuilder(const std::string& op_type, OpBuilderRegistrations& op_registrations) {
  if (op_registrations.op_builder_map.find(op_type) != op_registrations.op_builder_map.cend())
    return;

  // All op types handled by PoolOpBuilder. Never mutated, so make it const.
  static const std::vector<std::string> op_types =
      {
          "GlobalAveragePool",
          "GlobalMaxPool",
      };

  op_registrations.builders.push_back(onnxruntime::make_unique<PoolOpBuilder>());
  // Loop variable renamed so it no longer shadows the `op_type` parameter.
  for (const auto& pool_op_type : op_types) {
    op_registrations.op_builder_map.emplace(pool_op_type, op_registrations.builders.back().get());
  }
}

} // namespace coreml
} // namespace onnxruntime
Loading