Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,10 @@ Status ActivationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
} else if (op_type == "Elu") {
coreml_op_type = "elu";
add_alpha = true;
} else if (op_type == "HardSigmoid") {
// CoreML MIL: sigmoid_hard(x, alpha, beta) = min(max(alpha*x + beta, 0), 1)
// ONNX HardSigmoid has the same definition with defaults alpha=0.2, beta=0.5.
coreml_op_type = "sigmoid_hard";
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"ActivationOpBuilder::AddToModelBuilderImpl, unknown op: ", op_type);
Expand Down Expand Up @@ -166,6 +170,20 @@ Status ActivationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
AddOperationInput(*op, "mode", model_builder.AddScalarConstant(op->type(), "mode", std::string(approximate)));
}

if (op_type == "HardSigmoid") {
NodeAttrHelper helper(node);
const float alpha = helper.Get("alpha", 0.2f);
const float beta = helper.Get("beta", 0.5f);
auto input_dtype = node.InputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
Comment thread
yuslepukhin marked this conversation as resolved.
if (input_dtype == ONNX_NAMESPACE::TensorProto_DataType_FLOAT) {
AddOperationInput(*op, "alpha", model_builder.AddScalarConstant(op->type(), "alpha", alpha));
AddOperationInput(*op, "beta", model_builder.AddScalarConstant(op->type(), "beta", beta));
} else {
AddOperationInput(*op, "alpha", model_builder.AddScalarConstant(op->type(), "alpha", MLFloat16(alpha)));
Comment thread
yuslepukhin marked this conversation as resolved.
AddOperationInput(*op, "beta", model_builder.AddScalarConstant(op->type(), "beta", MLFloat16(beta)));
}
}

AddOperationOutput(*op, *node.OutputDefs()[0]);

model_builder.AddOperation(std::move(op));
Expand All @@ -188,6 +206,14 @@ Status ActivationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,

auto* leaky_relu = layer->mutable_activation()->mutable_leakyrelu();
leaky_relu->set_alpha(alpha);
} else if (op_type == "HardSigmoid") {
NodeAttrHelper helper(node);
const auto alpha = helper.Get("alpha", 0.2f);
const auto beta = helper.Get("beta", 0.5f);

auto* hard_sigmoid = layer->mutable_activation()->mutable_sigmoidhard();
hard_sigmoid->set_alpha(alpha);
hard_sigmoid->set_beta(beta);
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"ActivationOpBuilder::AddToModelBuilderImpl, unknown op: ", op_type);
Expand Down Expand Up @@ -269,6 +295,18 @@ bool ActivationOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInp
return false;
}
}
if (op_type == "HardSigmoid") {
// CoreML sigmoid_hard (MLProgram) and ActivationSigmoidHard (NN) both
// support float32 and float16 only. ONNX HardSigmoid also allows double
// and (opset 22+) bfloat16 — fall back to CPU for those.
const auto input_dtype = node.InputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
if (input_dtype != ONNX_NAMESPACE::TensorProto_DataType_FLOAT &&
input_dtype != ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) {
LOGS(logger, VERBOSE) << "HardSigmoid input data type [" << input_dtype
<< "] is not supported by CoreML (float or float16 only)";
return false;
}
}
if (op_type == "PRelu") {
return IsPReluOpSupported(node, input_params, logger);
}
Expand Down Expand Up @@ -300,6 +338,7 @@ void CreateActivationOpBuilder(const std::string& op_type, OpBuilderRegistration
"Gelu",
"Softplus",
"Elu",
"HardSigmoid",
};

op_registrations.builders.push_back(std::make_unique<ActivationOpBuilder>());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ static OpBuilderRegistrations CreateOpBuilderRegistrations() {
CreateActivationOpBuilder("Gelu", op_registrations);
CreateActivationOpBuilder("Softplus", op_registrations);
CreateActivationOpBuilder("Elu", op_registrations);
CreateActivationOpBuilder("HardSigmoid", op_registrations);

// Unary ops
CreateUnaryOpBuilder("Erf", op_registrations);
Expand Down
69 changes: 69 additions & 0 deletions onnxruntime/test/providers/coreml/coreml_basic_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -877,6 +877,75 @@ TEST(CoreMLExecutionProviderTest, Pad1DMLProgram) {
TestModelLoad(model_span, MakeCoreMLExecutionProvider("MLProgram"), ExpectedEPNodeAssignment::All);
#endif
}

// Single-node HardSigmoid model; verify it is claimed entirely by the
// CoreML EP in both NeuralNetwork and MLProgram formats, and (on Apple
// platforms) that the output matches the CPU EP reference.
TEST(CoreMLExecutionProviderTest, HardSigmoidTest) {
  onnxruntime::Model model("hard_sigmoid_test", false, DefaultLoggingManager().DefaultLogger());
  auto& graph = model.MainGraph();

  // Input and output share the same fixed float shape.
  const std::vector<int64_t> dims = {1, 3, 2, 4};

  auto make_float_tensor_type = [&dims]() {
    ONNX_NAMESPACE::TypeProto type;
    type.mutable_tensor_type()->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
    auto* shape = type.mutable_tensor_type()->mutable_shape();
    for (int64_t dim : dims) {
      shape->add_dim()->set_dim_value(dim);
    }
    return type;
  };

  ONNX_NAMESPACE::TypeProto input_type = make_float_tensor_type();
  ONNX_NAMESPACE::TypeProto output_type = make_float_tensor_type();

  auto& input_arg = graph.GetOrCreateNodeArg("X", &input_type);
  auto& output_arg = graph.GetOrCreateNodeArg("Y", &output_type);

  auto& node = graph.AddNode("hard_sigmoid", "HardSigmoid", "HardSigmoid with non-default alpha/beta",
                             {&input_arg}, {&output_arg});
  // Use non-default values so the test catches any attribute-wiring bug.
  node.AddAttribute("alpha", 0.1f);
  node.AddAttribute("beta", 0.6f);

  ASSERT_STATUS_OK(graph.Resolve());

  // Serialize once; both the Apple and non-Apple paths consume the same bytes.
  std::string model_data;
  model.ToProto().SerializeToString(&model_data);
  gsl::span<const std::byte> model_span{reinterpret_cast<const std::byte*>(model_data.data()), model_data.size()};

#if defined(__APPLE__)
  // Inputs span the three HardSigmoid regions (saturated-low, linear, saturated-high)
  // for alpha=0.1, beta=0.6: values < -6 clamp to 0, values > 4 clamp to 1, others are linear.
  std::vector<float> input_data = {-10.0f, -7.0f, -6.0f, -5.0f, -3.0f, -1.0f, 0.0f, 1.0f,
                                   2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 10.0f, 0.5f, -0.5f,
                                   -4.0f, -2.0f, 1.5f, 2.5f, -1.5f, 3.5f, -3.5f, 4.5f};

  AllocatorPtr allocator = CPUAllocator::DefaultInstance();
  OrtValue input_value;
  CreateMLValue<float>(allocator, dims, input_data, &input_value);

  NameMLValMap feeds;
  feeds.emplace("X", input_value);

  RunAndVerifyOutputsWithEP(model_span, "HardSigmoidTest_NN",
                            MakeCoreMLExecutionProvider(),
                            feeds,
                            EPVerificationParams{ExpectedEPNodeAssignment::All});
  RunAndVerifyOutputsWithEP(model_span, "HardSigmoidTest_MLProgram",
                            MakeCoreMLExecutionProvider("MLProgram"),
                            feeds,
                            EPVerificationParams{ExpectedEPNodeAssignment::All});
#else
  // Off-device we can only check that the model loads and the EP claims all nodes.
  TestModelLoad(model_span, MakeCoreMLExecutionProvider(), ExpectedEPNodeAssignment::All);
  TestModelLoad(model_span, MakeCoreMLExecutionProvider("MLProgram"), ExpectedEPNodeAssignment::All);
#endif
}
#endif // !(ORT_MINIMAL_BUILD)
} // namespace test
} // namespace onnxruntime
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ Keep in sync with doco generated from /docs/execution-providers/CoreML-Execution
|ai.onnx:GlobalMaxPool|Only 2D Pool is supported currently. 3D and 5D support can be added if needed.|
|ai.onnx:GridSample|4D input.<br/>'mode' of 'linear' or 'zeros'.<br/>(mode==linear && padding_mode==reflection && align_corners==0) is not supported.|
|ai.onnx:GroupNormalization||
|ai.onnx:HardSigmoid||
|ai.onnx:InstanceNormalization||
|ai.onnx:LayerNormalization||
|ai.onnx:LeakyRelu||
Expand Down
Loading