Skip to content

Commit

Permalink
Merge pull request #8 from liubo-intel/liubo/pdpd_frontend
Browse files Browse the repository at this point in the history
Enable Cast Op; elementwise_add/div/mul/sub; flatten_contiguous_range; pad3d
  • Loading branch information
zhangYiIntel authored Apr 25, 2021
2 parents f8e43ca + d9f3aee commit 8329ef4
Show file tree
Hide file tree
Showing 10 changed files with 473 additions and 0 deletions.
59 changes: 59 additions & 0 deletions ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <ngraph/opsets/opset6.hpp>
#include "flatten_contiguous_range.hpp"
#include <paddlepaddle_frontend/utility.hpp>

namespace ngraph {
namespace frontend {
namespace pdpd {
namespace op {

// Converts the PaddlePaddle "flatten_contiguous_range" operator into an
// ngraph Reshape: the dimensions of input "X" in [start_axis, stop_axis]
// are collapsed into a single dimension; all other dimensions are kept.
// Produces output "Out".
NamedOutputs flatten_contiguous_range (const NodeContext& node) {
    auto data = node.get_ng_input("X");

    // NOTE(review): get_length() requires a statically known rank and
    // dimensions - dynamic input shapes are not supported here.
    PartialShape input_shape = data.get_partial_shape();
    int32_t input_rank = input_shape.rank().get_length();

    // Read each attribute once. PaddlePaddle allows negative axes that count
    // from the end, so normalize by adding the rank, then clamp into the
    // valid index range [0, input_rank - 1]. The previous code clamped
    // stop_axis to input_rank, which made the accumulation loop below read
    // input_shape[input_rank] - one past the end.
    int32_t start_axis = node.get_attribute<int32_t>("start_axis");
    int32_t stop_axis = node.get_attribute<int32_t>("stop_axis");
    if (start_axis < 0)
        start_axis += input_rank;
    if (stop_axis < 0)
        stop_axis += input_rank;
    if (start_axis < 0)
        start_axis = 0;
    if (stop_axis > input_rank - 1)
        stop_axis = input_rank - 1;

    // Rank after collapsing [start_axis, stop_axis] into one dimension.
    int64_t flattened_rank = input_rank - (stop_axis - start_axis);
    auto flattened_shape = std::vector<int64_t>(flattened_rank, 1);

    int32_t j = 0;

    // Dimensions before the flattened range pass through unchanged.
    for (int32_t i = 0; i < start_axis; i++, j++)
        flattened_shape[j] = input_shape[i].get_length();

    // Dimensions inside the range multiply into a single flattened dimension.
    for (int32_t i = start_axis; i <= stop_axis; i++)
        flattened_shape[j] *= input_shape[i].get_length();

    j++;

    // Dimensions after the flattened range pass through unchanged.
    for (int32_t i = stop_axis + 1; i < input_rank; i++, j++)
        flattened_shape[j] = input_shape[i].get_length();

    auto shape_node = ngraph::opset6::Constant::create(ngraph::element::i64, {flattened_shape.size()}, flattened_shape);
    return node.default_single_output_mapping({std::make_shared<ngraph::opset6::Reshape>(data, shape_node, true)}, {"Out"});
}
}
}
}
}
11 changes: 11 additions & 0 deletions ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
#pragma once
#include "node_context.hpp"

namespace ngraph {
namespace frontend {
namespace pdpd {
namespace op {

// Converts the PaddlePaddle "flatten_contiguous_range" operator (input "X",
// attributes "start_axis"/"stop_axis") into an ngraph subgraph with output "Out".
NamedOutputs flatten_contiguous_range (const NodeContext& node);

}}}}
94 changes: 94 additions & 0 deletions ngraph/frontend/paddlepaddle/src/op/pad3d.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <ngraph/opsets/opset6.hpp>
#include "pad3d.hpp"
#include <paddlepaddle_frontend/utility.hpp>

namespace ngraph {
namespace frontend {
namespace pdpd {
namespace op {

// Converts the PaddlePaddle "pad3d" operator into ngraph::opset6::Pad.
//
// Reads input "X" plus the attributes:
//   "paddings"    - 6 ints: {left, right, top, down, front, back}
//   "mode"        - "constant" | "reflect" | "replicate"
//   "value"       - fill value, used only in constant mode
//   "data_format" - "NCDHW" | "NDHWC"
// Produces output "Out".
NamedOutputs pad3d (const NodeContext& node) {
    auto data = node.get_ng_input("X");
    auto paddings = node.get_attribute<std::vector<int32_t>>("paddings");
    auto mode = node.get_attribute<std::string>("mode");
    auto data_format = node.get_attribute<std::string>("data_format");

    // Pad expects one begin/end entry per input dimension (rank 5 here);
    // batch and channel dimensions are never padded and stay 0.
    auto pads_begin = std::vector<int32_t>(5, 0);
    auto pads_end = std::vector<int32_t>(5, 0);

    Output<ngraph::Node> values;
    Output<ngraph::Node> padding_begin;
    Output<ngraph::Node> padding_end;

    if (paddings.size() != 6)
        throw ngraph::ngraph_error("paddings Params size should be 6 in pad3d!");

    ngraph::op::PadMode pad_mode;

    if (mode == "constant") {
        pad_mode = ngraph::op::PadMode::CONSTANT;
        // "value" is only consumed in constant mode, so read it only here;
        // the original read it unconditionally, which would fail for models
        // that omit the attribute in reflect/replicate mode.
        auto value = node.get_attribute<float>("value");
        values = ngraph::opset6::Constant::create(
            element::f32, ngraph::Shape{}, {value});
    } else if (mode == "reflect") {
        pad_mode = ngraph::op::PadMode::REFLECT;
    } else if (mode == "replicate") {
        pad_mode = ngraph::op::PadMode::EDGE;
    } else {
        throw ngraph::ngraph_error("Unsupported 3d paddings mode: [" + mode + "]");
    }

    // Map Paddle's {left, right, top, down, front, back} onto the W, H and D
    // axes of the chosen layout.
    if (data_format == "NCDHW") {
        pads_begin[4] = paddings[0]; //left
        pads_end[4] = paddings[1];   //right
        pads_begin[3] = paddings[2]; //top
        pads_end[3] = paddings[3];   //down
        pads_begin[2] = paddings[4]; //front
        pads_end[2] = paddings[5];   //back
    } else if (data_format == "NDHWC") {
        pads_begin[3] = paddings[0]; //left
        pads_end[3] = paddings[1];   //right
        pads_begin[2] = paddings[2]; //top
        pads_end[2] = paddings[3];   //down
        pads_begin[1] = paddings[4]; //front
        pads_end[1] = paddings[5];   //back
    } else {
        throw ngraph::ngraph_error("Unsupported 3d paddings data_format: [" + data_format + "]");
    }

    padding_begin = ngraph::opset6::Constant::create(
        element::i32, ngraph::Shape{5}, pads_begin);
    padding_end = ngraph::opset6::Constant::create(
        element::i32, ngraph::Shape{5}, pads_end);

    // Dispatch on the already-computed pad mode instead of re-comparing the
    // mode string; only the constant variant carries a fill value.
    if (pad_mode == ngraph::op::PadMode::CONSTANT)
        return node.default_single_output_mapping(
            {std::make_shared<ngraph::opset6::Pad>(data, padding_begin, padding_end, values, pad_mode)}, {"Out"});
    return node.default_single_output_mapping(
        {std::make_shared<ngraph::opset6::Pad>(data, padding_begin, padding_end, pad_mode)}, {"Out"});
}
}
}
}
}
11 changes: 11 additions & 0 deletions ngraph/frontend/paddlepaddle/src/op/pad3d.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
#pragma once
#include "node_context.hpp"

namespace ngraph {
namespace frontend {
namespace pdpd {
namespace op {

// Converts the PaddlePaddle "pad3d" operator (input "X", attributes
// "paddings"/"mode"/"value"/"data_format") into an ngraph subgraph
// with output "Out".
NamedOutputs pad3d (const NodeContext& node);

}}}}
4 changes: 4 additions & 0 deletions ngraph/frontend/paddlepaddle/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@
#include "op/unsqueeze.hpp"
#include "op/slice.hpp"
#include "op/hard_swish.hpp"
#include "op/flatten_contiguous_range.hpp"
#include "op/pad3d.hpp"
#include "op/clip.hpp"
#include "op/greater_equal.hpp"
#include "op/log.hpp"
Expand Down Expand Up @@ -87,6 +89,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"transpose2", op::transpose2},
{"yolo_box", op::yolo_box},
{"multiclass_nms3", op::multiclass_nms},
{"flatten_contiguous_range", op::flatten_contiguous_range},
{"rnn", op::rnn},
{"fill_constant", op::fill_constant},
{"bmm", op::matmul},
Expand All @@ -101,6 +104,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"unsqueeze2", op::unsqueeze},
{"slice", op::slice},
{"hard_swish", op::hard_swish},
{"pad3d", op::pad3d},
{"clip", op::clip},
{"greater_equal", op::greater_equal},
{"log", op::log},
Expand Down
41 changes: 41 additions & 0 deletions ngraph/test/files/paddlepaddle/gen_scripts/generate_cast.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#
# cast paddle model generator
#
import numpy as np
from save_model import saveModel

def cast(name : str, x, in_dtype, out_dtype):
    """Build, execute and save a one-op PaddlePaddle model casting x to out_dtype."""
    import paddle as pdpd
    pdpd.enable_static()

    main_prog = pdpd.static.Program()
    startup_prog = pdpd.static.Program()
    with pdpd.static.program_guard(main_prog, startup_prog):
        node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
        out = pdpd.cast(node_x, out_dtype)

        exe = pdpd.static.Executor(pdpd.static.cpu_places(1)[0])
        # The startup program initializes all parameters before the run.
        exe.run(pdpd.static.default_startup_program())
        outs = exe.run(feed={'x': x}, fetch_list=[out])
        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]])

    return outs[0]


def main():
    # TODO: more type
    cases = [
        ("cast_test1", 'float32', 'float32', [[1.1, 2.1, 1.0], [3.2, 4.7, 5.6]]),
        # ("cast_test2", 'float32', 'uint8', [[1.1, 2.1, 1], [3.2, 4, 5]]),
    ]
    for model_name, src_dtype, dst_dtype, values in cases:
        cast(model_name, np.array(values, dtype=src_dtype), src_dtype, dst_dtype)

if __name__ == "__main__":
    main()
106 changes: 106 additions & 0 deletions ngraph/test/files/paddlepaddle/gen_scripts/generate_elementwise_ops.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
#
# elementwise paddle model generator
#
import numpy as np
from save_model import saveModel

import numpy as np
from save_model import saveModel

def elementwise_add(name : str, x, y, in_dtype):
    """Build, execute and save a one-op PaddlePaddle elementwise_add model."""
    import paddle as pdpd
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        lhs = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
        rhs = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype)
        result = pdpd.fluid.layers.nn.elementwise_add(lhs, rhs)

        executor = pdpd.static.Executor(pdpd.static.cpu_places(1)[0])
        # The startup program initializes all parameters before the run.
        executor.run(pdpd.static.default_startup_program())
        outs = executor.run(feed={'x': x, 'y': y}, fetch_list=[result])
        saveModel(name, executor, feedkeys=['x', 'y'], fetchlist=[result], inputs=[x, y], outputs=[outs[0]])

    return outs[0]

def elementwise_sub(name : str, x, y, in_dtype):
    """Build, execute and save a one-op PaddlePaddle elementwise_sub model."""
    import paddle as pdpd
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        lhs = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
        rhs = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype)
        result = pdpd.fluid.layers.nn.elementwise_sub(lhs, rhs)

        executor = pdpd.static.Executor(pdpd.static.cpu_places(1)[0])
        # The startup program initializes all parameters before the run.
        executor.run(pdpd.static.default_startup_program())
        outs = executor.run(feed={'x': x, 'y': y}, fetch_list=[result])
        saveModel(name, executor, feedkeys=['x', 'y'], fetchlist=[result], inputs=[x, y], outputs=[outs[0]])

    return outs[0]

def elementwise_div(name : str, x, y, in_dtype):
    """Build, execute and save a one-op PaddlePaddle elementwise_div model."""
    import paddle as pdpd
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        lhs = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
        rhs = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype)
        result = pdpd.fluid.layers.nn.elementwise_div(lhs, rhs)

        executor = pdpd.static.Executor(pdpd.static.cpu_places(1)[0])
        # The startup program initializes all parameters before the run.
        executor.run(pdpd.static.default_startup_program())
        outs = executor.run(feed={'x': x, 'y': y}, fetch_list=[result])
        saveModel(name, executor, feedkeys=['x', 'y'], fetchlist=[result], inputs=[x, y], outputs=[outs[0]])

    return outs[0]

def elementwise_mul(name : str, x, y, in_dtype):
    """Build, execute and save a one-op PaddlePaddle elementwise_mul model."""
    import paddle as pdpd
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        lhs = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
        rhs = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype)
        result = pdpd.fluid.layers.nn.elementwise_mul(lhs, rhs)

        executor = pdpd.static.Executor(pdpd.static.cpu_places(1)[0])
        # The startup program initializes all parameters before the run.
        executor.run(pdpd.static.default_startup_program())
        outs = executor.run(feed={'x': x, 'y': y}, fetch_list=[result])
        saveModel(name, executor, feedkeys=['x', 'y'], fetchlist=[result], inputs=[x, y], outputs=[outs[0]])

    return outs[0]

def main():
    # TODO: more type
    in_dtype = 'float32'
    lhs = np.array([2, 3, 4]).astype(in_dtype)
    rhs = np.array([1, 5, 2]).astype(in_dtype)

    # Generate one model per elementwise operation (insertion order preserved).
    generators = {
        "elementwise_add1": elementwise_add,
        "elementwise_sub1": elementwise_sub,
        "elementwise_div1": elementwise_div,
        "elementwise_mul1": elementwise_mul,
    }
    for model_name, generator in generators.items():
        generator(model_name, lhs, rhs, in_dtype)

if __name__ == "__main__":
    main()
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#
# generate_flatten_contiguous_range paddle model generator
#
import numpy as np
from save_model import saveModel

def generate_flatten_contiguous_range(name : str, x, start_axis, stop_axis, in_dtype):
    """Build, execute and save a one-op PaddlePaddle flatten model."""
    import paddle as pdpd
    pdpd.enable_static()

    main_prog = pdpd.static.Program()
    startup_prog = pdpd.static.Program()
    with pdpd.static.program_guard(main_prog, startup_prog):
        node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
        out = pdpd.flatten(node_x, start_axis, stop_axis)

        exe = pdpd.static.Executor(pdpd.static.cpu_places(1)[0])
        # The startup program initializes all parameters before the run.
        exe.run(pdpd.static.default_startup_program())
        outs = exe.run(feed={'x': x}, fetch_list=[out])
        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]])

    return outs[0]


def main():
    # TODO: more type
    in_dtype = 'float32'
    # Flattening axes 1..2 of shape (3, 2, 5, 4) yields shape (3, 10, 4).
    sample = np.random.randn(3, 2, 5, 4).astype(in_dtype)
    generate_flatten_contiguous_range(
        "generate_flatten_contiguous_range_test1", sample, 1, 2, in_dtype)

if __name__ == "__main__":
    main()
Loading

0 comments on commit 8329ef4

Please sign in to comment.