Skip to content

Commit

Permalink
Merge branch 'master' into feature/embedding-bag-opset-3
Browse files Browse the repository at this point in the history
  • Loading branch information
mvafin authored May 26, 2020
2 parents 2d5cb8f + d3764a7 commit a2f1cc5
Show file tree
Hide file tree
Showing 43 changed files with 1,128 additions and 400 deletions.
14 changes: 7 additions & 7 deletions CODEOWNERS
Original file line number Diff line number Diff line change
Expand Up @@ -14,15 +14,15 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
/inference-engine/ @openvinotoolkit/openvino-ie-maintainers

# IE CPU:
/inference-engine/src/mkldnn_plugin/ @openvinotoolkit/openvino-ie-cpu-maintainers
/inference-engine/include/cpu/ @openvinotoolkit/openvino-ie-cpu-maintainers
/inference-engine/src/low_precision_transformations/ @openvinotoolkit/openvino-ie-cpu-maintainers
/inference-engine/thirdparty/mkl-dnn/ @openvinotoolkit/openvino-ie-cpu-maintainers
/inference-engine/src/mkldnn_plugin/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/src/low_precision_transformations/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/thirdparty/mkl-dnn/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers

# IE GPU:
/inference-engine/src/cldnn_engine/ @openvinotoolkit/openvino-ie-gpu-maintainers
/inference-engine/include/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers
/inference-engine/thirdparty/clDNN/ @openvinotoolkit/openvino-ie-gpu-maintainers
/inference-engine/src/cldnn_engine/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/thirdparty/clDNN/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers

# IE VPU:
/inference-engine/src/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
Expand Down
4 changes: 2 additions & 2 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ jobs:
timeoutInMinutes: 0
pool:
#vmImage: 'ubuntu-18.04'
name: LIN_VMSS_VENV
name: LIN_VMSS_VENV_F8S_WE
variables:
BUILD_TYPE: Release
steps:
Expand Down Expand Up @@ -186,7 +186,7 @@ jobs:
timeoutInMinutes: 0
pool:
#vmImage: 'vs2017-win2016'
name: WIN_VMSS_VENV2
name: WIN_VMSS_VENV_F8S_WE
variables:
BUILD_TYPE: Release
BUILD_DIR: D:\dldt-build
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1638,37 +1638,8 @@ CNNLayer::Ptr NodeConverter<ngraph::op::v1::TopK>::createLayer(const std::shared
auto castedLayer = ngraph::as_type_ptr<ngraph::op::v1::TopK>(layer);
if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;

auto mode = castedLayer->get_mode();
std::string str_mode;
switch (mode) {
case ngraph::op::v1::TopK::Mode::MIN:
str_mode = "min";
break;
case ngraph::op::v1::TopK::Mode::MAX:
str_mode = "max";
break;
default:
THROW_IE_EXCEPTION << "Unsupported TopK mode";
}

auto sort = castedLayer->get_sort_type();
std::string str_sort;
switch (sort) {
case ngraph::op::v1::TopK::SortType::NONE:
str_sort = "none";
break;
case ngraph::op::v1::TopK::SortType::SORT_VALUES:
str_sort = "value";
break;
case ngraph::op::v1::TopK::SortType::SORT_INDICES:
str_sort = "index";
break;
default:
THROW_IE_EXCEPTION << "Unsupported TopK sort type";
}

res->params["mode"] = str_mode;
res->params["sort"] = str_sort;
res->params["mode"] = ngraph::as_string<ngraph::op::v1::TopK::Mode>(castedLayer->get_mode());;
res->params["sort"] = ngraph::as_string<ngraph::op::v1::TopK::SortType>(castedLayer->get_sort_type());
res->params["axis"] = asString(castedLayer->get_axis());

return res;
Expand All @@ -1682,8 +1653,8 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TopKIE>::createLayer(const std::shared_p
auto castedLayer = ngraph::as_type_ptr<ngraph::op::TopKIE>(layer);
if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;

res->params["mode"] = castedLayer->get_mode();
res->params["sort"] = castedLayer->get_sort_type();
res->params["mode"] = ngraph::as_string<ngraph::op::v1::TopK::Mode>(castedLayer->get_mode());;
res->params["sort"] = ngraph::as_string<ngraph::op::v1::TopK::SortType>(castedLayer->get_sort_type());
res->params["axis"] = asString(castedLayer->get_axis());

return res;
Expand Down
29 changes: 16 additions & 13 deletions inference-engine/src/transformations/include/ngraph_ops/topk_ie.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <ie_api.h>

#include "ngraph/op/op.hpp"
#include "ngraph/op/topk.hpp"

namespace ngraph {
namespace op {
Expand All @@ -19,24 +20,26 @@ class INFERENCE_ENGINE_API_CLASS(TopKIE) : public Op {
static constexpr NodeTypeInfo type_info{"TopKIE", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }

TopKIE(const Output<Node> &data,
const Output<Node> &k,
TopKIE(const Output<Node>& data,
const Output<Node>& k,
const int64_t axis,
const std::string& mode,
const std::string& sort,
const Shape& output_shape);
const ngraph::op::TopKMode mode,
const ngraph::op::TopKSortType sort);

void validate_and_infer_types() override;

std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
int64_t get_axis();
std::string get_mode();
std::string get_sort_type();
Shape get_output_shape();
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

int64_t axis;
std::string mode, sort_type;
Shape output_shape;
int64_t get_axis() { return m_axis;}

ngraph::op::TopKMode get_mode() { return m_mode; }

ngraph::op::TopKSortType get_sort_type() { return m_sort_type; }

private:
int64_t m_axis;
ngraph::op::TopKMode m_mode;
ngraph::op::TopKSortType m_sort_type;
};

} // namespace op
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,5 +35,13 @@ enum class CONVERSION_RESULT {
NONE
};

/*
 * check_constant determines how the given constant can take part in an
 * elementwise operation with an input of the given shape.
 * CONVERSION_RESULT has several values:
 * SCALE_SHIFT - the constant applies per-channel only
 * POWER - the constant applies as a single value
 * NONE - default return value (no conversion is possible)
 */

INFERENCE_ENGINE_API_CPP(CONVERSION_RESULT)
check_constant(const std::shared_ptr<ngraph::op::Constant> & constant, const ngraph::Shape & shape);
check_constant(const std::shared_ptr<ngraph::op::Constant> & constant, const ngraph::PartialShape & shape);
Original file line number Diff line number Diff line change
Expand Up @@ -70,10 +70,13 @@ ngraph::graph_rewrite_callback get_callback() {
"Unsupported template parameter. Only Add or Multiply allowed!");

auto lin_op = std::dynamic_pointer_cast<T> (m.get_match_root());
if (!lin_op) {
if (!lin_op || lin_op->output(0).get_partial_shape().rank().is_dynamic()) {
return false;
}

const auto output_shape = lin_op->output(0).get_partial_shape();
const auto output_shape_rank = output_shape.rank().get_length();

if (!lin_op->get_element_type().is_real()) {
return convert_to_eltwise<T>(lin_op,
lin_op->input(0).get_source_output(),
Expand All @@ -93,39 +96,58 @@ ngraph::graph_rewrite_callback get_callback() {
}
}

// Check that eltwise is not useless otherwise we remove it
if ((std::is_same<T, ngraph::opset1::Add>() && ngraph::op::util::constantIsEqualTo(const_node, 0)) ||
(std::is_same<T, ngraph::opset1::Multiply>() && ngraph::op::util::constantIsEqualTo(const_node, 1))) {
bool has_result_output = false;
for (const auto & output : lin_op->output(0).get_target_inputs()) {
if (dynamic_cast<ngraph::op::Result*>(output.get_node())) {
has_result_output = true;
}
/* This lambda checks the data and constant shapes for broadcasting.
For example:
1. data_shape{1, 64, 64} and const_shape{64, 1, 1} - the constant broadcasts the zero dimension of data_shape
2. data_shape{DYN, 64, 64} and const_shape{1, 1, 64} - the constant does not broadcast data_shape
3. data_shape{64, 64} and const_shape{1, 1, 1} - the constant broadcasts data_shape with an additional dimension
*/
auto constant_broadcast_output = [](const ngraph::PartialShape & data_pshape, const ngraph::Shape & const_shape) -> bool {
if (data_pshape.rank().is_dynamic() || const_shape.size() > data_pshape.rank().get_length()) {
return true;
}

auto parent = data_node.get_node_shared_ptr();
size_t consumers_count = 0;
for (const auto &output : parent->outputs()) {
consumers_count += output.get_target_inputs().size();
std::vector<ngraph::Dimension> data_shape(data_pshape);

auto const_shape_it = const_shape.rbegin();
auto data_shape_it = data_shape.rbegin();

while (const_shape_it != const_shape.rend()) {
auto data_dim = *data_shape_it;
auto const_dim = *const_shape_it;

/* DATA DIM - CONST DIM - CONSTANT BROADCAST OUTPUT
DYN - 64 - TRUE
DYN - 1 - FALSE
64 - 1 - FALSE
1 - 64 - TRUE
64 - 64 - FALSE
*/
if ((data_dim.is_dynamic() && const_dim != 1) ||
(data_dim.is_static() && data_dim.get_length() == 1 && const_dim != 1)) {
return true;
}

++const_shape_it;
++data_shape_it;
}

if (!has_result_output || consumers_count == 1) {
if (!std::dynamic_pointer_cast<ngraph::op::Parameter>(parent)) {
parent->set_friendly_name(lin_op->get_friendly_name());
}
// TODO: due to ngraph::replace_node function limitations we have to reconnect output port consumers to the new input
// using replace_source_output method
for (auto &input : lin_op->output(0).get_target_inputs()) {
input.replace_source_output(data_node);
}
return false;
};

// Check that the eltwise is not useless and does not broadcast its output; otherwise we remove it
if (((std::is_same<T, ngraph::opset1::Add>() && ngraph::op::util::constantIsEqualTo(const_node, 0)) ||
(std::is_same<T, ngraph::opset1::Multiply>() && ngraph::op::util::constantIsEqualTo(const_node, 1))) &&
!constant_broadcast_output(data_node.get_partial_shape(), const_node->get_shape())) {
bool ret_status = ngraph::replace_output_update_name(lin_op->output(0), data_node);
if (ret_status) {
return true;
}
}

auto res = check_constant(const_node, data_node.get_partial_shape());

auto res = check_constant(const_node, data_node.get_shape());

if (res == CONVERSION_RESULT::NONE || (res == CONVERSION_RESULT::SCALE_SHIFT && lin_op->get_shape().size() < 4)) {
if (res == CONVERSION_RESULT::NONE || (res == CONVERSION_RESULT::SCALE_SHIFT && output_shape_rank < 4)) {
return convert_to_eltwise<T>(lin_op,
lin_op->input(0).get_source_output(),
lin_op->input(1).get_source_output());
Expand All @@ -140,12 +162,12 @@ ngraph::graph_rewrite_callback get_callback() {
std::shared_ptr<ngraph::op::ScaleShiftIE> scaleshift;
if (std::is_same<T, ngraph::opset1::Add>()) {
auto weights = ngraph::opset1::Constant::create(weights_et, weights_shape, {1});
scaleshift = std::make_shared<ngraph::op::ScaleShiftIE>(data_node, ngraph::op::util::normalize_constant(weights, lin_op->get_shape()),
ngraph::op::util::normalize_constant(const_node, lin_op->get_shape()));
scaleshift = std::make_shared<ngraph::op::ScaleShiftIE>(data_node, ngraph::op::util::normalize_constant(weights, output_shape),
ngraph::op::util::normalize_constant(const_node, output_shape));
} else {
auto bias = ngraph::opset1::Constant::create(weights_et, weights_shape, {0});
scaleshift = std::make_shared<ngraph::op::ScaleShiftIE>(data_node, ngraph::op::util::normalize_constant(const_node, lin_op->get_shape()),
ngraph::op::util::normalize_constant(bias, lin_op->get_shape()));
scaleshift = std::make_shared<ngraph::op::ScaleShiftIE>(data_node, ngraph::op::util::normalize_constant(const_node, output_shape),
ngraph::op::util::normalize_constant(bias, output_shape));
}

scaleshift->set_friendly_name(lin_op->get_friendly_name());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ bool has_op_with_type(const std::shared_ptr<const ngraph::Function> &function) {
INFERENCE_ENGINE_API_CPP(bool) get_single_value(const std::shared_ptr<op::Constant> & const_node, float & value);

INFERENCE_ENGINE_API_CPP(std::shared_ptr<ngraph::Node>) normalize_constant(const std::shared_ptr<op::Constant> & constant,
const Shape & shape);
const PartialShape & shape);

INFERENCE_ENGINE_API_CPP(std::shared_ptr<ngraph::Node>) broadcastTo(const Output<Node>& input, const Shape& shape);

Expand Down
18 changes: 13 additions & 5 deletions inference-engine/src/transformations/src/ngraph_ops/eltwise.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,16 +37,24 @@ void op::Eltwise::validate_and_infer_types() {
NODE_VALIDATION_CHECK(this, element::Type::merge(et_result, data1_et, data2_et),
"Element types for first and second do not match :", data1_et, " and ", data2_et);

auto shape1 = get_input_partial_shape(0).to_shape();
auto shape2 = get_input_partial_shape(1).to_shape();
if (get_input_partial_shape(0).rank().is_dynamic() ||
get_input_partial_shape(1).rank().is_dynamic()) {
set_output_type(0, et_result, PartialShape::dynamic());
return;
}

std::vector<Dimension> shape1(get_input_partial_shape(0));
std::vector<Dimension> shape2(get_input_partial_shape(1));

ngraph::Shape output_shape(std::max(shape1.size(), shape2.size()));
std::vector<Dimension> output_shape(PartialShape::dynamic(std::max(shape1.size(), shape2.size())));
auto output_shape_it = output_shape.rbegin();

auto shape1_it = shape1.rbegin(), shape2_it = shape2.rbegin();
while (shape1_it != shape1.rend() || shape2_it != shape2.rend()) {
if (shape1_it != shape1.rend() && shape2_it != shape2.rend()) {
*output_shape_it = std::max(*shape1_it, *shape2_it);
if (shape1_it->is_static() && shape2_it->is_static()) {
*output_shape_it = (shape1_it->get_length() > shape2_it->get_length() ? *shape1_it : *shape2_it);
}
} else if (shape1_it != shape1.rend()) {
*output_shape_it = *shape1_it;
} else if (shape2_it != shape2.rend()) {
Expand All @@ -61,5 +69,5 @@ void op::Eltwise::validate_and_infer_types() {
}
}

set_output_type(0, data1_et, PartialShape(output_shape));
set_output_type(0, et_result, output_shape);
}
62 changes: 35 additions & 27 deletions inference-engine/src/transformations/src/ngraph_ops/topk_ie.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,43 +6,51 @@

#include <memory>
#include <string>
#include <ngraph/opsets/opset1.hpp>

using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::TopKIE::type_info;

op::TopKIE::TopKIE(const Output<Node>& data, const Output<Node>& k, const int64_t axis, const std::string& mode, const std::string& sort,
const Shape& output_shape)
: Op({data, k}), axis(axis), mode(mode), sort_type(sort), output_shape(output_shape) {

// Constructs the IE-internal TopK node over {data, k} inputs, recording the
// axis, mode and sort type as typed enums; output types/shapes are derived
// immediately via constructor_validate_and_infer_types().
op::TopKIE::TopKIE(const Output<Node>& data,
                   const Output<Node>& k,
                   const int64_t axis,
                   const op::TopKMode mode,
                   const op::TopKSortType sort)
    : Op({data, k})
    , m_axis(axis)
    , m_mode(mode)
    , m_sort_type(sort) {
    constructor_validate_and_infer_types();
}

std::shared_ptr<Node> op::TopKIE::copy_with_new_args(const NodeVector& new_args) const {
if (new_args.size() != 2) {
throw ngraph_error("Incorrect number of new arguments");
}

return make_shared<TopKIE>(new_args.at(0), new_args.at(1), axis, mode, sort_type, output_shape);
std::shared_ptr<Node> op::TopKIE::clone_with_new_inputs(const ngraph::OutputVector &new_args) const {
check_new_args_count(this, new_args);
return make_shared<TopKIE>(new_args.at(0), new_args.at(1), m_axis, m_mode, m_sort_type);
}

void op::TopKIE::validate_and_infer_types() {
set_output_type(0, get_input_element_type(0), output_shape);
set_output_type(1, element::i32, output_shape);
}

int64_t op::TopKIE::get_axis() {
return axis;
}

std::string op::TopKIE::get_mode() {
return mode;
}

std::string op::TopKIE::get_sort_type() {
return sort_type;
}
const auto& input_partial_shape = get_input_partial_shape(0);
const auto input_rank = input_partial_shape.rank();

NODE_VALIDATION_CHECK(this,
input_rank.is_dynamic() || input_rank.get_length() > 0,
"Input rank must be greater than 0.");

const auto& k_partial_shape = get_input_partial_shape(1);
NODE_VALIDATION_CHECK(
this, k_partial_shape.rank().compatible(1), "The 'K' input must be a 1D tensor.");

// Construct v1::TopK operation to calculate output shapes
std::shared_ptr<Node> topk;
if (auto k_const = std::dynamic_pointer_cast<opset1::Constant>(input_value(1).get_node_shared_ptr())) {
const auto k = k_const->cast_vector<int64_t>();
topk = std::make_shared<opset1::TopK>(input_value(0),
opset1::Constant::create(element::i64, Shape{}, k),
m_axis, m_mode, m_sort_type);
} else {
topk = std::make_shared<opset1::TopK>(input_value(0),
std::make_shared<opset1::Squeeze>(input_value(1), opset1::Constant::create(element::i64, Shape{1}, {0})),
m_axis, m_mode, m_sort_type);
}

Shape op::TopKIE::get_output_shape() {
return output_shape;
}
set_output_size(2);
set_output_type(0, get_input_element_type(0), topk->get_output_partial_shape(0));
set_output_type(1, element::i32, topk->get_output_partial_shape(1));
}
Loading

0 comments on commit a2f1cc5

Please sign in to comment.