Skip to content

Commit

Permalink
remove reader tests #4
Browse files Browse the repository at this point in the history
  • Loading branch information
olpipi committed Oct 21, 2022
1 parent afea708 commit ef180e6
Show file tree
Hide file tree
Showing 13 changed files with 1,545 additions and 4,984 deletions.
1 change: 1 addition & 0 deletions src/core/tests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -425,6 +425,7 @@ set(SRC
visitors/op/tan.cpp
visitors/op/tanh.cpp
visitors/op/tensor_iterator.cpp
visitors/op/tile.cpp
visitors/op/topk.cpp
visitors/op/transpose.cpp
visitors/op/unsqueeze.cpp
Expand Down
39 changes: 39 additions & 0 deletions src/core/tests/visitors/op/proposal.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,3 +60,42 @@ TEST(attributes, proposal_op) {
EXPECT_EQ(g_proposal_attrs.box_coordinate_scale, proposal_attrs.box_coordinate_scale);
EXPECT_EQ(g_proposal_attrs.framework, proposal_attrs.framework);
}

// Round-trips an opset1::Proposal node through the attribute visitor
// (NodeBuilder) and checks that every attribute — both the explicitly set
// ones and the defaulted ones — survives serialization/deserialization.
TEST(attributes, proposal_op2) {
    NodeBuilder::get_ops().register_factory<opset1::Proposal>();

    const auto probs = make_shared<op::Parameter>(element::f32, Shape{1, 12, 34, 62});
    const auto logits = make_shared<op::Parameter>(element::f32, Shape{1, 24, 34, 62});
    const auto img_info = make_shared<op::Parameter>(element::f32, Shape{3});

    // Only a subset of the attributes is populated here; the remaining fields
    // keep their defaults and are still compared below.
    op::ProposalAttrs attrs;
    attrs.base_size = 16;
    attrs.pre_nms_topn = 6000;
    attrs.post_nms_topn = 200;
    attrs.nms_thresh = 0.6f;
    attrs.feat_stride = 16;
    attrs.min_size = 16;
    attrs.ratio = {2.669f};
    attrs.scale = {4.0f, 6.0f, 9.0f, 16.0f, 24.0f, 32.0f};

    const auto node = make_shared<opset1::Proposal>(probs, logits, img_info, attrs);
    NodeBuilder builder(node);
    const auto clone = ov::as_type_ptr<opset1::Proposal>(builder.create());

    const auto& original_attrs = node->get_attrs();
    const auto& cloned_attrs = clone->get_attrs();

    EXPECT_EQ(cloned_attrs.base_size, original_attrs.base_size);
    EXPECT_EQ(cloned_attrs.pre_nms_topn, original_attrs.pre_nms_topn);
    EXPECT_EQ(cloned_attrs.post_nms_topn, original_attrs.post_nms_topn);
    EXPECT_EQ(cloned_attrs.nms_thresh, original_attrs.nms_thresh);
    EXPECT_EQ(cloned_attrs.feat_stride, original_attrs.feat_stride);
    EXPECT_EQ(cloned_attrs.min_size, original_attrs.min_size);
    EXPECT_EQ(cloned_attrs.ratio, original_attrs.ratio);
    EXPECT_EQ(cloned_attrs.scale, original_attrs.scale);
    EXPECT_EQ(cloned_attrs.clip_before_nms, original_attrs.clip_before_nms);
    EXPECT_EQ(cloned_attrs.clip_after_nms, original_attrs.clip_after_nms);
    EXPECT_EQ(cloned_attrs.normalize, original_attrs.normalize);
    EXPECT_EQ(cloned_attrs.box_size_scale, original_attrs.box_size_scale);
    EXPECT_EQ(cloned_attrs.box_coordinate_scale, original_attrs.box_coordinate_scale);
    EXPECT_EQ(cloned_attrs.framework, original_attrs.framework);
}
25 changes: 25 additions & 0 deletions src/core/tests/visitors/op/tile.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "util/visitor.hpp"

using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;

// Verifies that opset1::Tile exposes no attributes to the visitor:
// the serialized attribute map produced by NodeBuilder must be empty.
TEST(attributes, tile_op) {
    NodeBuilder::get_ops().register_factory<opset1::Tile>();

    const auto input = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
    const auto repeats_const = make_shared<op::Constant>(element::i64, Shape{4});
    const auto tile_node = make_shared<opset1::Tile>(input, repeats_const);

    NodeBuilder builder(tile_node);

    // Tile has no attributes of its own — repeats arrive via the second input.
    const auto expected_attr_count = 0;
    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
}
281 changes: 281 additions & 0 deletions src/frontends/ir/tests/frontend_test_basic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include "frontend_test.hpp"
#include "openvino/opsets/opset1.hpp"
#include "openvino/opsets/opset3.hpp"
#include "openvino/opsets/opset6.hpp"

class IRFrontendTests : public ::testing::Test, public IRFrontendTestsImpl {
protected:
Expand Down Expand Up @@ -957,3 +958,283 @@ TEST_F(IRFrontendTests, wrong_opset) {
ASSERT_THROW(model = core.read_model(testModel, ov::Tensor()), ov::Exception);
ASSERT_FALSE(!!model);
}

TEST_F(IRFrontendTests, extension_proposal_network) {
// the Proposal with 2 inputs was initially marked as "extension" operation but later was added to opset
// the test checks that IR reader properly instantiates the "extension" Proposal as "opset6" Proposal
std::string xmlModel = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer id="0" name="in1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,12,34,62"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>12</dim>
<dim>34</dim>
<dim>62</dim>
</port>
</output>
</layer>
<layer id="1" name="in2" type="Parameter" version="opset1">
<data element_type="f32" shape="1,24,34,62"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>24</dim>
<dim>34</dim>
<dim>62</dim>
</port>
</output>
</layer>
<layer id="2" name="in3" type="Const" version="opset1">
<data element_type="f32" offset="0" shape="3" size="12"/>
<output>
<port id="0" precision="FP32">
<dim>3</dim>
</port>
</output>
</layer>
<layer name="proposal" type="Proposal" precision="FP32" id="3" version="extension">
<data feat_stride="16" base_size="16" min_size="16" ratio="2.669000" scale="4.000000,6.000000,9.000000,16.000000,24.000000,32.000000" pre_nms_topn="6000" post_nms_topn="200" nms_thresh="0.600000"/>
<input>
<port id="1">
<dim>1</dim>
<dim>12</dim>
<dim>34</dim>
<dim>62</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>24</dim>
<dim>34</dim>
<dim>62</dim>
</port>
<port id="3">
<dim>3</dim>
</port>
</input>
<output>
<port id="3" precision="FP32">
<dim>1000</dim>
<dim>5</dim>
</port>
<port id="4" precision="FP32">
<dim>1000</dim>
</port>
</output>
</layer>
<layer id="4" name="output" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>200</dim>
<dim>5</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="2"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="3"/>
<edge from-layer="3" from-port="4" to-layer="4" to-port="0"/>
</edges>
</net>
)V0G0N";

// NOTE(review): two oddities in the XML above that the reader apparently
// tolerates — confirm they are intentional test fixtures:
//   1. the "proposal" layer reuses port id="3" for both an input and an
//      output port;
//   2. the Result layer declares dims {200, 5} while the connected proposal
//      output port 4 declares a single dim {1000}.

// 12-byte weights blob backing the 3-element f32 Const "in3" (offset 0,
// size 12 in the XML). vector(12, 0) already zero-fills the storage, so the
// explicit float writes below are redundant but harmless.
std::vector<unsigned char> buffer(12, 0);
float* floatBuffer = reinterpret_cast<float*>(buffer.data());
floatBuffer[0] = 0;
floatBuffer[1] = 0;
floatBuffer[2] = 0;

createTemporalModelFile(xmlModel, buffer);
std::shared_ptr<ov::Model> model;

ASSERT_NO_THROW(model = core.read_model(xmlFileName, binFileName));
ASSERT_TRUE(!!model);

// Success criterion: the "extension" Proposal must have been materialized
// as an ov::opset6::Proposal node somewhere in the loaded model.
for (auto op : model->get_ordered_ops()) {
if (op->get_friendly_name() == "proposal" &&
op->get_type_info() == ov::opset6::Proposal::get_type_info_static()) {
return;
}
}
FAIL() << "Custom proposal layer is not an opset6 operation.";
}

// Tensor names containing spaces, parentheses and brackets (here
// "output 0([1 4 512])") must be read verbatim from the IR and exposed on
// the model's output tensor.
TEST_F(IRFrontendTests, ReadModelWithTensorNamesWithSpaces) {
std::string testModel = R"V0G0N(
<net name="graph" version="11">
<layers>
<layer id="1" name="input1" type="Parameter" version="opset1">
<data shape="1,4,512" element_type="f32"/>
<output>
<port id="0" precision="FP32" names="input1">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="0" name="input2" type="Parameter" version="opset1">
<data shape="1,4,512" element_type="f32"/>
<output>
<port id="0" precision="FP32" names="input2">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="2" name="output 0([1 4 512])" type="Add" version="opset1">
<data auto_broadcast="numpy"/>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="output 0([1 4 512])">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="3" name="output 0([1 4 512])/sink_port_0" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
</edges>
</net>
)V0G0N";


std::shared_ptr<ov::Model> model;

// Model is read from an in-memory string; no weights are needed, hence the
// empty ov::Tensor.
ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor()));
ASSERT_TRUE(!!model);

// The single model output must carry exactly the one space-containing name
// declared in the "names" attribute of the Add layer's output port.
auto outputs = model->outputs();
EXPECT_EQ(outputs.size(), 1);
auto names = outputs.at(0).get_names();
EXPECT_EQ(names.size(), 1);
auto it = names.find("output 0([1 4 512])");
EXPECT_NE(it, names.end());
}

// Graph: (input1, input2) -> Add ("output add") -> ReLU -> Result.
// After reading the IR, add_output("output add") must attach a second Result
// to the Add layer's intermediate tensor, and that new output must expose
// exactly that tensor name.
TEST_F(IRFrontendTests, ReadModelWithTensorNamesAddOutput) {
std::string testModel = R"V0G0N(
<net name="graph" version="11">
<layers>
<layer id="1" name="input1" type="Parameter" version="opset1">
<data shape="1,4,512" element_type="f32"/>
<output>
<port id="0" precision="FP32" names="input1">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="0" name="input2" type="Parameter" version="opset1">
<data shape="1,4,512" element_type="f32"/>
<output>
<port id="0" precision="FP32" names="input2">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="2" name="Add 221" type="Add" version="opset1">
<data auto_broadcast="numpy"/>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="output add">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="3" name="output 0([1 4 512])" type="ReLU" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="output 0([1 4 512])">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="4" name="output 0([1 4 512])/sink_port_0" type="Result" version="opset1">
<rt_info>
<attribute name="fused_names" version="0" value="output 0([1 4 512])/sink_port_0"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>4</dim>
<dim>512</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="4" to-port="0"/>
</edges>
</net>)V0G0N";

std::shared_ptr<ov::Model> model;
std::string tensor_name = "output add";

// In-memory IR, no weights — hence the empty ov::Tensor.
ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor()));
ASSERT_TRUE(!!model);

// Promote the Add layer's intermediate tensor to a model output; the model
// then has the original Result plus the newly added one.
model->add_output(tensor_name);
auto outputs = model->outputs();
EXPECT_EQ(outputs.size(), 2);
// The appended output (index 1) must carry exactly the requested name.
auto names = outputs.at(1).get_names();
EXPECT_EQ(names.size(), 1);
auto it = names.find(tensor_name);
EXPECT_NE(it, names.end());
}
Loading

0 comments on commit ef180e6

Please sign in to comment.