// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "common_test_utils/common_utils.hpp"
#include "openvino/opsets/opset10.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "transformations/utils/utils.hpp"

using namespace CPUTestUtils;
using namespace ov::test;
using namespace ngraph::helpers;

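// The model built below interleaves layout-agnostic layers (Reshape/Transpose) with layout-sensitive
// ones (MatMul with bias, depthwise GroupConvolution). The intent, inferred from the exec-graph checks
// in validate_exec_graph(), is to exercise the CPU plugin logic that merges a Transpose with an adjacent
// Reorder, so that after compilation only "fake" Reorders and a single Transpose/Reshape pair remain.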
namespace CPUSubgraphTestsDefinitions {
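// Helper: creates a layer of the given type and appends an Add with a randomly generated scalar
// constant of the same precision as a bias (assumption: the actual bias values do not matter here).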
template <typename NodeType, typename... Args>
static std::shared_ptr<ov::Node> make_layer_with_bias(Args&&... args) {
    const auto node = std::make_shared<NodeType>(std::forward<Args>(args)...);
    const auto& precision = node->get_output_element_type(0);
    const auto bias_const = ngraph::builder::makeConstant(precision, ov::Shape{}, std::vector<float>{}, true);
    const auto bias = std::make_shared<ov::opset10::Add>(node, bias_const);
    return bias;
}

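// Builds the test subgraph: 4D input -> Reshape to 3D -> Transpose -> FC -> Transpose -> Reshape back
// to 4D -> depthwise GroupConvolution -> Reshape to 3D -> Transpose -> FC. The Reshape target shapes
// for the spatial dimensions are computed via ShapeOf-based subgraphs, so the spatial dims may be
// dynamic, while the rank and the channels dimension must be static.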
static std::shared_ptr<ov::Model> initSubgraph(const ov::PartialShape& input_shape, const ov::element::Type& precision) {
    const auto shapeof_subgraph_prc = ov::element::i32;
    OPENVINO_ASSERT(input_shape.rank().is_static() && input_shape.size() == 4, "initSubgraph: only 4D shapes are supported");
    OPENVINO_ASSERT(input_shape[1].is_static(), "initSubgraph: only static channels dim is supported");

    const auto param = std::make_shared<ov::opset10::Parameter>(precision, input_shape);
    const auto reshape_const_1 = ov::opset10::Constant::create(shapeof_subgraph_prc, {3}, {0, 0, -1});
    const auto reshape_1 = std::make_shared<ov::opset10::Reshape>(param, reshape_const_1, true);

    const auto transpose_const_1 = ov::opset10::Constant::create(shapeof_subgraph_prc, {3}, {0, 2, 1});
    const auto transpose_1 = std::make_shared<ov::opset10::Transpose>(reshape_1, transpose_const_1);

    const size_t channels = input_shape[1].get_length();
    const size_t fc_out_channels = 512;
    const auto fc_weights_1 = ngraph::builder::makeConstant(precision, ov::Shape{fc_out_channels, channels}, std::vector<float>{}, true);
    const auto fc_1 = make_layer_with_bias<ov::opset10::MatMul>(transpose_1, fc_weights_1, false, true);

    const auto transpose_const_2 = ov::opset10::Constant::create(shapeof_subgraph_prc, {3}, {0, 2, 1});
    const auto transpose_2 = std::make_shared<ov::opset10::Transpose>(fc_1, transpose_const_2);
    const auto spatial_dims = ov::op::util::node_to_get_shape_value_of_indices_from_shape_source(param, {2, 3}, {}, shapeof_subgraph_prc);
    const auto unchangeable_dims = ov::opset10::Constant::create(shapeof_subgraph_prc, {2}, {0, 0});
    const auto reshape_const_2 = ov::op::util::make_try_fold<ov::opset10::Concat>(ov::OutputVector{unchangeable_dims, spatial_dims}, 0);
    const auto reshape_2 = std::make_shared<ov::opset10::Reshape>(transpose_2, reshape_const_2, true);

    const auto conv_weights = ngraph::builder::makeConstant(precision, ov::Shape{fc_out_channels, 1, 1, 3, 3}, std::vector<float>{}, true);
    const auto conv = make_layer_with_bias<ov::opset10::GroupConvolution>(reshape_2,
                                                                          conv_weights,
                                                                          ov::Strides{1, 1},
                                                                          ov::CoordinateDiff{1, 1},
                                                                          ov::CoordinateDiff{1, 1},
                                                                          ov::Strides{1, 1});

    const auto dim_h = ov::op::util::node_to_get_shape_value_of_indices_from_shape_source(param, {2}, {}, shapeof_subgraph_prc);
    const auto dim_w = ov::op::util::node_to_get_shape_value_of_indices_from_shape_source(param, {3}, {}, shapeof_subgraph_prc);
    const auto fused_spatial_dims = ov::op::util::make_try_fold<ov::opset10::Multiply>(dim_h, dim_w);
    const auto reshape_const_3 = ov::op::util::make_try_fold<ov::opset10::Concat>(ov::OutputVector{unchangeable_dims, fused_spatial_dims}, 0);
    const auto reshape_3 = std::make_shared<ov::opset10::Reshape>(conv, reshape_const_3, true);
    const auto transpose_const_3 = ov::opset10::Constant::create(shapeof_subgraph_prc, {3}, {0, 2, 1});
    const auto transpose_3 = std::make_shared<ov::opset10::Transpose>(reshape_3, transpose_const_3);

    const auto fc_weights_2 = ngraph::builder::makeConstant(precision, ov::Shape{channels, fc_out_channels}, std::vector<float>{}, true);
    const auto fc_2 = make_layer_with_bias<ov::opset10::MatMul>(transpose_3, fc_weights_2, false, true);
    return std::make_shared<ov::Model>(fc_2, ov::ParameterVector{param}, "MergeTransposeReorderModel");
}

using MergeTransposeReorderTestParams = std::tuple<InputShape, ElementType>;
class MergeTransposeReorderCPUTest : public testing::WithParamInterface<MergeTransposeReorderTestParams>,
                                     virtual public SubgraphBaseTest,
                                     public CPUTestsBase {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<MergeTransposeReorderTestParams>& obj) {
        InputShape input_shape;
        ElementType precision;
        std::tie(input_shape, precision) = obj.param;

        std::ostringstream results;
        results << "IS=(" << ov::test::utils::partialShape2str({input_shape.first}) << ")_TS=(";
        for (const auto& static_shape : input_shape.second) {
            results << ov::test::utils::vec2str(static_shape) << "_";
        }
        results << ")_precision=" << precision;
        return results.str();
    }

protected:
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;
        InputShape input_shape;
        ElementType precision;
        std::tie(input_shape, precision) = this->GetParam();
        init_input_shapes({input_shape});
        function = initSubgraph(inputDynamicShapes[0], precision);
    }

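    // Verifies the compiled model: exactly one Transpose and one Reshape node should remain, and every
    // Reorder in the execution graph is expected to be a "fake" one (its friendly name contains "_fake"),
    // with exactly two such Reorders in total.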
    void validate_exec_graph() {
        CheckNumberOfNodesWithType(compiledModel, "Transpose", 1);
        CheckNumberOfNodesWithType(compiledModel, "Reshape", 1);
        size_t fake_reorder_count = 0;
        for (const auto& node : compiledModel.get_runtime_model()->get_ops()) {
            const auto& rtInfo = node->get_rt_info();
            auto it = rtInfo.find(ExecGraphInfoSerialization::LAYER_TYPE);
            IE_ASSERT(rtInfo.end() != it);
            if (it->second.as<std::string>() == "Reorder") {
                fake_reorder_count++;
                ASSERT_TRUE(node->get_friendly_name().find("_fake") != std::string::npos) << "Non-fake reorder found.";
            }
        }
        ASSERT_EQ(fake_reorder_count, 2);
    }
};

TEST_P(MergeTransposeReorderCPUTest, CompareWithRefs) {
    run();
    validate_exec_graph();
}

namespace {

std::vector<InputShape> inputShapes = {
    InputShape{{}, {{1, 32, 16, 16}}},
};

INSTANTIATE_TEST_SUITE_P(smoke_MergeTransposeReorder, MergeTransposeReorderCPUTest,
                         ::testing::Combine(::testing::ValuesIn(inputShapes),
                                            ::testing::Values(ElementType::f32)),
                         MergeTransposeReorderCPUTest::getTestCaseName);
}  // namespace
}  // namespace CPUSubgraphTestsDefinitions