Skip to content

Commit

Permalink
Clean LayerTestsUtils::LayerTestsCommon (openvinotoolkit#22309)
Browse files Browse the repository at this point in the history
* Delete `LayerTestsCommon::CalculateRefs()` impl

* Delete `ConvertRefsParams()`

* Delete `LayerTestsCommon::Validate()` impl

* Delete `LayerTestsUtils::getRuntimePrecision*()`

* Delete `LayerTestsCommon::Serialize()`

* Delete `LayerTestsCommon::QueryNetwork()`
  • Loading branch information
vurusovs authored Feb 1, 2024
1 parent 3d8b05e commit 42ea606
Show file tree
Hide file tree
Showing 5 changed files with 2 additions and 298 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -20,16 +20,4 @@ TEST_P(ActivationDynamicLayerTest, CompareWithRefs) {
Run();
}

// Smoke checks that the plugin's QueryNetwork reports support for every
// operation of the activation test model (delegates to
// LayerTestsCommon::QueryNetwork, which compares supported-layer sets).
TEST_P(ActivationLayerTest, QueryNetwork) {
QueryNetwork();
}

// Same QueryNetwork smoke check for the parameterized-activation variant.
TEST_P(ActivationParamLayerTest, QueryNetwork) {
QueryNetwork();
}

// Same QueryNetwork smoke check for the dynamic-shape activation variant.
TEST_P(ActivationDynamicLayerTest, QueryNetwork) {
QueryNetwork();
}

} // namespace LayerTestsDefinitions
Original file line number Diff line number Diff line change
Expand Up @@ -43,10 +43,6 @@ class LayerTestsCommon : public ov::test::TestsCommon {

virtual void Run();

virtual void Serialize(ov::pass::Serialize::Version ir_version = ov::pass::Serialize::Version::UNSPECIFIED);

virtual void QueryNetwork();

static void Compare(const std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> &expected,
const std::vector<InferenceEngine::Blob::Ptr> &actual,
float threshold,
Expand All @@ -66,27 +62,10 @@ class LayerTestsCommon : public ov::test::TestsCommon {

virtual void Compare(const InferenceEngine::TensorDesc &actualDesc, const InferenceEngine::TensorDesc &expectedDesc);

virtual void SetRefMode(RefMode mode);

std::shared_ptr<ov::Model> GetFunction();

std::map<std::string, std::string>& GetConfiguration();

// get runtime precision by operation friendly name
std::string getRuntimePrecision(const std::string& layerName);

// get runtime precision by operation type
std::string getRuntimePrecisionByType(const std::string& layerType);

// get runtime precision by operation friendly name which can be fused
std::string getRuntimePrecisionByFusedName(const std::string& layerName);

std::map<std::string, ov::Node::RTMap> getRuntimeInfo();

#ifndef NDEBUG
void showRuntimePrecisions();
#endif

template<class T_IE, class T_NGRAPH>
static void Compare(const T_NGRAPH *expected, const T_IE *actual, std::size_t size, float threshold, float abs_threshold = -1.f) {
for (std::size_t i = 0; i < size; ++i) {
Expand Down Expand Up @@ -160,10 +139,6 @@ class LayerTestsCommon : public ov::test::TestsCommon {

virtual std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> CalculateRefs();

/// default method to convert parameters for reference operation. Used before reference implementation execution
/// can be overridden by specific operation test
virtual void ConvertRefsParams();

virtual std::vector<InferenceEngine::Blob::Ptr> GetOutputs();

InferenceEngine::InferRequest inferRequest;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,6 @@ class RandomUniformLayerTest : public testing::WithParamInterface<RandomUniformP

protected:
void SetUp() override;

void ConvertRefsParams() override;
};

} // namespace LayerTestsDefinitions
Expand Down
256 changes: 2 additions & 254 deletions src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,31 +19,6 @@

namespace LayerTestsUtils {

namespace {
// Extracts the raw byte contents of each Result's feeding Constant from a
// fully constant-folded model. One (element type, bytes) pair is produced
// per model output; asserts if any Result is not fed by a single Constant.
std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> getConstData(
    const std::shared_ptr<ov::Model>& function) {
    const size_t resultCount = function->get_output_size();
    std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> folded(resultCount);
    const auto results = function->get_results();
    for (size_t idx = 0; idx < resultCount; ++idx) {
        folded[idx].first = results[idx]->get_element_type();
        const auto resultNode = function->output(idx).get_node_shared_ptr();
        OPENVINO_ASSERT(resultNode->inputs().size() == 1);
        const auto parentNode = resultNode->input_value(0).get_node_shared_ptr();
        OPENVINO_ASSERT(ov::op::util::is_constant(parentNode),
                        "Function was not fully folded to constant state!\n",
                        "Parent node of one of results is not constant and has type ",
                        parentNode->get_type_name());

        const auto constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(parentNode);
        const auto* bytes = constant->get_data_ptr<std::uint8_t>();
        const auto byteCount = ov::shape_size(parentNode->get_shape()) * parentNode->get_element_type().size();
        folded[idx].second.assign(bytes, bytes + byteCount);
    }
    return folded;
}
} // namespace

// Defaults: relative comparison threshold 1e-2; absolute threshold disabled
// (negative sentinel -1). The Core handle is shared via PluginCache so a
// single inference Core instance is reused across tests.
LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f), abs_threshold(-1.f) {
core = PluginCache::get().ie(targetDevice);
}
Expand Down Expand Up @@ -104,54 +79,6 @@ void LayerTestsCommon::Run() {
}
}

// Round-trips the test model through IR serialization: writes .xml/.bin,
// reads them back through the Core, and compares the re-read model against
// the in-memory one (including precisions and attributes). Temporary IR
// files are removed at the end.
void LayerTestsCommon::Serialize(ov::pass::Serialize::Version ir_version) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();

// Unique per-test prefix so parallel test runs do not clobber each other's IR files.
std::string output_name = ov::test::utils::generateTestFilePrefix();

std::string out_xml_path = output_name + ".xml";
std::string out_bin_path = output_name + ".bin";

ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(out_xml_path, out_bin_path, ir_version);
manager.run_passes(function);
// Serialize may touch the model; re-validate before comparing.
function->validate_nodes_and_infer_types();

auto result = getCore()->ReadNetwork(out_xml_path, out_bin_path);

bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), function, false, false, false,
true, // precision
true); // attributes

EXPECT_TRUE(success) << message;

// Best-effort cleanup of the temporary IR pair.
ov::test::utils::removeIRFiles(out_xml_path, out_bin_path);
}

// Asks the plugin which layers of the test model it supports and asserts
// that (a) every supported layer is mapped back to the original target
// device and (b) the supported set equals the full set of model operations,
// i.e. the plugin claims support for the whole model.
void LayerTestsCommon::QueryNetwork() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();

cnnNetwork = InferenceEngine::CNNNetwork(function);

auto queryNetworkResult = PluginCache::get().ie()->QueryNetwork(cnnNetwork, targetDevice);
// Expected: friendly names of all operations in the model.
std::set<std::string> expected;
for (auto&& node : function->get_ops()) {
expected.insert(node->get_friendly_name());
}

// Actual: layer names the plugin reported as supported.
std::set<std::string> actual;
for (auto&& res : queryNetworkResult.supportedLayersMap) {
// compare with originally used device name
ASSERT_EQ(ov::DeviceIDParser(res.second).get_device_name(), targetDevice);

actual.insert(res.first);
}
ASSERT_EQ(expected, actual);
}

// Default input generator: allocates a blob matching the input's tensor
// descriptor and fills it via FuncTestUtils. Operation tests override this
// to provide operation-specific input data.
InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo& info) const {
    const auto& tensorDesc = info.getTensorDesc();
    return FuncTestUtils::createAndFillBlob(tensorDesc);
}
Expand Down Expand Up @@ -489,60 +416,8 @@ void LayerTestsCommon::Infer() {
inferRequest.Infer();
}

// Default precision normalization applied to the reference model before the
// reference implementation runs: half-precision types (f16, bf16) are
// converted to f32. Operation-specific tests may override this (see
// RandomUniformLayerTest::ConvertRefsParams).
void LayerTestsCommon::ConvertRefsParams() {
ngraph::pass::ConvertPrecision<ov::element::Type_t::f16, ov::element::Type_t::f32>().run_on_model(functionRefs);
ngraph::pass::ConvertPrecision<ov::element::Type_t::bf16, ov::element::Type_t::f32>().run_on_model(functionRefs);
}

// Computes reference outputs for the test model according to refMode:
//   INTERPRETER      - run the reference model on the interpreter backend;
//   CONSTANT_FOLDING - fold the reference model to constants and read them;
//   IE               - not implemented, yields an empty result set.
// Returns one (element type, raw bytes) pair per reference output; an empty
// vector means "no reference available" and Validate() skips comparison.
// Fixes vs. previous revision: removed an unreachable trailing `return {};`
// and the unused `convertType` vector (computed but never read).
std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> LayerTestsCommon::CalculateRefs() {
// Normalize reference-model precisions (e.g. f16/bf16 -> f32) first.
ConvertRefsParams();
functionRefs->validate_nodes_and_infer_types();

// Snapshot every input blob into a plain byte buffer and record its
// element type so the reference backend can consume it.
auto referenceInputs = std::vector<std::vector<uint8_t>>(inputs.size());
auto refInputsTypes = std::vector<ov::element::Type>(inputs.size());
for (std::size_t i = 0; i < inputs.size(); ++i) {
const auto &input = inputs[i];
const auto inputSize = input->byteSize();

auto &referenceInput = referenceInputs[i];
referenceInput.resize(inputSize);

auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
IE_ASSERT(memory);
const auto lockedMemory = memory->wmap();
const auto buffer = lockedMemory.as<const std::uint8_t *>();
std::copy(buffer, buffer + inputSize, referenceInput.data());

refInputsTypes[i] = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(memory->getTensorDesc().getPrecision());
}

std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> expectedOutputs;
switch (refMode) {
case INTERPRETER: {
expectedOutputs = ngraph::helpers::interpreterFunction(functionRefs, referenceInputs, refInputsTypes);
break;
}
case CONSTANT_FOLDING: {
const auto &foldedFunc = ngraph::helpers::foldFunction(functionRefs, referenceInputs, refInputsTypes);
expectedOutputs = getConstData(foldedFunc);
break;
}
case IE: {
// reference inference on device with other options and nGraph function has to be implemented here
break;
}
}

return expectedOutputs;
}

std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
Expand All @@ -560,134 +435,7 @@ void LayerTestsCommon::Compare(
Compare(expectedOutputs, actualOutputs, threshold);
}

// Compares device outputs against reference outputs. Lazily clones the test
// model as the reference model on first use. An empty reference set (e.g.
// refMode == IE, which is unimplemented) silently skips the comparison.
void LayerTestsCommon::Validate() {
if (functionRefs == nullptr) {
functionRefs = function->clone();
}
auto expectedOutputs = CalculateRefs();
const auto &actualOutputs = GetOutputs();

// No reference produced -> nothing to compare (deliberate best-effort skip).
if (expectedOutputs.empty()) {
return;
}

IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
<< "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();

Compare(expectedOutputs, actualOutputs);
}

// Returns the "runtimePrecision" recorded in the execution graph for the
// node whose friendly name equals `layerName`. Returns "" when no node with
// that name exists; asserts when the node exists but carries no
// runtimePrecision entry in its runtime info.
std::string LayerTestsCommon::getRuntimePrecision(const std::string& layerName) {
    const auto execGraph = executableNetwork.GetExecGraphInfo();
    const auto execModel = execGraph.getFunction();

    for (const auto& node : execModel->get_ops()) {
        if (node->get_friendly_name() != layerName) {
            continue;
        }
        const auto& rtInfo = node->get_rt_info();
        const auto& precisionIt = rtInfo.find("runtimePrecision");
        IE_ASSERT(precisionIt != rtInfo.end()) << "Runtime precision is not found for node: " << node->get_friendly_name();
        return precisionIt->second.as<std::string>();
    }

    return "";
}

// Returns the "runtimePrecision" of the first execution-graph node whose
// "layerType" rt_info entry equals `layerType`; "" if no node matches.
// NOTE: the first assert fires for ANY node that lacks a layerType entry,
// even nodes of other types encountered before a match.
std::string LayerTestsCommon::getRuntimePrecisionByType(const std::string& layerType) {
const auto execGraph = executableNetwork.GetExecGraphInfo();
const auto execFunction = execGraph.getFunction();

for (const auto& op : execFunction->get_ops()) {
const auto& rtInfo = op->get_rt_info();
const auto& typeIt = rtInfo.find("layerType");

IE_ASSERT(typeIt != rtInfo.end()) << "Layer is not found for type: " << layerType;

auto type = typeIt->second.as<std::string>();
if (type == layerType) {
const auto& it = rtInfo.find("runtimePrecision");
IE_ASSERT(it != rtInfo.end()) << "Runtime precision is not found for node: " << type;
return it->second.as<std::string>();
}
}

return "";
}

// Returns the "runtimePrecision" of the execution-graph node whose
// "originalLayersNames" rt_info (a comma-separated list of the original
// layer names fused into it) contains `layerName`; "" if no node matches.
std::string LayerTestsCommon::getRuntimePrecisionByFusedName(const std::string& layerName) {
const auto execGraph = executableNetwork.GetExecGraphInfo();
const auto execFunction = execGraph.getFunction();

// Splits a comma-separated name list into a set of individual names.
const auto parse = [](const std::string& originalLayersNames) -> std::set<std::string> {
std::set<std::string> names;

std::string tmp = originalLayersNames;
size_t beginPosition = 0ul;
size_t endPosition;
while ((endPosition = tmp.find(",", beginPosition)) != std::string::npos) {
names.insert(tmp.substr(beginPosition, endPosition - beginPosition));
beginPosition = endPosition + 1;
}

// endPosition is npos here, so substr takes the remainder of the string
// (the last, or only, name in the list).
names.insert(tmp.substr(beginPosition, endPosition - beginPosition));
return names;
};

for (const auto& op : execFunction->get_ops()) {
const auto& rtInfo = op->get_rt_info();

const auto& nameIt = rtInfo.find("originalLayersNames");
IE_ASSERT(nameIt != rtInfo.end()) << "originalLayersNames is not found for node: " << layerName;
const auto fusedName = parse(nameIt->second.as<std::string>());
if (fusedName.find(layerName) == fusedName.end()) {
continue;
}

const auto& it = rtInfo.find("runtimePrecision");
IE_ASSERT(it != rtInfo.end()) << "runtimePrecision is not found for node: " << layerName;
const auto rtPrecisionPtr = it->second.as<std::string>();
return rtPrecisionPtr;
}

return "";
}

// Collects the runtime info (rt_info) of every node in the compiled
// execution graph, keyed by the node's friendly name. If two nodes share a
// friendly name, the later one wins (map assignment semantics).
std::map<std::string, ov::Node::RTMap> LayerTestsCommon::getRuntimeInfo() {
    std::map<std::string, ov::Node::RTMap> infoByName;
    const auto execGraph = executableNetwork.GetExecGraphInfo();
    // Renamed from `function` to avoid shadowing the member of the same name.
    const auto execModel = execGraph.getFunction();
    for (const auto& node : execModel->get_ops()) {
        infoByName[node->get_friendly_name()] = node->get_rt_info();
    }
    return infoByName;
}

#ifndef NDEBUG
void LayerTestsCommon::showRuntimePrecisions() {
const auto execGraph = executableNetwork.GetExecGraphInfo();
const auto execFunction = execGraph.getFunction();

for (const auto& op : execFunction->get_ops()) {
const auto& rtInfo = op->get_rt_info();

const auto& nameIt = rtInfo.find("originalLayersNames");
const auto name = nameIt->second.as<std::string>();

const auto& typeIt = rtInfo.find("layerType");
const auto type = typeIt->second.as<std::string>();

const auto& it = rtInfo.find("runtimePrecision");
const auto rtPrecisionPtr = it->second.as<std::string>();

std::cout << type << "(" << name << "): " << rtPrecisionPtr << std::endl;
}
}
#endif

// Selects how CalculateRefs() produces reference outputs
// (INTERPRETER, CONSTANT_FOLDING or IE).
void LayerTestsCommon::SetRefMode(RefMode mode) {
refMode = mode;
}
void LayerTestsCommon::Validate() {}

std::shared_ptr<ov::Model> LayerTestsCommon::GetFunction() {
return function;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,9 +82,4 @@ void RandomUniformLayerTest::SetUp() {
function = std::make_shared<ov::Model>(results, ov::ParameterVector{input}, "random_uniform");
}

// Override of the base ConvertRefsParams: converts only bf16 -> f32.
// Unlike the default, f16 is deliberately NOT converted
// ("we shouldn't use default conversion from f16 to f32" — presumably so
// the reference random-uniform sequence matches the device's f16 results;
// confirm against the reference implementation).
void RandomUniformLayerTest::ConvertRefsParams() {
// we shouldn't use default conversion from f16 to f32
ngraph::pass::ConvertPrecision<ov::element::Type_t::bf16, ov::element::Type_t::f32>().run_on_model(functionRefs);
}

} // namespace LayerTestsDefinitions

0 comments on commit 42ea606

Please sign in to comment.