WIP: Add space to batch evaluate

Mikhail Treskin committed Aug 26, 2020
1 parent 6c486cb commit 1e083c2
Showing 9 changed files with 159 additions and 53 deletions.
3 changes: 3 additions & 0 deletions ngraph/core/include/ngraph/op/space_to_batch.hpp
@@ -60,6 +60,9 @@ namespace ngraph
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;

+bool evaluate(const HostTensorVector& outputs,
+              const HostTensorVector& inputs) const override;
};
}
using v1::SpaceToBatch;
6 changes: 3 additions & 3 deletions ngraph/core/include/ngraph/runtime/reference/mvn.hpp
@@ -31,9 +31,9 @@ namespace ngraph {
template<typename T>
void mvn(const T *arg, T *out, const Shape &in_shape, bool normalize_variance, AxisSet reduction_axes,
double eps) {
-auto reduced_shape = reduce(in_shape, reduction_axes);
+auto reduced_shape = reduce(in_shape, reduction_axes, true);
std::vector<T> mean_val(shape_size(reduced_shape));
-mean(arg, mean_val.data(), in_shape, reduction_axes);
+mean(arg, mean_val.data(), in_shape, reduction_axes, true);
std::vector<T> broadcast_mean_data(shape_size(in_shape));
broadcast(mean_val.data(), broadcast_mean_data.data(), reduced_shape, in_shape, reduction_axes);
subtract(arg, broadcast_mean_data.data(), out, shape_size(in_shape));
@@ -42,7 +42,7 @@ namespace ngraph {
std::vector<T> multiply_val(shape_size(in_shape));
multiply(out, out, multiply_val.data(), shape_size(in_shape));
std::vector<T> sum_val(shape_size(reduced_shape));
-sum(multiply_val.data(), sum_val.data(), in_shape, reduction_axes);
+sum(multiply_val.data(), sum_val.data(), in_shape, reduction_axes, true);
std::vector<T> broadcast_sum(shape_size(in_shape));
broadcast(sum_val.data(), broadcast_sum.data(), reduced_shape, in_shape, reduction_axes);
T n = 1;
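The new trailing "true" argument is a keep-dims flag: reduce, mean, and sum now retain the reduced axes as size-1 dimensions, so reduced_shape stays rank-aligned with in_shape for the broadcast calls that follow. A shape-only illustration of the flag's effect (the helper below is a sketch, not the library's reduce):

#include <cstddef>
#include <set>
#include <vector>

// Sketch: reducing {2, 3, 4} over axes {1, 2} yields {2} when reduced
// dims are dropped, but {2, 1, 1} when they are kept, which lines up
// rank-for-rank with the input shape for broadcasting.
std::vector<size_t> reduce_shape(const std::vector<size_t>& in,
                                 const std::set<size_t>& axes,
                                 bool keep_dims)
{
    std::vector<size_t> out;
    for (size_t i = 0; i < in.size(); ++i)
    {
        if (axes.count(i) == 0)
            out.push_back(in[i]);
        else if (keep_dims)
            out.push_back(1);
    }
    return out;
}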
@@ -282,13 +282,13 @@ namespace ngraph
}
}
if (num_groups > 1){
-std::vector<const OUTPUT*> const_results_cpy;
+std::vector<const char*> const_results_cpy;
std::vector<Shape> in_shapes;
for (size_t g = 0; g < num_groups; g++){
-const_results_cpy.push_back(result_groups[g].data());
+const_results_cpy.push_back(reinterpret_cast<const char *>(result_groups[g].data()));
in_shapes.push_back(group_out_shape);
}
-concat<OUTPUT>(const_results_cpy, out, in_shapes, Shape(out_shape), in_channel_axis);
+concat(const_results_cpy, reinterpret_cast<char *>(out), in_shapes, Shape(out_shape), in_channel_axis, sizeof(OUTPUT));
} else {
std::copy(result_groups[0].data(), result_groups[0].data() + shape_size(out_shape), out);
}
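The group-convolution epilogue drops the typed concat<OUTPUT> in favor of a byte-level overload: per-group results are passed as const char* and sizeof(OUTPUT) tells the kernel the element width, so one instantiation serves every output type. A simplified sketch of a byte-wise axis concat (illustrative, not the library kernel):

#include <cstddef>
#include <cstring>
#include <vector>

// Each source shares the output shape except along `axis`; elem_size
// carries the element width so the copy is type-agnostic.
void concat_bytes(const std::vector<const char*>& srcs,
                  char* dst,
                  const std::vector<std::vector<size_t>>& src_shapes,
                  size_t axis,
                  size_t elem_size)
{
    size_t outer = 1; // product of dimensions before the concat axis
    for (size_t d = 0; d < axis; ++d)
        outer *= src_shapes[0][d];

    std::vector<size_t> row_bytes(srcs.size());
    for (size_t i = 0; i < srcs.size(); ++i)
    {
        size_t bytes = elem_size; // bytes from the concat axis down
        for (size_t d = axis; d < src_shapes[i].size(); ++d)
            bytes *= src_shapes[i][d];
        row_bytes[i] = bytes;
    }

    // Interleave one "row" from each source per outer index.
    char* out = dst;
    for (size_t o = 0; o < outer; ++o)
        for (size_t i = 0; i < srcs.size(); ++i)
        {
            std::memcpy(out, srcs[i] + o * row_bytes[i], row_bytes[i]);
            out += row_bytes[i];
        }
}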
9 changes: 4 additions & 5 deletions ngraph/core/src/op/batch_to_space.cpp
@@ -151,6 +151,10 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector &outputs,
return false;
}
auto data_shape = data->get_shape();

+if (data_shape.size() != 4 && data_shape.size() != 5) {
+    return false;
+}
size_t block_values_size = shape_size(inputs[1]->get_shape());
const auto *block_values = inputs[1]->get_data_ptr<int64_t>();
const auto *crops_begin_values = inputs[2]->get_data_ptr<int64_t>();
@@ -168,15 +172,13 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector &outputs,
}

auto *flat_data = data->get_data_ptr<char>();
-auto *d0 = reinterpret_cast<float *>(flat_data);

for (size_t block_idx = 1; block_idx < block_values_size; ++block_idx) {
dispersed_shape[0] = block_values[block_idx];
dispersed_shape[1] /= block_values[block_idx];
std::vector<char> dispersed_data(shape_size(dispersed_shape) * elem_size);
runtime::opt_kernel::reshape(flat_data, dispersed_data.data(), data_shape, plain_axes_order, dispersed_shape,
elem_size);
-auto *d1 = reinterpret_cast<float *>(dispersed_data.data());

size_t val = 1;
for (size_t axis_idx = 0; axis_idx <= block_values_size; ++axis_idx) {
@@ -194,13 +196,11 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector &outputs,
std::vector<char> post_transpose_data(shape_size(post_transpose_shape) * elem_size);
runtime::opt_kernel::reshape(dispersed_data.data(), post_transpose_data.data(), dispersed_shape, axes_order,
post_transpose_shape, elem_size);
-auto *d2 = reinterpret_cast<float *>(post_transpose_data.data());
squeezed_shape[0] = dispersed_shape[1];
squeezed_shape[block_idx] *= block_values[block_idx];
dispersed_shape[block_idx + 1] = squeezed_shape[block_idx];
runtime::opt_kernel::reshape(post_transpose_data.data(), flat_data, post_transpose_shape, plain_axes_order,
squeezed_shape, elem_size);
-auto *d3 = reinterpret_cast<float *>(flat_data);
data_shape = squeezed_shape;
}

@@ -221,6 +221,5 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector &outputs,
SlicePlan slice_plan = make_slice_plan(data_shape, begins, ends, default_strides, begin_mask, end_mask, AxisSet(),
AxisSet(), AxisSet());
runtime::reference::strided_slice(flat_data, outputs[0]->get_data_ptr<char>(), data_shape, slice_plan, elem_size);
-auto * d = outputs[0]->get_data_ptr<float>();
return true;
}
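For reference, the reshape/transpose/reshape loop above realizes the BatchToSpace shape contract: the batch dimension shrinks by the product of the block values, and every other dimension grows by its block value before the final strided slice removes the crops. A small shape-arithmetic sketch (values illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

// For data {4, 1, 2, 2}, block {1, 1, 2, 2} and zero crops:
// batch 4 / (2 * 2) = 1, spatial dims 2 * 2 = 4 -> output {1, 1, 4, 4}.
std::vector<int64_t> batch_to_space_shape(const std::vector<int64_t>& data,
                                          const std::vector<int64_t>& block,
                                          const std::vector<int64_t>& crops_begin,
                                          const std::vector<int64_t>& crops_end)
{
    int64_t block_prod = 1;
    for (int64_t b : block)
        block_prod *= b;

    std::vector<int64_t> out(data.size());
    out[0] = data[0] / block_prod;
    for (size_t i = 1; i < data.size(); ++i)
        out[i] = data[i] * block[i] - crops_begin[i] - crops_end[i];
    return out;
}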
102 changes: 102 additions & 0 deletions ngraph/core/src/op/space_to_batch.cpp
@@ -16,13 +16,17 @@
#include <cmath>
#include <cstddef>
#include <memory>
+#include <cstring>
+#include <numeric>

#include "ngraph/builder/make_constant.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/space_to_batch.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/shape.hpp"

#include "ngraph/runtime/reference/pad.hpp"
#include "ngraph/runtime/opt_kernel/reshape.hpp"

using namespace std;
using namespace ngraph;

@@ -135,3 +139,101 @@ bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& visitor)
{
return true;
}

bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
auto data = inputs[0];
auto out = outputs[0];
const auto &out_shape = out->get_shape();
size_t elem_size = data->get_element_type().size();

if (data->get_partial_shape().is_dynamic()) {
return false;
}
auto data_shape = data->get_shape();

if (!(data_shape.size() == 4 || data_shape.size() == 5)) {
return false;
}

size_t block_values_size = shape_size(inputs[1]->get_shape());
const auto *block_values = inputs[1]->get_data_ptr<int64_t>();
const auto *pads_begin = inputs[2]->get_data_ptr<int64_t>();
const auto *pads_end = inputs[3]->get_data_ptr<int64_t>();



const char* pad_value = nullptr;
const std::vector<char> pad_zero_value(elem_size, 0);
if (inputs.size() == 4)
{
pad_value = inputs[3]->get_data_ptr<char>();
}
else
{
pad_value = pad_zero_value.data();
}
CoordinateDiff pads_begin_vec(shape_size(inputs[2]->get_shape()));
pads_begin_vec.assign(pads_begin, pads_begin + shape_size(inputs[2]->get_shape()));
CoordinateDiff pads_end_vec(shape_size(inputs[2]->get_shape()));
pads_end_vec.assign(pads_end, pads_end + shape_size(inputs[2]->get_shape()));

Shape padded_shape(data_shape.size());
for (size_t i = 0; i < data_shape.size(); ++i) {
padded_shape[i] = data_shape[i] + pads_begin_vec[i] + pads_end_vec[i];
}

// Pad into a scratch buffer; the reshape loop below consumes the padded data.
std::vector<char> padded_data(shape_size(padded_shape) * elem_size);
ngraph::runtime::reference::pad(data->get_data_ptr<char>(),
                                pad_value,
                                padded_data.data(),
                                elem_size,
                                data_shape,
                                padded_shape,
                                pads_begin_vec,
                                pads_end_vec,
                                ngraph::op::PadMode::CONSTANT);
char *flat_data = padded_data.data();
data_shape = padded_shape;

Shape dispersed_shape(block_values_size + 1);
std::vector<size_t> axes_order(block_values_size + 1);
Shape squeezed_shape(data_shape.begin(), data_shape.end());
std::vector<size_t> plain_axes_order(block_values_size + 1);
std::iota(plain_axes_order.begin(), plain_axes_order.end(), 0);
for (int64_t block_idx = block_values_size - 1; block_idx >= 0; --block_idx) {
int64_t sq_shape_idx = block_values_size - 1;
int64_t axis_idx = axes_order.size() - 1;
for (int64_t shape_idx = dispersed_shape.size() - 1; shape_idx >= 0; --shape_idx) {
if (shape_idx == (block_idx + 1)) {
dispersed_shape[shape_idx] = block_values[block_idx];
axes_order[0] = shape_idx;
} else if (shape_idx == block_idx) {
dispersed_shape[shape_idx] = squeezed_shape[sq_shape_idx]/block_values[block_idx];
axes_order[axis_idx] = shape_idx;
axis_idx--;
sq_shape_idx--;
} else {
dispersed_shape[shape_idx] = squeezed_shape[sq_shape_idx];
axes_order[axis_idx] = shape_idx;
axis_idx--;
sq_shape_idx--;
}
}
std::vector<char> dispersed_data(shape_size(data_shape) * elem_size);
runtime::opt_kernel::reshape(flat_data, dispersed_data.data(), data_shape, plain_axes_order, dispersed_shape,
elem_size);

Shape post_transpose_shape(axes_order.size());
for (size_t i = 0; i < axes_order.size(); ++i) {
post_transpose_shape[i] = dispersed_shape[axes_order[i]];
}
std::vector<char> post_transpose_data(shape_size(post_transpose_shape) * elem_size);
runtime::opt_kernel::reshape(dispersed_data.data(), post_transpose_data.data(), dispersed_shape, axes_order,
post_transpose_shape, elem_size);
squeezed_shape[0] *= block_values[block_idx];
squeezed_shape[block_idx] /= block_values[block_idx];

runtime::opt_kernel::reshape(post_transpose_data.data(), flat_data, post_transpose_shape, plain_axes_order,
squeezed_shape, elem_size);
data_shape = squeezed_shape;
}

std::memcpy(out->get_data_ptr<char>(), flat_data, shape_size(out_shape) * elem_size);
return true;
}
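A minimal host-side exercise of the new override might look like the sketch below. Shapes and values are illustrative, and it assumes the 2020-era HostTensor constructor that wraps a caller-owned buffer; for data {1, 1, 2, 2}, block {1, 1, 2, 2} and zero pads, the expected output shape is {4, 1, 1, 1}.

#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/host_tensor.hpp"

using namespace ngraph;

bool try_space_to_batch_evaluate()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
    auto block = op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2});
    auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
    auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
    auto s2b = std::make_shared<op::v1::SpaceToBatch>(data, block, pads_begin, pads_end);

    // Host-side buffers; evaluate() reads and writes these directly.
    std::vector<float> in_values{1.f, 2.f, 3.f, 4.f};
    std::vector<int64_t> block_values{1, 1, 2, 2};
    std::vector<int64_t> zeros{0, 0, 0, 0};
    std::vector<float> out_values(4);

    HostTensorVector ins{
        std::make_shared<runtime::HostTensor>(element::f32, Shape{1, 1, 2, 2}, in_values.data()),
        std::make_shared<runtime::HostTensor>(element::i64, Shape{4}, block_values.data()),
        std::make_shared<runtime::HostTensor>(element::i64, Shape{4}, zeros.data()),
        std::make_shared<runtime::HostTensor>(element::i64, Shape{4}, zeros.data())};
    HostTensorVector outs{
        std::make_shared<runtime::HostTensor>(element::f32, Shape{4, 1, 1, 1}, out_values.data())};

    return s2b->evaluate(outs, ins); // out_values becomes {1, 2, 3, 4}
}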
41 changes: 2 additions & 39 deletions ngraph/test/runtime/interpreter/evaluates_map.cpp
@@ -27,9 +27,8 @@
#include <ngraph/runtime/reference/ceiling.hpp>
#include <ngraph/runtime/reference/select.hpp>

#include "reference/detection_output.hpp"
#include "reference/scatter_nd_update.hpp"
#include "reference/scatter_update.hpp"
#include "ngraph/runtime/reference/detection_output.hpp"
#include "ngraph/runtime/reference/scatter_nd_update.hpp"
#include "reference/gelu.hpp"
#include "reference/hard_sigmoid.hpp"
#include "reference/elu.hpp"
@@ -318,42 +317,6 @@ namespace {
return true;
}

-template<element::Type_t ET>
-bool evaluate(const shared_ptr<op::v3::ScatterUpdate> &op, const HostTensorVector &outputs,
-const HostTensorVector &input) {
-using T = typename element_type_traits<ET>::value_type;
-if (op->get_input_element_type(3) != element::i64)
-throw ngraph_error(
-"ScatterNDUpdate layer support only i64 'axis' input precision!");
-
-auto idxType = op->get_input_element_type(1);
-if (idxType == element::i32) {
-runtime::reference::scatterUpdate<T, int32_t, int64_t>(
-input[0]->get_data_ptr<const T>(),
-input[1]->get_data_ptr<const int32_t>(),
-input[2]->get_data_ptr<const T>(),
-input[3]->get_data_ptr<const int64_t>(),
-outputs[0]->get_data_ptr<T>(),
-op->get_input_shape(0),
-op->get_input_shape(1),
-op->get_input_shape(2));
-} else if (idxType == element::i64) {
-runtime::reference::scatterUpdate<T, int64_t, int64_t>(
-input[0]->get_data_ptr<const T>(),
-input[1]->get_data_ptr<const int64_t>(),
-input[2]->get_data_ptr<const T>(),
-input[3]->get_data_ptr<const int64_t>(),
-outputs[0]->get_data_ptr<T>(),
-op->get_input_shape(0),
-op->get_input_shape(1),
-op->get_input_shape(2));
-} else {
-throw ngraph_error(
-"ScatterUpdate layer support only i32 and i64 'indices' input precision!");
-}
-return true;
-}

template<element::Type_t ET>
bool evaluate(const shared_ptr<op::v1::Select> &op, const HostTensorVector &outputs,
const HostTensorVector &input) {
4 changes: 2 additions & 2 deletions ngraph/test/runtime/interpreter/int_executable.cpp
@@ -315,8 +315,8 @@ runtime::interpreter::INTExecutable::evaluate_node(const std::shared_ptr<Node> &
{
res = it->second(node, outputs, inputs);
if (!res) {
throw ngraph_error(std::string("Interpreter backend doesn't implement evaluate method for OP ") +
node->get_type_info().name);
throw ngraph_error(std::string("Running evaluate method for OP ") +
node->get_type_info().name + std::string(" failed!"));
}
}
else
1 change: 0 additions & 1 deletion ngraph/test/runtime/interpreter/opset_int_tbl.hpp
@@ -44,7 +44,6 @@ NGRAPH_OP(ExtractImagePatches, op::v3)
NGRAPH_OP(ShapeOf, op::v3)
NGRAPH_OP(NonZero, op::v3)
NGRAPH_OP(ScatterNDUpdate, op::v3)
-NGRAPH_OP(ScatterUpdate, op::v3)
NGRAPH_OP(HardSigmoid, op::v0)
NGRAPH_OP(Elu, op::v0)
NGRAPH_OP(Selu, op::v0)
40 changes: 40 additions & 0 deletions ngraph/test/runtime/interpreter/reference/elu.hpp
@@ -0,0 +1,40 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include <cmath>
#include <cstddef>

namespace ngraph
{
namespace runtime
{
namespace reference
{
template <typename T>
void elu(const T* arg, T* out, size_t count, double alpha)
{
for (size_t i = 0; i < count; i++)
{
out[i] = arg[i] < T(0) ? T(alpha * (std::exp(arg[i]) - 1.0)) : arg[i];
}
}
}


}
}
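
Usage of the new kernel is direct: with alpha = 1.0, negative inputs map to alpha * (exp(x) - 1) and non-negative inputs pass through unchanged. A small illustrative driver (the include path is hypothetical):

#include <vector>

#include "elu.hpp" // hypothetical path to the header above

int main()
{
    std::vector<float> in{-1.0f, 0.0f, 2.0f};
    std::vector<float> out(in.size());
    ngraph::runtime::reference::elu(in.data(), out.data(), in.size(), 1.0);
    // out is approximately {-0.632, 0.0, 2.0}
    return 0;
}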
