
Commit

Merge branch 'master' into guozhong/multi_realized_by_ctput
wgzintel authored Mar 29, 2023
2 parents d29d4d6 + 7a95830 commit c2df03b
Showing 20 changed files with 1,358 additions and 58 deletions.
35 changes: 35 additions & 0 deletions licensing/third-party-programs.txt
@@ -1605,3 +1605,38 @@ Some of the benchmark data in testdata/ is licensed differently:
domain; the latter does not have expired copyright, but is still in the
public domain according to the license information
(http://www.gutenberg.org/ebooks/53).

-------------------------------------------------------------

29. Pillow (https://github.com/python-pillow/Pillow)

The Python Imaging Library (PIL) is

Copyright © 1997-2011 by Secret Labs AB
Copyright © 1995-2011 by Fredrik Lundh

Pillow is the friendly PIL fork. It is

Copyright © 2010-2023 by Jeffrey A. Clark (Alex) and contributors.

Like PIL, Pillow is licensed under the open source HPND License:

By obtaining, using, and/or copying this software and/or its associated
documentation, you agree that you have read, understood, and will comply
with the following terms and conditions:

Permission to use, copy, modify and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appears in all copies, and that
both that copyright notice and this permission notice appear in supporting
documentation, and that the name of Secret Labs AB or the author not be
used in advertising or publicity pertaining to distribution of the software
without specific, written prior permission.

SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
112 changes: 112 additions & 0 deletions src/core/reference/include/ngraph/runtime/reference/interpolate.hpp
@@ -12,9 +12,11 @@
#include <functional>
#include <map>

#include "interpolate_pil.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/op/interpolate.hpp"
#include "ngraph/shape_util.hpp"
#include "transpose.hpp"

namespace ngraph {
namespace runtime {
@@ -302,6 +304,12 @@ class InterpolateEval final {
    case InterpolateMode::CUBIC:
        cubic_func(input_data, out);
        break;
    case InterpolateMode::BILINEAR_PILLOW:
        bilinear_pil_func(input_data, out);
        break;
    case InterpolateMode::BICUBIC_PILLOW:
        bicubic_pil_func(input_data, out);
        break;
    default:
        OPENVINO_THROW("Unsupported interpolation mode");
        break;
@@ -345,6 +353,10 @@ class InterpolateEval final {
    /// \param input_data pointer to input data
    /// \param out pointer to memory block for output data
    void nearest_func(const T* input_data, T* out);

    /// \brief Pillow-style bilinear resampling over the two spatial axes.
    void bilinear_pil_func(const T* input_data, T* out);
    /// \brief Pillow-style bicubic resampling over the two spatial axes.
    void bicubic_pil_func(const T* input_data, T* out);
    /// \brief Common helper: flattens the non-spatial dimensions and resamples each 2D (HW) slice
    ///        with the given Pillow filter.
    void multidim_pil_func(const T* input_data, T* out, const interpolate_pil::filter& filterp);
};

template <typename T>
@@ -564,6 +576,106 @@ void InterpolateEval<T>::cubic_func(const T* input_data, T* out) {
    NGRAPH_SUPPRESS_DEPRECATED_END
}

template <typename T>
void InterpolateEval<T>::bilinear_pil_func(const T* input_data, T* out) {
    struct interpolate_pil::filter bilinear = {interpolate_pil::bilinear_filter, 1.0, m_cube_coeff};
    multidim_pil_func(input_data, out, bilinear);
}

template <typename T>
void InterpolateEval<T>::bicubic_pil_func(const T* input_data, T* out) {
    struct interpolate_pil::filter bicubic = {interpolate_pil::bicubic_filter, 2.0, m_cube_coeff};
    multidim_pil_func(input_data, out, bicubic);
}
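
For reference, the filter structs brace-initialized above bundle a kernel function, its support radius, and the coefficient used by the cubic kernel. A minimal standalone sketch of plausible definitions follows, assuming Pillow's conventional resampling filters (triangle kernel with support 1.0; Keys cubic with support 2.0 and coefficient -0.5 by default). The real definitions live in interpolate_pil.hpp, which is not shown in this excerpt, and may differ in naming and detail.

// Illustrative sketch only; not the definitions from interpolate_pil.hpp.
#include <cmath>

namespace interpolate_pil_sketch {

// Aggregate matching the brace-initializations above: kernel, support radius,
// and the free coefficient used by the bicubic kernel.
struct filter {
    double (*filter_func)(double x, double coeff_a);
    double support;
    double coeff_a;
};

// Triangle (bilinear) kernel, support = 1.0.
inline double bilinear_filter(double x, double /*coeff_a*/) {
    x = std::fabs(x);
    return x < 1.0 ? 1.0 - x : 0.0;
}

// Keys cubic kernel, support = 2.0; Pillow uses coeff_a = -0.5 by default.
inline double bicubic_filter(double x, double a) {
    x = std::fabs(x);
    if (x < 1.0)
        return ((a + 2.0) * x - (a + 3.0)) * x * x + 1.0;
    if (x < 2.0)
        return (((x - 5.0) * x + 8.0) * x - 4.0) * a;
    return 0.0;
}

}  // namespace interpolate_pil_sketch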

template <typename T>
void InterpolateEval<T>::multidim_pil_func(const T* input_data, T* out, const interpolate_pil::filter& filterp) {
    OPENVINO_ASSERT(m_axes.size() == 2, "For Pillow based modes exactly two (HW) axes need to be provided.");

    auto h_dim_idx = m_axes[0];
    auto w_dim_idx = m_axes[1];
    auto h_dim_in = m_input_data_shape[h_dim_idx];
    auto w_dim_in = m_input_data_shape[w_dim_idx];
    auto h_dim_out = m_out_shape[h_dim_idx];
    auto w_dim_out = m_out_shape[w_dim_idx];
    auto in_matrix_elem_size = h_dim_in * w_dim_in;
    auto out_matrix_elem_size = h_dim_out * w_dim_out;

    auto box = std::vector<float>{0.f, 0.f, static_cast<float>(w_dim_in), static_cast<float>(h_dim_in)};

    if (shape_size(m_input_data_shape) == in_matrix_elem_size) {
        // Input data is 2D or ND with other dimensions equal 1
        interpolate_pil::imaging_resample_inner(input_data,
                                                w_dim_in,
                                                h_dim_in,
                                                w_dim_out,
                                                h_dim_out,
                                                filterp,
                                                box.data(),
                                                out);
    } else {
        // Flatten other dimensions and interpolate over 2D matrices
        std::vector<int64_t> in_transp_axes_order;
        for (size_t i = 0; i < m_input_data_shape.size(); ++i) {
            if (std::find(m_axes.begin(), m_axes.end(), i) == m_axes.end()) {
                in_transp_axes_order.push_back(i);
            }
        }
        in_transp_axes_order.insert(in_transp_axes_order.end(), m_axes.begin(), m_axes.end());

        Shape transp_input_shape;
        Shape transp_output_shape;
        for (auto&& axis : in_transp_axes_order) {
            transp_input_shape.push_back(m_input_data_shape[axis]);
            transp_output_shape.push_back(m_out_shape[axis]);
        }
        size_t flat_batch_size =
            transp_input_shape.size() > 2
                ? shape_size(transp_input_shape.begin(), transp_input_shape.begin() + transp_input_shape.size() - 2)
                : 1;

        // Transpose HW dimensions to the end of the tensor shape
        std::vector<T> transposed_in(input_data, input_data + shape_size(m_input_data_shape));
        transpose(reinterpret_cast<const char*>(input_data),
                  reinterpret_cast<char*>(transposed_in.data()),
                  m_input_data_shape,
                  sizeof(T),
                  in_transp_axes_order.data(),
                  transp_input_shape);

        std::vector<T> transposed_out(shape_size(m_out_shape));
        T* in_matrix_ptr = transposed_in.data();
        T* out_matrix_ptr = transposed_out.data();

        // Resample each 2D matrix
        for (size_t i = 0; i < flat_batch_size; ++i) {
            interpolate_pil::imaging_resample_inner(in_matrix_ptr,
                                                    w_dim_in,
                                                    h_dim_in,
                                                    w_dim_out,
                                                    h_dim_out,
                                                    filterp,
                                                    box.data(),
                                                    out_matrix_ptr);
            in_matrix_ptr += in_matrix_elem_size;
            out_matrix_ptr += out_matrix_elem_size;
        }

        std::vector<int64_t> out_transp_axes_order(m_out_shape.size() - 2);
        std::iota(out_transp_axes_order.begin(), out_transp_axes_order.end(), 0);
        out_transp_axes_order.insert(out_transp_axes_order.begin() + h_dim_idx, transp_input_shape.size() - 2);
        out_transp_axes_order.insert(out_transp_axes_order.begin() + w_dim_idx, transp_input_shape.size() - 1);

        // Transpose back to the original data dimensions order
        transpose(reinterpret_cast<const char*>(transposed_out.data()),
                  reinterpret_cast<char*>(out),
                  transp_output_shape,
                  sizeof(T),
                  out_transp_axes_order.data(),
                  m_out_shape);
    }
}
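
The helper above handles arbitrary input ranks by moving the two resampled axes to the end of the shape, running Pillow-style 2D resampling slice by slice, and transposing the result back. As a minimal standalone sketch, the snippet below works out the two axis permutations and the slice count for a hypothetical 2x4x6x3 NHWC tensor with axes {1, 2}; the shape and names are illustrative and not taken from the commit.

// Illustrative sketch only; mirrors the permutation logic above using plain std::vector.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    const std::vector<size_t> in_shape{2, 4, 6, 3};  // N, H, W, C (hypothetical)
    const std::vector<int64_t> axes{1, 2};           // positions of H and W
    const int64_t h_dim_idx = axes[0];
    const int64_t w_dim_idx = axes[1];

    // Forward order: all non-spatial axes first, then H and W -> {0, 3, 1, 2}.
    std::vector<int64_t> in_order;
    for (int64_t i = 0; i < static_cast<int64_t>(in_shape.size()); ++i) {
        if (std::find(axes.begin(), axes.end(), i) == axes.end())
            in_order.push_back(i);
    }
    in_order.insert(in_order.end(), axes.begin(), axes.end());

    // Number of independent 2D (HW) slices to resample: product of the leading
    // (non-spatial) dimensions of the transposed shape -> 2 * 3 = 6.
    size_t flat_batch_size = 1;
    for (size_t i = 0; i + 2 < in_shape.size(); ++i)
        flat_batch_size *= in_shape[in_order[i]];

    // Backward order: identity over the non-spatial axes, then insert the (now
    // trailing) H and W axes back at their original positions -> {0, 2, 3, 1}.
    std::vector<int64_t> out_order(in_shape.size() - 2);
    std::iota(out_order.begin(), out_order.end(), 0);
    out_order.insert(out_order.begin() + h_dim_idx, static_cast<int64_t>(in_shape.size()) - 2);
    out_order.insert(out_order.begin() + w_dim_idx, static_cast<int64_t>(in_shape.size()) - 1);

    for (auto v : in_order) std::cout << v << ' ';   // prints: 0 3 1 2
    std::cout << "| " << flat_batch_size << " | ";   // prints: 6
    for (auto v : out_order) std::cout << v << ' ';  // prints: 0 2 3 1
    std::cout << '\n';
}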

template <typename T>
void InterpolateEval<T>::nearest_func(const T* input_data, T* out) {
    NGRAPH_SUPPRESS_DEPRECATED_START
