[PTen] Unify data layout of pten and fluid #38583

Merged
merged 3 commits on Dec 31, 2021
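In short, this PR makes the fluid `paddle::framework::DataLayout` an alias of the pten `paddle::experimental::DataLayout` enum, so the `TransToPtenDataLayout` / `TransToFluidDataLayout` helpers can be removed and layout values can be copied straight across the fluid/pten boundary. A minimal sketch of the resulting shape (illustrative only; the authoritative definitions are in `paddle/pten/common/layout.h` in the diff below):

namespace paddle {
namespace experimental {
enum class DataLayout {
  UNDEFINED = 0, ANY, NHWC, NCHW, MKLDNN, NUM_DATA_LAYOUTS,
  ALL_LAYOUT = UNDEFINED,
  // fluid-compatible spellings of the same enumerators
  kAnyLayout = ANY, kNHWC = NHWC, kNCHW = NCHW, kMKLDNN = MKLDNN,
};
}  // namespace experimental
namespace framework {
// fluid now reuses the pten enum instead of defining its own
using DataLayout = paddle::experimental::DataLayout;
}  // namespace framework
}  // namespace paddle

// Call sites can therefore assign layouts directly, for example:
//   meta.layout = src.layout();        // was: pten::TransToPtenDataLayout(src.layout())
//   dst->set_layout(key.layout());     // was: pten::TransToFluidDataLayout(key.layout())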
3 changes: 1 addition & 2 deletions paddle/fluid/eager/eager_tensor.h
@@ -234,8 +234,7 @@ class EagerTensor final {
auto* framework_tensor =
var_.GetMutable<paddle::framework::LoDTensor>();
framework_tensor->Resize(tensor_->dims());
framework_tensor->set_layout(
pten::TransToFluidDataLayout(tensor_->layout()));
framework_tensor->set_layout(tensor_->layout());
// Construct framework::Tensor from egr::EagerTensor
auto tensor_dense =
std::dynamic_pointer_cast<pten::DenseTensor>(tensor_->impl());
56 changes: 1 addition & 55 deletions paddle/fluid/framework/data_layout.h
@@ -18,58 +18,4 @@ limitations under the License. */
#include <ostream>
#include <string>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {

enum class DataLayout {
kNHWC = 0,
kNCHW = 1,
kAnyLayout = 2,
kMKLDNN = 3, // all layouts supported by MKLDNN internally
};

inline DataLayout StringToDataLayout(const std::string& str) {
std::string s(str);
for (size_t i = 0; i < s.size(); ++i) {
s[i] = toupper(s[i]);
}

if (s == "NHWC") {
return DataLayout::kNHWC;
} else if (s == "NCHW") {
return DataLayout::kNCHW;
} else if (s == "ANYLAYOUT") {
return DataLayout::kAnyLayout;
} else if (s == "MKLDNNLAYOUT") {
return DataLayout::kMKLDNN;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Unknown data layout type string: %s.", s));
}
}

inline std::string DataLayoutToString(const DataLayout& data_layout) {
switch (data_layout) {
case DataLayout::kNHWC:
return "NHWC";
case DataLayout::kNCHW:
return "NCHW";
case DataLayout::kAnyLayout:
return "ANY_LAYOUT";
case DataLayout::kMKLDNN:
return "MKLDNNLAYOUT";
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Unknown Data Layout type %d.", data_layout));
}
}

inline std::ostream& operator<<(std::ostream& out, const DataLayout& l) {
out << DataLayoutToString(l);
return out;
}

} // namespace framework
} // namespace paddle
#include "paddle/pten/common/layout.h"
5 changes: 2 additions & 3 deletions paddle/fluid/framework/pten_utils.cc
@@ -60,7 +60,7 @@ OpKernelType TransPtenKernelKeyToOpKernelType(
proto::VarType::Type data_type =
pten::TransToProtoVarType(kernel_key.dtype());
platform::Place place = pten::TransToFluidPlace(kernel_key.backend());
DataLayout data_layout = pten::TransToFluidDataLayout(kernel_key.layout());
DataLayout data_layout = kernel_key.layout();
LibraryType library_type = LibraryType::kPlain;
if (kernel_key.backend() == pten::Backend::MKLDNN) {
library_type = LibraryType::kMKLDNN;
@@ -83,8 +83,7 @@ pten::KernelKey TransOpKernelTypeToPtenKernelKey(
} else {
// do
}
paddle::experimental::DataLayout layout =
pten::TransToPtenDataLayout(kernel_type.data_layout_);
paddle::experimental::DataLayout layout = kernel_type.data_layout_;
paddle::experimental::DataType dtype =
pten::TransToPtenDataType(kernel_type.data_type_);
return pten::KernelKey(backend, layout, dtype);
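With the shared enum, the kernel-key round trip in `pten_utils.cc` reduces to copying the layout member in both directions. A hedged sketch of a pten-to-fluid call site after this change (the KernelKey values are illustrative):

// Build a pten kernel key and translate it to a fluid OpKernelType.
pten::KernelKey kernel_key(pten::Backend::CPU,
                           paddle::experimental::DataLayout::NCHW,
                           paddle::experimental::DataType::FLOAT32);
// Inside TransPtenKernelKeyToOpKernelType the layout is now taken verbatim:
paddle::framework::DataLayout data_layout = kernel_key.layout();
// ...and the reverse direction likewise uses kernel_type.data_layout_ directly.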
3 changes: 1 addition & 2 deletions paddle/fluid/operators/reshape_op.cc
@@ -385,8 +385,7 @@ class ReshapeKernel {
// We can't MakePtenDenseTensor for case 2, so we solve this case by
// creating a temporary tensor here:
pten::DenseTensorMeta meta{pten::TransToPtenDataType(in->type()),
in->dims(),
pten::TransToPtenDataLayout(in->layout())};
in->dims(), in->layout()};
auto pt_out_tmp = std::make_shared<pten::DenseTensor>(
pten::make_intrusive<paddle::experimental::SharedStorage>(
ctx.GetPlace()),
4 changes: 2 additions & 2 deletions paddle/fluid/operators/transfer_layout_op.cc
@@ -40,7 +40,7 @@ class TransferLayoutOp : public framework::OperatorWithKernel {
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "TransferLayout");

auto dst_layout = ctx->Attrs().Get<int>("dst_layout");
auto low_bound = static_cast<int>(framework::DataLayout::kNHWC);
auto low_bound = static_cast<int>(framework::DataLayout::kAnyLayout);
auto upper_bound = static_cast<int>(framework::DataLayout::kMKLDNN);
PADDLE_ENFORCE_GE(
dst_layout, low_bound,
@@ -106,7 +106,7 @@ class TransferLayoutOpProtoMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "(LoDTensor) The input Tensor");
AddOutput("Out", "(LoDTensor) The Output Tensor with desired layout");
AddAttr<int>("dst_layout",
"kNHWC = 0, kNCHW = 1, kAnyLayout = 2, kMKLDNN = 3");
"kAnyLayout = 0, kNHWC = 1, kNCHW = 2, kMKLDNN = 3");
AddComment(R"DOC(
TransferLayout Operator)DOC");
}
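The attribute range check above now uses `kAnyLayout` rather than `kNHWC` as the lower bound, since the unified enum reorders the enumerators. A small illustrative helper equivalent to the PADDLE_ENFORCE checks (not the operator's actual code):

bool IsValidDstLayout(int dst_layout) {
  using paddle::framework::DataLayout;
  auto low_bound = static_cast<int>(DataLayout::kAnyLayout);
  auto upper_bound = static_cast<int>(DataLayout::kMKLDNN);
  // dst_layout must name one of the layouts between kAnyLayout and kMKLDNN.
  return dst_layout >= low_bound && dst_layout <= upper_bound;
}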
2 changes: 1 addition & 1 deletion paddle/fluid/operators/transfer_layout_op.h
@@ -66,7 +66,7 @@ class TransferLayoutFunctor {
// Just set layout/format. No real transform occur

auto out_format = platform::MKLDNNFormatForSize(
in_tensor.dims().size(), ToMKLDNNFormat(in_layout));
in_tensor.dims().size(), framework::ToMKLDNNFormat(in_layout));
out_tensor.ShareDataWith(in_tensor);
// For NHWC data we need reshape of tensors as MKL-DNN
// is expecting NHWC dims description order
14 changes: 12 additions & 2 deletions paddle/fluid/platform/bfloat16.h
@@ -155,14 +155,24 @@ struct PADDLE_ALIGN(2) bfloat16 {

// Conversion operators
HOSTDEVICE inline explicit operator float() const {
#ifdef PADDLE_WITH_HIP
uint32_t res = 0;
// We should be using memcpy in order to respect the strict aliasing rule
// but it fails in the HIP environment.
uint16_t temp = x;
uint16_t* temp_ptr = reinterpret_cast<uint16_t*>(&temp);
res = *temp_ptr;
return res;
#else
#ifdef PADDLE_CUDA_BF16
return __bfloat162float(*reinterpret_cast<const __nv_bfloat16*>(&x));
#else
float val = 0.f;
uint16_t temp = x;
memcpy(reinterpret_cast<char*>(&val) + 2, reinterpret_cast<char*>(&temp),
2);
std::memcpy(reinterpret_cast<char*>(&val) + 2,
reinterpret_cast<char*>(&temp), 2);
return val;
#endif
#endif
}

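For reference, the non-HIP fallback above widens the 16 stored bits into the upper half of an IEEE-754 float; the new HIP branch avoids `memcpy` because, as the added comment notes, it fails in that environment. A standalone sketch of the bit manipulation (assumes a little-endian host; names are illustrative, not Paddle APIs):

#include <cstdint>
#include <cstring>

// The 16 bfloat16 bits become the sign, exponent and high mantissa bits of a
// binary32 value; the low 16 mantissa bits are zero-filled.
inline float BFloat16BitsToFloat(uint16_t bits) {
  uint32_t widened = static_cast<uint32_t>(bits) << 16;
  float out = 0.f;
  std::memcpy(&out, &widened, sizeof(out));  // memcpy keeps strict aliasing intact
  return out;
}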
12 changes: 5 additions & 7 deletions paddle/pten/api/lib/utils/tensor_utils.cc
@@ -36,7 +36,7 @@ std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
VLOG(3) << "MakePtenDenseTensor based Tensor.";
pten::DenseTensorMeta meta{pten::TransToPtenDataType(src.type()),
src.dims(),
pten::TransToPtenDataLayout(src.layout()),
src.layout(),
src.offset()};
auto shared_storage = pten::make_intrusive<SharedStorage>(src.Holder());
return std::make_unique<pten::DenseTensor>(std::move(shared_storage),
@@ -54,10 +54,8 @@

std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
const paddle::framework::Tensor& src, const pten::TensorArgDef& arg_def) {
pten::DenseTensorMeta meta{arg_def.dtype,
src.dims(),
pten::TransToPtenDataLayout(src.layout()),
src.offset()};
pten::DenseTensorMeta meta{
arg_def.dtype, src.dims(), src.layout(), src.offset()};

if (src.IsInitialized() &&
src.place() == pten::TransToFluidPlace(arg_def.backend)) {
@@ -348,7 +346,7 @@ void ReMakePtenDenseTensor(const paddle::framework::Tensor& src,
auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
meta->dims = src.dims();
meta->dtype = pten::TransToPtenDataType(src.type());
meta->layout = pten::TransToPtenDataLayout(src.layout());
meta->layout = src.layout();
meta->offset = src.offset();

auto* shared_storage = static_cast<SharedStorage*>(
@@ -380,7 +378,7 @@ void ReMakePtenDenseTensorByArgDef(const paddle::framework::Tensor& src,
auto* meta = pten::CompatibleDenseTensorUtils::GetMutableMeta(dst);
meta->dims = src.dims();
meta->dtype = arg_def.dtype;
meta->layout = pten::TransToPtenDataLayout(src.layout());
meta->layout = src.layout();
meta->offset = src.offset();

auto* shared_storage = static_cast<SharedStorage*>(
73 changes: 57 additions & 16 deletions paddle/pten/common/layout.h
@@ -18,6 +18,8 @@ limitations under the License. */
namespace paddle {
namespace experimental {

// Note: Here the DataLayout is a public API for external users; the prefix `k`
// may confuse users, so we use all-uppercase names
enum class DataLayout {
UNDEFINED = 0,
// TODO(chenweihang): keep ANY for compatibility, remove it later
Expand All @@ -26,28 +28,67 @@ enum class DataLayout {
NCHW,
MKLDNN,
NUM_DATA_LAYOUTS,
// See Note [ Why we need ALL in baisc kernel key member? ]
// See Note [ Why we need ALL in basic kernel key member? ]
ALL_LAYOUT = UNDEFINED,
// Note: Unify pten DataLayout and fluid::framework::DataLayout;
// for compatibility with the fluid DataLayout, the `k`-prefixed aliases are kept here.
// Note: The original `kAnyLayout` (enum value 2) was a strange design:
// `kAnyLayout` could not represent any specific layout, yet at the same time
// it could stand for any layout. Strictly, it means a "default" or "undefined"
// layout and should not be mixed with other meaningful layouts.
kAnyLayout = ANY,
kNHWC = NHWC,
kNCHW = NCHW,
kMKLDNN = MKLDNN, // all layouts supported by MKLDNN internally
};

inline std::ostream& operator<<(std::ostream& os, DataLayout layout) {
} // namespace experimental

// In order to be compatible with the fluid implementation
namespace framework {

using DataLayout = paddle::experimental::DataLayout;

inline DataLayout StringToDataLayout(const std::string& str) {
std::string s(str);
for (size_t i = 0; i < s.size(); ++i) {
s[i] = toupper(s[i]);
}

if (s == "NHWC") {
return DataLayout::kNHWC;
} else if (s == "NCHW") {
return DataLayout::kNCHW;
} else if (s == "ANYLAYOUT") {
return DataLayout::kAnyLayout;
} else if (s == "MKLDNNLAYOUT") {
return DataLayout::kMKLDNN;
} else {
PD_THROW("Unknown data layout type string: ", s, ".");
}
}

inline std::string DataLayoutToString(const DataLayout& layout) {
switch (layout) {
case DataLayout::UNDEFINED:
os << "Undefined";
break;
case DataLayout::NHWC:
os << "NHWC";
break;
case DataLayout::NCHW:
os << "NCHW";
break;
case DataLayout::MKLDNN:
os << "MKLDNN";
break;
case DataLayout::kNHWC:
return "NHWC";
case DataLayout::kNCHW:
return "NCHW";
case DataLayout::kAnyLayout:
return "Undefined(AnyLayout)";
case DataLayout::kMKLDNN:
return "MKLDNN";
default:
PD_THROW(
"Invalid enum data layout type `", static_cast<int>(layout), "`.");
PD_THROW("Unknown Data Layout type ", static_cast<int>(layout), ".");
}
}
} // namespace framework

namespace experimental {

inline std::ostream& operator<<(std::ostream& os, DataLayout layout) {
os << framework::DataLayoutToString(layout);
return os;
}

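A short usage sketch of the unified header (illustrative, assuming the declarations shown above): the `k`-prefixed fluid spellings and the uppercase pten names are the same enumerators, and the string helpers now live in `paddle::framework` inside this file.

#include <string>
#include "paddle/pten/common/layout.h"

void LayoutAliasDemo() {
  using paddle::experimental::DataLayout;
  static_assert(DataLayout::kNCHW == DataLayout::NCHW, "same enumerator");
  static_assert(DataLayout::kAnyLayout == DataLayout::ANY, "same enumerator");

  // Round-trip through the fluid-compatible string helpers:
  DataLayout layout = paddle::framework::StringToDataLayout("nhwc");  // kNHWC
  std::string name = paddle::framework::DataLayoutToString(layout);   // "NHWC"
  (void)name;
}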
33 changes: 0 additions & 33 deletions paddle/pten/core/convert_utils.cc
@@ -63,21 +63,6 @@ paddle::experimental::DataType TransToPtenDataType(
}
}

DataLayout TransToPtenDataLayout(const paddle::framework::DataLayout& layout) {
switch (layout) {
case paddle::framework::DataLayout::kNHWC:
return DataLayout::NHWC;
case paddle::framework::DataLayout::kNCHW:
return DataLayout::NCHW;
case paddle::framework::DataLayout::kAnyLayout:
return DataLayout::ANY;
case paddle::framework::DataLayout::kMKLDNN:
return DataLayout::MKLDNN;
default:
return DataLayout::UNDEFINED;
}
}

paddle::platform::Place TransToFluidPlace(const Backend& backend) {
// TODO(chenweihang): add other trans cases later
switch (backend) {
@@ -141,24 +126,6 @@ paddle::framework::proto::VarType::Type TransToProtoVarType(
}
}

paddle::framework::DataLayout TransToFluidDataLayout(const DataLayout& layout) {
switch (layout) {
case DataLayout::NHWC:
return paddle::framework::DataLayout::kNHWC;
case DataLayout::NCHW:
return paddle::framework::DataLayout::kNCHW;
case DataLayout::ANY:
return paddle::framework::DataLayout::kAnyLayout;
case DataLayout::MKLDNN:
return paddle::framework::DataLayout::kMKLDNN;
default:
PADDLE_THROW(paddle::platform::errors::Unimplemented(
"Unsupported data layout `%s` when casting it into "
"paddle data layout.",
layout));
}
}

paddle::framework::LoD TransToFluidLoD(const pten::LoD& lod) {
paddle::framework::LoD out;
out.reserve(lod.size());
3 changes: 0 additions & 3 deletions paddle/pten/core/convert_utils.h
@@ -20,7 +20,6 @@ limitations under the License. */
#include "paddle/pten/core/tensor_meta.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/place.h"
Expand All @@ -37,12 +36,10 @@ const std::string& TransToPtenKernelName(const std::string& fluid_op_name);
Backend TransToPtenBackend(const paddle::platform::Place& place);
DataType TransToPtenDataType(
const paddle::framework::proto::VarType::Type& dtype);
DataLayout TransToPtenDataLayout(const paddle::framework::DataLayout& layout);

paddle::platform::Place TransToFluidPlace(const Backend& backend);
paddle::framework::proto::VarType::Type TransToProtoVarType(
const DataType& dtype);
paddle::framework::DataLayout TransToFluidDataLayout(const DataLayout& layout);

paddle::framework::LoD TransToFluidLoD(const pten::LoD& lod);
pten::LoD TransToPtenLoD(const paddle::framework::LoD& lod);
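With both declarations gone (and the fluid `data_layout.h` include dropped), any remaining call site can simply copy the layout field. A hedged before/after sketch with illustrative names:

// Before: an explicit conversion was needed in each direction, e.g.
//   meta->layout = pten::TransToPtenDataLayout(src.layout());
//   dst->set_layout(pten::TransToFluidDataLayout(tensor.layout()));
// After: fluid and pten share one DataLayout type, so plain assignment works.
meta->layout = src.layout();
dst->set_layout(tensor.layout());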
5 changes: 2 additions & 3 deletions paddle/pten/tests/api/test_tensor_utils.cc
@@ -49,8 +49,7 @@ TEST(tensor_utils, dense_tensor_to_lod_tensor) {
CHECK(dense_tensor.lod()[0] ==
static_cast<paddle::framework::Vector<size_t>>((lod_tensor.lod()[0])));
CHECK(dense_tensor.dtype() == pten::TransToPtenDataType(lod_tensor.type()));
CHECK(dense_tensor.layout() ==
pten::TransToPtenDataLayout(lod_tensor.layout()));
CHECK(dense_tensor.layout() == lod_tensor.layout());
CHECK(platform::is_cpu_place(lod_tensor.place()));

CHECK(lod_tensor.data<float>()[0] == 1.0f);
@@ -85,7 +84,7 @@ TEST(tensor_utils, dense_tensor_to_tensor) {
experimental::MovesStorage(&dense_tensor, &tensor);

CHECK(dense_tensor.dtype() == pten::TransToPtenDataType(tensor.type()));
CHECK(dense_tensor.layout() == pten::TransToPtenDataLayout(tensor.layout()));
CHECK(dense_tensor.layout() == tensor.layout());
CHECK(platform::is_cpu_place(tensor.place()));

CHECK(tensor.data<float>()[0] == 1.0f);
7 changes: 3 additions & 4 deletions paddle/pten/tests/common/test_data_layout.cc
@@ -25,10 +25,10 @@ namespace tests {
TEST(DataLayout, OStream) {
std::ostringstream oss;
oss << pten::DataLayout::UNDEFINED;
EXPECT_EQ(oss.str(), "Undefined");
EXPECT_EQ(oss.str(), "Undefined(AnyLayout)");
oss.str("");
oss << pten::DataLayout::ANY;
EXPECT_EQ(oss.str(), "Undefined");
EXPECT_EQ(oss.str(), "Undefined(AnyLayout)");
oss.str("");
oss << pten::DataLayout::NHWC;
EXPECT_EQ(oss.str(), "NHWC");
@@ -43,8 +43,7 @@ TEST(DataLayout, OStream) {
oss << pten::DataLayout::NUM_DATA_LAYOUTS;
} catch (const std::exception& exception) {
std::string ex_msg = exception.what();
EXPECT_TRUE(ex_msg.find("Invalid enum data layout type") !=
std::string::npos);
EXPECT_TRUE(ex_msg.find("Unknown Data Layout type") != std::string::npos);
}
}

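A minimal usage sketch matching the updated expectations above (illustrative; the tests spell the type `pten::DataLayout`, an alias of `paddle::experimental::DataLayout`): streaming ANY or UNDEFINED now prints "Undefined(AnyLayout)", and an out-of-range value throws with an "Unknown Data Layout type" message.

#include <sstream>
#include "paddle/pten/common/layout.h"

void OStreamDemo() {
  std::ostringstream oss;
  oss << paddle::experimental::DataLayout::ANY;      // "Undefined(AnyLayout)"
  oss.str("");
  oss << paddle::experimental::DataLayout::kMKLDNN;  // "MKLDNN"
}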