[Phi&CustomOp] Remove deprecated enum PlaceType for custom op & add warning (#41647)

* remove old custom op placetype

* replace dist placetype usage

* add with gpu macro

* fix mutable_data error

* fix set value error

* add comment
chenwhql committed Apr 13, 2022
1 parent 1a4ab96 commit 6242a70
Showing 18 changed files with 236 additions and 299 deletions.
9 changes: 5 additions & 4 deletions paddle/fluid/distributed/collective/Common.cc
@@ -41,13 +41,14 @@ std::string GetKeyFromPlaces(const std::vector<Place>& places) {
}

static bool CheckTensorsInPlace(const std::vector<Tensor>& tensors,
- const PlaceType type) {
- return std::all_of(tensors.cbegin(), tensors.cend(),
- [&](const Tensor& t) { return t.place() == type; });
+ phi::AllocationType type) {
+ return std::all_of(tensors.cbegin(), tensors.cend(), [&](const Tensor& t) {
+ return t.place().GetType() == type;
+ });
}

bool CheckTensorsInCudaPlace(const std::vector<Tensor>& tensors) {
- return CheckTensorsInPlace(tensors, PlaceType::kGPU);
+ return CheckTensorsInPlace(tensors, phi::AllocationType::GPU);
}

} // namespace distributed
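
Note: the pattern above generalizes to other call sites being ported off the removed enum: device checks that used to compare Tensor::place() against a PlaceType value now go through the unified phi::Place and its AllocationType. A minimal standalone sketch of that pattern follows; the header paths and helper names are illustrative assumptions, not part of this diff.

#include <algorithm>
#include <vector>

#include "paddle/phi/api/include/tensor.h"  // paddle::experimental::Tensor
#include "paddle/phi/common/place.h"        // phi::Place, phi::AllocationType (assumed path)

namespace example {

using paddle::experimental::Tensor;

// Post-change pattern: classify the device via the AllocationType carried by
// the unified Place, instead of comparing the removed PlaceType enum.
inline bool AllTensorsOn(const std::vector<Tensor>& tensors,
                         phi::AllocationType type) {
  return std::all_of(tensors.cbegin(), tensors.cend(), [&](const Tensor& t) {
    return t.place().GetType() == type;
  });
}

inline bool AllTensorsOnGpu(const std::vector<Tensor>& tensors) {
  return AllTensorsOn(tensors, phi::AllocationType::GPU);
}

}  // namespace example
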
7 changes: 0 additions & 7 deletions paddle/fluid/distributed/collective/reducer.cc
@@ -414,20 +414,13 @@ void EagerReducer::InitializeDenseGroups(
p_group->dense_tensors_.push_back(phi::DenseTensor());

const auto &dtype = tensor.dtype();
- const auto &place = tensor.place();
const auto &inner_place = tensor.impl()->place();
if (index > 0) {
PADDLE_ENFORCE_EQ(dtype, p_group->dtype_,
platform::errors::PreconditionNotMet(
"Tensor %s has unexpected dtype.", tensor_name));
- PADDLE_ENFORCE_EQ(place, place_,
- platform::errors::PreconditionNotMet(
- "Tensor %s has different place. Expected place is "
- "%s, but actual place is %s",
- tensor_name, inner_place_, inner_place));
} else {
p_group->dtype_ = dtype;
- place_ = place;
inner_place_ = inner_place;
}
}
2 changes: 0 additions & 2 deletions paddle/fluid/distributed/collective/reducer.h
@@ -26,7 +26,6 @@
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/utils/string/string_helper.h"
@@ -121,7 +120,6 @@ class EagerReducer {

std::vector<EagerGroup> groups_;
std::vector<TensorLocator> variable_locators_;
- PlaceType place_;
platform::Place inner_place_;
size_t next_group_ = 0;
int64_t nranks_ = -1;
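
Note: with the PlaceType member removed, the reducer keeps only the framework-level inner_place_. Where a device-consistency check is still wanted, it can be written directly on the unified place object; a minimal sketch under the assumption that phi::Place provides equality comparison (the helper name is hypothetical):

#include "paddle/phi/common/place.h"  // phi::Place (assumed path)

// Hypothetical helper, not part of this diff: compare two places directly,
// covering both allocation type (CPU/GPU/...) and device id, instead of the
// removed PlaceType enum comparison.
inline bool SamePlace(const phi::Place& a, const phi::Place& b) {
  return a == b;
}
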
9 changes: 4 additions & 5 deletions paddle/fluid/framework/custom_operator.cc
@@ -36,7 +36,6 @@ limitations under the License. */
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/api/all.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/utils/any.h"
@@ -627,8 +626,8 @@ class CustomGradOpMaker<imperative::OpBase>
static void RegisterOperatorKernelWithPlace(
const std::string& name,
const OperatorWithKernel::OpKernelFunc& op_kernel_func,
- const proto::VarType::Type type, const PlaceType& place) {
- OpKernelType key(type, experimental::ConvertExtPlaceToInnerPlace(place));
+ const proto::VarType::Type type, const platform::Place& place) {
+ OpKernelType key(type, place);
VLOG(3) << "Custom Operator: op kernel key: " << key;
OperatorWithKernel::AllOpKernels()[name][key] = op_kernel_func;
}
@@ -666,10 +665,10 @@ static void RegisterOperatorKernel(const std::string& name,
op_kernel_func = func;
}
RegisterOperatorKernelWithPlace(name, op_kernel_func, proto::VarType::RAW,
- PlaceType::kCPU);
+ platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
RegisterOperatorKernelWithPlace(name, op_kernel_func, proto::VarType::RAW,
- PlaceType::kGPU);
+ platform::CUDAPlace());
#endif
}

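
Note: the change above keys custom-op kernels on ordinary framework places, and the GPU key is only registered when Paddle is built with CUDA or ROCm (the "add with gpu macro" item in the commit message). A condensed sketch of that registration pattern; it reuses the names declared in custom_operator.cc above and is illustrative only.

// Assumes the declarations visible earlier in custom_operator.cc
// (RegisterOperatorKernelWithPlace, OperatorWithKernel::OpKernelFunc).
static void RegisterKernelForAllPlaces(
    const std::string& name,
    const paddle::framework::OperatorWithKernel::OpKernelFunc& func) {
  using paddle::framework::proto::VarType;
  // The CPU kernel key is always registered.
  RegisterOperatorKernelWithPlace(name, func, VarType::RAW,
                                  paddle::platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // The GPU kernel key exists only in CUDA/ROCm builds.
  RegisterOperatorKernelWithPlace(name, func, VarType::RAW,
                                  paddle::platform::CUDAPlace());
#endif
}
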
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/eager_method.cc
@@ -921,7 +921,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
"please check the type of tensor."));
}

- if (value_tensor_tmp.place() == paddle::PlaceType::kUNK) {
+ if (!value_tensor_tmp.initialized()) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
SetTensorFromPyArray(
static_cast<phi::DenseTensor*>(value_tensor_tmp.impl().get()),
@@ -1009,7 +1009,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
VLOG(4) << "index is not tensor";
self_numpy[_index] = py::object(py::handle(value_obj), true);
}
- if (self->tensor.place() == paddle::PlaceType::kUNK) {
+ if (!self->tensor.initialized()) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
SetTensorFromPyArray(self_tensor, self_numpy,
platform::Place(platform::CUDAPlace(0)), false);
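
Note: both hunks above replace the old "place is kUNK" probe with an explicit initialization check: an empty Tensor no longer advertises an unknown place, it simply reports that it owns no allocation yet. A minimal sketch of the replacement check, using only the public Tensor API shown in this diff (the helper name is hypothetical):

#include "paddle/phi/api/include/tensor.h"

// Before this commit: t.place() == paddle::PlaceType::kUNK
// After this commit:  ask the tensor whether it holds an allocation at all.
inline bool NeedsInitialization(const paddle::experimental::Tensor& t) {
  return !t.initialized();
}
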
1 change: 0 additions & 1 deletion paddle/phi/api/all.h
@@ -41,5 +41,4 @@ limitations under the License. */
#include "paddle/phi/api/ext/dispatch.h"
#include "paddle/phi/api/ext/exception.h"
#include "paddle/phi/api/ext/op_meta_info.h"
#include "paddle/phi/api/ext/place.h"
#include "paddle/phi/api/ext/tensor_compat.h"
22 changes: 0 additions & 22 deletions paddle/phi/api/ext/place.h

This file was deleted.
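
Note: for context, the deleted header declared the custom-op PlaceType enum that the rest of this commit removes. Based on the values referenced elsewhere in this diff (kUNK, kCPU, kGPU), its contents were roughly the following; treat this as an approximate reconstruction, not the verbatim file.

// paddle/phi/api/ext/place.h (removed by this commit) -- approximate contents.
namespace paddle {

// Deprecated device tag of the external custom-op API, superseded by
// phi::Place / phi::AllocationType.
enum class PlaceType { kUNK = -1, kCPU, kGPU };

}  // namespace paddle
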

6 changes: 5 additions & 1 deletion paddle/phi/api/ext/tensor_compat.h
@@ -14,12 +14,16 @@ limitations under the License. */

#pragma once

#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/include/tensor.h"

// Note(chenweihang): In order to be compatible with the original custom
// operator Tensor interface, only available to external users, the file
// cannot be includeed in paddle

namespace paddle {
- using Tensor = paddle::experimental::Tensor;
+ using Tensor = experimental::Tensor;
+ // using several Tensor initialize functions in paddle namespace
+ using experimental::empty;
+ using experimental::full;
} // namespace paddle
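
Note: re-exporting empty and full into the paddle namespace gives custom operators a supported way to create tensors without the deprecated Tensor(place, shape) constructors. A sketch of typical custom-op usage under that assumption; the umbrella header name and the factory functions' default arguments should be checked against the Paddle version you target.

#include <cstdint>
#include <vector>

#include "paddle/extension.h"  // custom-op umbrella header (assumed)

// Allocate an output with the same shape, dtype and device as the input,
// instead of Tensor(PlaceType, shape) followed by mutable_data<T>().
paddle::Tensor MakeOutputLike(const paddle::Tensor& x) {
  return paddle::empty(x.shape(), x.dtype(), x.place());
}

// Create a constant tensor; the shape vector converts to an IntArray implicitly.
paddle::Tensor MakeOnes(const std::vector<int64_t>& shape) {
  return paddle::full(shape, 1.0f, paddle::DataType::FLOAT32);
}
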
62 changes: 34 additions & 28 deletions paddle/phi/api/include/tensor.h
@@ -29,7 +29,6 @@ using gpuStream_t = cudaStream_t;
using gpuStream_t = hipStream_t;
#endif

#include "paddle/phi/api/ext/place.h"
#include "paddle/phi/api/include/dll_decl.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/layout.h"
@@ -109,21 +108,23 @@

/**
* @brief Construct a new Tensor object on the target place.
- * This is a deprecated method and may be removed in the future!
*
+ * This is a deprecated method and may be removed in the future!!!
+ *
* @param place
*/
- explicit Tensor(const PlaceType& place);
+ explicit Tensor(const Place& place);

/**
* @brief Construct a new Tensor object on the target place
* with specified shape.
- * This is a deprecated method and may be removed in the future!
*
+ * This is a deprecated method and may be removed in the future!!!
+ *
* @param place
* @param shape
*/
- Tensor(const PlaceType& place, const std::vector<int64_t>& shape);
+ Tensor(const Place& place, const std::vector<int64_t>& shape);

/**
* @brief Construct a new Tensor object by a TensorBase pointer and name
@@ -135,8 +136,9 @@
/**
* @brief Construct a new Tensor object with name
*
- * @note Used to adapt original execution mechanism and debug analysis
- * in the development of new dygraph. It may be removed in the future.
+ * @note Internal method, used to adapt original execution mechanism and
+ * debug analysis in the development of new dygraph. It may be removed in
+ * the future.
* */
explicit Tensor(const std::string& name) : name_(name) {}

@@ -151,6 +153,7 @@

/**
* @brief Get the size of current tensor.
+ *
* The compatible method of `Tensor::numel()`.
* This is a deprecated method and may be removed in the future!
*
@@ -167,6 +170,7 @@

/**
* @brief Return the shape (dimensions) of Tensor.
+ *
* The compatible method of `Tensor::dims()`.
* This is a deprecated method and may be removed in the future!
*
@@ -178,7 +182,7 @@
* @brief Reset the shape of the tensor.
* @note: This method means Reset the shape of the tensor,
* and must be called before calling mutable_data() or
- * copy_to(const PlaceType& place), this is not a standard definition of
+ * copy_to(const Place& place), this is not a standard definition of
* reshape behavior, so we will deprecated this feature in the future.
*
* @param shape
@@ -194,6 +198,7 @@

/**
* @brief Return the data type of Tensor.
+ *
* The compatible method of `Tensor::dtype()`.
* This is a deprecated method and may be removed in the future!
*
@@ -246,18 +251,18 @@
* @brief Return the place (device) of Tensor.
* This is a deprecated method and may be removed in the future!
*
- * @return PlaceType
+ * @return Place
*/
- PlaceType place() const;
+ Place place() const;

/**
* @brief Return the place (device) of Tensor.
- * Because the `place` method already exists, so we need to use a new name,
- * here we temporarily use `inner_place`.
*
- * @return paddle::platform::Place
+ * This is a deprecated method and may be removed in the future!!!
+ *
+ * @return Place
*/
- phi::Place inner_place() const;
+ Place inner_place() const;

/**
* @brief Determine whether the tensor device is CPU
@@ -287,7 +292,7 @@

/**
* @brief Get the memory pointer in CPU or GPU with specific data type.
- * It's usually used to get the output data pointer.
+ * It's usually used to get the output data pointer, same as the T* data().
*
* @tparam T
* @return T*
@@ -297,6 +302,7 @@

/**
* @brief Get the memory pointer in CPU or GPU with specific data type.
+ *
* It's usually used to get the output data pointer.
* This is a deprecated method and may be removed in the future!
*
@@ -305,7 +311,7 @@
* @return T*
*/
template <typename T>
- T* mutable_data(const PlaceType& place);
+ T* mutable_data(const Place& place);

/**
* @brief Get the const memory pointer directly.
@@ -319,8 +325,7 @@

/**
* @brief Get the memory pointer directly.
- * It's usually used to get the output data pointer.
- * This is a deprecated method and may be removed in the future!
+ * It's usually used to get the mutable output data pointer.
*
* @tparam T
* @return T*
@@ -409,7 +414,7 @@
* @return Tensor
*/
template <typename T>
- Tensor copy_to(const PlaceType& target_place) const;
+ Tensor copy_to(const Place& target_place) const;

/**
* @brief Transfer the current Tensor to the specified device and return.
@@ -427,7 +432,8 @@
* @param blocking, Should we copy this in sync way.
* @return void
*/
- void copy_(const Tensor& src, const phi::Place& target_place, bool blocking);
+ void copy_(const Tensor& src, const Place& target_place, bool blocking);
+
/**
* @brief Cast datatype from one to another
*
@@ -489,11 +495,17 @@
/* Part 8: Autograd methods */

/**
- * @brief Get the autograd meta object
+ * @brief Get the autograd meta object pointer
*
* @return AbstractAutogradMeta*
*/
AbstractAutogradMeta* get_autograd_meta() const;

+ /**
+ * @brief Get the shared pointer of autograd meta object
+ *
+ * @return std::shared_ptr<AbstractAutogradMeta>&
+ */
const std::shared_ptr<AbstractAutogradMeta>& mutable_autograd_meta() const;

/**
@@ -524,7 +536,7 @@

/* Part 10: Auto generated Tensor methods */

- /* Part 11: Methods of converting SparseTensor and DenseTensor to each other
+ /* Part 11: Methods of converting underlying TensorType to each other
*/
/**
* @brief Convert DenseTensor or SparseCsrTensor to SparseCooTensor
@@ -587,12 +599,6 @@
* in the development of new dygraph. It may be removed in the future.
*/
std::string name_{""};

- /**
- * Place type: Return the expected memory location if the Tensor is
- * uninitialized.
- */
- PlaceType place_{PlaceType::kUNK};
};

} // namespace experimental
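
Note: taken together, the comments above steer external code away from the deprecated members (the Place-taking constructors, size(), mutable_data<T>(place), the templated copy_to<T>(place)) toward their stable counterparts. A short sketch of the recommended calls, assuming the non-template copy_to(const Place&, bool blocking) overload documented in this header and the typed data<T>() accessor:

#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/common/place.h"  // phi::CPUPlace (assumed path)

using paddle::experimental::Tensor;

void ReadOnHost(const Tensor& maybe_on_gpu) {
  // Preferred: copy_to(place, blocking) plus data<T>() access, instead of the
  // deprecated copy_to<T>(PlaceType) and mutable_data<T>(PlaceType).
  Tensor on_cpu = maybe_on_gpu.copy_to(phi::CPUPlace(), /*blocking=*/true);
  const float* host_ptr = on_cpu.data<float>();
  (void)host_ptr;  // ... read host_ptr here ...
}
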
8 changes: 3 additions & 5 deletions paddle/phi/api/lib/CMakeLists.txt
@@ -1,13 +1,11 @@
add_subdirectory(utils)

- cc_library(ext_compat_utils SRCS ext_compat_utils.cc DEPS place)
-
if (WITH_GPU)
- nv_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils ext_compat_utils phi_enforce)
+ nv_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils phi_enforce)
elseif (WITH_ROCM)
- hip_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils ext_compat_utils phi_enforce)
+ hip_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils phi_enforce)
else()
- cc_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils ext_compat_utils phi_enforce)
+ cc_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils phi_enforce)
endif()

set(api_gen_base ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/api_base.py)

1 comment on commit 6242a70


paddle-bot-old (bot) commented on 6242a70, Apr 13, 2022


🕵️ CI failures summary

🔍 PR: #41726 Commit ID: 6242a70 contains failed CI.

🔹 Failed: PR-CI-APPROVAL-23

Unknown Failed
Unknown Failed

🔹 Failed: PR-CI-Coverage-23

Unknown Failed
Unknown Failed
