
Commit

[CustomDevice] register Copy for custom device (PaddlePaddle#44200)
* [CustomDevice] register Copy for custom device

* [CustomDevice] register Copy for custom device

* [CustomDevice] register Copy for custom device

* merge and add uts

* merge and add uts

* fix for blocking and unittests coverage
Aganlengzi authored and Aurelius84 committed Jul 29, 2022
1 parent 363458b commit 321f054
Showing 2 changed files with 66 additions and 4 deletions.
43 changes: 39 additions & 4 deletions paddle/phi/core/tensor_utils.cc
@@ -200,10 +200,9 @@ void Copy(const Context& dev_ctx,
     paddle::memory::Copy(
         dst_cuda_pinned_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
 #endif
-  }
 #ifdef PADDLE_WITH_XPU
-  else if (paddle::platform::is_xpu_place(src_place) &&  // NOLINT
-           paddle::platform::is_cpu_place(dst_place)) {
+  } else if (paddle::platform::is_xpu_place(src_place) &&  // NOLINT
+             paddle::platform::is_cpu_place(dst_place)) {
     paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
   } else if (paddle::platform::is_cpu_place(src_place) &&
              paddle::platform::is_xpu_place(dst_place)) {
@@ -216,11 +215,40 @@ void Copy(const Context& dev_ctx,
       return;
     }
     paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
-#endif
+#ifdef PADDLE_WITH_CUSTOM_DEVICE
+  } else if (paddle::platform::is_custom_place(src_place) &&  // NOLINT
+             paddle::platform::is_cpu_place(dst_place)) {
+    auto stream =
+        blocking
+            ? nullptr
+            : reinterpret_cast<const paddle::platform::CustomDeviceContext&>(
+                  dev_ctx)
+                  .stream();
+    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size, stream);
+  } else if (paddle::platform::is_cpu_place(src_place) &&  // NOLINT
+             paddle::platform::is_custom_place(dst_place)) {
+    auto stream =
+        blocking
+            ? nullptr
+            : reinterpret_cast<const paddle::platform::CustomDeviceContext&>(
+                  dev_ctx)
+                  .stream();
+    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size, stream);
+  } else if (paddle::platform::is_custom_place(src_place) &&  // NOLINT
+             paddle::platform::is_custom_place(dst_place)) {
+    auto stream =
+        blocking
+            ? nullptr
+            : reinterpret_cast<const paddle::platform::CustomDeviceContext&>(
+                  dev_ctx)
+                  .stream();
+    paddle::memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size, stream);
+#endif
   } else {
     PADDLE_THROW(phi::errors::Unimplemented(
         "Copy from %s to %s is not supported.", src_place, dst_place));
   }
+#endif
 }

 template <typename Context>
@@ -363,4 +391,11 @@ template void Copy(const XPUContext& dev_ctx,
                    DenseTensor* dst);
 #endif

+#ifdef PADDLE_WITH_CUSTOM_DEVICE
+template void Copy(const CustomContext& dev_ctx,
+                   const DenseTensor& src,
+                   Place dst_place,
+                   bool blocking,
+                   DenseTensor* dst);
+#endif
 }  // namespace phi
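The branches added above are all driven by the blocking flag: a blocking copy passes a nullptr stream to paddle::memory::Copy, while a non-blocking copy reuses the stream of the CustomDeviceContext. From Python they are reached through the private Tensor._copy_to(place, blocking) helper, which the new unit test below exercises. A minimal usage sketch (illustration only, not part of this commit), assuming the CustomCPU example plugin is discoverable via CUSTOM_DEVICE_ROOT and registers the device type 'custom_cpu':

import numpy as np
import paddle

# Assumption: the CustomCPU example plugin is installed and exposed as the
# 'custom_cpu' device type, as in the test below.
place = paddle.CustomPlace('custom_cpu', 0)

x = np.random.random([2, 2]).astype('float32')
cpu_t = paddle.to_tensor(x, dtype='float32', place=paddle.CPUPlace())

# cpu -> custom: blocking=True makes the new branch pass a nullptr stream,
# so the copy completes synchronously.
dev_t = cpu_t._copy_to(place, True)

# custom -> custom, then custom -> cpu round trip.
dev_t2 = dev_t._copy_to(place, True)
back = dev_t2._copy_to(paddle.CPUPlace(), True)

assert np.array_equal(back.numpy(), x)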
27 changes: 27 additions & 0 deletions python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py
@@ -39,6 +39,7 @@ def test_custom_device(self):
         self._test_custom_device_dataloader()
         self._test_custom_device_mnist()
         self._test_eager_backward_api()
+        self._test_eager_copy_to()
         self._test_custom_device_dataloader()
         self._test_custom_device_mnist()

@@ -133,6 +134,32 @@ def _test_eager_backward_api(self):

         self.assertTrue(x_tensor.grad.place.is_custom_place())

+    def _test_eager_copy_to(self):
+        import paddle
+        x = np.random.random([2, 2]).astype("float32")
+        # cpu -> custom
+        cpu_tensor = paddle.to_tensor(x,
+                                      dtype='float32',
+                                      place=paddle.CPUPlace())
+        custom_cpu_tensor = cpu_tensor._copy_to(
+            paddle.CustomPlace('custom_cpu', 0), True)
+        self.assertTrue(np.array_equal(custom_cpu_tensor, x))
+        self.assertTrue(custom_cpu_tensor.place.is_custom_place())
+        # custom -> custom
+        another_custom_cpu_tensor = custom_cpu_tensor._copy_to(
+            paddle.CustomPlace('custom_cpu', 0), True)
+        self.assertTrue(np.array_equal(another_custom_cpu_tensor, x))
+        self.assertTrue(another_custom_cpu_tensor.place.is_custom_place())
+        # custom -> cpu
+        another_cpu_tensor = custom_cpu_tensor._copy_to(paddle.CPUPlace(), True)
+        self.assertTrue(np.array_equal(another_cpu_tensor, x))
+        self.assertTrue(another_cpu_tensor.place.is_cpu_place())
+        # custom -> custom self
+        another_custom_cpu_tensor = another_custom_cpu_tensor._copy_to(
+            paddle.CustomPlace('custom_cpu', 0), True)
+        self.assertTrue(np.array_equal(another_custom_cpu_tensor, x))
+        self.assertTrue(another_custom_cpu_tensor.place.is_custom_place())
+
     def tearDown(self):
         del os.environ['CUSTOM_DEVICE_ROOT']
