diff --git a/python/paddle/base/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py
index 0bb12a42ede6f4..4032597e82f07a 100644
--- a/python/paddle/base/dygraph/tensor_patch_methods.py
+++ b/python/paddle/base/dygraph/tensor_patch_methods.py
@@ -1260,7 +1260,7 @@ def __cuda_array_interface__(self):
 
         # raise AttributeError for unsupported tensors, so that
        # hasattr(cpu_tensor, "__cuda_array_interface__") is False.
-        if "gpu" not in str(self.place):
+        if not self.place.is_gpu_place():
             raise AttributeError(
                 "Can't get __cuda_array_interface__ on non-CUDA tensor. "
                 "If CUDA data is required use tensor.cuda() to copy tensor to device memory."
diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py
index c0aeec15a71d6f..cc01f2f3f3d08d 100644
--- a/python/paddle/tensor/stat.py
+++ b/python/paddle/tensor/stat.py
@@ -266,7 +266,7 @@ def numel(x: Tensor, name: str | None = None) -> Tensor:
     Returns the number of elements for a tensor, which is a 0-D int64 Tensor with shape [].
 
     Args:
-        x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64, complex64, complex128.
+        x (Tensor): The input Tensor, its data type can be bool, float16, float32, float64, uint8, int8, int32, int64, complex64, complex128.
         name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
diff --git a/test/legacy_test/test_eager_tensor.py b/test/legacy_test/test_eager_tensor.py
index 49658c2a50035a..c279172d6c0a51 100644
--- a/test/legacy_test/test_eager_tensor.py
+++ b/test/legacy_test/test_eager_tensor.py
@@ -1217,18 +1217,18 @@ def test___cuda_array_interface__(self):
         # strides should be None if contiguous
         tensor = paddle.randn([3, 3]).to(device=gpu_place)
         interface = tensor.__cuda_array_interface__
-        assert interface["strides"] is None
+        self.assertIsNone(interface["strides"])
 
         # strides should be tuple of int if not contiguous
         tensor = paddle.randn([10, 10]).to(device=gpu_place)
         tensor = tensor[::2]
         interface = tensor.__cuda_array_interface__
-        assert interface["strides"] == (80, 4)
+        self.assertEqual(interface["strides"], (80, 4))
 
         # data_ptr should be 0 if tensor is 0-size
         tensor = paddle.randn([0, 10]).to(device=gpu_place)
         interface = tensor.__cuda_array_interface__
-        assert interface["data"][0] == 0
+        self.assertEqual(interface["data"][0], 0)
 
         # raise AttributeError for tensor that requires grad.
         tensor = paddle.randn([3, 3]).to(device=gpu_place)
@@ -1261,22 +1261,24 @@ def test___cuda_array_interface__(self):
                 .astype(dtype)
             )
             interface = tensor.__cuda_array_interface__
-            assert "typestr" in interface and isinstance(
-                interface["typestr"], str
-            )
-            assert "shape" in interface and isinstance(
-                interface["shape"], tuple
-            )
-            assert "strides" in interface and (
+            self.assertIn("typestr", interface)
+            self.assertIsInstance(interface["typestr"], str)
+
+            self.assertIn("shape", interface)
+            self.assertIsInstance(interface["shape"], tuple)
+
+            self.assertIn("strides", interface)
+            self.assertTrue(
                 isinstance(interface["strides"], tuple)
                 or interface["strides"] is None
             )
-            assert (
-                "data" in interface
-                and isinstance(interface["data"], tuple)
-                and len(interface["data"]) == 2
-            )
-            assert "version" in interface and interface["version"] == 2
+
+            self.assertIn("data", interface)
+            self.assertIsInstance(interface["data"], tuple)
+            self.assertEqual(len(interface["data"]), 2)
+
+            self.assertIn("version", interface)
+            self.assertEqual(interface["version"], 2)
 
 
 class TestEagerTensorSetitem(unittest.TestCase):