Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/binary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2356,7 +2356,7 @@ void IndexSampleInferMeta(const MetaTensor& x,
"Inputs(Index) shape of IndexSample op should be 2-D, but "
"got Index's shape [%s] , please check index shape.",
input_dims));
if (config.is_runtime) {
if (config.is_runtime && index_dims[0] != 0) { // 0-size not check
PADDLE_ENFORCE_EQ(input_dims[0],
index_dims[0],
errors::InvalidArgument(
Expand Down
3 changes: 3 additions & 0 deletions paddle/phi/kernels/cpu/index_sample_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,9 @@ void IndexSampleKernel(const Context &dev_ctx,
const DenseTensor &index,
DenseTensor *out) {
dev_ctx.template Alloc<T>(out);
if (out && out->numel() == 0) {
return;
}
auto index_type = index.dtype();
bool index_type_match =
index_type == DataType::INT32 || index_type == DataType::INT64;
Expand Down
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/index_sample_grad_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ void IndexSampleGradKernel(const Context& dev_ctx,
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(dev_ctx, x_grad, static_cast<T>(0));

if (out_grad.numel() == 0) return;
if (index_type == DataType::INT64) {
const int64_t* index_data = index.data<int64_t>();
IndexSampleGrad<T, int64_t>
Expand Down
3 changes: 3 additions & 0 deletions paddle/phi/kernels/gpu/index_sample_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,9 @@ void IndexSampleKernel(const Context& dev_ctx,
DataTypeToString(DataType::INT64)));
const T* in_data = x.data<T>();
T* out_data = dev_ctx.template Alloc<T>(out);
if (out && out->numel() == 0) {
return;
}
auto stream = reinterpret_cast<const phi::GPUContext&>(dev_ctx).stream();
auto input_dim = x.dims();
auto index_dim = index.dims();
Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/xpu/index_sample_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ void IndexSampleKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& index,
DenseTensor* out) {
if (out && out->numel() == 0) {
dev_ctx.template Alloc<T>(out);
return;
}
auto index_type = index.dtype();
bool index_type_match =
index_type == DataType::INT32 || index_type == DataType::INT64;
Expand Down
46 changes: 46 additions & 0 deletions test/legacy_test/test_index_sample_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,52 @@ def config(self):
self.index_type = "int64"


class TestIndexSampleOp_ZeroSize(OpTest):
    """Zero-size coverage for index_sample: index is (N, 0), so Out is empty.

    Builds random inputs per config(), computes the reference gather of
    x[i, index[i, j]] on the host, and checks forward output and X-grad
    through the standard OpTest machinery (PIR enabled).
    """

    def setUp(self):
        self.op_type = "index_sample"
        self.python_api = paddle.index_sample
        self.public_python_api = paddle.index_sample
        self.config()

        # Random input; regenerate with an imaginary part for complex dtypes.
        xnp = np.random.random(self.x_shape).astype(self.x_type)
        if self.x_type in (np.complex64, np.complex128):
            real = np.random.random(self.x_shape)
            imag = np.random.random(self.x_shape)
            xnp = (real + 1j * imag).astype(self.x_type)

        # Valid column indices into x; with a zero-size index_shape this is
        # an empty array and the reference gather below collects nothing.
        indexnp = np.random.randint(
            low=0, high=self.x_shape[1], size=self.index_shape
        ).astype(self.index_type)
        self.inputs = {'X': xnp, 'Index': indexnp}

        # Reference result: per-row gather x[row, col] for each index entry.
        gathered = [
            xnp[row, col]
            for row in range(self.index_shape[0])
            for col in indexnp[row]
        ]
        out = np.reshape(
            np.array(gathered).astype(self.x_type), self.index_shape
        )
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_pir=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_pir=True)

    def config(self):
        # x is (10, 20); index has 10 rows but zero columns -> empty output.
        self.x_shape = (10, 20)
        self.x_type = "float64"
        self.index_shape = (10, 0)
        self.index_type = "int32"


class TestIndexSampleOp_ZeroSize2(TestIndexSampleOp_ZeroSize):
    """Zero-size variant where x itself also has zero rows."""

    def config(self):
        # Both x and index are empty along dim 0; index also has zero columns.
        self.x_type = "float64"
        self.index_type = "int32"
        self.x_shape = (0, 20)
        self.index_shape = (0, 0)


@unittest.skipIf(core.is_compiled_with_xpu(), "complex is not supported on XPU")
class TestIndexSampleComplex64(TestIndexSampleOp):
def config(self):
Expand Down
Loading