paddle/phi/kernels/impl/unfold_grad_kernel_impl.h (5 changes: 3 additions & 2 deletions)
@@ -33,8 +33,9 @@ void UnfoldGradKernel(const Context& dev_ctx,
const std::vector<int>& dilations,
DenseTensor* x_grad) {
dev_ctx.template Alloc<T>(x_grad);

-if (!x_grad) return;
+if (!x_grad || x_grad->numel() == 0) {
+  return;
+}

const auto& x_dims = x_grad->dims();
const int batch_size = static_cast<int>(x_dims[0]);
paddle/phi/kernels/impl/unfold_kernel_impl.h (3 changes: 3 additions & 0 deletions)
@@ -33,6 +33,9 @@ void UnfoldKernel(const Context& dev_ctx,
DenseTensor* out) {
const int batch_size = static_cast<int>(x.dims()[0]);
dev_ctx.template Alloc<T>(out);
+if (out->numel() == 0) {
+  return;
+}

phi::funcs::Im2ColFunctor<phi::funcs::ColFormat::kCFO, Context, T> im2col;
const auto& x_dims = x.dims();
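Taken together, the forward and backward guards above only skip the im2col compute when there is nothing to do; shape inference and allocation still run. A minimal eager-mode sketch of the behaviour these checks are meant to allow (the expected shapes are my assumption, based on the usual im2col output layout [N, C * prod(kernel_sizes), L]):

import paddle
import paddle.nn.functional as F

# Zero-sized batch: the unfold kernels below should early-return without touching data.
x = paddle.zeros([0, 3, 8, 8])
x.stop_gradient = False

out = F.unfold(x, kernel_sizes=2)  # expected shape: [0, 3 * 2 * 2, 7 * 7]
print(out.shape)

# The grad kernel hits the matching numel() == 0 early return.
out.sum().backward()
print(x.grad.shape)  # expected: [0, 3, 8, 8]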
paddle/phi/kernels/stride/tensor_unfold_grad_kernel.cc (3 changes: 3 additions & 0 deletions)
@@ -39,6 +39,9 @@ void TensorUnfoldGradKernel(const Context& dev_ctx,
axis += input.dims().size();
}
dev_ctx.Alloc(input_grad, input_grad->dtype());
+if (input_grad->numel() == 0) {
+  return;
+}
input_grad->set_strides(DenseTensorMeta::calc_strides(input_grad->dims()));
if (out_grad.numel() < input.numel()) {
PD_VISIT_ALL_TYPES(input_grad->dtype(), "TensorUnfoldGradKernel", ([&] {
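If I am reading the kernel naming correctly, TensorUnfoldGradKernel backs the strided paddle.unfold(x, axis, size, step) / Tensor.unfold view op exercised by the test file below, rather than paddle.nn.functional.unfold. A small sketch of the zero-size case it now tolerates (window count and output shapes are my assumption, mirroring the new test):

import paddle

# The second dimension is empty, so the unfolded view is empty too.
x = paddle.ones([5, 0])
x.stop_gradient = False

# One window of size 5 with step 1 along axis 0 -> assumed shape [1, 0, 5].
windows = paddle.unfold(x, 0, 5, 1)
print(windows.shape)

# Backward through the empty view; the new numel() == 0 guard makes this a no-op.
windows.sum().backward()
print(x.grad.shape)  # expected: [5, 0]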
paddle/phi/kernels/xpu/unfold_grad_kernel.cc (3 changes: 3 additions & 0 deletions)
@@ -31,6 +31,9 @@ void UnfoldGradKernel(const Context& dev_ctx,
DenseTensor* x_grad) {
using XPUType = typename XPUTypeTrait<T>::Type;
dev_ctx.template Alloc<T>(x_grad);
+if (x_grad->numel() == 0) {
+  return;
+}
const std::string data_format = common::DataLayoutToString(x.layout());
bool is_nchw = data_format == "NCHW";
PADDLE_ENFORCE_EQ(is_nchw,
paddle/phi/kernels/xpu/unfold_kernel.cc (3 changes: 3 additions & 0 deletions)
@@ -30,6 +30,9 @@ void UnfoldKernel(const Context& dev_ctx,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;
dev_ctx.template Alloc<T>(out);
+if (out->numel() == 0) {
+  return;
+}
const std::string data_format = common::DataLayoutToString(x.layout());
bool is_nchw = data_format == "NCHW";
PADDLE_ENFORCE_EQ(is_nchw,
test/legacy_test/test_tensor_unfold.py (33 changes: 33 additions & 0 deletions)
@@ -98,5 +98,38 @@ def test_tensor_unfold_backward(self):
self.assertEqual((b.grad.numpy() == 1).all().item(), True)


+class TestTensorUnfold_ZeroSize(TestTensorUnfold):
+    def test_tensor_unfold_forward(self):
+        self.shape = [5, 0]
+        for idx, p in enumerate(self.places):
+            if idx == 0:
+                paddle.set_device('cpu')
+            else:
+                paddle.set_device('gpu')
+            for dtype in self.typelist:
+                x_np = np.random.random(self.shape).astype(dtype)
+                x = paddle.to_tensor(x_np, place=p)
+                a = paddle.unfold(x, 0, 5, 1)
+                np.testing.assert_allclose(a.numpy()[0], x_np.T)
+
+    def test_tensor_unfold_backward(self):
+        self.shape = [5, 0]
+        for idx, p in enumerate(self.places):
+            if idx == 0:
+                paddle.set_device('cpu')
+            else:
+                paddle.set_device('gpu')
+            for dtype in self.typelist:
+                x_np = np.random.random(self.shape).astype(dtype)
+                x = paddle.to_tensor(x_np, place=p)
+                x.stop_gradient = False
+                a = paddle.unfold(x, 0, 5, 1)
+                b = a * 2
+                b.retain_grads()
+                loss = b.sum()
+                loss.backward()
+                self.assertEqual((b.grad.numpy() == 1).all().item(), True)


if __name__ == '__main__':
unittest.main()