python/paddle/tensor/math.py (9 additions, 2 deletions)
@@ -4695,8 +4695,8 @@ def cumprod(

Args:
x (Tensor): the input tensor need to be cumproded.
- dim (int|None, optional): the dimension along which the input tensor will be accumulated. It need to be in the range of [-x.rank, x.rank),
-     where x.rank means the dimensions of the input tensor x and -1 means the last dimension.
+ dim (int|None, optional): the dimension along which the input tensor will be accumulated. It needs to be in the range of [-x.rank, x.rank) or None,
+     where x.rank means the dimensions of the input tensor x and -1 means the last dimension. If None (the default), the cumprod is computed over the flattened array.
dtype (str|paddle.dtype|np.dtype, optional): The data type of the output tensor, can be bfloat16, float16, float32, float64, int32, int64,
complex64, complex128. If specified, the input tensor is casted to dtype before the operation is performed.
This is useful for preventing data type overflows. The default value is None.
@@ -4743,6 +4743,9 @@ def cumprod(
>>> assert y.dtype == paddle.float64

"""
if dim is None:
dim = -1
x = x.flatten(0, len(x.shape) - 1)

if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = cast(x, dtype)
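
(An aside on the surrounding `dtype` handling, which this PR keeps unchanged: casting before accumulating is what prevents the integer overflows the docstring mentions. A hypothetical illustration, not part of the change:)

```python
import paddle

x = paddle.full([20], 3, dtype="int32")

# 3**20 = 3486784401 overflows int32, so cumprod casts to the requested
# dtype first and only then runs the accumulation.
safe = paddle.cumprod(x, dim=0, dtype="int64")
print(safe[-1].item())  # 3486784401
```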
@@ -4789,6 +4792,10 @@ def cumprod_(
Inplace version of ``cumprod`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_cumprod`.
"""
if dim is None:
dim = -1
x = _C_ops.flatten_(x, 0, len(x.shape) - 1)

if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = cast_(x, dtype)

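Taken together, the two hunks above make `dim=None` mean "flatten, then accumulate": the out-of-place path reshapes via `x.flatten(0, len(x.shape) - 1)`, while the in-place variant mutates the tensor directly with `_C_ops.flatten_`. A minimal sketch of the resulting user-facing behavior, assuming a Paddle build that includes this change:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.arange(1.0, 7.0).reshape(2, 3))

# dim=None now flattens the input first and accumulates along the
# single remaining axis, mirroring numpy's axis=None behavior.
out = paddle.cumprod(x, dim=None)
ref = np.cumprod(x.numpy(), axis=None)  # [1, 2, 6, 24, 120, 720]

np.testing.assert_allclose(out.numpy(), ref)
print(out.shape)  # [6]: the result is 1-D, not the input shape
```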
test/legacy_test/test_cumprod_op.py (56 additions, 0 deletions)
@@ -1109,5 +1109,61 @@ def run(place):
run(place)


class TestCumprodAPI_WithFlatten(unittest.TestCase):
def init_dtype(self):
self.dtype = 'float64'
self.shape = [3, 10, 10]

def setUp(self):
self.init_dtype()
self.x = (np.random.rand(3, 10, 10) + 0.5).astype(self.dtype)
self.place = get_places()

# test dynamic graph api.
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
x = paddle.to_tensor(self.x)
x.stop_gradient = False
out = paddle.cumprod(x, None)
out_ref = np.cumprod(self.x, None)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

out_grad_ref = np.ones_like(out_ref)
out_grad = paddle.to_tensor(out_grad_ref)
x_grad_ref = np.zeros_like(self.x).flatten()
(x_grad,) = paddle.grad(out, [x], [out_grad])
cumprod_grad(
self.x.flatten(),
out_ref,
out_grad_ref,
x_grad_ref,
[np.prod(self.shape)],
-1,
exclusive=False,
reverse=False,
)
x_grad_ref = x_grad_ref.reshape(self.shape)
np.testing.assert_allclose(x_grad_ref, x_grad.numpy(), rtol=1e-05)
paddle.enable_static()

for place in self.place:
run(place)

def test_static_api(self):
def run(place):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data('X', self.shape, dtype=self.dtype)
out = paddle.cumprod(x, None)
exe = paddle.static.Executor(place)
(out,) = exe.run(feed={'X': self.x}, fetch_list=[out])
out_ref = np.cumprod(self.x, None)
np.testing.assert_allclose(out_ref, out, rtol=1e-05)

for place in self.place:
run(place)


if __name__ == "__main__":
unittest.main()
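
The gradient check in `test_dygraph_api` leans on the `cumprod_grad` reference helper that `test_cumprod_op.py` already defines. A self-contained way to state the same property (hypothetical, not part of the PR): the `dim=None` gradient should match an explicit flatten plus `cumprod(dim=-1)` pipeline, since that is exactly what the new code path does.

```python
import numpy as np
import paddle

x_np = (np.random.rand(3, 10, 10) + 0.5).astype("float64")

# Path A: the new dim=None behavior.
xa = paddle.to_tensor(x_np, stop_gradient=False)
(ga,) = paddle.grad(paddle.cumprod(xa, None).sum(), [xa])

# Path B: the explicit equivalent it is implemented as.
xb = paddle.to_tensor(x_np, stop_gradient=False)
(gb,) = paddle.grad(paddle.cumprod(xb.flatten(), dim=-1).sum(), [xb])

np.testing.assert_allclose(ga.numpy(), gb.numpy(), rtol=1e-5)
```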
test/legacy_test/test_inplace.py (42 additions, 0 deletions)
@@ -1698,6 +1698,48 @@ def test_forward_version(self):
self.assertEqual(var.inplace_version, 3)


class TestDygraphInplaceCumprodWithFlatten(TestDygraphInplace):
def inplace_api_processing(self, var):
return paddle.cumprod_(var, None, dtype="float32")

def non_inplace_api_processing(self, var):
return paddle.cumprod(var, None, dtype="float32")

def test_backward_error(self):
# It raises an error because the inplace operator will result
# in incorrect gradient computation.
with paddle.base.dygraph.guard():
var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
var_a.stop_gradient = False

var_b = var_a**2

# Here, the gradient computation will use the value of var_b
var_c = var_b**2
paddle.cumprod_(var_b, None, dtype="float64")

loss = paddle.nn.functional.relu(var_c)
with self.assertRaisesRegex(
RuntimeError,
"received tensor_version:3 != wrapper_version_snapshot:0",
):
loss.backward()

def test_forward_version(self):
with paddle.base.dygraph.guard():
var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
self.assertEqual(var.inplace_version, 0)

inplace_var = self.inplace_api_processing(var)
self.assertEqual(var.inplace_version, 2)

inplace_var[0] = 2
self.assertEqual(var.inplace_version, 3)

inplace_var = self.inplace_api_processing(inplace_var)
self.assertEqual(var.inplace_version, 5)


class TestDygrapInplaceRenorm(TestDygraphInplaceWithContinuous):
def inplace_api_processing(self, var):
return paddle.renorm_(var, 1.0, -1, 2.05)
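A note on the version arithmetic in `test_forward_version` above: when no dtype cast is needed, `cumprod_` with `dim=None` now performs two in-place mutations per call (`_C_ops.flatten_` plus the cumprod kernel), so `inplace_version` advances by 2 each time. In `test_backward_error` the extra `cast_` to float64 makes it three, which is where the `tensor_version:3` in the expected error message comes from. A rough sketch of the first expectation, assuming the same build:

```python
import paddle

var = paddle.ones([2, 3], dtype="float32")
assert var.inplace_version == 0

# flatten_ bumps the version once, the in-place cumprod once more;
# dtype already matches float32, so cast_ is skipped.
paddle.cumprod_(var, None, dtype="float32")
assert var.inplace_version == 2
```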