
Commit 45fa080

add sinc

1 parent 13c024f

File tree

4 files changed: +224 -1 lines changed

python/paddle/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -487,6 +487,8 @@
     signbit,
     sin,
     sin_,
+    sinc,
+    sinc_,
     sinh,
     sinh_,
     sqrt,
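With these two exports added, the new functions are meant to be reachable from the top-level package. A quick smoke test, assuming a Paddle build that includes this commit:

    import paddle

    x = paddle.to_tensor([-1.0, -0.5, 0.0, 0.5, 1.0])
    print(paddle.sinc(x))   # approximately [0., 0.6366, 1., 0.6366, 0.]
    paddle.sinc_(x)         # inplace variant: overwrites x with sinc(x)
    print(x)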

python/paddle/tensor/__init__.py

Lines changed: 5 additions & 1 deletion
@@ -374,6 +374,8 @@
     signbit,
     sin,
     sin_,
+    sinc,
+    sinc_,
     sinh,
     sinh_,
     sqrt,
@@ -519,6 +521,7 @@
     'scale_',
     'sign',
     'sin',
+    'sinc',
     'sinh',
     'sqrt',
     'sqrt_',
@@ -792,7 +795,8 @@
     'cos_',
     'cosh_',
     'sin_',
-    'sinh_',
+    'sinc_',
+    'sinh_',
     'acosh_',
     'asinh_',
     'diag',
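Adding 'sinc' and 'sinc_' to these name lists is what registers them as Tensor methods alongside the functional API. Assuming that registration step, the method form should mirror the function form:

    import paddle

    x = paddle.to_tensor([0.0, 0.25, 0.5, 0.75])
    assert paddle.allclose(x.sinc(), paddle.sinc(x))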

python/paddle/tensor/math.py

Lines changed: 44 additions & 0 deletions
@@ -7652,3 +7652,47 @@ def signbit(x, name=None):
     x = paddle.sign(neg_zero_x)
     out = paddle.cast(x < 0, dtype='bool')
     return out
+
+
+def sinc(x, name=None):
+    r"""
+    Calculate the normalized sinc of ``x`` elementwise.
+
+    .. math::
+
+        out_i =
+        \left\{
+        \begin{aligned}
+        &1 & \text{ if $x_i = 0$} \\
+        &\frac{\sin(\pi x_i)}{\pi x_i} & \text{ otherwise}
+        \end{aligned}
+        \right.
+
+    Args:
+        x (Tensor): The input Tensor. Must be one of the following types: float16, float32, float64.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        out (Tensor): The Tensor of elementwise computed normalized sinc result.
+    """
+    if not isinstance(x, (paddle.Tensor, Variable, paddle.pir.Value)):
+        raise TypeError(f"x must be tensor type, but got {type(x)}")
+
+    tmp = math.pi * paddle.where(x == 0, 1.0e-20, x)
+    return paddle.divide(tmp.sin(), tmp)
+
+
+@inplace_apis_in_dygraph_only
+def sinc_(x, name=None):
+    r"""
+    Inplace version of ``sinc`` API, the output Tensor will be inplaced with input ``x``.
+    Please refer to :ref:`api_paddle_sinc`.
+    """
+    if not isinstance(x, (paddle.Tensor, Variable)):
+        raise TypeError(f"x must be tensor type, but got {type(x)}")
+
+    paddle.where_(x != 0, x, paddle.full_like(x, 1.0e-20))
+    paddle.multiply_(x, paddle.to_tensor(math.pi, dtype=x.dtype))
+    tmp = paddle.clone(x)
+    paddle.sin_(x)
+    return paddle.divide_(x, tmp)
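Both functions sidestep a per-element branch at zero: exact zeros are first replaced by the tiny constant 1.0e-20, after which a single vectorized sine and divide produces a value that rounds to the limit 1. A minimal NumPy sketch of the same trick (sinc_ref is a hypothetical helper, not part of this commit):

    import numpy as np

    def sinc_ref(x):
        # Substitute a tiny value for exact zeros; sin(t)/t rounds to 1.0 for t ~ 1e-20.
        t = np.pi * np.where(x == 0, 1.0e-20, x)
        return np.sin(t) / t

    x = np.linspace(-2.0, 2.0, 9)
    print(np.allclose(sinc_ref(x), np.sinc(x)))  # expected: True

For the inplace variant the ordering matters: tmp = paddle.clone(x) has to capture pi*x before paddle.sin_(x) overwrites it, because the final divide needs the original denominator. Reconstructing it afterwards from sin(pi*x), for example with asin, would be valid only on [-pi/2, pi/2].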

test/legacy_test/test_sinc.py

Lines changed: 173 additions & 0 deletions
@@ -0,0 +1,173 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+
+import paddle
+from paddle import base
+from paddle.base import core
+
+
+def np_sinc(x: np.ndarray):
+    return np.sinc(x)
+
+
+class TestSincAPI(unittest.TestCase):
+    def setUp(self) -> None:
+        self.cpu_support_dtypes = [
+            'float16',
+            'float32',
+            'float64',
+        ]
+        self.cuda_support_dtypes = [
+            'float16',
+            'float32',
+            'float64',
+        ]
+        self.place = [paddle.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            self.place.append(paddle.CUDAPlace(0))
+        self.shapes = [[6], [16, 64], [128, 512, 1024]]
+
+    def test_dtype(self):
+        def run_dygraph(place):
+            paddle.disable_static(place)
+            if core.is_compiled_with_cuda():
+                support_dtypes = self.cuda_support_dtypes
+            else:
+                support_dtypes = self.cpu_support_dtypes
+
+            for dtype in support_dtypes:
+                for shape in self.shapes:
+                    x_data = np.random.rand(*shape).astype(dtype)
+                    x = paddle.to_tensor(x_data)
+                    out = paddle.sinc(x)
+                    out_expected = np_sinc(x_data)
+                    np.testing.assert_allclose(out.numpy(), out_expected)
+
+        def run_static(place):
+            paddle.enable_static()
+            if core.is_compiled_with_cuda():
+                support_dtypes = self.cuda_support_dtypes
+            else:
+                support_dtypes = self.cpu_support_dtypes
+            for dtype in support_dtypes:
+                for shape in self.shapes:
+                    x_data = np.random.rand(*shape).astype(dtype)
+                    startup_program = paddle.static.Program()
+                    main_program = paddle.static.Program()
+                    exe = base.Executor(place)
+                    with paddle.static.program_guard(
+                        main_program, startup_program
+                    ):
+                        x = paddle.static.data(
+                            name='x', shape=shape, dtype=dtype
+                        )
+                        res = paddle.sinc(x)
+                        static_result = exe.run(
+                            feed={'x': x_data}, fetch_list=[res]
+                        )
+                    out_expected = np_sinc(x_data)
+                    np.testing.assert_allclose(static_result, out_expected)
+
+        for place in self.place:
+            run_dygraph(place)
+            run_static(place)
+
+    def test_zero(self):
+        def run_dygraph(place):
+            paddle.disable_static(place)
+            if core.is_compiled_with_cuda():
+                support_dtypes = self.cuda_support_dtypes
+            else:
+                support_dtypes = self.cpu_support_dtypes
+
+            for dtype in support_dtypes:
+                for shape in self.shapes:
+                    x_data = np.random.rand(*shape).astype(dtype)
+                    mask = (
+                        (np.random.rand(*shape) > 0.5)
+                        .astype('int')
+                        .astype(dtype)
+                    )
+                    x_data = x_data * mask
+                    x = paddle.to_tensor(x_data)
+                    out = paddle.sinc(x)
+                    out_expected = np_sinc(x_data)
+                    np.testing.assert_allclose(out.numpy(), out_expected)
+
+        def run_static(place):
+            paddle.enable_static()
+            if core.is_compiled_with_cuda():
+                support_dtypes = self.cuda_support_dtypes
+            else:
+                support_dtypes = self.cpu_support_dtypes
+            for dtype in support_dtypes:
+                for shape in self.shapes:
+                    x_data = np.random.rand(*shape).astype(dtype)
+                    mask = (
+                        (np.random.rand(*shape) > 0.5)
+                        .astype('int')
+                        .astype(dtype)
+                    )
+                    x_data = x_data * mask
+                    startup_program = paddle.static.Program()
+                    main_program = paddle.static.Program()
+                    exe = base.Executor(place)
+                    with paddle.static.program_guard(
+                        main_program, startup_program
+                    ):
+                        x = paddle.static.data(
+                            name='x', shape=shape, dtype=dtype
+                        )
+                        res = paddle.sinc(x)
+                        static_result = exe.run(
+                            feed={'x': x_data}, fetch_list=[res]
+                        )
+                    out_expected = np_sinc(x_data)
+                    np.testing.assert_allclose(static_result, out_expected)
+
+        for place in self.place:
+            run_dygraph(place)
+            run_static(place)
+
+    def test_input_type_error(self):
+        with self.assertRaises(TypeError):
+            x = np.random.rand(6).astype('float32')
+            x = paddle.sinc(x)
+
+    def test_inplace(self):
+        def run_dygraph(place):
+            paddle.disable_static(place)
+            if core.is_compiled_with_cuda():
+                support_dtypes = self.cuda_support_dtypes
+            else:
+                support_dtypes = self.cpu_support_dtypes
+
+            for dtype in support_dtypes:
+                for shape in self.shapes:
+                    x_data = np.random.rand(*shape).astype(dtype)
+                    x = paddle.to_tensor(x_data)
+                    paddle.sinc_(x)
+                    out_expected = np_sinc(x_data)
+                    np.testing.assert_allclose(x.numpy(), out_expected)
+
+        for place in self.place:
+            run_dygraph(place)
+
+
+if __name__ == "__main__":
+    unittest.main()
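A note on test_zero above: multiplying by a random 0/1 mask zeroes roughly half of the entries, which is what forces paddle.sinc through its x = 0 branch. The construction in isolation:

    import numpy as np

    shape = (4, 4)
    mask = (np.random.rand(*shape) > 0.5).astype('int').astype('float32')
    x_data = np.random.rand(*shape).astype('float32') * mask
    print((x_data == 0).mean())  # roughly 0.5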
