13 changes: 13 additions & 0 deletions test/legacy_test/op_test.py
@@ -389,6 +389,19 @@ def convert_uint16_to_float(in_list):
     return np.reshape(out, in_list.shape)
 
 
+def get_places():
+    places = []
+    if (
+        os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
+        in ['1', 'true', 'on']
+        or not core.is_compiled_with_cuda()
+    ):
+        places.append(base.CPUPlace())
+    if core.is_compiled_with_cuda():
+        places.append(base.CUDAPlace(0))
+    return places
+
+
 @contextmanager
 def auto_parallel_test_guard(test_info_path, generated_test_file_path):
     test_info_file, generated_test_file = None, None
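For reference, a minimal sketch of how a test can consume the new helper instead of building the place list by hand. The test class and method below are hypothetical illustrations, not code from this pull request:

```python
import unittest

import numpy as np
from op_test import get_places

import paddle


class TestCumprodOnAllPlaces(unittest.TestCase):  # hypothetical example, not in the PR
    def setUp(self):
        # get_places() returns CPUPlace and/or CUDAPlace(0), depending on the
        # build and on the FLAGS_CI_both_cpu_and_gpu environment variable.
        self.place = get_places()

    def test_dygraph(self):
        for place in self.place:
            paddle.disable_static(place)
            x = paddle.ones([2, 3], dtype='float32')
            out = paddle.cumprod(x, dim=0)  # cumprod of ones stays all ones
            np.testing.assert_allclose(
                out.numpy(), np.ones([2, 3], dtype='float32')
            )
            paddle.enable_static()
```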
23 changes: 3 additions & 20 deletions test/legacy_test/test_cumprod_op.py
@@ -12,12 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import random
 import unittest
 
 import numpy as np
-from op_test import OpTest, convert_float_to_uint16
+from op_test import OpTest, convert_float_to_uint16, get_places
 
 import paddle
 from paddle.base import core
@@ -263,15 +262,7 @@ def setUp(self):
         paddle.enable_static()
         self.init_dtype()
         self.x = (np.random.rand(2, 3, 10, 10) + 0.5).astype(self.dtype)
-        self.place = []
-        if (
-            os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
-            in ['1', 'true', 'on']
-            or not core.is_compiled_with_cuda()
-        ):
-            self.place.append(paddle.CPUPlace())
-        if core.is_compiled_with_cuda():
-            self.place.append(paddle.CUDAPlace(0))
+        self.place = get_places()
 
     # test static graph api.
 
@@ -1099,15 +1090,7 @@ def init_dtype(self):
     def setUp(self):
        self.init_dtype()
        self.x = (np.random.rand(0, 3, 10, 10) + 0.5).astype(self.dtype)
-        self.place = []
-        if (
-            os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
-            in ['1', 'true', 'on']
-            or not core.is_compiled_with_cuda()
-        ):
-            self.place.append(paddle.CPUPlace())
-        if core.is_compiled_with_cuda():
-            self.place.append(paddle.CUDAPlace(0))
+        self.place = get_places()
 
     # test dynamic graph api.
     def test_dygraph_api(self):
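Within this diff, FLAGS_CI_both_cpu_and_gpu is read only inside get_places(). A quick hedged illustration of its effect; setting the variable in-process as below is purely for demonstration and is an assumption, not an invocation taken from the CI scripts:

```python
import os

# Assumption for illustration: the flag must be set before get_places() is
# called, since the helper reads os.environ at call time.
os.environ['FLAGS_CI_both_cpu_and_gpu'] = '1'

from op_test import get_places

# On a CUDA build this now returns [CPUPlace, CUDAPlace(0)] instead of only
# [CUDAPlace(0)]; on a CPU-only build it returns [CPUPlace] either way.
print(get_places())
```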