6 changes: 3 additions & 3 deletions python/paddle/distributed/utils.py
@@ -31,7 +31,7 @@
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.data_feeder import check_variable_and_dtype

+from paddle import _C_ops

__all__ = [ #noqa
'get_host_name_ip',
@@ -146,7 +146,7 @@ def global_scatter(x,

    ring_id = 0 if group is None else group.id
    if in_dygraph_mode():
-        return core.ops.global_scatter(x, local_count, \
+        return _C_ops.global_scatter(x, local_count, \
                                       global_count, \
                                       'use_calc_stream', use_calc_stream, \
                                       'ring_id', ring_id)
@@ -258,7 +258,7 @@ def global_gather(x,

    ring_id = 0 if group is None else group.id
    if in_dygraph_mode():
-        return core.ops.global_gather(x, local_count, \
+        return _C_ops.global_gather(x, local_count, \
                                      global_count, \
                                      'use_calc_stream', use_calc_stream, \
                                      'ring_id', ring_id)
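Every hunk in this PR follows the same shape: on the dygraph fast path, the code now calls the `paddle._C_ops` binding directly, with input tensors first and attributes passed inline as flat 'name', value pairs. A minimal sketch of that calling convention, using two of the simpler ops touched later in this diff (assuming a Paddle build of this era, where `paddle._C_ops` re-exports the former `core.ops` bindings):

import numpy as np
import paddle
from paddle import _C_ops

paddle.disable_static()  # _C_ops is only valid on the dygraph fast path
x = paddle.to_tensor(np.array([[-1., 2.], [3., -4.]], dtype='float32'))

y = _C_ops.relu(x)               # no attributes: inputs only
z = _C_ops.flip(x, "axis", [0])  # one attribute, passed as 'name', value

np.testing.assert_array_equal(y.numpy(), np.maximum(x.numpy(), 0.))
np.testing.assert_array_equal(z.numpy(), x.numpy()[::-1])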
3 changes: 2 additions & 1 deletion python/paddle/fluid/clip.py
@@ -29,6 +29,7 @@
from .framework import in_dygraph_mode
from .layer_helper import LayerHelper
from .framework import default_main_program
+from paddle import _C_ops

__all__ = [
'set_gradient_clip', 'ErrorClipByValue', 'ClipGradByValue',
@@ -47,7 +48,7 @@ def _squared_l2_norm(x):
        return sum_square

    if in_dygraph_mode():
-        return core.ops.squared_l2_norm(x)
+        return _C_ops.squared_l2_norm(x)

    op_type = 'squared_l2_norm'
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
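A quick numeric sanity check of this fast path; a sketch assuming the same-era `paddle._C_ops` binding, where `squared_l2_norm` returns sum(x**2) as a single-element tensor:

import numpy as np
import paddle
from paddle import _C_ops

paddle.disable_static()
x = paddle.to_tensor(np.random.rand(3, 4).astype('float32'))

# The raw binding should match the composition of public ops.
fast = _C_ops.squared_l2_norm(x)
ref = paddle.sum(paddle.square(x))
np.testing.assert_allclose(fast.numpy(), ref.numpy(), rtol=1e-5)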
@@ -140,7 +140,7 @@ def test_ops_elementwise_mul(self):
        b = np.random.uniform(0.1, 1, [51, 76]).astype(np.float32)
        x = paddle.to_tensor(a)
        y = paddle.to_tensor(b)
-        res = core.ops.elementwise_mul(x, y)
+        res = _C_ops.elementwise_mul(x, y)

        # expected
        expected = LOWEST_WARNING_POSTION
@@ -24,6 +24,7 @@
from paddle.dataset.common import DATA_HOME
from paddle.fluid.framework import core, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
+from paddle import _C_ops

import sys
sys.path.append("./tokenizer")
@@ -75,7 +76,7 @@ def forward(self,
                is_split_into_words=False,
                pad_to_max_seq_len=False):
        if in_dygraph_mode():
-            input_ids, seg_ids = core.ops.faster_tokenizer(
+            input_ids, seg_ids = _C_ops.faster_tokenizer(
                self.vocab, text, text_pair, "do_lower_case", do_lower_case,
                "max_seq_len", max_seq_len, "pad_to_max_seq_len",
                pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
@@ -16,6 +16,7 @@
import paddle.fluid as fluid
import numpy as np
import unittest
+from paddle import _C_ops

if fluid.is_compiled_with_cuda():
    fluid.core.globals()['FLAGS_cudnn_deterministic'] = True
@@ -112,8 +113,8 @@ def __init__(self, num_channels, epsilon=1e-5):

    def forward(self, input):
        if fluid.in_dygraph_mode():
-            out, _, _ = fluid.core.ops.instance_norm(
-                input, self.scale, self.bias, 'epsilon', self.epsilon)
+            out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
+                                             'epsilon', self.epsilon)
            return out
        else:
            return fluid.layers.instance_norm(
@@ -21,14 +21,15 @@
import paddle.fluid.core as core
from paddle.fluid.dygraph.jit import TracedLayer
import numpy as np
+from paddle import _C_ops


class TestTracedLayer(fluid.dygraph.Layer):
    def __init__(self, name_scope):
        super(TestTracedLayer, self).__init__(name_scope)

    def forward(self, input):
-        return core.ops.relu(input)
+        return _C_ops.relu(input)


class TestVariable(unittest.TestCase):
@@ -46,7 +47,7 @@ def test_elementwise_add(self):
            x.stop_gradient = False

            res1 = layers.elementwise_add(x, y)
-            res2 = core.ops.elementwise_add(x, y)
+            res2 = _C_ops.elementwise_add(x, y)

            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

@@ -58,7 +59,7 @@ def test_elementwise_mul(self):
            y = fluid.dygraph.to_variable(b)

            res1 = layers.elementwise_mul(x, y)
-            res2 = core.ops.elementwise_mul(x, y)
+            res2 = _C_ops.elementwise_mul(x, y)

            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

@@ -68,7 +69,7 @@ def test_relu(self):
            x = fluid.dygraph.to_variable(a)

            res1 = layers.relu(x)
-            res2 = core.ops.relu(x)
+            res2 = _C_ops.relu(x)

            self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))

@@ -81,7 +82,7 @@ def test_trace_backward(self):
            x.stop_gradient = False
            y.stop_gradient = False

-            loss = core.ops.elementwise_mul(x, y)
+            loss = _C_ops.elementwise_mul(x, y)

            loss.backward()
            x_grad = x.gradient()
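The parity these tests assert is easy to reproduce interactively; a minimal sketch, with shapes and values chosen here purely for illustration:

import numpy as np
import paddle
from paddle import _C_ops

paddle.disable_static()
a = np.random.rand(4, 5).astype('float32')
b = np.random.rand(4, 5).astype('float32')
x, y = paddle.to_tensor(a), paddle.to_tensor(b)

# The raw bindings and the public operators should agree exactly.
np.testing.assert_array_equal(
    _C_ops.elementwise_add(x, y).numpy(), (x + y).numpy())
np.testing.assert_array_equal(
    _C_ops.elementwise_mul(x, y).numpy(), (x * y).numpy())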
5 changes: 3 additions & 2 deletions python/paddle/fluid/tests/unittests/test_sum_op.py
@@ -24,6 +24,7 @@
from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.op_test import (
    OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops


class TestSumOp(OpTest):
@@ -382,11 +383,11 @@ class TestSumOpError(unittest.TestCase):
    def test_errors(self):
        def test_empty_list_input():
            with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

        def test_list_of_none_input():
            with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

        self.assertRaises(Exception, test_empty_list_input)
        self.assertRaises(Exception, test_list_of_none_input)
5 changes: 3 additions & 2 deletions python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
@@ -25,6 +25,7 @@
from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.op_test import (
    OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops

paddle.enable_static()

@@ -171,11 +172,11 @@ class TestSumOpError(unittest.TestCase):
    def test_errors(self):
        def test_empty_list_input():
            with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

        def test_list_of_none_input():
            with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

        self.assertRaises(Exception, test_empty_list_input)
        self.assertRaises(Exception, test_list_of_none_input)
5 changes: 3 additions & 2 deletions python/paddle/incubate/operators/graph_send_recv.py
@@ -16,6 +16,7 @@
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid import core
+from paddle import _C_ops


def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
@@ -82,8 +83,8 @@ def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
            % pool_type)

    if in_dygraph_mode():
-        out, tmp = core.ops.graph_send_recv(x, src_index, dst_index,
-                                            'pool_type', pool_type.upper())
+        out, tmp = _C_ops.graph_send_recv(x, src_index, dst_index, 'pool_type',
+                                          pool_type.upper())
        return out

    check_variable_and_dtype(x, "X", ("float32", "float64", "int32", "int64"),
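For orientation, a small self-checking sketch of the wrapper this fast path serves (edge data invented here for illustration, and the 2.2-era export `paddle.incubate.graph_send_recv` is an assumption): the op gathers `x[src_index]` and sum-reduces into the `dst_index` rows.

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.array([[0., 2., 3.],
                               [1., 4., 5.],
                               [2., 6., 7.]], dtype='float32'))
src = paddle.to_tensor(np.array([0, 1, 2, 0], dtype='int32'))
dst = paddle.to_tensor(np.array([1, 2, 1, 0], dtype='int32'))

out = paddle.incubate.graph_send_recv(x, src, dst, pool_type="sum")

# Reference: out[i] = sum of x[src[j]] over edges j with dst[j] == i.
ref = np.zeros_like(x.numpy())
for s, d in zip(src.numpy(), dst.numpy()):
    ref[d] += x.numpy()[s]
np.testing.assert_allclose(out.numpy(), ref)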
3 changes: 2 additions & 1 deletion python/paddle/incubate/operators/softmax_mask_fuse.py
@@ -17,6 +17,7 @@
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid import core
+from paddle import _C_ops


def softmax_mask_fuse(x, mask, name=None):
@@ -58,7 +59,7 @@ def softmax_mask_fuse(x, mask, name=None):
            # [[[[0.02404429, 0.04658398, 0.02746007, ..., 0.01489375, 0.02397441, 0.02851614] ... ]]]
    """
    if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask(x, mask)
+        out = _C_ops.fused_softmax_mask(x, mask)
        return out
    helper = LayerHelper('fused_softmax_mask', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -17,6 +17,7 @@
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid import core
+from paddle import _C_ops


def softmax_mask_fuse_upper_triangle(x):
@@ -58,7 +59,7 @@ def softmax_mask_fuse_upper_triangle(x):
            #  ... ]]]
    """
    if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask_upper_triangle(x)
+        out = _C_ops.fused_softmax_mask_upper_triangle(x)
        return out

    helper = LayerHelper('fused_softmax_mask_upper_triangle', **locals())
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/common.py
@@ -1763,7 +1763,7 @@ class centers and the shape of sampled_class_center will be [num_positive_class_
        seed = default_main_program().random_seed

    if in_dygraph_mode():
-        remapped_label, sampled_class_center = core.ops.class_center_sample(
+        remapped_label, sampled_class_center = _C_ops.class_center_sample(
            label, 'num_classes', num_classes, 'num_samples', num_samples,
            'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
            seed is not None, 'seed', seed if seed is not None else 0)
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/loss.py
@@ -1320,7 +1320,7 @@ def margin_cross_entropy(logits,
        label = paddle.unsqueeze(label, axis=-1)

    if in_dygraph_mode():
-        softmax, loss = core.ops.margin_cross_entropy(
+        softmax, loss = _C_ops.margin_cross_entropy(
            logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks,
            'margin1', margin1, 'margin2', margin2, 'margin3', margin3, 'scale',
            scale, 'return_softmax', return_softmax)
6 changes: 3 additions & 3 deletions python/paddle/tensor/linalg.py
@@ -1430,7 +1430,7 @@ def det(x, name=None):

"""
if in_dygraph_mode():
return core.ops.determinant(x)
return _C_ops.determinant(x)

check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')

@@ -1485,7 +1485,7 @@ def slogdet(x, name=None):

    """
    if in_dygraph_mode():
-        return core.ops.slogdeterminant(x)
+        return _C_ops.slogdeterminant(x)

    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')

@@ -1633,7 +1633,7 @@ def matrix_power(x, n, name=None):
            # [ 1.80555556 , -1.91666667 , 0.44444444 ]]
    """
    if in_dygraph_mode():
-        return core.ops.matrix_power(x, "n", n)
+        return _C_ops.matrix_power(x, "n", n)

    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
    check_type(n, 'n', int, 'matrix_power')
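A quick numeric check of the migrated fast paths through the public wrappers; a sketch, with the 2x2 matrix chosen here for illustration and `paddle.linalg.det` / `paddle.linalg.matrix_power` assumed to be the era's public entry points:

import numpy as np
import paddle

paddle.disable_static()
a = np.array([[2., 1.], [1., 2.]], dtype='float32')
x = paddle.to_tensor(a)

# det([[2, 1], [1, 2]]) = 3
np.testing.assert_allclose(
    paddle.linalg.det(x).numpy(), np.linalg.det(a), rtol=1e-5)

# matrix_power(x, 2) is just x @ x
np.testing.assert_allclose(
    paddle.linalg.matrix_power(x, 2).numpy(), a @ a, rtol=1e-5)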
26 changes: 13 additions & 13 deletions python/paddle/tensor/manipulation.py
@@ -70,8 +70,8 @@ def fill_(x, value):
        raise TypeError(
            "The type of 'value' must be int or float, but received %s." %
            (type(value)))
-    return core.ops.fill_any_(x, "value_float",
-                              float(value), "value_int", int(value))
+    return _C_ops.fill_any_(x, "value_float",
+                            float(value), "value_int", int(value))


setattr(core.VarBase, 'fill_', fill_)
@@ -102,7 +102,7 @@ def zero_(x):
            print(tensor.tolist()) #[0, 0, 0, 0, 0]

    """
-    return core.ops.fill_any_(x, "value_float", 0., "value_int", int(0))
+    return _C_ops.fill_any_(x, "value_float", 0., "value_int", int(0))


setattr(core.VarBase, 'zero_', zero_)
@@ -148,10 +148,10 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
            'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
        )
    if len(inshape) == 2:
-        return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset,
-                                       'wrap', wrap)
-    return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
-                                   True)
+        return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset,
+                                     'wrap', wrap)
+    return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
+                                 True)


setattr(core.VarBase, 'fill_diagonal_', fill_diagonal_)
@@ -182,10 +182,10 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
        y = y.reshape([1, -1])

    if inplace:
-        return core.ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
-                                              'offset', offset)
-    return core.ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
-                                         'offset', offset)
+        return _C_ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
+                                            'offset', offset)
+    return _C_ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
+                                       'offset', offset)


def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
@@ -475,7 +475,7 @@ def flip(x, axis, name=None):
    if isinstance(axis, int):
        axis = [axis]
    if in_dygraph_mode():
-        return core.ops.flip(x, "axis", axis)
+        return _C_ops.flip(x, "axis", axis)

    helper = LayerHelper("flip", **locals())
    check_type(x, 'X', (Variable), 'flip')
@@ -1107,7 +1107,7 @@ def unique_consecutive(x,
        axis = [axis]
    attr_dtype = convert_np_dtype_to_dtype_(dtype)
    if in_dygraph_mode():
-        out, inverse, counts = core.ops.unique_consecutive(
+        out, inverse, counts = _C_ops.unique_consecutive(
            x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
            'return_counts', return_counts, 'axis', axis)
        outs = [out]
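The in-place `fill_` / `zero_` methods patched onto `core.VarBase` above, and `flip`, are easy to spot-check; a minimal sketch, with values chosen here for illustration:

import numpy as np
import paddle

paddle.disable_static()
t = paddle.to_tensor(np.arange(6, dtype='float32').reshape(2, 3))

# flip along axis 0 reverses the rows
np.testing.assert_array_equal(
    paddle.flip(t, axis=[0]).numpy(), t.numpy()[::-1])

# fill_ / zero_ mutate the tensor in place via fill_any_
t.fill_(5)
assert t.numpy().tolist() == [[5., 5., 5.], [5., 5., 5.]]
t.zero_()
assert not t.numpy().any()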
4 changes: 2 additions & 2 deletions python/paddle/tensor/random.py
@@ -555,8 +555,8 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
            # [-0.34646994, -0.45116323, -0.09902662, -0.11397249], # random
            # [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random
    """
-    return core.ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
-                                            seed)
+    return _C_ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
+                                          seed)


def randint(low=0, high=None, shape=[1], dtype=None, name=None):
6 changes: 3 additions & 3 deletions python/paddle/text/viterbi_decode.py
@@ -16,6 +16,7 @@
from ..fluid.framework import core, in_dygraph_mode
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type
+from paddle import _C_ops

__all__ = ['viterbi_decode', 'ViterbiDecoder']

@@ -58,9 +59,8 @@ def viterbi_decode(potentials,
            scores, path = paddle.text.viterbi_decode(emission, transition, length, False) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]]
    """
    if in_dygraph_mode():
-        return core.ops.viterbi_decode(potentials, transition_params, lengths,
-                                       'include_bos_eos_tag',
-                                       include_bos_eos_tag)
+        return _C_ops.viterbi_decode(potentials, transition_params, lengths,
+                                     'include_bos_eos_tag', include_bos_eos_tag)
    check_variable_and_dtype(potentials, 'input', ['float32', 'float64'],
                             'viterbi_decode')
    check_variable_and_dtype(transition_params, 'transitions',