2 changes: 1 addition & 1 deletion test/mkldnn/mkldnn_op_test.py
@@ -168,7 +168,7 @@ def check_if_mkldnn_batchnorm_primitives_exist_in_bwd(
for id, name in enumerate(test_case.fetch_list):
__assert_close(test_case, var_dict[name], out[id], name)

print("MKLDNN op test forward passed: ", str(place), data_layout)
print("ONEDNN op test forward passed: ", str(place), data_layout)


def format_reorder(out, size):
34 changes: 17 additions & 17 deletions test/mkldnn/test_activation_bf16_mkldnn_op.py
@@ -25,7 +25,7 @@


@OpTestTool.skip_if_not_cpu_bf16()
-class MKLDNNBF16ActivationOp(metaclass=abc.ABCMeta):
+class ONEDNNBF16ActivationOp(metaclass=abc.ABCMeta):
@abc.abstractmethod
def config(self):
pass
@@ -74,7 +74,7 @@ def test_check_grad(self):
)


-class TestMKLDNNSigmoidBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNSigmoidBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "sigmoid"
self.check_pir_onednn = True
@@ -86,7 +86,7 @@ def op_grad(self, dout, x):
return dout * self.op_forward(x) * (1 - self.op_forward(x))


-class TestMKLDNNSqrtBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNSqrtBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "sqrt"
self.check_pir_onednn = True
@@ -101,7 +101,7 @@ def op_grad(self, dout, x):
return dout / (2 * np.sqrt(x))


-class TestMKLDNNGeluErfBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNGeluErfBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "gelu"
self.check_pir_onednn = True
@@ -117,12 +117,12 @@ def op_grad(self, dout, x):
)


-class TestMKLDNNGeluErfDim2BF16Op(TestMKLDNNGeluErfBF16Op):
+class TestONEDNNGeluErfDim2BF16Op(TestONEDNNGeluErfBF16Op):
def init_data(self):
self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)


-class TestMKLDNNGeluTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNGeluTanhBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "gelu"
self.check_pir_onednn = True
@@ -150,12 +150,12 @@ def set_attrs(self):
self.attrs = {"use_mkldnn": True, "approximate": True}


-class TestMKLDNNGeluTanhDim2BF16Op(TestMKLDNNGeluTanhBF16Op):
+class TestONEDNNGeluTanhDim2BF16Op(TestONEDNNGeluTanhBF16Op):
def init_data(self):
self.x = np.random.uniform(-1, 1, [11, 17]).astype(np.float32)


-class TestMKLDNNReluBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNReluBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "relu"
self.check_pir_onednn = True
@@ -167,7 +167,7 @@ def op_grad(self, dout, x):
return dout


-class TestMKLDNNMishBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNMishBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "mish"
self.check_pir_onednn = True
@@ -186,7 +186,7 @@ def op_grad(self, dout, x):
return dout * ((np.exp(x) * omega) / delta**2)


-class TestMKLDNNRelu6BF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNRelu6BF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "relu6"
self.check_pir_onednn = True
@@ -198,7 +198,7 @@ def op_grad(self, dout, x):
return np.where((x > 0) & (x <= 6), dout, 0)


-class TestMKLDNNLeakyReluBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNLeakyReluBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "leaky_relu"
self.check_pir_onednn = True
@@ -214,7 +214,7 @@ def set_attrs(self):
self.attrs = {"use_mkldnn": True, "alpha": self.alpha}


-class TestMKLDNNSwishBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNSwishBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "swish"
self.check_pir_onednn = True
@@ -233,7 +233,7 @@ def set_attrs(self):
self.attrs = {"use_mkldnn": True, "beta": self.beta}


-class TestMKLDNNHardSwishBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNHardSwishBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "hard_swish"
self.check_pir_onednn = True
@@ -247,7 +247,7 @@ def op_grad(self, dout, x):
return np.where(result > 3, dout, dout * (2 * x + 3) / 6)


-class TestMKLDNNTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNTanhBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "tanh"
self.check_pir_onednn = True
@@ -259,7 +259,7 @@ def op_grad(self, dout, x):
return dout * (1 - np.tanh(x) ** 2)


-class TestMKLDNNAbsBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNAbsBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "abs"
self.check_pir_onednn = True
@@ -271,7 +271,7 @@ def op_grad(self, dout, x):
return dout * np.sign(x)


-class TestMKLDNNEluBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNEluBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "elu"
self.check_pir_onednn = True
@@ -287,7 +287,7 @@ def set_attrs(self):
self.attrs = {"use_mkldnn": True, "alpha": self.alpha}


-class TestMKLDNNExpBF16Op(MKLDNNBF16ActivationOp, TestActivation):
+class TestONEDNNExpBF16Op(ONEDNNBF16ActivationOp, TestActivation):
def config(self):
self.op_type = "exp"
self.check_pir_onednn = True
14 changes: 7 additions & 7 deletions test/mkldnn/test_batch_norm_mkldnn_op.py
@@ -32,7 +32,7 @@
_set_use_system_allocator(True)


-class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining):
+class TestONEDNNBatchNormOpTraining(TestBatchNormOpTraining):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_formats = ["NCHW"]
@@ -81,15 +81,15 @@ def test_forward_backward(self):
super().test_forward_backward()


-class TestMKLDNNBatchNormOpTraining_NHWC(TestMKLDNNBatchNormOpTraining):
+class TestONEDNNBatchNormOpTraining_NHWC(TestONEDNNBatchNormOpTraining):
def init_kernel_type(self):
self.use_mkldnn = True
self.data_formats = ["NHWC"]


-class TestMKLDNNBatchNormOpExistedPrimitives(TestMKLDNNBatchNormOpTraining):
+class TestONEDNNBatchNormOpExistedPrimitives(TestONEDNNBatchNormOpTraining):
def init_test_case(self):
-TestMKLDNNBatchNormOpTraining.init_test_case(self)
+TestONEDNNBatchNormOpTraining.init_test_case(self)
self.fetch_list = ['y', 'x@GRAD']

def test_forward_backward(self):
@@ -136,7 +136,7 @@ def test_forward_backward(self):
)


-class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference):
+class TestONEDNNBatchNormOpInference(TestBatchNormOpInference):
def init_kernel_type(self):
self.use_mkldnn = True

@@ -154,7 +154,7 @@ def test_check_output(self):
)


-class TestMKLDNNBatchNormOpInference_NHWC(TestMKLDNNBatchNormOpInference):
+class TestONEDNNBatchNormOpInference_NHWC(TestONEDNNBatchNormOpInference):
def test_check_output(self):
place = core.CPUPlace()
data_format = "NHWC"
@@ -164,7 +164,7 @@ def test_check_output(self):
)


-class TestMKLDNNBatchNormOpWithReluInference(TestBatchNormOpInference):
+class TestONEDNNBatchNormOpWithReluInference(TestBatchNormOpInference):
def init_kernel_type(self):
self.use_mkldnn = True
self.fuse_with_relu = True
10 changes: 5 additions & 5 deletions test/mkldnn/test_cast_mkldnn_op.py
@@ -24,7 +24,7 @@
@unittest.skipIf(
not core.supports_bfloat16(), "place does not support BF16 evaluation"
)
-class TestCastBF16ToFP32MKLDNNOp(OpTest):
+class TestCastBF16ToFP32ONEDNNOp(OpTest):
def init_data(self):
self.out = np.random.random(size=self.shape).astype("float32")
self.x = convert_float_to_uint16(self.out)
@@ -64,25 +64,25 @@ def init_shape(self):
self.shape = [10, 10]


-class TestCastFP32ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
+class TestCastFP32ToBF16ONEDNNOp(TestCastBF16ToFP32ONEDNNOp):
def init_data(self):
self.x = np.random.random(size=[2, 6]).astype("float32")
self.out = convert_float_to_uint16(self.x)


-class TestCastBF16ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
+class TestCastBF16ToBF16ONEDNNOp(TestCastBF16ToFP32ONEDNNOp):
def init_data(self):
self.x = np.random.random(size=[6, 13]).astype("uint16")
self.out = self.x


-class TestCastFP32ToFP32MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
+class TestCastFP32ToFP32ONEDNNOp(TestCastBF16ToFP32ONEDNNOp):
def init_data(self):
self.x = np.random.random(size=[7, 15]).astype("float32")
self.out = self.x


-class TestCastBF16ToFP32MKLDNNOp_ZeroDim(TestCastBF16ToFP32MKLDNNOp):
+class TestCastBF16ToFP32ONEDNNOp_ZeroDim(TestCastBF16ToFP32ONEDNNOp):
def init_shape(self):
self.shape = []

18 changes: 9 additions & 9 deletions test/mkldnn/test_elementwise_div_mkldnn_op.py
@@ -26,7 +26,7 @@
not (isinstance(_current_expected_place(), core.CPUPlace)),
"GPU is not supported",
)
-class TestMKLDNNElementwiseDivOp(OpTest):
+class TestONEDNNElementwiseDivOp(OpTest):
def setUp(self):
self.op_type = "elementwise_div"
self.init_dtype()
@@ -73,21 +73,21 @@ def test_check_output(self):
self.check_output(check_pir_onednn=True)


-class TestMKLDNNElementwiseDivOp2(TestMKLDNNElementwiseDivOp):
+class TestONEDNNElementwiseDivOp2(TestONEDNNElementwiseDivOp):
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [100]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [100]).astype(self.dtype)
self.out = np.divide(self.x, self.y)


-class TestMKLDNNElementwiseDivOp3(TestMKLDNNElementwiseDivOp):
+class TestONEDNNElementwiseDivOp3(TestONEDNNElementwiseDivOp):
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
self.out = np.divide(self.x, self.y)


-class TestMKLDNNElementwiseDivOp4(TestMKLDNNElementwiseDivOp):
+class TestONEDNNElementwiseDivOp4(TestONEDNNElementwiseDivOp):
def init_input_output(self):
self.x = np.random.uniform(1, 2, [2, 3, 4, 32]).astype(self.dtype)
self.y = np.random.uniform(1, 2, [4, 32]).astype(self.dtype)
@@ -100,7 +100,7 @@ def test_check_grad_ignore_x(self):
pass


-class TestMKLDNNElementwiseDivOp5(TestMKLDNNElementwiseDivOp):
+class TestONEDNNElementwiseDivOp5(TestONEDNNElementwiseDivOp):
def init_input_output(self):
self.x = np.random.uniform(1, 2, [2, 3, 4, 100]).astype(self.dtype)
self.y = np.random.uniform(1, 2, [100]).astype(self.dtype)
@@ -113,7 +113,7 @@ def test_check_grad_ignore_x(self):
pass


-class TestMKLDNNElementwiseDivOpZeroDim(TestMKLDNNElementwiseDivOp):
+class TestONEDNNElementwiseDivOpZeroDim(TestONEDNNElementwiseDivOp):
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [100]).astype(self.dtype)
self.y = np.array(3.0).astype(self.dtype)
@@ -126,7 +126,7 @@ def test_check_grad_ignore_x(self):
pass


-class TestMKLDNNElementwiseDivOpZeroDim2(TestMKLDNNElementwiseDivOp):
+class TestONEDNNElementwiseDivOpZeroDim2(TestONEDNNElementwiseDivOp):
def init_input_output(self):
self.x = np.array(3.0).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [100]).astype(self.dtype)
@@ -139,7 +139,7 @@ def test_check_grad_ignore_x(self):
pass


-class TestMKLDNNElementwiseDivOpZeroDim3(TestMKLDNNElementwiseDivOp):
+class TestONEDNNElementwiseDivOpZeroDim3(TestONEDNNElementwiseDivOp):
def init_input_output(self):
self.x = np.array(3.0).astype(self.dtype)
self.y = np.array(3.0).astype(self.dtype)
@@ -153,7 +153,7 @@ def test_check_grad_ignore_x(self):


@OpTestTool.skip_if_not_cpu_bf16()
-class TestBf16(TestMKLDNNElementwiseDivOp):
+class TestBf16(TestONEDNNElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.init_dtype()
2 changes: 1 addition & 1 deletion test/mkldnn/test_fc_bf16_mkldnn_op.py
@@ -76,7 +76,7 @@ def test_check_grad_no_weight(self):
pass


-class TestFCMKLDNNOp1(TestFcBf16MklDNNOp):
+class TestFCONEDNNOp1(TestFcBf16MklDNNOp):
def generate_data(self):
self.matrix = MatrixGenerate(2, 15, 48, 2, 2)
self.bias = np.random.random(48).astype(np.float32)
4 changes: 2 additions & 2 deletions test/mkldnn/test_fc_mkldnn_op.py
@@ -29,7 +29,7 @@ def __init__(self, mb, ic, oc, h, w):
self.weights = np.random.random((ic * h * w, oc)).astype("float32")


-class TestFCMKLDNNOp(OpTest):
+class TestFCONEDNNOp(OpTest):
def create_data(self):
self.matrix = MatrixGenerate(1, 10, 15, 3, 3)
self.bias = np.random.random(15).astype("float32")
@@ -64,7 +64,7 @@ def test_check_grad_no_weight(self):
pass


-class TestFCMKLDNNOp1(TestFCMKLDNNOp):
+class TestFCONEDNNOp1(TestFCONEDNNOp):
def create_data(self):
self.matrix = MatrixGenerate(2, 15, 48, 2, 2)
self.bias = np.random.random(48).astype("float32")
8 changes: 4 additions & 4 deletions test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py
@@ -25,7 +25,7 @@
@unittest.skipIf(
not core.supports_bfloat16(), "place does not support BF16 evaluation"
)
-class TestFusionGRUBF16MKLDNNOp(OpTest):
+class TestFusionGRUBF16ONEDNNOp(OpTest):
def set_confs(self):
pass

@@ -134,17 +134,17 @@ def setUp(self):
}


-class TestFusionGRUINT8MKLDNNOp2(TestFusionGRUBF16MKLDNNOp):
+class TestFusionGRUINT8ONEDNNOp2(TestFusionGRUBF16ONEDNNOp):
def set_confs(self):
self.origin_mode = False


-class TestFusionGRUINT8MKLDNNOp3(TestFusionGRUBF16MKLDNNOp):
+class TestFusionGRUINT8ONEDNNOp3(TestFusionGRUBF16ONEDNNOp):
def set_confs(self):
self.with_bias = False


-class TestFusionGRUINT8MKLDNNBF16WeightsOp(TestFusionGRUBF16MKLDNNOp):
+class TestFusionGRUINT8ONEDNNBF16WeightsOp(TestFusionGRUBF16ONEDNNOp):
def set_confs(self):
self.weights_dtype = 'bf16'
