Commit 42fa6af

Fix
1 parent ce7f373 commit 42fa6af

22 files changed: +126 -126 lines changed
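
Every hunk below applies the same mechanical rename: the Python-side flag self.use_mkldnn becomes self.use_onednn, while the operator attribute key stays the legacy string 'use_mkldnn', presumably because the underlying op still registers that name. A minimal sketch of the recurring pattern (ExampleOpTest is hypothetical, not part of the commit):

# Hypothetical condensed view of the rename applied across the 22 files.
class ExampleOpTest:
    def init_kernel_type(self):
        self.use_onednn = False                      # was: self.use_mkldnn

    def setUp(self):
        self.init_kernel_type()
        # Only the Python attribute is renamed; the attr key is unchanged.
        self.attrs = {'use_mkldnn': self.use_onednn}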

test/legacy_test/test_batch_norm_op.py

Lines changed: 5 additions & 5 deletions
@@ -220,7 +220,7 @@ def __set_tensor__(name, data=None):
 class TestBatchNormOpInference(unittest.TestCase):
     def setUp(self):
         self.dtype = np.float32
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.fuse_with_relu = False
         self.init_kernel_type()

@@ -317,7 +317,7 @@ def check_with_place(self, place, data_layout, dtype, shape):
             # attrs
             is_test=True,
             data_layout=data_layout,
-            use_mkldnn=self.use_mkldnn,
+            use_mkldnn=self.use_onednn,
             fuse_with_relu=self.fuse_with_relu,
             epsilon=epsilon,
         )

@@ -329,7 +329,7 @@ def check_with_place(self, place, data_layout, dtype, shape):
         # dims will be in NCHW order as it is MKL-DNN way
         # of memory descripting. So we need to convert NCHW
         # dims into NHWC.
-        if data_layout == "NHWC" and self.use_mkldnn:
+        if data_layout == "NHWC" and self.use_onednn:
             # Create executor to have MKL-DNN cache
             # cleared after NHWC unit test
             place = core.CPUPlace()

@@ -482,7 +482,7 @@ def init_kernel_type(self):
 class TestFP16BatchNormOpInference(TestBatchNormOpInference):
     def setUp(self):
         self.dtype = np.float16
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.fuse_with_relu = False
         self.init_kernel_type()

@@ -517,7 +517,7 @@ def test_check_output(self):
 class TestBF16BatchNormOpInference(TestBatchNormOpInference):
     def setUp(self):
         self.dtype = np.uint16
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.fuse_with_relu = False
         self.init_kernel_type()
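
The middle hunk's comment notes that oneDNN describes memory with NCHW-ordered dims even when the tensor is NHWC, so the test converts the dims before comparing. A minimal sketch of that reordering (variable names are illustrative, not from the test):

# Illustrative only: reorder NCHW-ordered dims into NHWC order.
nchw_dims = [2, 3, 4, 5]        # [N, C, H, W]
n, c, h, w = nchw_dims
nhwc_dims = [n, h, w, c]        # [N, H, W, C] -> [2, 4, 5, 3]
assert nhwc_dims == [2, 4, 5, 3]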

test/legacy_test/test_broadcast_tensors_op.py

Lines changed: 4 additions & 4 deletions
@@ -111,8 +111,8 @@ def set_dtype(self):

     def setUp(self):
         self.op_type = "broadcast_tensors"
-        self.use_mkldnn = False
-        self.attrs = {'use_mkldnn': self.use_mkldnn}
+        self.use_onednn = False
+        self.attrs = {'use_mkldnn': self.use_onednn}
         self.test_gen_func_list = [
             gen_rank_diff_test,
             gen_no_broadcast_test,

@@ -197,8 +197,8 @@ def setUp(self):
         self.op_type = "broadcast_tensors"
         self.dtype = np.uint16
         self.np_dtype = "float32"
-        self.use_mkldnn = False
-        self.attrs = {'use_mkldnn': self.use_mkldnn}
+        self.use_onednn = False
+        self.attrs = {'use_mkldnn': self.use_onednn}
         self.test_gen_func_list = [
             gen_rank_diff_test,
             gen_no_broadcast_test,
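
The second hunk is the BF16 variant, where self.dtype is np.uint16: bfloat16 values are stored as raw 16-bit patterns while the reference numerics stay float32 (hence self.np_dtype). A rough illustration of that representation (truncating for simplicity; real conversion rounds to nearest):

import numpy as np

# bfloat16 keeps the high 16 bits of an IEEE-754 float32, so a uint16
# array can carry the bit patterns.
ref = np.array([1.5, -2.25], dtype=np.float32)
bf16_bits = (ref.view(np.uint32) >> 16).astype(np.uint16)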

test/legacy_test/test_channel_shuffle.py

Lines changed: 1 addition & 1 deletion
@@ -332,7 +332,7 @@ def setUp(self):
         n, c, h, w = 2, 9, 4, 4
         self.python_api = paddle.nn.functional.channel_shuffle
         self.dtype = np.uint16
-        self.use_mkldnn = False
+        self.use_onednn = False

         if self.format == "NCHW":
             shape = [n, c, h, w]

test/legacy_test/test_conv2d_op.py

Lines changed: 12 additions & 12 deletions
@@ -420,7 +420,7 @@ def setUp(self):
         self.use_cudnn = False
         self.exhaustive_search = False
         self.use_cuda = False
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.fuse_relu_before_depthwise_conv = False
         self.data_format = "AnyLayout"
         self.dtype = np.float64

@@ -483,7 +483,7 @@ def setUp(self):
             'groups': self.groups,
             'dilations': self.dilations,
             'use_cudnn': self.use_cudnn,
-            'use_mkldnn': self.use_mkldnn,
+            'use_mkldnn': self.use_onednn,
             'data_format': self.data_format,
             'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv,
             'exhaustive_search': self.exhaustive_search,

@@ -501,7 +501,7 @@ def test_check_output(self):
         self.check_output_with_place(
             place,
             atol=1e-5,
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -517,7 +517,7 @@ def test_check_grad(self):
             {'Input', 'Filter'},
             'Output',
             max_relative_error=0.02,
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -534,7 +534,7 @@ def test_check_grad_no_filter(self):
             'Output',
             max_relative_error=0.02,
             no_grad_set={'Filter'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -550,7 +550,7 @@ def test_check_grad_no_input(self):
             ['Filter'],
             'Output',
             no_grad_set={'Input'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -768,7 +768,7 @@ def setUp(self):
         self.use_cudnn = False
         self.exhaustive_search = False
         self.use_cuda = False
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.fuse_relu_before_depthwise_conv = False
         self.dtype = np.float64
         self.init_kernel_type()

@@ -817,7 +817,7 @@ def setUp(self):
             'groups': self.groups,
             'dilations': self.dilations,
             'use_cudnn': self.use_cudnn,
-            'use_mkldnn': self.use_mkldnn,
+            'use_mkldnn': self.use_onednn,
             'data_format': self.data_format,
             'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv,
             'exhaustive_search': self.exhaustive_search,

@@ -835,7 +835,7 @@ def test_check_output(self):
         self.check_output_with_place(
             place,
             atol=1e-5,
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -849,7 +849,7 @@ def test_check_grad(self):
             {'Input', 'Filter'},
             'Output',
             max_relative_error=0.02,
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -864,7 +864,7 @@ def test_check_grad_no_filter(self):
             'Output',
             max_relative_error=0.02,
             no_grad_set={'Filter'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -878,7 +878,7 @@ def test_check_grad_no_input(self):
             ['Filter'],
             'Output',
             no_grad_set={'Input'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir_onednn=self.check_pir_onednn,
         )
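
Every output and gradient check in this file threads the same expression. Factored out as a hypothetical helper (not in the commit), the guard reads:

# Hypothetical helper naming the guard these tests repeat inline:
# dygraph verification only runs when the oneDNN path is off.
def should_check_dygraph(self):
    return not self.use_onednn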

test/legacy_test/test_conv2d_transpose_op.py

Lines changed: 7 additions & 7 deletions
@@ -186,7 +186,7 @@ def setUp(self):
         self.need_check_grad = True
         self.is_test = False
         self.use_cudnn = False
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.output_size = None
         self.output_padding = []
         self.data_format = "NCHW"

@@ -210,7 +210,7 @@ def setUp(self):
             'dilations': self.dilations,
             'use_cudnn': self.use_cudnn,
             'is_test': self.is_test,
-            'use_mkldnn': self.use_mkldnn,
+            'use_mkldnn': self.use_onednn,
             'data_format': self.data_format,
         }
         if self.output_size is not None:

@@ -241,12 +241,12 @@ def test_check_output(self):
             self.check_output_with_place(
                 place,
                 atol=1e-5,
-                check_dygraph=(not self.use_mkldnn),
+                check_dygraph=(not self.use_onednn),
                 check_pir=True,
             )
         else:
             self.check_output(
-                check_dygraph=(not self.use_mkldnn), check_pir=True
+                check_dygraph=(not self.use_onednn), check_pir=True
             )

     def test_check_grad_no_input(self):

@@ -813,12 +813,12 @@ def test_check_output(self):
             self.check_output_with_place(
                 place,
                 atol=0.02,
-                check_dygraph=(not self.use_mkldnn),
+                check_dygraph=(not self.use_onednn),
                 check_pir=True,
             )
         else:
             self.check_output(
-                check_dygraph=(not self.use_mkldnn), check_pir=True
+                check_dygraph=(not self.use_onednn), check_pir=True
             )

     def test_check_grad_no_input(self):

@@ -1012,7 +1012,7 @@ def test_check_output(self):
             self.check_output_with_place(
                 place,
                 atol=0.02,
-                check_dygraph=(not self.use_mkldnn),
+                check_dygraph=(not self.use_onednn),
                 check_pir=True,
             )
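
These tests branch on the device: with cuDNN they verify through check_output_with_place at a looser tolerance, otherwise through plain check_output. A sketch of the surrounding control flow (only the two call sites appear in the hunks; the condition and place are assumptions):

# Assumed control flow around the hunks above.
if self.use_cudnn:
    place = core.CUDAPlace(0)                      # assumed
    self.check_output_with_place(
        place, atol=0.02, check_dygraph=(not self.use_onednn), check_pir=True
    )
else:
    self.check_output(check_dygraph=(not self.use_onednn), check_pir=True)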

test/legacy_test/test_conv3d_op.py

Lines changed: 12 additions & 12 deletions
@@ -208,7 +208,7 @@ def test_check_output(self):
         place = core.CUDAPlace(0)
         self.check_output_with_place(
             place,
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -222,7 +222,7 @@ def test_check_grad_no_filter(self):
             ['Input'],
             'Output',
             no_grad_set={'Filter'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             user_defined_grads=[numeric_grads],
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,

@@ -237,7 +237,7 @@ def test_check_grad_no_input(self):
             ['Filter'],
             'Output',
             no_grad_set={'Input'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             user_defined_grads=[numeric_grads],
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,

@@ -253,7 +253,7 @@ def test_check_grad(self):
             ['Input', 'Filter'],
             'Output',
             user_defined_grads=[numeric_input_grads, numeric_filter_grads],
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -393,7 +393,7 @@ def setUp(self):
         self.op_type = "conv3d"
         self.python_api = conv3d_wrapper
         self.use_cudnn = False
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.data_format = "AnyLayout"
         self.dtype = np.float64
         self.init_kernel_type()

@@ -444,7 +444,7 @@ def setUp(self):
             'groups': self.groups,
             'dilations': self.dilations,
             'use_cudnn': self.use_cudnn,
-            'use_mkldnn': self.use_mkldnn,
+            'use_mkldnn': self.use_onednn,
             'data_format': self.data_format,
         }
         self.outputs = {'Output': output}

@@ -458,7 +458,7 @@ def test_check_output(self):
         self.check_output_with_place(
             place,
             atol=1e-5,
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -471,7 +471,7 @@ def test_check_grad(self):
             {'Input', 'Filter'},
             'Output',
             max_relative_error=0.03,
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -485,7 +485,7 @@ def test_check_grad_no_filter(self):
             'Output',
             max_relative_error=0.03,
             no_grad_set={'Filter'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -499,7 +499,7 @@ def test_check_grad_no_input(self):
             'Output',
             max_relative_error=0.03,
             no_grad_set={'Input'},
-            check_dygraph=(not self.use_mkldnn),
+            check_dygraph=(not self.use_onednn),
             check_pir=True,
             check_pir_onednn=self.check_pir_onednn,
         )

@@ -764,7 +764,7 @@ def setUp(self):
         self.op_type = "conv3d"
         self.python_api = conv3d_wrapper
         self.use_cudnn = False
-        self.use_mkldnn = False
+        self.use_onednn = False
         self.data_format = "NCDHW"
         self.dtype = np.float64
         self.init_kernel_type()

@@ -804,7 +804,7 @@ def setUp(self):
             'groups': self.groups,
             'dilations': self.dilations,
             'use_cudnn': self.use_cudnn,
-            'use_mkldnn': self.use_mkldnn,
+            'use_mkldnn': self.use_onednn,
             'data_format': self.data_format,
         }
         self.outputs = {'Output': output}
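
Several conv3d checks pass precomputed numeric_grads through user_defined_grads rather than relying on the framework's default numeric differentiation. The underlying idea, as a self-contained central-difference sketch (helper name hypothetical):

import numpy as np

def central_difference_grad(f, x, eps=1e-4):
    # Numeric gradient of a scalar-valued f at x; the same kind of
    # reference array that user_defined_grads supplies to check_grad.
    grad = np.zeros_like(x)
    for idx in np.ndindex(x.shape):
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f(x)
        x[idx] = orig - eps
        f_minus = f(x)
        x[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2 * eps)
    return grad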

test/legacy_test/test_elementwise_add_op.py

Lines changed: 6 additions & 6 deletions
@@ -29,7 +29,7 @@

 class TestElementwiseAddOp(OpTest):
     def init_kernel_type(self):
-        self.use_mkldnn = False
+        self.use_onednn = False

     def setUp(self):
         self.op_type = "elementwise_add"

@@ -47,11 +47,11 @@ def setUp(self):
             'X': OpTest.np_dtype_to_base_dtype(self.x),
             'Y': OpTest.np_dtype_to_base_dtype(self.y),
         }
-        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
+        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn}
         self.outputs = {'Out': self.out}

     def check_dygraph(self):
-        return not self.use_mkldnn and self.axis == -1
+        return not self.use_onednn and self.axis == -1

     def test_check_output(self):
         # TODO(wangzhongpu): support onednn op in dygraph mode

@@ -1023,7 +1023,7 @@ def test_float32_float16_add(self):

 class TestElementwiseAddOpAutoParallel(OpTest):
     def init_kernel_type(self):
-        self.use_mkldnn = False
+        self.use_onednn = False

     def setUp(self):
         self.op_type = "elementwise_add"

@@ -1042,11 +1042,11 @@ def setUp(self):
             'Y': OpTest.np_dtype_to_base_dtype(self.y),
         }

-        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
+        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn}
         self.outputs = {'Out': self.out}

     def check_dygraph(self):
-        return not self.use_mkldnn and self.axis == -1
+        return not self.use_onednn and self.axis == -1

     def test_check_grad(self):
         self.check_grad(
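
For elementwise_add the dygraph check carries one extra condition: the broadcast axis must be the default. A standalone illustration with hypothetical values:

# Illustrative: dygraph comparison runs only when oneDNN is off and no
# explicit broadcast axis was set (axis == -1 is the default).
use_onednn, axis = False, -1
check_dygraph = (not use_onednn) and (axis == -1)
assert check_dygraph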
