Merged
2 changes: 1 addition & 1 deletion paddle/fluid/framework/infershape_utils.cc
@@ -556,7 +556,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,

// 2. build infermeta context
CompatInferMetaContext infer_meta_context(
- {ctx->IsRuntime(), ctx->IsRunMKLDNNKernel()});
+ {ctx->IsRuntime(), ctx->IsRunONEDNNKernel()});

const auto& input_names = signature.input_names;
const auto& attr_names = signature.attr_names;
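
For context, the two flags packed into this constructor drive compile-time vs. run-time branches in downstream InferMeta functions. A minimal standalone sketch of that pattern, with hypothetical names (not Paddle's real phi::MetaConfig):

    #include <iostream>

    // Hypothetical stand-in for the {IsRuntime(), IsRunONEDNNKernel()} pair above.
    struct MetaConfig {
      bool is_runtime;
      bool is_run_onednn_kernel;
    };

    // An InferMeta-style routine can branch on both flags.
    void InferMetaExample(const MetaConfig& cfg) {
      if (!cfg.is_runtime) {
        std::cout << "compile time: skip checks that need concrete shapes\n";
      }
      if (cfg.is_run_onednn_kernel) {
        std::cout << "oneDNN kernel: channels are read from axis 1\n";
      }
    }

    int main() {
      InferMetaExample({/*is_runtime=*/true, /*is_run_onednn_kernel=*/true});
    }
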
4 changes: 2 additions & 2 deletions paddle/fluid/framework/op_desc.cc
@@ -301,7 +301,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {

bool IsRuntime() const override;

- bool IsRunMKLDNNKernel() const override;
+ bool IsRunONEDNNKernel() const override;

proto::VarType::Type GetInputVarType(const std::string &name) const override {
return GetVarType(Inputs(name).at(0));
@@ -1349,7 +1349,7 @@ void CompileTimeInferShapeContext::SetRepeatedDims(

bool CompileTimeInferShapeContext::IsRuntime() const { return false; }

- bool CompileTimeInferShapeContext::IsRunMKLDNNKernel() const { return false; }
+ bool CompileTimeInferShapeContext::IsRunONEDNNKernel() const { return false; }

proto::VarType::Type CompileTimeInferShapeContext::GetVarType(
const std::string &name) const {
22 changes: 11 additions & 11 deletions paddle/fluid/framework/operator.cc
@@ -526,7 +526,7 @@ void RuntimeInferShapeContext::SetLoDLevel(const std::string& out,

bool RuntimeInferShapeContext::IsRuntime() const { return true; }

- bool RuntimeInferShapeContext::IsRunMKLDNNKernel() const {
+ bool RuntimeInferShapeContext::IsRunONEDNNKernel() const {
try {
auto& op_with_kernel = dynamic_cast<const OperatorWithKernel&>(op_);
return ((op_with_kernel.kernel_type()) &&
@@ -1441,7 +1441,7 @@ bool OperatorWithKernel::SupportCustomDevice() const {
#endif
}

- bool OperatorWithKernel::SupportsMKLDNN(const phi::DataType data_type) const {
+ bool OperatorWithKernel::SupportsONEDNN(const phi::DataType data_type) const {
auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
phi::TransToPhiKernelName(type_));
auto has_phi_kernel =
@@ -1578,7 +1578,7 @@ bool OperatorWithKernel::SupportsKernelType(
// 3. Whether onednn kernel can be used.
#ifdef PADDLE_WITH_DNNL
if (!this->DnnFallback() && !paddle::platform::in_onednn_white_list(type_) &&
- this->CanMKLDNNBeUsed(exe_ctx, kernel_type.data_type_)) {
+ this->CanONEDNNBeUsed(exe_ctx, kernel_type.data_type_)) {
auto tmp_kernel_type = kernel_type;
tmp_kernel_type.library_type_ = framework::LibraryType::kMKLDNN;
tmp_kernel_type.data_layout_ = framework::DataLayout::ONEDNN;
@@ -1597,15 +1597,15 @@
return kernel_iter != kernels.end();
}

- bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
+ bool OperatorWithKernel::CanONEDNNBeUsed(const framework::ExecutionContext& ctx,
phi::DataType data_type) const {
return ctx.HasAttr("use_mkldnn") && ctx.Attr<bool>("use_mkldnn") &&
- phi::is_cpu_place(ctx.GetPlace()) && this->SupportsMKLDNN(data_type);
+ phi::is_cpu_place(ctx.GetPlace()) && this->SupportsONEDNN(data_type);
}

- bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
+ bool OperatorWithKernel::CanONEDNNBeUsed(const framework::ExecutionContext& ctx,
proto::VarType::Type data_type) const {
- return this->CanMKLDNNBeUsed(ctx, phi::TransToPhiDataType(data_type));
+ return this->CanONEDNNBeUsed(ctx, phi::TransToPhiDataType(data_type));
}

bool OperatorWithKernel::CanCUDNNBeUsed(const framework::ExecutionContext& ctx,
@@ -1853,14 +1853,14 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
#ifdef PADDLE_WITH_DNNL
if (!this->DnnFallback() &&
!paddle::platform::in_onednn_white_list(type_) &&
- this->CanMKLDNNBeUsed(exe_ctx, kernel_type_->data_type_)) {
+ this->CanONEDNNBeUsed(exe_ctx, kernel_type_->data_type_)) {
kernel_type_->library_type_ = framework::LibraryType::kMKLDNN;
kernel_type_->data_layout_ = framework::DataLayout::ONEDNN;
} else if (phi::is_cpu_place(kernel_type_->place_) &&
kernel_type_->data_type_ ==
proto::VarType::Type::VarType_Type_BF16 &&
!this->SupportsCPUBF16() &&
- this->SupportsMKLDNN(phi::DataType::BFLOAT16)) {
+ this->SupportsONEDNN(phi::DataType::BFLOAT16)) {
kernel_type_->library_type_ = framework::LibraryType::kMKLDNN;
kernel_type_->data_layout_ = framework::DataLayout::ONEDNN;
}
@@ -2172,14 +2172,14 @@ OpKernelType OperatorWithKernel::InnerGetExpectedKernelType(
// 3. Whether onednn kernel can be used.
#ifdef PADDLE_WITH_DNNL
if (!this->DnnFallback() && !paddle::platform::in_onednn_white_list(type_) &&
- this->CanMKLDNNBeUsed(ctx, expected_kernel_key.data_type_)) {
+ this->CanONEDNNBeUsed(ctx, expected_kernel_key.data_type_)) {
expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
expected_kernel_key.data_layout_ = framework::DataLayout::ONEDNN;
} else if (phi::is_cpu_place(expected_kernel_key.place_) &&
expected_kernel_key.data_type_ ==
proto::VarType::Type::VarType_Type_BF16 &&
!this->SupportsCPUBF16() &&
- this->SupportsMKLDNN(phi::DataType::BFLOAT16)) {
+ this->SupportsONEDNN(phi::DataType::BFLOAT16)) {
expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
expected_kernel_key.data_layout_ = framework::DataLayout::ONEDNN;
}
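
Taken together, the renamed helpers implement a simple gate: oneDNN is chosen only when the op opts in via the use_mkldnn attribute, runs on CPU, and has a matching kernel; separately, a CPU BF16 op with no native CPU BF16 kernel falls back to oneDNN. A compilable sketch of those two predicates under hypothetical types (not Paddle's actual API):

    enum class DataType { FLOAT32, BFLOAT16 };

    struct ExecutionContext {
      bool has_use_mkldnn_attr;  // ctx.HasAttr("use_mkldnn")
      bool use_mkldnn;           // ctx.Attr<bool>("use_mkldnn")
      bool is_cpu_place;         // phi::is_cpu_place(ctx.GetPlace())
    };

    // Stub: whether a oneDNN kernel is registered for this dtype.
    bool SupportsONEDNN(DataType) { return true; }

    // Mirrors CanONEDNNBeUsed in the hunk above.
    bool CanONEDNNBeUsed(const ExecutionContext& ctx, DataType dt) {
      return ctx.has_use_mkldnn_attr && ctx.use_mkldnn && ctx.is_cpu_place &&
             SupportsONEDNN(dt);
    }

    // Mirrors the BF16 branch in RunImpl/InnerGetExpectedKernelType:
    // CPU + BF16 + no native CPU BF16 kernel + oneDNN BF16 available.
    bool FallBackToONEDNNForBF16(bool is_cpu_place, DataType dt,
                                 bool supports_cpu_bf16) {
      return is_cpu_place && dt == DataType::BFLOAT16 && !supports_cpu_bf16 &&
             SupportsONEDNN(DataType::BFLOAT16);
    }
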
8 changes: 4 additions & 4 deletions paddle/fluid/framework/operator.h
@@ -199,7 +199,7 @@ class RuntimeInferShapeContext : public InferShapeContext {

bool IsRuntime() const override;

- bool IsRunMKLDNNKernel() const override;
+ bool IsRunONEDNNKernel() const override;

// TODO(paddle-dev): Can this be template?
paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
@@ -768,7 +768,7 @@ class OperatorWithKernel : public OperatorBase {

bool SupportCustomDevice() const override;

- bool SupportsMKLDNN(phi::DataType data_type) const;
+ bool SupportsONEDNN(phi::DataType data_type) const;

bool SupportsCUDNN(phi::DataType data_type) const;

@@ -777,10 +777,10 @@

bool SupportsCPUBF16() const;

- bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
+ bool CanONEDNNBeUsed(const framework::ExecutionContext& ctx,
phi::DataType data_type) const;

- bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
+ bool CanONEDNNBeUsed(const framework::ExecutionContext& ctx,
proto::VarType::Type data_type) const;

bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx,
2 changes: 1 addition & 1 deletion paddle/fluid/framework/shape_inference.h
@@ -114,7 +114,7 @@ class InferShapeContext {

virtual bool IsRuntime() const = 0;

- virtual bool IsRunMKLDNNKernel() const = 0;
+ virtual bool IsRunONEDNNKernel() const = 0;

virtual paddle::small_vector<InferShapeVarPtr, phi::kInputSmallVectorSize>
GetInputVarPtrs(const std::string &name) const = 0;
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/infer_shape_context.h
@@ -250,7 +250,7 @@ class DygraphInferShapeContext : public framework::InferShapeContext {

bool IsRuntime() const override { return true; }

- bool IsRunMKLDNNKernel() const override {
+ bool IsRunONEDNNKernel() const override {
return (op_kernel_key_ &&
(op_kernel_key_->layout() == phi::DataLayout::ONEDNN));
}
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/prepared_operator.cc
@@ -206,7 +206,7 @@ PreparedOp PrepareImpl(
// 3. Whether onednn kernel can be used.
#ifdef PADDLE_WITH_DNNL
if (!op.DnnFallback() && !paddle::platform::in_onednn_white_list(op.Type()) &&
- op.CanMKLDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.dtype())) {
+ op.CanONEDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.dtype())) {
expected_kernel_key.set_backend(phi::Backend::ONEDNN);
expected_kernel_key.set_layout(phi::DataLayout::ONEDNN);
}
8 changes: 4 additions & 4 deletions paddle/fluid/operators/batch_norm_op.cc
@@ -107,10 +107,10 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
"= [%s], the dimension of input X = [%d]",
x_dims,
x_dims.size()));
- VLOG(4) << ctx->IsRunMKLDNNKernel();
+ VLOG(4) << ctx->IsRunONEDNNKernel();
VLOG(4) << data_layout;
const int64_t C =
- ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
+ ((ctx->IsRunONEDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
? x_dims[1]
: x_dims[x_dims.size() - 1]);

@@ -370,7 +370,7 @@ void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
common::StringToDataLayout(ctx->Attrs().Get<std::string>("data_layout"));

const int C = static_cast<int>(
- ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
+ ((ctx->IsRunONEDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
? x_dims[1]
: x_dims[x_dims.size() - 1]));

@@ -511,7 +511,7 @@ void BatchNormDoubleGradOp::InferShape(
const DataLayout data_layout =
common::StringToDataLayout(ctx->Attrs().Get<std::string>("data_layout"));
const int C = static_cast<int>(
- ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
+ ((ctx->IsRunONEDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
? x_dims[1]
: x_dims[x_dims.size() - 1]));

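
All three InferShape bodies pick the channel count the same way: a oneDNN run keeps the logical NCHW order, so channels come from axis 1; only a non-oneDNN NHWC run reads the last axis. The selection in isolation, as a hypothetical helper with the same logic:

    #include <cstdint>
    #include <vector>

    enum class DataLayout { kNCHW, kNHWC };

    // oneDNN kernels always see logical NCHW, so the channel count comes
    // from axis 1; otherwise it depends on the declared data layout.
    std::int64_t ChannelDim(const std::vector<std::int64_t>& x_dims,
                            DataLayout layout, bool is_run_onednn_kernel) {
      return (is_run_onednn_kernel || layout == DataLayout::kNCHW)
                 ? x_dims[1]
                 : x_dims[x_dims.size() - 1];
    }

    // ChannelDim({8, 3, 32, 32}, DataLayout::kNCHW, false) == 3
    // ChannelDim({8, 32, 32, 3}, DataLayout::kNHWC, false) == 3
    // ChannelDim({8, 3, 32, 32}, DataLayout::kNHWC, true)  == 3  (oneDNN wins)
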
2 changes: 1 addition & 1 deletion paddle/fluid/operators/elementwise/elementwise_op.h
@@ -113,7 +113,7 @@ class ElementwiseOp : public framework::OperatorWithKernel {
// Broadcasting of dims has to be done on Paddle shapes (NHWC)
// if model is using NHWC and any of shapes in at least 3D
bool should_rotate =
- ctx->IsRunMKLDNNKernel() &&
+ ctx->IsRunONEDNNKernel() &&
(phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
phi::DataLayout::kNHWC) &&
(x_dims.size() >= 3 || y_dims.size() >= 3);
@@ -324,7 +324,7 @@ phi::KernelKey GetPad3dExpectedKernelType(
auto input_data_type = op_ptr->IndicateVarDataType(ctx, "X");
#ifdef PADDLE_WITH_DNNL
// only constant mode and non-blocked layouts are supported for oneDNN
- if (op_ptr->CanMKLDNNBeUsed(ctx, input_data_type) &&
+ if (op_ptr->CanONEDNNBeUsed(ctx, input_data_type) &&
ctx.Attr<std::string>("mode") == "constant" &&
ctx.Input<phi::DenseTensor>("X")->mem_desc().get_inner_nblks() == 0) {
return phi::KernelKey(phi::Backend::ONEDNN,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/matmul_op.cc
@@ -82,7 +82,7 @@ class MatMulOp : public framework::OperatorWithKernel {
// For NHWC execution output shape needs to be
// computed like instead x*y we are to do y*x
bool channelwise_onednn =
- context->IsRunMKLDNNKernel() &&
+ context->IsRunONEDNNKernel() &&
(phi::OneDNNContext::tls().get_cur_paddle_data_layout() ==
phi::DataLayout::kNHWC);
if (channelwise_onednn) {
4 changes: 2 additions & 2 deletions paddle/fluid/operators/slice_op.cc
@@ -157,7 +157,7 @@ class SliceOp : public framework::OperatorWithKernel {
auto vec_dims = common::vectorize(in_tensor.dims());
bool all_zero_dims = std::all_of(
vec_dims.cbegin(), vec_dims.cend(), [](int64_t i) { return i == 0; });
- if (!all_zero_dims && this->CanMKLDNNBeUsed(ctx, input_data_type)) {
+ if (!all_zero_dims && this->CanONEDNNBeUsed(ctx, input_data_type)) {
// OneDNN uses blocking format, which cannot be always supported with
// reorders, because if blocked dimension is not divisible by 8 or
// 16(depending on which blocking format is used) submemory cannot be
@@ -333,7 +333,7 @@ class SliceOpGrad : public framework::OperatorWithKernel {
ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_DNNL
- if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
+ if (this->CanONEDNNBeUsed(ctx, input_data_type)) {
// OneDNN uses blocking format, which cannot be always supported with
// reorders, because if blocked dimension is not divisible by 8 or
// 16(depending on which blocking format is used) submemory cannot be
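
The recurring comment is the whole reason for this fallback: oneDNN may store a tensor in a blocked layout (e.g. 8- or 16-wide channel blocks), and a submemory view over a blocked dimension only exists when the dimension divides the block width. Expressed as a check (illustrative only; the real decision lives inside oneDNN):

    #include <cstdint>

    // True when a dimension blocked at block_width can be expressed as
    // a oneDNN submemory without a reorder.
    bool BlockedDimSupportsSubmemory(std::int64_t dim, std::int64_t block_width) {
      return dim % block_width == 0;
    }

    // A slice leaving 20 channels under 16-wide blocking cannot be viewed:
    // BlockedDimSupportsSubmemory(20, 16) == false  -> fall back to plain CPU
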
2 changes: 1 addition & 1 deletion paddle/fluid/operators/split_op.cc
@@ -118,7 +118,7 @@ class SplitOp : public framework::OperatorWithKernel {
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_DNNL
- if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
+ if (this->CanONEDNNBeUsed(ctx, input_data_type)) {
// OneDNN uses blocking format, which cannot be always supported with
// reorders, because if blocked dimension is not divisible by 8 or
// 16(depending on which blocking format is used) submemory cannot be
8 changes: 4 additions & 4 deletions paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc
@@ -1057,7 +1057,7 @@ std::string GetKernelName(const OpYamlInfoParser* op_info_parser,
}

#ifdef PADDLE_WITH_DNNL
- bool SupportsMKLDNN(const std::string& kernel_name,
+ bool SupportsONEDNN(const std::string& kernel_name,
const phi::DataType data_type) {
auto phi_kernels =
phi::KernelFactory::Instance().SelectKernelMap(kernel_name);
@@ -1441,7 +1441,7 @@ phi::KernelKey GetKernelKey(
elems.erase("");

if (op->HasTrait<OneDNNTrait>() && res.backend() == phi::Backend::CPU &&
- SupportsMKLDNN(kernel_fn_str, res.dtype()) &&
+ SupportsONEDNN(kernel_fn_str, res.dtype()) &&
elems.count(op->name().substr(
strlen(OneDNNOperatorDialect::name()) + 1,
op->name().size() - strlen(OneDNNOperatorDialect::name()) - 1)) ==
@@ -3613,7 +3613,7 @@ void ProcessBlock(
if (kernel_key.dtype() == phi::DataType::BFLOAT16 &&
kernel_key.backend() == phi::Backend::CPU &&
!op_item->HasTrait<OneDNNTrait>() && !SupportsCPUBF16(kernel_name) &&
- SupportsMKLDNN(kernel_name, phi::DataType::BFLOAT16)) {
+ SupportsONEDNN(kernel_name, phi::DataType::BFLOAT16)) {
auto op_item_inner = PdOp2OneDNNOp(op_item, block, ctx);
if (op_item_inner != op_item) {
op_item = op_item_inner;
@@ -3623,7 +3623,7 @@
}
} else if (FLAGS_use_mkldnn && kernel_key.backend() == phi::Backend::CPU &&
!op_item->HasTrait<OneDNNTrait>() &&
- SupportsMKLDNN(kernel_name, kernel_key.dtype())) {
+ SupportsONEDNN(kernel_name, kernel_key.dtype())) {
// Support FLAGS_use_mkldnn
auto op_item_inner = PdOp2OneDNNOp(op_item, block, ctx);
if (op_item_inner != op_item) {
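
In the PIR pass this free function plays the same role as OperatorWithKernel::SupportsONEDNN: look up every kernel registered under a name and ask whether any entry targets the ONEDNN backend. The lookup pattern as a sketch (hypothetical registry type; phi::KernelFactory's kernel map is richer):

    #include <map>
    #include <string>

    enum class Backend { CPU, GPU, ONEDNN };
    struct KernelKey { Backend backend; };  // dtype omitted for brevity

    // Scan all kernels registered under kernel_name for a ONEDNN entry.
    bool SupportsONEDNNSketch(
        const std::multimap<std::string, KernelKey>& registry,
        const std::string& kernel_name) {
      auto range = registry.equal_range(kernel_name);
      for (auto it = range.first; it != range.second; ++it) {
        if (it->second.backend == Backend::ONEDNN) return true;
      }
      return false;
    }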