Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions paddle/common/flags.cc
Original file line number Diff line number Diff line change
Expand Up @@ -696,6 +696,16 @@ PHI_DEFINE_EXPORTED_bool(
*/
PHI_DEFINE_EXPORTED_bool(use_mkldnn, false, "Use MKLDNN to run");

/**
 * ONEDNN related FLAG
 * Name: use_onednn
 * Since Version: (TODO: fill in the release that introduces this flag)
 * Value Range: bool, default=false
 * Example: FLAGS_use_onednn=true would enable running with ONEDNN.
 * Note: Added alongside FLAGS_use_mkldnn; call sites check both flags
 *       (FLAGS_use_mkldnn || FLAGS_use_onednn) for backward compatibility.
 */
PHI_DEFINE_EXPORTED_bool(use_onednn, false, "Use ONEDNN to run");

/**
* Debug related FLAG
* Name: FLAGS_call_stack_level
Expand Down
7 changes: 5 additions & 2 deletions paddle/fluid/eager/to_static/run_program_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@
COMMON_DECLARE_bool(enable_pir_with_pt_in_dy2st);
COMMON_DECLARE_bool(enable_pir_in_executor);
COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
COMMON_DECLARE_bool(specialize_device_in_dy2st);
COMMON_DECLARE_bool(parameters_persistent_mode_in_dy2st);

Expand Down Expand Up @@ -673,7 +674,8 @@ std::vector<paddle::Tensor> RunProgramImpl(
}

#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) paddle::platform::DontClearONEDNNCache(place);
if (FLAGS_use_mkldnn || FLAGS_use_onednn)
paddle::platform::DontClearONEDNNCache(place);
#endif
return out;
}
Expand Down Expand Up @@ -1014,7 +1016,8 @@ void LegacyRunProgramImpl(
}

#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) paddle::platform::DontClearONEDNNCache(place);
if (FLAGS_use_mkldnn || FLAGS_use_onednn)
paddle::platform::DontClearONEDNNCache(place);
#endif
}

Expand Down
24 changes: 13 additions & 11 deletions paddle/fluid/framework/details/build_strategy.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ limitations under the License. */

PD_DECLARE_bool(convert_all_blocks);
COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
#ifdef PADDLE_WITH_CINN
PD_DECLARE_bool(use_cinn);
#endif
Expand Down Expand Up @@ -203,22 +204,23 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {

void AppendPassToSetMkldnnAttr(const std::string &pass_name) {
#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) {
if (FLAGS_use_mkldnn || FLAGS_use_onednn) {
AppendPass(pass_name);
} else if (!strategy_.onednn_enabled_op_types_.empty()) {
VLOG(1) << "mkldnn_enabled_op_types specify the operator type list to "
"use MKLDNN acceleration. It is null in default, means "
"that all the operators supported by MKLDNN will be "
VLOG(1) << "onednn_enabled_op_types specify the operator type list to "
"use ONEDNN acceleration. It is null in default, means "
"that all the operators supported by ONEDNN will be "
"accelerated. And it should not be set when "
"FLAGS_use_mkldnn=false.";
"FLAGS_use_onednn=false.";
}
#else
PADDLE_ENFORCE_NE(FLAGS_use_mkldnn,
true,
common::errors::PreconditionNotMet(
"FLAGS_use_mkldnn has been set to True, but "
"PaddlePaddle is compiled without MKLDNN. "
"Please compile PaddlePaddle with MKLDNN first."));
PADDLE_ENFORCE_NE(
FLAGS_use_mkldnn || FLAGS_use_onednn,
true,
common::errors::PreconditionNotMet(
"FLAGS_use_mkldnn or FLAGS_use_onednn has been set to True, but "
"PaddlePaddle is compiled without ONEDNN. "
"Please compile PaddlePaddle with ONEDNN first."));
#endif
}

Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/details/build_strategy.h
Original file line number Diff line number Diff line change
Expand Up @@ -123,11 +123,11 @@ struct BuildStrategy {
bool fuse_dot_product_attention_{false};
// Fuse ResUnit
bool fuse_resunit_{false};
// mkldnn_enabled_op_types specify the operator type list to
// onednn_enabled_op_types specify the operator type list to
// use OneDNN acceleration. It is null in default, means
// that all the operators supported by OneDNN will be
// accelerated. And it should not be set when
// FLAGS_use_mkldnn=false
// FLAGS_use_onednn=false
std::unordered_set<std::string> onednn_enabled_op_types_;

// By default, memory_optimize would be opened if gc is disabled, and
Expand Down
5 changes: 3 additions & 2 deletions paddle/fluid/framework/executor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ limitations under the License. */

COMMON_DECLARE_bool(benchmark);
COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);

namespace paddle::framework {
namespace {
Expand Down Expand Up @@ -184,7 +185,7 @@ void Executor::Run(const ProgramDesc& pdesc,
phi::RecordEvent record_run(
"Executor::Run", phi::TracerEventType::UserDefined, 1);
platform::RecordBlock b(block_id);
if (FLAGS_use_mkldnn) EnableONEDNN(pdesc);
if (FLAGS_use_mkldnn || FLAGS_use_onednn) EnableONEDNN(pdesc);
auto ctx = Prepare(pdesc, block_id, skip_ref_cnt_vars, force_disable_gc);
#ifdef PADDLE_WITH_DNNL
platform::AttachPointerHashToONEDNNKey(this, place_);
Expand Down Expand Up @@ -330,7 +331,7 @@ void Executor::Run(const ProgramDesc& program,
phi::RecordEvent record_run(
"Executor::Run", phi::TracerEventType::UserDefined, 1);
platform::RecordBlock b(kProgramId);
if (FLAGS_use_mkldnn) EnableONEDNN(program);
if (FLAGS_use_mkldnn || FLAGS_use_onednn) EnableONEDNN(program);
#ifdef PADDLE_WITH_DNNL
platform::AttachPointerHashToONEDNNKey(this, place_);
#endif
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@
#endif

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
COMMON_DECLARE_bool(check_nan_inf);
COMMON_DECLARE_string(static_runtime_data_save_path);
COMMON_DECLARE_bool(save_static_runtime_data);
Expand Down Expand Up @@ -344,7 +345,7 @@ void CreateAllOps(const framework::BlockDesc& block,
op_base->SetRuntimeAttributeMap(op_runtime_attr_map);

#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) {
if (FLAGS_use_mkldnn || FLAGS_use_onednn) {
if (op->HasAttr("use_mkldnn")) {
VLOG(4) << "Set use_mkldnn=True for " << op_base->Type();
op_base->SetAttr("use_mkldnn", true);
Expand Down
5 changes: 3 additions & 2 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2329,10 +2329,11 @@ void OperatorWithKernel::ChooseKernel(const ExecutionContext& ctx) const {
auto kernel_iter = kernels.find(expected_kernel_key);

#ifdef PADDLE_WITH_DNNL
// workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
// workaround for missing ONEDNN kernel when FLAGS_use_mkldnn or
// FLAGS_use_onednn env var is set
if (kernel_iter == kernels.end() &&
expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
VLOG(3) << "missing ONEDNN kernel: fallbacking to PLAIN one";
expected_kernel_key.library_type_ = LibraryType::kPlain;
expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
kernel_iter = kernels.find(expected_kernel_key);
Expand Down
7 changes: 5 additions & 2 deletions paddle/fluid/imperative/layer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
#endif

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
namespace paddle::imperative {

using framework::Variable;
Expand Down Expand Up @@ -228,7 +229,8 @@ void VarBase::ClearGradient(bool set_to_zero) {
auto* grad_t = grad_var_->MutableVar()->GetMutable<phi::SelectedRows>();
if (grad_t->mutable_value()->IsInitialized()) {
#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) platform::ClearONEDNNCache(grad_t->place());
if (FLAGS_use_mkldnn || FLAGS_use_onednn)
platform::ClearONEDNNCache(grad_t->place());
#endif
grad_t->mutable_rows()->clear();
grad_t->mutable_value()->clear();
Expand All @@ -246,7 +248,8 @@ void VarBase::ClearGradient(bool set_to_zero) {
grad_t->clear();
}
#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) platform::ClearONEDNNCache(grad_t->place());
if (FLAGS_use_mkldnn || FLAGS_use_onednn)
platform::ClearONEDNNCache(grad_t->place());
#endif
}
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/prepared_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ PreparedOp PrepareImpl(
// OneDNN variant of code reads attributes in some of GetKernelTypeForVar and
// GetKernelType functions, so we need to copy the attributes there.
// Const qualifier of Attrs had to be discarded to overwrite it.
if (FLAGS_use_mkldnn) {
if (FLAGS_use_mkldnn || FLAGS_use_onednn) {
auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
mutable_op_attrs = default_attrs;
for (auto& attr : attrs) {
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/imperative/prepared_operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
#include "paddle/phi/core/vocab/string_array.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);

namespace paddle {
namespace imperative {
Expand Down
5 changes: 3 additions & 2 deletions paddle/fluid/imperative/tracer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
#include "paddle/utils/string/string_helper.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
COMMON_DECLARE_string(tracer_onednn_ops_on);
COMMON_DECLARE_string(tracer_onednn_ops_off);
COMMON_DECLARE_bool(use_stride_kernel);
Expand Down Expand Up @@ -239,9 +240,9 @@ void Tracer::TraceOpImpl(const std::string& type,
type, phi::TracerEventType::Operator, 1);
platform::ScopedFlushDenormal flush;
VLOG(4) << "Trace Op: " << type;
if (FLAGS_use_mkldnn) {
if (FLAGS_use_mkldnn || FLAGS_use_onednn) {
// if both lists are empty all ops are enabled (default for
// FLAGS_use_mkldnn=1)
// FLAGS_use_onednn=1)
// if ops_on list is not empty only ops from that list are enabled
if (!FLAGS_tracer_onednn_ops_on.empty()) {
auto is_on = FLAGS_tracer_onednn_ops_on.find(type) != std::string::npos;
Expand Down
3 changes: 2 additions & 1 deletion paddle/fluid/operators/activation_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ limitations under the License. */
#include "paddle/phi/infermeta/backward.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);

namespace paddle::operators {

Expand All @@ -53,7 +54,7 @@ class ActivationGradOpMaker : public framework::SingleGradOpMaker<T> {

if ((static_cast<int>(kDepValue) &
static_cast<int>(ActBwdOpFwdDeps::kDepX)) ||
FLAGS_use_mkldnn ||
(FLAGS_use_mkldnn || FLAGS_use_onednn) ||
(op->HasAttr("use_mkldnn") &&
PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")))) {
op->SetInput("X", this->Input("X")); // x
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ limitations under the License. */
#include "paddle/common/flags.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
namespace paddle {
namespace framework {
class OpDesc;
Expand Down Expand Up @@ -87,12 +88,12 @@ class ConditionalBlockInferOp : public ConditionalOp {
auto &pdesc = *block->Program();
exec_.reset(new framework::Executor(dev_place));
#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) exec_->EnableONEDNN(pdesc);
if (FLAGS_use_mkldnn || FLAGS_use_onednn) exec_->EnableONEDNN(pdesc);
#endif
ctx_ = exec_->Prepare(
pdesc, block->ID(), std::vector<std::string>(), false);
#ifdef PADDLE_WITH_DNNL
if (FLAGS_use_mkldnn) {
if (FLAGS_use_mkldnn || FLAGS_use_onednn) {
platform::AttachPointerHashToONEDNNKey(exec_.get(), dev_place);
platform::RegisterModelLayout(ctx_->ops_, dev_place);
}
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/operators/controlflow/conditional_block_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ limitations under the License. */
#endif

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);

namespace paddle::operators {

Expand Down
6 changes: 4 additions & 2 deletions paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@
#include "paddle/fluid/pir/dialect/operator/trait/onednn.h"
#include "paddle/phi/core/framework/framework.pb.h"
COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
#endif

COMMON_DECLARE_bool(print_ir);
Expand Down Expand Up @@ -3621,10 +3622,11 @@ void ProcessBlock(
kernel_key.set_backend(phi::Backend::ONEDNN);
kernel_key.set_layout(phi::DataLayout::ONEDNN);
}
} else if (FLAGS_use_mkldnn && kernel_key.backend() == phi::Backend::CPU &&
} else if ((FLAGS_use_mkldnn || FLAGS_use_onednn) &&
kernel_key.backend() == phi::Backend::CPU &&
!op_item->HasTrait<OneDNNTrait>() &&
SupportsONEDNN(kernel_name, kernel_key.dtype())) {
// Support FLAGS_use_mkldnn
// Support FLAGS_use_mkldnn || FLAGS_use_onednn
auto op_item_inner = PdOp2OneDNNOp(op_item, block, ctx);
if (op_item_inner != op_item) {
op_item = op_item_inner;
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/pybind/compiled_program.cc
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,7 @@
#include "pybind11/stl.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);

// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(phi::TensorArray);
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/pybind/place.cc
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,7 @@ limitations under the License. */
#include "pybind11/stl.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);

// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(phi::TensorArray);
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/pybind/pybind.cc
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,7 @@ limitations under the License. */
#include "paddle/fluid/eager/accumulation/accumulation_node.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
COMMON_DECLARE_string(prim_backward_blacklist);

// disable auto conversion to list in Python
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/pybind/tensor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,7 @@ limitations under the License. */
#include "pybind11/stl.h"

COMMON_DECLARE_bool(use_mkldnn);
COMMON_DECLARE_bool(use_onednn);
COMMON_DECLARE_bool(use_shm_cache);

// disable auto conversion to list in Python
Expand Down
12 changes: 6 additions & 6 deletions test/legacy_test/test_expand_v2_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -780,19 +780,19 @@ def init_place(self):
self.place = core.CPUPlace()

def test_check_output(self):
flags_use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
paddle.set_flags({'FLAGS_use_mkldnn': True})
flags_use_onednn = core.globals()["FLAGS_use_onednn"]
paddle.set_flags({'FLAGS_use_onednn': True})
self.check_output_with_place(
self.place,
check_dygraph=False,
check_pir=False,
check_pir_onednn=True,
)
paddle.set_flags({'FLAGS_use_mkldnn': flags_use_mkldnn})
paddle.set_flags({'FLAGS_use_onednn': flags_use_onednn})

def test_check_grad(self):
flags_use_mkldnn = core.globals()["FLAGS_use_mkldnn"]
paddle.set_flags({'FLAGS_use_mkldnn': True})
flags_use_onednn = core.globals()["FLAGS_use_onednn"]
paddle.set_flags({'FLAGS_use_onednn': True})
self.check_grad_with_place(
self.place,
["X"],
Expand All @@ -801,7 +801,7 @@ def test_check_grad(self):
check_pir=False,
check_pir_onednn=True,
)
paddle.set_flags({'FLAGS_use_mkldnn': flags_use_mkldnn})
paddle.set_flags({'FLAGS_use_onednn': flags_use_onednn})


class TestExpandV2ZeroSizeOneDNNOp1(TestExpandV2ZeroSizeOneDNNOp):
Expand Down