
Commit d3624ac

Merge remote-tracking branch 'origin/develop' into to_tensor
2 parents a6455dc + 31d9f04 commit d3624ac


121 files changed (+1816 / -663 lines)


ci/check_approval.sh

Lines changed: 6 additions & 0 deletions
@@ -309,6 +309,12 @@ if [ "${ALL_PADDLE_ENFORCE}" != "" ] && [ "${PR_ID}" != "" ]; then
   check_approval 1 luotao1 zhangbo9674 phlrain
 fi
 
+CHINESE_CHECK=$(git diff -U0 upstream/$BRANCH |grep "^+" |grep -P '[\p{Han}]')
+if [ "${CHINESE_CHECK}" != "" ] && [ "${PR_ID}" != "" ]; then
+  echo_line="Not recommended to use Chinese. You must have one RD (tianshuo78520a or swgu98 or zhangbo9674 or risemeup1) approval."
+  check_approval 1 tianshuo78520a swgu98 zhangbo9674 risemeup1
+fi
+
 ALL_ADDED_LINES=$(git diff -U0 upstream/$BRANCH |grep "^+" || true)
 ALL_PADDLE_CHECK=$(echo $ALL_ADDED_LINES |grep -zoE "(PADDLE_ENFORCE[A-Z_]{0,9}|PADDLE_THROW)\(.[^,\);]*.[^;]*\);\s" || true)
 VALID_PADDLE_CHECK=$(echo "$ALL_PADDLE_CHECK" | grep -zoE '(PADDLE_ENFORCE[A-Z_]{0,9}|PADDLE_THROW)\(([^,;]+,)*[^";]*errors::.[^"]*".[^";]{20,}.[^;]*\);\s' || true)
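
Note on the new check: it greps every line added by the PR (git diff -U0 ... | grep "^+") for characters in the Unicode Han script and, when any are found, requires one approval from the listed reviewers. As a rough illustration only (not part of the commit), the C++ sketch below performs the same test, assuming UTF-8 input and covering just the basic CJK Unified Ideographs block (U+4E00..U+9FFF), which is a subset of what grep -P '[\p{Han}]' matches.

#include <cstdint>
#include <string>

// Returns true if the UTF-8 string contains a code point in the basic
// CJK Unified Ideographs block. grep -P '[\p{Han}]' also matches the
// extension blocks; this sketch keeps only the common case.
bool ContainsHan(const std::string& utf8) {
  size_t i = 0;
  while (i < utf8.size()) {
    unsigned char lead = static_cast<unsigned char>(utf8[i]);
    uint32_t cp = 0;
    size_t len = 1;
    if (lead < 0x80) {                 // 1-byte ASCII
      cp = lead;
    } else if ((lead >> 5) == 0x06) {  // 2-byte sequence
      cp = lead & 0x1F;
      len = 2;
    } else if ((lead >> 4) == 0x0E) {  // 3-byte sequence (covers U+4E00..U+9FFF)
      cp = lead & 0x0F;
      len = 3;
    } else if ((lead >> 3) == 0x1E) {  // 4-byte sequence
      cp = lead & 0x07;
      len = 4;
    } else {                           // invalid lead byte: skip it
      ++i;
      continue;
    }
    if (i + len > utf8.size()) break;  // truncated sequence at end of input
    for (size_t k = 1; k < len; ++k) {
      cp = (cp << 6) | (static_cast<unsigned char>(utf8[i + k]) & 0x3F);
    }
    if (cp >= 0x4E00 && cp <= 0x9FFF) return true;
    i += len;
  }
  return false;
}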

cmake/cinn.cmake

Lines changed: 19 additions & 6 deletions
@@ -26,12 +26,21 @@ else()
 endif()
 
 if(NOT DEFINED ENV{runtime_include_dir})
-  message(
-    STATUS
-      "set runtime_include_dir: ${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
-  set(ENV{runtime_include_dir} "${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
-  add_definitions(
-    -DRUNTIME_INCLUDE_DIR="${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
+  if(WITH_GPU)
+    message(
+      STATUS
+        "set runtime_include_dir: ${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
+    set(ENV{runtime_include_dir} "${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
+    add_definitions(
+      -DRUNTIME_INCLUDE_DIR="${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
+  elseif(WITH_ROCM)
+    message(
+      STATUS
+        "set runtime_include_dir: ${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/hip")
+    set(ENV{runtime_include_dir} "${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/hip")
+    add_definitions(
+      -DRUNTIME_INCLUDE_DIR="${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/hip")
+  endif()
 endif()
 
 if(WITH_TESTING)
@@ -118,6 +127,10 @@ if(WITH_ROCM)
     add_definitions(-DCINN_WITH_HIP)
   endif()
   link_libraries(${ROCM_HIPRTC_LIB})
+
+  message(
+    STATUS "copy paddle/cinn/common/float16.h to $ENV{runtime_include_dir}")
+  file(COPY paddle/cinn/common/float16.h DESTINATION $ENV{runtime_include_dir})
 endif()
 
 set(cinnapi_src CACHE INTERNAL "" FORCE)

paddle/cinn/hlir/dialect/operator/transforms/pd_to_cinn_pass.cc

Lines changed: 1 addition & 3 deletions
@@ -742,9 +742,7 @@ class SplitOpPattern : public pir::OpRewritePattern<paddle::dialect::SplitOp> {
   using pir::OpRewritePattern<paddle::dialect::SplitOp>::OpRewritePattern;
 
   bool Match(paddle::dialect::SplitOp op) const override {
-    const bool is_denied = CompatibleInfo::IsDeniedForCinn(*op.operation());
-
-    return !is_denied && PatternConstraint(op);
+    return PatternConstraint(op);
   }
 
   void Rewrite(paddle::dialect::SplitOp op,

paddle/cinn/runtime/cuda/cinn_cuda_runtime_source.cuh

Lines changed: 4 additions & 1 deletion
@@ -338,7 +338,7 @@ extern "C" {
 
 __device__ inline int FN_INT32(pow)(int a, int b) {
   if (a == 0 && b < 0) {
-    return -1;
+    return 0;
   }
   float res = pow(__int2float_rd(a), __int2float_rd(b));
   return __float2int_rn(res);
@@ -418,6 +418,9 @@ __device__ inline long long int FN_INT64(exp)(long long int a) {
 
 __device__ inline long long int FN_INT64(pow)(long long int a,
                                               long long int b) {
+  if (a == 0 && b < 0) {
+    return 0;
+  }
   double res = pow(__ll2double_rd(a), __ll2double_rd(b));
   return __double2ll_rn(res);
 }
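
Note: both integer pow helpers now short-circuit the a == 0 && b < 0 case. Before this change FN_INT32(pow) returned -1 here, and FN_INT64(pow) had no guard at all, so its result came from converting pow(0.0, negative exponent) back to an integer. A host-side C++ sketch of the guarded behavior (an illustration, not the kernel code itself):

#include <cmath>
#include <cstdint>

// Guarded integer pow: 0 raised to a negative power has no finite value,
// so return 0 instead of round-tripping an infinity through floating point.
int64_t guarded_int_pow(int64_t a, int64_t b) {
  if (a == 0 && b < 0) {
    return 0;  // mirrors the new early return in FN_INT32(pow) / FN_INT64(pow)
  }
  double res = std::pow(static_cast<double>(a), static_cast<double>(b));
  return static_cast<int64_t>(std::llround(res));  // round to nearest, analogous to __double2ll_rn
}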

paddle/fluid/framework/details/build_strategy.cc

Lines changed: 1 addition & 1 deletion
@@ -321,7 +321,7 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph,
         continue;
       }
     } else if (pass->Type() == "onednn_placement_pass") {
-      pass->Set("mkldnn_enabled_op_types",
+      pass->Set("onednn_enabled_op_types",
                 new std::unordered_set<std::string>(onednn_enabled_op_types_));
     }
     VLOG(1) << "Start Apply Pass " << pass->Type();

paddle/fluid/framework/ir/onednn/onednn_placement_pass.cc

Lines changed: 3 additions & 3 deletions
@@ -64,7 +64,7 @@ inline bool FoundPhiOneDNNKernelWithCorrectDataType(
   return false;
 }
 
-bool MKLDNNPlacementPass::IsSupport(const Node* op) const {
+bool ONEDNNPlacementPass::IsSupport(const Node* op) const {
   if (FoundOneDNNKernelWithCorrectDataType(op) ||
       FoundPhiOneDNNKernelWithCorrectDataType(op)) {
     // For interpolate ops, there's a little difference between Paddle and
@@ -89,8 +89,8 @@ bool MKLDNNPlacementPass::IsSupport(const Node* op) const {
 
 }  // namespace paddle::framework::ir
 
-REGISTER_PASS(onednn_placement_pass, paddle::framework::ir::MKLDNNPlacementPass)
-    .RequirePassAttr("mkldnn_enabled_op_types");
+REGISTER_PASS(onednn_placement_pass, paddle::framework::ir::ONEDNNPlacementPass)
+    .RequirePassAttr("onednn_enabled_op_types");
 
 REGISTER_PASS_CAPABILITY(onednn_placement_pass)
     .AddCombination(

paddle/fluid/framework/ir/onednn/onednn_placement_pass.h

Lines changed: 3 additions & 3 deletions
@@ -26,17 +26,17 @@ namespace ir {
 /*
  * Specifies which operators should use MKLDNN.
  */
-class MKLDNNPlacementPass : public PlacementPassBase {
+class ONEDNNPlacementPass : public PlacementPassBase {
  protected:
   bool IsSupport(const Node* op) const override;
 
  private:
-  const std::string GetPlacementName() const override { return "MKLDNN"; }
+  const std::string GetPlacementName() const override { return "ONEDNN"; }
 
   const std::string GetAttrName() const override { return "use_mkldnn"; }
 
   const std::unordered_set<std::string> GetOpTypesList() const override {
-    return Get<std::unordered_set<std::string>>("mkldnn_enabled_op_types");
+    return Get<std::unordered_set<std::string>>("onednn_enabled_op_types");
   }
 };
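
Note on the renaming: the placement pass reads its op-type whitelist through a string key, so the Set("onednn_enabled_op_types", ...) calls in build_strategy.cc and ir_pass_manager.cc and the Get<...>("onednn_enabled_op_types") call in GetOpTypesList() above have to switch keys in the same commit. A simplified, hypothetical attribute store (not Paddle's Pass class) illustrating why a mismatched key fails at runtime:

#include <any>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>

// Minimal stand-in for a string-keyed pass attribute map.
class AttrStoreSketch {
 public:
  template <typename T>
  void Set(const std::string& key, T value) {
    attrs_[key] = std::move(value);
  }

  template <typename T>
  const T& Get(const std::string& key) const {
    auto it = attrs_.find(key);
    if (it == attrs_.end()) {
      // This is what an out-of-sync rename would hit: the producer stored
      // the value under one key and the consumer asks for another.
      throw std::runtime_error("attribute not found: " + key);
    }
    return std::any_cast<const T&>(it->second);
  }

 private:
  std::unordered_map<std::string, std::any> attrs_;
};

int main() {
  AttrStoreSketch pass;
  pass.Set("onednn_enabled_op_types",
           std::unordered_set<std::string>{"conv2d", "relu"});
  // Asking for the old key "mkldnn_enabled_op_types" would throw, which is
  // why the Set and Get sides are updated together in this commit.
  auto ops =
      pass.Get<std::unordered_set<std::string>>("onednn_enabled_op_types");
  return ops.count("conv2d") ? 0 : 1;
}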

paddle/fluid/framework/ir/onednn/onednn_placement_pass_tester.cc

Lines changed: 10 additions & 8 deletions
@@ -133,7 +133,7 @@ class PlacementPassTest {
 
     auto pass = PassRegistry::Instance().Get("onednn_placement_pass");
 
-    pass->Set("mkldnn_enabled_op_types",
+    pass->Set("onednn_enabled_op_types",
               new std::unordered_set<std::string>(onednn_enabled_op_types));
 
     graph.reset(pass->Apply(graph.release()));
@@ -143,8 +143,10 @@
     for (auto* node : graph->Nodes()) {
       if (node->IsOp()) {
         auto* op = node->Op();
-        if (op->HasAttr("use_mkldnn") &&
-            PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) {
+        if ((op->HasAttr("use_mkldnn") &&
+             PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) ||
+            (op->HasAttr("use_onednn") &&
+             PADDLE_GET_CONST(bool, op->GetAttr("use_onednn")))) {
           ++use_onednn_true_count;
         }
       }
@@ -156,27 +158,27 @@
   void PlacementNameTest() {
     auto pass = PassRegistry::Instance().Get("onednn_placement_pass");
     EXPECT_EQ(static_cast<PlacementPassBase*>(pass.get())->GetPlacementName(),
-              "MKLDNN");
+              "ONEDNN");
   }
 };
 
-TEST(MKLDNNPlacementPass, enable_conv_relu) {
+TEST(ONEDNNPlacementPass, enable_conv_relu) {
   // 2 conv (1 conv is always true) + 2 relu (1 relu is always true) + 0 pool
   PlacementPassTest().MainTest({"conv2d", "relu"}, 4);
 }
 
-TEST(MKLDNNPlacementPass, enable_relu_pool) {
+TEST(ONEDNNPlacementPass, enable_relu_pool) {
   // 1 conv (1 conv is always true) + 2 relu (1 relu is always true) + 1 pool
   PlacementPassTest().MainTest({"relu", "pool2d"}, 4);
 }
 
-TEST(MKLDNNPlacementPass, enable_all) {
+TEST(ONEDNNPlacementPass, enable_all) {
   // 2 conv (1 conv is always true) + 2 relu (1 relu is always true) + 1 pool +
   // 1 concat
   PlacementPassTest().MainTest({}, 6);
 }
 
-TEST(MKLDNNPlacementPass, placement_name) {
+TEST(ONEDNNPlacementPass, placement_name) {
   PlacementPassTest().PlacementNameTest();
 }

paddle/fluid/inference/analysis/argument.h

Lines changed: 6 additions & 6 deletions
@@ -193,12 +193,12 @@ struct Argument {
   // whether to mute all logs in inference.
   DECL_ARGUMENT_FIELD(disable_logs, DisableLogs, bool);
 
-  // Pass a set of op types to enable its mkldnn kernel
-  DECL_ARGUMENT_FIELD(mkldnn_enabled_op_types,
-                      MKLDNNEnabledOpTypes,
+  // Pass a set of op types to enable its onednn kernel
+  DECL_ARGUMENT_FIELD(onednn_enabled_op_types,
+                      ONEDNNEnabledOpTypes,
                       std::unordered_set<std::string>);
-  // The cache capacity of different input shapes for mkldnn.
-  DECL_ARGUMENT_FIELD(mkldnn_cache_capacity, MkldnnCacheCapacity, int);
+  // The cache capacity of different input shapes for onednn.
+  DECL_ARGUMENT_FIELD(mkldnn_cache_capacity, OnednnCacheCapacity, int);
 
 #ifdef PADDLE_WITH_DNNL
   // A set of op types to enable their quantized kernels
@@ -219,7 +219,7 @@
                       Bfloat16EnabledOpTypes,
                       std::unordered_set<std::string>);
 
-  DECL_ARGUMENT_FIELD(use_onednn_int8, UseMkldnnInt8, bool);
+  DECL_ARGUMENT_FIELD(use_onednn_int8, UseOnednnInt8, bool);
 #endif
 
   // Passed from config.
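
Note: DECL_ARGUMENT_FIELD is a macro, and this hunk only changes the names fed to it. The sketch below is a hypothetical analogue (not the real macro from argument.h), written to show how the field name and the CamelCase name can drive a generated accessor and setter, which is why ir_pass_manager.cc can now call argument->onednn_enabled_op_types().

#include <string>
#include <unordered_set>
#include <utility>

// Hypothetical stand-in for DECL_ARGUMENT_FIELD: the first argument names
// the stored field and its getter, the second names the generated setter.
#define DECL_FIELD_SKETCH(field__, Accessor__, type__)           \
 public:                                                         \
  const type__& field__() const { return field__##_; }           \
  void Set##Accessor__(type__ v) { field__##_ = std::move(v); }  \
 private:                                                        \
  type__ field__##_;

struct ArgumentSketch {
  DECL_FIELD_SKETCH(onednn_enabled_op_types,
                    ONEDNNEnabledOpTypes,
                    std::unordered_set<std::string>)
};

int main() {
  ArgumentSketch arg;
  arg.SetONEDNNEnabledOpTypes({"conv2d", "relu"});
  // Renaming the field renames the getter that the analysis passes call.
  return arg.onednn_enabled_op_types().count("conv2d") ? 0 : 1;
}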

paddle/fluid/inference/analysis/ir_pass_manager.cc

Lines changed: 2 additions & 2 deletions
@@ -131,9 +131,9 @@ void IRPassManager::CreatePasses(Argument *argument,
       pass->Set("optim_cache_dir", new std::string(std::move(optim_cache_dir)));
       pass_num++;
     } else if (pass_name == "onednn_placement_pass") {
-      pass->Set("mkldnn_enabled_op_types",
+      pass->Set("onednn_enabled_op_types",
                 new std::unordered_set<std::string>(
-                    argument->mkldnn_enabled_op_types()));
+                    argument->onednn_enabled_op_types()));
     } else if (pass_name == "cudnn_placement_pass") {
       pass->Set("cudnn_enabled_op_types",
                 new std::unordered_set<std::string>());
