
Commit 14f196b: "Fixed CI issues"
Parent: 7e5ec77

6 files changed: +62 -32 lines

paddle/fluid/eager/auto_code_generator/eager_generator.cc

Lines changed: 14 additions & 22 deletions
@@ -577,11 +577,6 @@ static std::string GenerateGradNodeCreationContent(
   // If single output slotname and not duplicable,
   // then generate: "egr::AutogradMeta* p_autograd_out =
   // egr::EagerUtils::autograd_meta("op_proto->outputs()[0].name()")"
-
-  // TODO(zhanlve): in case of multiple slotname but none of which are
-  // duplicable,
-  // avoid constructing vector<AutogradMeta*>, generate seperate
-  // AutogradMeta* objects respectively.
   std::string get_autograd_meta_str = " // Prepare Autograd Meta \n";
   for (const proto::OpProto::Var& input : op_proto.inputs()) {
     const std::string& input_name = input.name();
@@ -607,11 +602,6 @@ static std::string GenerateGradNodeCreationContent(
   // If single output slotname and not duplicable,
   // then generate: "egr::AutogradMeta* p_autograd_out =
   // egr::EagerUtils::autograd_meta("op_proto.outputs()[0].name()")"
-
-  // TODO(zhanlve): in case of multiple slotname but none of which are
-  // duplicable,
-  // avoid constructing vector<AutogradMeta*>, generate seperate
-  // AutogradMeta* objects respectively.
   for (const proto::OpProto::Var& output : op_proto.outputs()) {
     const std::string& output_name = output.name();
     const std::string& output_autograd_name = "p_autograd_" + output_name;
@@ -725,9 +715,9 @@ static std::string GenerateGradNodeCreationContent(
   // [Generation] GradNode Creation
   const char* GRAD_NODE_CREATION_TEMPLATE =
       " %s"
-      " bool require_any_grad = egr::ComputeRequireGrad(%s);\n"
+      " bool require_any_grad = egr::EagerUtils::ComputeRequireGrad(%s);\n"
      " if(require_any_grad) {\n"
-      "   egr::PassStopGradient(%s);\n"
+      "   egr::EagerUtils::PassStopGradient(%s);\n"
      "%s\n }";
   std::string grad_node_creation_body_str = paddle::string::Sprintf(
       GRAD_NODE_CREATION_TEMPLATE, prepare_autograd_meta_str,
@@ -793,7 +783,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
         Controller.Instance().GetExpectedPlace(), {});

     // According to fwd_outputs_names
-    std::vector<egr::EagerTensor> Out0 = GetOutputs(outs["Out0"]);
+    std::vector<egr::EagerTensor> Out0 = GGetOutputetOutputs(outs["Out0"]);
     egr::EagerTensor Out1 = GetOutputs(outs["Out1"][0]);
     std::vector<egr::EagerTensor> Out2 = GetOutputs(outs["Out2"]);

@@ -830,7 +820,8 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
       input_args_str_list[input_position] =
           paddle::string::Sprintf(FWD_INS_ARG_TEMPLATE, input_name);
     }
-    const char* FWD_INS_CONTENT_TEMPLATE = "{ \"%s\", egr::SyncToVars(%s) },";
+    const char* FWD_INS_CONTENT_TEMPLATE =
+        "{ \"%s\", egr::EagerUtils::SyncToVars(%s) },";
     ins_contents_str += paddle::string::Sprintf(FWD_INS_CONTENT_TEMPLATE,
                                                 input_name, input_name);
   }
@@ -925,14 +916,14 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
     if (output.duplicable()) {
       const char* FWD_OUT_TENSORS_TEMPLATE =
           " std::vector<egr::EagerTensor> %s = "
-          "egr::GetOutputs(outs[\"%s\"]);\n";
+          "egr::EagerUtils::GetOutputs(outs[\"%s\"]);\n";
       out_tensor_str = paddle::string::Sprintf(FWD_OUT_TENSORS_TEMPLATE,
                                                output_name, output_name);
       return_types[return_position] = "std::vector<egr::EagerTensor>";
     } else {
       const char* FWD_OUT_TENSOR_TEMPLATE =
           " egr::EagerTensor %s = "
-          "egr::GetOutput(outs[\"%s\"][0]);\n";
+          "egr::EagerUtils::GetOutput(outs[\"%s\"][0]);\n";
       out_tensor_str = paddle::string::Sprintf(FWD_OUT_TENSOR_TEMPLATE,
                                                output_name, output_name);
       return_types[return_position] = "egr::EagerTensor";
@@ -1093,7 +1084,8 @@ static std::string GenerateGradNodeCCContents(
           grad_ins_fwd_slotname_map.at(grad_input_name) + "_";
       const char* GRAD_INS_FWD_CONTENT_TEMPLATE =
           "{ \"%s\", "
-          "egr::SyncToVars(egr::EagerUtils::RecoverTensorWrapper(&this->%s, "
+          "egr::EagerUtils::SyncToVars(egr::EagerUtils::RecoverTensorWrapper(&"
+          "this->%s, "
           "nullptr)) },";
       ins_contents_str +=
           paddle::string::Sprintf(GRAD_INS_FWD_CONTENT_TEMPLATE,
@@ -1104,7 +1096,7 @@ static std::string GenerateGradNodeCCContents(
       size_t fwd_output_position = fwd_outputs_name_pos_map.at(
           grad_ins_grad_slotname_map.at(grad_input_name));
       const char* GRAD_INS_GRAD_CONTENT_TEMPLATE =
-          "{ \"%s\", egr::SyncToVars(grads[%d]) },";
+          "{ \"%s\", egr::EagerUtils::SyncToVars(grads[%d]) },";
       ins_contents_str += paddle::string::Sprintf(
           GRAD_INS_GRAD_CONTENT_TEMPLATE, grad_input_name, fwd_output_position);

@@ -1206,7 +1198,7 @@ static std::string GenerateGradNodeCCContents(
         fwd_inputs_name_pos_map.at(grad_outs_slotname_map.at(grad_out_name));

     const char* BWD_OUTPUT_TEMPLATE =
-        " outputs[%d] = GetOutputs(outs[\"%s\"]);\n";
+        " outputs[%d] = egr::EagerUtils::GetOutputs(outs[\"%s\"]);\n";
     outputs_str += paddle::string::Sprintf(BWD_OUTPUT_TEMPLATE,
                                            fwd_input_position, grad_out_name);
   }
@@ -1526,6 +1518,9 @@ static void DygraphCodeGeneration(const std::string& output_dir) {
   GenerateForwardHFile(output_dir, dygraph_forward_api_str);
 }

+}  // namespace framework
+}  // namespace paddle
+
 int main(int argc, char* argv[]) {
   if (argc != 2) {
     std::cerr << "argc must be 2" << std::endl;
@@ -1537,6 +1532,3 @@ int main(int argc, char* argv[]) {

   return 0;
 }
-
-}  // namespace framework
-}  // namespace paddle
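
Note on the pattern: the recurring edit in this file qualifies helper calls, since ComputeRequireGrad, PassStopGradient, SyncToVars, GetOutput, and GetOutputs are now static members of egr::EagerUtils, so every string template the generator stitches together must emit the fully qualified names or the generated .cc files fail to compile in CI. The namespace change at the bottom fixes a related link error: the closing braces for paddle::framework previously sat after main(), so main() was not in the global namespace where the linker looks for the program entry point. A rough sketch of what the updated templates expand to for a one-input, one-output op (the names X and Out and the elided argument lists are placeholders, not taken from the commit):

    // Sketch only: approximate shape of the emitted forward code.
    std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>> ins =
        {{"X", egr::EagerUtils::SyncToVars(X)}};  // was egr::SyncToVars(X)
    // ... run the op, filling `outs` ...
    bool require_any_grad =
        egr::EagerUtils::ComputeRequireGrad(/* trace_backward, metas... */);
    if (require_any_grad) {
      egr::EagerUtils::PassStopGradient(/* flags, metas... */);
    }
    egr::EagerTensor Out =
        egr::EagerUtils::GetOutput(outs["Out"][0]);  // was egr::GetOutput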

paddle/fluid/eager/legacy/prepared_operator.cc

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
 #include "paddle/fluid/framework/pten_utils.h"
 #include "paddle/utils/small_vector.h"
 #ifdef PADDLE_WITH_XPU
-#include "paddle/fluid/platform/xpu/xpu_op_list.h"
+#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
 #endif
 DECLARE_bool(check_nan_inf);
 DECLARE_bool(run_pten_kernel);

paddle/fluid/eager/tests/task_tests/generated_test.cc

Lines changed: 8 additions & 9 deletions
@@ -19,6 +19,7 @@
 #include "gtest/gtest.h"

 #include "paddle/fluid/eager/api/all.h"
+#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/backward.h"
 #include "paddle/fluid/eager/utils.h"
@@ -29,23 +30,19 @@
 #include "paddle/fluid/eager/api/generated/fluid_generated/dygraph_forward_api.h"
 #include "paddle/pten/core/kernel_registry.h"

-PT_DECLARE_MODULE(CreationCPU);
-
-#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-PT_DECLARE_MODULE(CreationCUDA);
-#endif
-
 // TODO(jiabin): remove nolint here!!!
 using namespace egr;  // NOLINT

+namespace eager_test {
+
 TEST(Generated, Sigmoid) {
   // Prepare Device Contexts
   InitEnv(paddle::platform::CPUPlace());
   VLOG(6) << "Init Env";
   // 1. Prepare Input
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
   VLOG(6) << "Make Dim";
-  egr::EagerTensor tensor = EagerUtils::CreateTensorWithValue(
+  egr::EagerTensor tensor = CreateTensorWithValue(
       ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 0.0, true);
   VLOG(6) << "Make EagerTensor";
@@ -72,13 +69,13 @@ TEST(Generated, Matmul_v2) {

   // 1. Prepare Input
   paddle::framework::DDim ddimX = paddle::framework::make_ddim({4, 16});
-  egr::EagerTensor X = EagerUtils::CreateTensorWithValue(
+  egr::EagerTensor X = CreateTensorWithValue(
       ddimX, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 3.0, true);
   RetainGradForTensor(X);

   paddle::framework::DDim ddimY = paddle::framework::make_ddim({16, 20});
-  egr::EagerTensor Y = EagerUtils::CreateTensorWithValue(
+  egr::EagerTensor Y = CreateTensorWithValue(
       ddimY, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
       pten::DataLayout::NCHW, 2.0, true);
   RetainGradForTensor(Y);
@@ -94,3 +91,5 @@ TEST(Generated, Matmul_v2) {
   CompareGradVariableWithValue<float>(X, 2.0 * 20);
   CompareGradVariableWithValue<float>(Y, 3.0 * 4);
 }
+
+}  // namespace eager_test
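
The test fixes are mechanical: CreateTensorWithValue is no longer reached as EagerUtils::CreateTensorWithValue but as a free helper brought in by the new tensor_utils.h include, and the tests are wrapped in an eager_test namespace. Judging only from the six-argument call sites above, the helper's declaration presumably looks like this sketch (parameter names are guesses, reconstructed from usage rather than copied from the header):

    // Assumed shape of the helper declared in tensor_utils.h.
    egr::EagerTensor CreateTensorWithValue(const paddle::framework::DDim& ddim,
                                           const paddle::platform::Place& place,
                                           const pten::DataType& dtype,
                                           const pten::DataLayout& layout,
                                           float value, bool is_leaf);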

paddle/fluid/eager/utils.cc

Lines changed: 16 additions & 0 deletions
@@ -14,6 +14,7 @@

 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/eager/api/utils/global_utils.h"
+#include "paddle/fluid/eager/tensor_wrapper.h"

 #include "paddle/pten/api/all.h"
 #include "paddle/pten/common/layout.h"
@@ -188,4 +189,19 @@ egr::EagerTensor EagerUtils::GetOutput(
   return EagerTensor((*(out.get())));
 }

+EagerTensor EagerUtils::RecoverTensorWrapper(
+    TensorWrapper* tw, const std::shared_ptr<GradNodeBase>& grad_node) {
+  return tw->recover(grad_node);
+}
+
+std::vector<EagerTensor> EagerUtils::RecoverTensorWrapper(
+    std::vector<TensorWrapper>* tw,
+    const std::shared_ptr<GradNodeBase>& grad_node) {
+  std::vector<EagerTensor> ret;
+  for (auto& t : *tw) {
+    ret.emplace_back(t.recover(grad_node));
+  }
+  return ret;
+}
+
 }  // namespace egr
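
The new overloads are thin shims over TensorWrapper::recover: the scalar form turns one wrapper saved at forward time back into an EagerTensor, and the vector form maps recover over a whole slot of wrappers. This is exactly the shape the generated backward code above relies on; schematically, inside a generated grad node (the member name X_ stands in for whatever the %s placeholder gets filled with):

    // Sketch: how generated GradNode code consumes the new overload.
    egr::EagerTensor X =
        egr::EagerUtils::RecoverTensorWrapper(&this->X_, /*grad_node=*/nullptr);
    auto x_vars = egr::EagerUtils::SyncToVars(X);  // hand the tensor to the op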

paddle/fluid/eager/utils.h

Lines changed: 9 additions & 0 deletions
@@ -22,6 +22,8 @@

 namespace egr {

+class TensorWrapper;
+
 /**
  * EagerUtils is utils used to do some static conversion or autograd
  * members access, this class is desinged to be a full static functional
@@ -131,6 +133,13 @@ class EagerUtils {
     iter.apply(std::forward<Args>(args)...);
   }

+  // TensorWrapper Utils
+  static egr::EagerTensor RecoverTensorWrapper(
+      egr::TensorWrapper* tw, const std::shared_ptr<GradNodeBase>& grad_node);
+  static std::vector<egr::EagerTensor> RecoverTensorWrapper(
+      std::vector<egr::TensorWrapper>* tw,
+      const std::shared_ptr<GradNodeBase>& grad_node);
+
   // Intermidate needed remove this once we don't need legacy
   static std::vector<std::shared_ptr<egr::EagerTensor>> SyncToVars(
       const egr::EagerTensor& tensor);
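
A forward declaration of TensorWrapper suffices here because the new member declarations mention only TensorWrapper* and std::vector<egr::TensorWrapper> and never touch the type's members; the full definition is needed only in utils.cc, which is why that file gains the tensor_wrapper.h include. Keeping the header light this way avoids pulling tensor_wrapper.h into every translation unit that uses EagerUtils, and sidesteps a circular include if tensor_wrapper.h itself depends on utils.h. In miniature:

    // Generic illustration of the idiom (not Paddle code): declarations may
    // use an incomplete type; only the defining .cc needs the full class.
    class Wrapped;                       // forward declaration
    struct Utils {
      static int Recover(Wrapped* w);    // fine: only a pointer appears
    };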

paddle/fluid/framework/details/nan_inf_utils.h

Lines changed: 14 additions & 0 deletions
@@ -17,6 +17,7 @@
 #include <string>
 #include <vector>

+#include "paddle/fluid/eager/legacy/type_def.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/type_defs.h"
@@ -53,6 +54,19 @@ void CheckOpHasNanOrInfInDygraph(const std::string& op_type,
   }
 }

+template <typename TensorType>
+static void CheckOpHasNanOrInfInEager(const std::string& op_type,
+                                      const egr::NameMap<TensorType>& op_outs,
+                                      platform::Place place) {
+  for (const auto& pair : op_outs) {
+    for (const auto& tensor : pair.second) {
+      auto* var = tensor->MutableVar();
+      if (var == nullptr) continue;
+      CheckVarHasNanOrInf(op_type, tensor->name(), var, place);
+    }
+  }
+}
+
 #ifdef PADDLE_WITH_ASCEND_CL
 void NPUAllocAndClearFloatStatus(const framework::OperatorBase& op,
                                  const framework::ScopeBase& scope,
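
CheckOpHasNanOrInfInEager mirrors the dygraph checker directly above it, but iterates an egr::NameMap<TensorType> (the eager-mode output map type supplied by the new type_def.h include); the null-var guard skips output slots the op never produced. The commit does not show the call site in the eager run path, so the following is only a hedged sketch, with op_type, outs, and place assumed to be in scope and the namespace qualification assumed:

    // Sketch: guarding an eager op's outputs behind the existing
    // check_nan_inf flag (call-site names are assumptions).
    if (FLAGS_check_nan_inf) {
      paddle::framework::details::CheckOpHasNanOrInfInEager<egr::EagerTensor>(
          op_type, outs, place);
    }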
