Skip to content

Commit 976dd60

Browse files
committed
Fix: rename device-context parameter `context` to `dev_ctx` (and member `context_` to `dev_ctx_`) in tree2col and math_function for naming consistency
1 parent 29d0104 commit 976dd60

File tree

4 files changed

+48
-48
lines changed

4 files changed

+48
-48
lines changed

paddle/phi/kernels/funcs/math/tree2col.cc

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ void Tree2ColUtil::construct_tree(const phi::DenseTensor &EdgeSet,
8686
template <typename T>
8787
class Tree2ColFunctor<phi::CPUContext, T> {
8888
public:
89-
void operator()(const phi::CPUContext &context,
89+
void operator()(const phi::CPUContext &dev_ctx,
9090
const phi::DenseTensor &EdgeSet,
9191
const phi::DenseTensor &node_features,
9292
phi::DenseTensor *patch,
@@ -110,8 +110,8 @@ class Tree2ColFunctor<phi::CPUContext, T> {
110110

111111
patch->Resize({static_cast<int64_t>(patch_size),
112112
static_cast<int64_t>(patch_elem_size)});
113-
T *patch_data = context.template Alloc<T>(patch);
114-
constant(context, patch, 0);
113+
T *patch_data = dev_ctx.template Alloc<T>(patch);
114+
constant(dev_ctx, patch, 0);
115115
const T *features = node_features.data<T>();
116116

117117
for (auto &patch_item : processing_list) {
@@ -138,7 +138,7 @@ class Tree2ColFunctor<phi::CPUContext, T> {
138138
template <typename T>
139139
class Col2TreeFunctor<phi::CPUContext, T> {
140140
public:
141-
void operator()(const phi::CPUContext &context,
141+
void operator()(const phi::CPUContext &dev_ctx,
142142
const phi::DenseTensor &EdgeSet,
143143
const phi::DenseTensor &out_grad,
144144
phi::DenseTensor *in_grad,
@@ -167,9 +167,9 @@ class Col2TreeFunctor<phi::CPUContext, T> {
167167
}
168168
in_grad->Resize({static_cast<int64_t>(node_count),
169169
static_cast<int64_t>(grad_elem_size)});
170-
T *grad_data = context.template Alloc<T>(in_grad);
170+
T *grad_data = dev_ctx.template Alloc<T>(in_grad);
171171

172-
constant(context, in_grad, 0);
172+
constant(dev_ctx, in_grad, 0);
173173
const T *out_g = out_grad.data<T>();
174174
for (auto &patch_item : grad_list) {
175175
size_t pointer_base = grad_count * grad_elem_size;

paddle/phi/kernels/funcs/math/tree2col.cu

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -52,20 +52,20 @@ __global__ void tree2col(const T* eta,
5252
template <typename T>
5353
class Tree2ColFunctor<phi::GPUContext, T> {
5454
public:
55-
void operator()(const phi::GPUContext& context,
55+
void operator()(const phi::GPUContext& dev_ctx,
5656
const phi::DenseTensor& EdgeSet,
5757
const phi::DenseTensor& node_features,
5858
phi::DenseTensor* patch,
5959
int max_depth) {
6060
std::vector<std::vector<int>> tr;
61-
auto gpu_place = context.GetPlace();
61+
auto gpu_place = dev_ctx.GetPlace();
6262
auto cpu_place = phi::CPUPlace();
63-
auto stream = context.stream();
63+
auto stream = dev_ctx.stream();
6464
auto feature_dims = node_features.dims();
6565
phi::funcs::SetConstant<phi::GPUContext, T> constant;
6666

6767
phi::DenseTensor EdgeSet_cpu;
68-
phi::Copy(context, EdgeSet, cpu_place, false, &EdgeSet_cpu);
68+
phi::Copy(dev_ctx, EdgeSet, cpu_place, false, &EdgeSet_cpu);
6969
int64_t feature_size = feature_dims[1];
7070
size_t patch_elem_size = 3 * static_cast<size_t>(feature_size);
7171
size_t node_count = 0, patch_count = 0, total_size = 0;
@@ -84,11 +84,11 @@ class Tree2ColFunctor<phi::GPUContext, T> {
8484
size_t patch_size = processing_list.size();
8585
phi::DenseTensor node_cpu, node_gpu, eta_cpu, eta_gpu, index_cpu, index_gpu;
8686
node_cpu.Resize({static_cast<int64_t>(total_size)});
87-
int* node = context.template Alloc<int>(&node_cpu);
87+
int* node = dev_ctx.template Alloc<int>(&node_cpu);
8888
eta_cpu.Resize({static_cast<int64_t>(total_size * 3)});
89-
T* eta = context.template Alloc<T>(&eta_cpu);
89+
T* eta = dev_ctx.template Alloc<T>(&eta_cpu);
9090
index_cpu.Resize({static_cast<int64_t>(patch_size * 2)});
91-
int* index = context.template Alloc<int>(&index_cpu);
91+
int* index = dev_ctx.template Alloc<int>(&index_cpu);
9292

9393
int idx = 0, index_idx = 0;
9494
for (auto& tmp : processing_list) {
@@ -102,9 +102,9 @@ class Tree2ColFunctor<phi::GPUContext, T> {
102102
}
103103
index[index_idx++] = idx;
104104
}
105-
phi::Copy(context, node_cpu, gpu_place, false, &node_gpu);
106-
phi::Copy(context, eta_cpu, gpu_place, false, &eta_gpu);
107-
phi::Copy(context, index_cpu, gpu_place, false, &index_gpu);
105+
phi::Copy(dev_ctx, node_cpu, gpu_place, false, &node_gpu);
106+
phi::Copy(dev_ctx, eta_cpu, gpu_place, false, &eta_gpu);
107+
phi::Copy(dev_ctx, index_cpu, gpu_place, false, &index_gpu);
108108

109109
int elem_size = patch_size * feature_size;
110110
int blocks = (elem_size + 1024 - 1) / 1024;
@@ -115,8 +115,8 @@ class Tree2ColFunctor<phi::GPUContext, T> {
115115

116116
patch->Resize({static_cast<int64_t>(max_size),
117117
static_cast<int64_t>(patch_elem_size)});
118-
context.template Alloc<T>(patch);
119-
constant(context, patch, 0);
118+
dev_ctx.template Alloc<T>(patch);
119+
constant(dev_ctx, patch, 0);
120120
tree2col<T><<<grid, threads, 0, stream>>>(eta_gpu.data<T>(),
121121
node_gpu.data<int>(),
122122
index_gpu.data<int>(),
@@ -129,20 +129,20 @@ class Tree2ColFunctor<phi::GPUContext, T> {
129129
template <typename T>
130130
class Col2TreeFunctor<phi::GPUContext, T> {
131131
public:
132-
void operator()(const phi::GPUContext& context,
132+
void operator()(const phi::GPUContext& dev_ctx,
133133
const phi::DenseTensor& EdgeSet,
134134
const phi::DenseTensor& patch_grad,
135135
phi::DenseTensor* embedding_grad,
136136
int max_depth) {
137137
std::vector<std::vector<int>> tr;
138-
auto gpu_place = context.GetPlace();
138+
auto gpu_place = dev_ctx.GetPlace();
139139
auto cpu_place = phi::CPUPlace();
140-
auto stream = context.stream();
140+
auto stream = dev_ctx.stream();
141141
auto output_dims = patch_grad.dims();
142142
phi::funcs::SetConstant<phi::GPUContext, T> constant;
143143

144144
phi::DenseTensor EdgeSet_cpu;
145-
phi::Copy(context, EdgeSet, cpu_place, false, &EdgeSet_cpu);
145+
phi::Copy(dev_ctx, EdgeSet, cpu_place, false, &EdgeSet_cpu);
146146
int64_t output_size = output_dims[1];
147147
size_t patch_elem_size = 3 * static_cast<size_t>(output_size);
148148
size_t node_count = 0, patch_count = 0;
@@ -169,11 +169,11 @@ class Col2TreeFunctor<phi::GPUContext, T> {
169169

170170
phi::DenseTensor node_cpu, node_gpu, eta_cpu, eta_gpu, index_cpu, index_gpu;
171171
node_cpu.Resize({static_cast<int64_t>(total_size)});
172-
int* node = context.template Alloc<int>(&node_cpu);
172+
int* node = dev_ctx.template Alloc<int>(&node_cpu);
173173
eta_cpu.Resize({static_cast<int64_t>(total_size * 3)});
174-
T* eta = context.template Alloc<T>(&eta_cpu);
174+
T* eta = dev_ctx.template Alloc<T>(&eta_cpu);
175175
index_cpu.Resize({static_cast<int64_t>(grad_size * 2)});
176-
int* index = context.template Alloc<int>(&index_cpu);
176+
int* index = dev_ctx.template Alloc<int>(&index_cpu);
177177

178178
size_t idx = 0, index_idx = 0;
179179
for (auto& tmp : grad_list) {
@@ -187,9 +187,9 @@ class Col2TreeFunctor<phi::GPUContext, T> {
187187
}
188188
index[index_idx++] = idx;
189189
}
190-
phi::Copy(context, node_cpu, gpu_place, false, &node_gpu);
191-
phi::Copy(context, eta_cpu, gpu_place, false, &eta_gpu);
192-
phi::Copy(context, index_cpu, gpu_place, false, &index_gpu);
190+
phi::Copy(dev_ctx, node_cpu, gpu_place, false, &node_gpu);
191+
phi::Copy(dev_ctx, eta_cpu, gpu_place, false, &eta_gpu);
192+
phi::Copy(dev_ctx, index_cpu, gpu_place, false, &index_gpu);
193193

194194
int elem_size = output_size * grad_size;
195195
int blocks = (elem_size + 1024 - 1) / 1024;
@@ -200,9 +200,9 @@ class Col2TreeFunctor<phi::GPUContext, T> {
200200

201201
embedding_grad->Resize({static_cast<int64_t>(max_size),
202202
static_cast<int64_t>(patch_elem_size)});
203-
context.template Alloc<T>(embedding_grad);
203+
dev_ctx.template Alloc<T>(embedding_grad);
204204

205-
constant(context, embedding_grad, 0);
205+
constant(dev_ctx, embedding_grad, 0);
206206
tree2col<T><<<grid, threads, 0, stream>>>(eta_gpu.data<T>(),
207207
node_gpu.data<int>(),
208208
index_gpu.data<int>(),

paddle/phi/kernels/funcs/math/tree2col.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ class Tree2ColUtil {
7272
template <typename DeviceContext, typename T>
7373
class Tree2ColFunctor {
7474
public:
75-
void operator()(const DeviceContext &context,
75+
void operator()(const DeviceContext &dev_ctx,
7676
const phi::DenseTensor &EdgeSet,
7777
const phi::DenseTensor &node_features,
7878
phi::DenseTensor *patch,
@@ -81,7 +81,7 @@ class Tree2ColFunctor {
8181
template <typename DeviceContext, typename T>
8282
class Col2TreeFunctor {
8383
public:
84-
void operator()(const DeviceContext &context,
84+
void operator()(const DeviceContext &dev_ctx,
8585
const phi::DenseTensor &EdgeSet,
8686
const phi::DenseTensor &out_grad,
8787
phi::DenseTensor *in_grad,

paddle/phi/kernels/funcs/math_function.cc

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ DEFINE_CPU_TRANS(6);
105105

106106
template <typename DeviceContext, typename T>
107107
void TransposeNormal<DeviceContext, T>::operator()(
108-
const DeviceContext& context UNUSED,
108+
const DeviceContext& dev_ctx UNUSED,
109109
const phi::DenseTensor& in,
110110
phi::DenseTensor* out,
111111
const std::vector<int>& axis) {
@@ -163,7 +163,7 @@ struct TensorSetConstantCPU {
163163
};
164164

165165
template <>
166-
void set_constant_with_place<phi::XPUPlace>(const phi::DeviceContext& context,
166+
void set_constant_with_place<phi::XPUPlace>(const phi::DeviceContext& dev_ctx,
167167
phi::DenseTensor* tensor,
168168
float value) {
169169
#ifdef PADDLE_WITH_XPU
@@ -176,15 +176,15 @@ void set_constant_with_place<phi::XPUPlace>(const phi::DeviceContext& context,
176176
}
177177

178178
template <>
179-
void set_constant_with_place<phi::IPUPlace>(const phi::DeviceContext& context,
179+
void set_constant_with_place<phi::IPUPlace>(const phi::DeviceContext& dev_ctx,
180180
phi::DenseTensor* tensor,
181181
float value) {
182182
PADDLE_THROW(common::errors::Unimplemented("IPUPlace is not supported"));
183183
}
184184

185185
template <>
186186
void set_constant_with_place<phi::CustomPlace>(
187-
const phi::DeviceContext& context, phi::DenseTensor* tensor, float value) {
187+
const phi::DeviceContext& dev_ctx, phi::DenseTensor* tensor, float value) {
188188
#ifdef PADDLE_WITH_CUSTOM_DEVICE
189189
auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
190190
"full",
@@ -198,7 +198,7 @@ void set_constant_with_place<phi::CustomPlace>(
198198
DataType,
199199
phi::DenseTensor*);
200200
auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
201-
(*kernel_fn)(context,
201+
(*kernel_fn)(dev_ctx,
202202
phi::IntArray(common::vectorize(tensor->dims())),
203203
phi::Scalar(value),
204204
tensor->dtype(),
@@ -209,42 +209,42 @@ void set_constant_with_place<phi::CustomPlace>(
209209
}
210210

211211
template <>
212-
void set_constant_with_place<phi::CPUPlace>(const phi::DeviceContext& context,
212+
void set_constant_with_place<phi::CPUPlace>(const phi::DeviceContext& dev_ctx,
213213
phi::DenseTensor* tensor,
214214
float value) {
215215
phi::VisitDataType(tensor->dtype(), TensorSetConstantCPU(tensor, value));
216216
}
217217

218218
template <>
219219
void set_constant_with_place<phi::GPUPinnedPlace>(
220-
const phi::DeviceContext& context, phi::DenseTensor* tensor, float value) {
220+
const phi::DeviceContext& dev_ctx, phi::DenseTensor* tensor, float value) {
221221
phi::VisitDataType(tensor->dtype(), TensorSetConstantCPU(tensor, value));
222222
}
223223

224224
struct TensorSetConstantWithPlace {
225225
using argument_type = phi::Place;
226226
using result_type = void;
227-
TensorSetConstantWithPlace(const phi::DeviceContext& context,
227+
TensorSetConstantWithPlace(const phi::DeviceContext& dev_ctx,
228228
phi::DenseTensor* tensor,
229229
float value)
230-
: context_(context), tensor_(tensor), value_(value) {}
230+
: dev_ctx_(dev_ctx), tensor_(tensor), value_(value) {}
231231

232232
template <typename Place>
233233
void operator()(Place place UNUSED) const {
234-
set_constant_with_place<Place>(context_, tensor_, value_);
234+
set_constant_with_place<Place>(dev_ctx_, tensor_, value_);
235235
}
236236

237-
const phi::DeviceContext& context_;
237+
const phi::DeviceContext& dev_ctx_;
238238
phi::DenseTensor* tensor_;
239239
float value_;
240240
};
241241

242-
void set_constant(const phi::DeviceContext& context,
242+
void set_constant(const phi::DeviceContext& dev_ctx,
243243
phi::DenseTensor* tensor,
244244
float value) {
245-
TensorSetConstantWithPlace func(context, tensor, value);
245+
TensorSetConstantWithPlace func(dev_ctx, tensor, value);
246246
#ifdef PADDLE_WITH_CUSTOM_DEVICE
247-
if (context.GetPlace().GetType() == phi::AllocationType::CUSTOM) {
247+
if (dev_ctx.GetPlace().GetType() == phi::AllocationType::CUSTOM) {
248248
func(phi::CustomPlace());
249249
return;
250250
}
@@ -253,7 +253,7 @@ void set_constant(const phi::DeviceContext& context,
253253
// tensor->place().apply_visitor(func);
254254
phi::VisitPlace(tensor->place(), func);
255255
#elif defined(PADDLE_WITH_XPU)
256-
if (context.GetPlace().GetType() == phi::AllocationType::XPU) {
256+
if (dev_ctx.GetPlace().GetType() == phi::AllocationType::XPU) {
257257
func(phi::XPUPlace());
258258
return;
259259
} else {
@@ -274,7 +274,7 @@ template struct RowwiseMean<phi::CPUContext, double>;
274274

275275
template <typename T>
276276
struct RowwiseAdd<phi::CPUContext, T> {
277-
void operator()(const phi::CPUContext& context UNUSED,
277+
void operator()(const phi::CPUContext& dev_ctx UNUSED,
278278
const phi::DenseTensor& input,
279279
const phi::DenseTensor& vector,
280280
phi::DenseTensor* output) {

0 commit comments

Comments
 (0)