Skip to content

Commit c7cd8d9

Browse files
authored
removing dependency on fluid/framework/eigen.h in phi (#47675)
* removing dependency on fluid/framework/eigen.h in phi
* more fixes according to PR-CI-Py3 failure
1 parent ef21b58 commit c7cd8d9

21 files changed

+50
-54
lines changed

paddle/fluid/distributed/ps/service/communicator/communicator.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -154,7 +154,7 @@ class BlockingQueue {
154154
template <typename T,
155155
int MajorType = Eigen::RowMajor,
156156
typename IndexType = Eigen::DenseIndex>
157-
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
157+
using EigenVector = phi::EigenVector<T, MajorType, IndexType>;
158158

159159
template <typename T>
160160
inline void MergeVars(const std::string &var_name,

paddle/fluid/imperative/gradient_accumulator.cc

Lines changed: 7 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -206,13 +206,13 @@ void TensorAdd(const VarType& src, VarType* dst) {
206206
#endif
207207
}
208208

209-
#define TENSOR_ADD_EIGEN(T) \
210-
auto cpu_ctx = static_cast<phi::CPUContext*>( \
211-
platform::DeviceContextPool::Instance().Get(place)); \
212-
auto in = paddle::framework::EigenVector<T>::Flatten(src_tensor); \
213-
auto out = paddle::framework::EigenVector<T>::Flatten(*dst_tensor); \
214-
auto& p = *(cpu_ctx->eigen_device()); \
215-
out.device(p) = out + in; \
209+
#define TENSOR_ADD_EIGEN(T) \
210+
auto cpu_ctx = static_cast<phi::CPUContext*>( \
211+
platform::DeviceContextPool::Instance().Get(place)); \
212+
auto in = phi::EigenVector<T>::Flatten(src_tensor); \
213+
auto out = phi::EigenVector<T>::Flatten(*dst_tensor); \
214+
auto& p = *(cpu_ctx->eigen_device()); \
215+
out.device(p) = out + in; \
216216
return;
217217

218218
if (platform::is_cpu_place(place)) {

paddle/fluid/operators/detection/anchor_generator_op.h

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -70,7 +70,7 @@ class AnchorGeneratorOpKernel : public framework::OpKernel<T> {
7070
anchors->mutable_data<T>(ctx.GetPlace());
7171
vars->mutable_data<T>(ctx.GetPlace());
7272

73-
auto e_anchors = framework::EigenTensor<T, 4>::From(*anchors);
73+
auto e_anchors = phi::EigenTensor<T, 4>::From(*anchors);
7474
for (int h_idx = 0; h_idx < feature_height; ++h_idx) {
7575
for (int w_idx = 0; w_idx < feature_width; ++w_idx) {
7676
T x_ctr = (w_idx * stride_width) + offset * (stride_width - 1);
@@ -110,7 +110,7 @@ class AnchorGeneratorOpKernel : public framework::OpKernel<T> {
110110
var_t.mutable_data<T>(
111111
phi::make_ddim({1, static_cast<int>(variances.size())}),
112112
ctx.GetPlace());
113-
auto var_et = framework::EigenTensor<T, 2>::From(var_t);
113+
auto var_et = phi::EigenTensor<T, 2>::From(var_t);
114114
for (size_t i = 0; i < variances.size(); ++i) {
115115
var_et(0, i) = variances[i];
116116
}
@@ -119,7 +119,7 @@ class AnchorGeneratorOpKernel : public framework::OpKernel<T> {
119119
auto var_dim = vars->dims();
120120
vars->Resize({anchor_num, static_cast<int>(variances.size())});
121121

122-
auto e_vars = framework::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
122+
auto e_vars = phi::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
123123
e_vars = var_et.broadcast(Eigen::DSizes<int, 2>(anchor_num, 1));
124124

125125
vars->Resize(var_dim);

paddle/fluid/operators/detection/density_prior_box_op.h

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -66,7 +66,7 @@ class DensityPriorBoxOpKernel : public framework::OpKernel<T> {
6666

6767
auto box_dim = vars->dims();
6868
boxes->Resize({feature_height, feature_width, num_priors, 4});
69-
auto e_boxes = framework::EigenTensor<T, 4>::From(*boxes).setConstant(0.0);
69+
auto e_boxes = phi::EigenTensor<T, 4>::From(*boxes).setConstant(0.0);
7070
int step_average = static_cast<int>((step_width + step_height) * 0.5);
7171

7272
std::vector<float> sqrt_fixed_ratios;
@@ -126,7 +126,7 @@ class DensityPriorBoxOpKernel : public framework::OpKernel<T> {
126126
phi::make_ddim({1, static_cast<int>(variances.size())}),
127127
ctx.GetPlace());
128128

129-
auto var_et = framework::EigenTensor<T, 2>::From(var_t);
129+
auto var_et = phi::EigenTensor<T, 2>::From(var_t);
130130

131131
for (size_t i = 0; i < variances.size(); ++i) {
132132
var_et(0, i) = variances[i];
@@ -136,7 +136,7 @@ class DensityPriorBoxOpKernel : public framework::OpKernel<T> {
136136
auto var_dim = vars->dims();
137137
vars->Resize({box_num, static_cast<int>(variances.size())});
138138

139-
auto e_vars = framework::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
139+
auto e_vars = phi::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
140140
#ifdef PADDLE_WITH_MKLML
141141
#pragma omp parallel for collapse(2)
142142
#endif

paddle/fluid/operators/detection/prior_box_op.h

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -182,7 +182,7 @@ class PriorBoxOpKernel : public framework::OpKernel<T> {
182182
var_t.mutable_data<K>(
183183
phi::make_ddim({1, static_cast<int>(variances.size())}),
184184
ctx.GetPlace());
185-
auto var_et = framework::EigenTensor<K, 2>::From(var_t);
185+
auto var_et = phi::EigenTensor<K, 2>::From(var_t);
186186

187187
#ifdef PADDLE_WITH_MKLML
188188
#pragma omp parallel for
@@ -195,7 +195,7 @@ class PriorBoxOpKernel : public framework::OpKernel<T> {
195195
auto var_dim = vars->dims();
196196
vars->Resize({box_num, static_cast<int>(variances.size())});
197197

198-
auto e_vars = framework::EigenMatrix<K, Eigen::RowMajor>::From(*vars);
198+
auto e_vars = phi::EigenMatrix<K, Eigen::RowMajor>::From(*vars);
199199

200200
#ifdef PADDLE_WITH_MKLML
201201
#pragma omp parallel for collapse(2)

paddle/fluid/operators/index_select_op.h

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -83,8 +83,8 @@ void IndexSelectInner(const framework::ExecutionContext& context,
8383
input->Resize(phi::make_ddim({outer_nums, input_dim[dim], slice_size}));
8484
output->Resize(phi::make_ddim({outer_nums, index_size, slice_size}));
8585

86-
auto input_tensor = framework::EigenTensor<T, 3>::From(*input);
87-
auto output_tensor = framework::EigenTensor<T, 3>::From(*output);
86+
auto input_tensor = phi::EigenTensor<T, 3>::From(*input);
87+
auto output_tensor = phi::EigenTensor<T, 3>::From(*output);
8888

8989
auto& place =
9090
*context.template device_context<DeviceContext>().eigen_device();

paddle/fluid/operators/interpolate_op.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -25,7 +25,7 @@ template <typename T,
2525
size_t D,
2626
int MajorType = Eigen::RowMajor,
2727
typename IndexType = Eigen::DenseIndex>
28-
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
28+
using EigenTensor = phi::EigenTensor<T, D, MajorType, IndexType>;
2929
using Tensor = phi::DenseTensor;
3030
using DataLayout = phi::DataLayout;
3131

paddle/fluid/operators/math/sequence_pooling.cc

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -29,11 +29,11 @@ using LoDTensor = phi::DenseTensor;
2929
template <typename T,
3030
int MajorType = Eigen::RowMajor,
3131
typename IndexType = Eigen::DenseIndex>
32-
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
32+
using EigenVector = phi::EigenVector<T, MajorType, IndexType>;
3333
template <typename T,
3434
int MajorType = Eigen::RowMajor,
3535
typename IndexType = Eigen::DenseIndex>
36-
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
36+
using EigenMatrix = phi::EigenMatrix<T, MajorType, IndexType>;
3737

3838
template <typename T, bool is_test>
3939
class MaxSeqPoolFunctor {

paddle/fluid/operators/sequence_ops/sequence_expand_op.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -26,7 +26,7 @@ using LoDTensor = phi::DenseTensor;
2626
template <typename T,
2727
int MajorType = Eigen::RowMajor,
2828
typename IndexType = Eigen::DenseIndex>
29-
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
29+
using EigenMatrix = phi::EigenMatrix<T, MajorType, IndexType>;
3030

3131
template <typename DeviceContext, typename T>
3232
struct SequenceExpandFunctor {

paddle/phi/kernels/cpu/norm_grad_kernel.cc

Lines changed: 5 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -45,10 +45,10 @@ void NormGradKernel(const Context& ctx,
4545

4646
auto* place = ctx.eigen_device();
4747

48-
auto x_e = paddle::framework::EigenVector<T>::Flatten(*in_x);
49-
auto dy_e = paddle::framework::EigenVector<T>::Flatten(*in_dy);
50-
auto norm_e = paddle::framework::EigenVector<T>::Flatten(*in_norm);
51-
auto dx_e = paddle::framework::EigenVector<T>::Flatten(*out_dx);
48+
auto x_e = phi::EigenVector<T>::Flatten(*in_x);
49+
auto dy_e = phi::EigenVector<T>::Flatten(*in_dy);
50+
auto norm_e = phi::EigenVector<T>::Flatten(*in_norm);
51+
auto dx_e = phi::EigenVector<T>::Flatten(*out_dx);
5252

5353
Eigen::DSizes<int, 3> shape(pre, n, post);
5454
Eigen::DSizes<int, 3> rshape(pre, 1, post);
@@ -60,7 +60,7 @@ void NormGradKernel(const Context& ctx,
6060
DenseTensor rsum;
6161
rsum.Resize({pre, post});
6262
ctx.template Alloc<T>(&rsum);
63-
auto sum = paddle::framework::EigenTensor<T, 2>::From(rsum);
63+
auto sum = phi::EigenTensor<T, 2>::From(rsum);
6464

6565
Eigen::DSizes<int, 1> rdim(1);
6666
Eigen::DSizes<int, 3> bcast(1, n, 1);

0 commit comments

Comments (0)