1 change: 1 addition & 0 deletions paddle/fluid/framework/data_layout_transform.cc
@@ -13,6 +13,7 @@
// limitations under the License.

#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/op_kernel_type.h"

#include "paddle/phi/core/utils/data_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"
4 changes: 4 additions & 0 deletions paddle/fluid/imperative/tests/test_group.cc
@@ -18,6 +18,10 @@
#include "gtest/gtest.h"
#include "paddle/fluid/imperative/reducer.h"

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/phi/core/utils/data_type.h"

namespace paddle {
namespace imperative {

1 change: 1 addition & 0 deletions paddle/fluid/operators/detection/anchor_generator_op.h
@@ -18,6 +18,7 @@ limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/common/transform.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
1 change: 1 addition & 0 deletions paddle/fluid/operators/detection/prior_box_op.h
@@ -19,6 +19,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/common/transform.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
1 change: 1 addition & 0 deletions paddle/fluid/operators/index_select_op.h
@@ -17,6 +17,7 @@

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
1 change: 1 addition & 0 deletions paddle/fluid/operators/interpolate_op.h
@@ -16,6 +16,7 @@

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
1 change: 1 addition & 0 deletions paddle/fluid/operators/math/prelu.h
@@ -16,6 +16,7 @@ limitations under the License. */
#include <vector>

#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
1 change: 1 addition & 0 deletions paddle/fluid/operators/math/sequence_pooling.cc
@@ -17,6 +17,7 @@ limitations under the License. */
#include <string>

#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/jit/kernels.h"
#include "paddle/phi/kernels/funcs/math_function.h"

1 change: 1 addition & 0 deletions paddle/fluid/operators/math/tree2col.cu
@@ -14,6 +14,7 @@

#include <stack>

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/tree2col.h"
#include "paddle/phi/kernels/funcs/math_function.h"

5 changes: 3 additions & 2 deletions paddle/fluid/operators/reduce_ops/check_reduce_rank_test.cu
@@ -13,14 +13,15 @@
// limitations under the License.

#include "gtest/gtest.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/kernels/gpu/reduce.h"

namespace paddle {
namespace operators {
namespace details {

TEST(test_reduce_rank_check, all) {
using EnforceNotMet = paddle::platform::EnforceNotMet;
using EnforceNotMet = phi::EnforceNotMet;
constexpr int kMaxRank = framework::DDim::kMaxRank;

for (int rank = 0; rank < kMaxRank; rank++) {
@@ -42,7 +43,7 @@ TEST(test_reduce_rank_check, all) {
phi::funcs::details::CheckReduceRank(reduce_rank, rank);
} else {
ASSERT_THROW(phi::funcs::details::CheckReduceRank(reduce_rank, rank),
paddle::platform::EnforceNotMet);
EnforceNotMet);
}
}
}
1 change: 1 addition & 0 deletions paddle/fluid/operators/sequence_ops/sequence_expand_op.h
@@ -17,6 +17,7 @@ limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace paddle {
112 changes: 112 additions & 0 deletions paddle/phi/core/utils/visit_place.h
@@ -0,0 +1,112 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/common/place.h"
#include "paddle/phi/core/enforce.h"

namespace phi {

// Note: targets that call phi::VisitPlace need to add a dependency on phi_place.
template <typename Visitor>
typename Visitor::result_type VisitPlace(const phi::Place& place,
const Visitor& visitor) {
switch (place.GetType()) {
case phi::AllocationType::GPU: {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
phi::GPUPlace p(place.GetDeviceId());
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with CUDA. Cannot visit cuda_pinned")));
return typename Visitor::result_type();
#endif
}
case phi::AllocationType::GPUPINNED: {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
phi::GPUPinnedPlace p;
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with CUDA. Cannot visit cuda_pinned")));
return typename Visitor::result_type();
#endif
}
case phi::AllocationType::XPU: {
#ifdef PADDLE_WITH_XPU
phi::XPUPlace p(place.GetDeviceId());
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with XPU. Cannot visit xpu device")));
return typename Visitor::result_type();
#endif
}
case phi::AllocationType::NPU: {
#ifdef PADDLE_WITH_ASCEND_CL
phi::NPUPlace p(place.GetDeviceId());
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with NPU. Cannot visit npu_pinned")));
return typename Visitor::result_type();
#endif
}
case phi::AllocationType::NPUPINNED: {
#ifdef PADDLE_WITH_ASCEND_CL
phi::NPUPinnedPlace p;
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with NPU. Cannot visit npu_pinned")));
return typename Visitor::result_type();
#endif
}
case phi::AllocationType::IPU: {
#ifdef PADDLE_WITH_IPU
phi::IPUPlace p(place.GetDeviceId());
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with IPU. Cannot visit ipu device")));
return typename Visitor::result_type();
#endif
}
case phi::AllocationType::MLU: {
#ifdef PADDLE_WITH_MLU
phi::MLUPlace p(place.GetDeviceId());
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with MLU. Cannot visit mlu device")));
#endif
}
case phi::AllocationType::CUSTOM: {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
phi::CustomPlace p(place.GetDeviceType(), place.GetDeviceId());
return visitor(p);
#else
PADDLE_THROW(phi::errors::Unavailable(
("Paddle is not compiled with CUSTOM. Cannot visit custom device")));
#endif
}
default: {
phi::CPUPlace p;
return visitor(p);
}
}
}

} // namespace phi
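A minimal usage sketch may help here (illustrative only, not code from this PR; IsGpuPlaceVisitor and PlaceIsGpu are invented names). A visitor just needs a result_type typedef plus call operators for the concrete place types, and VisitPlace invokes the overload matching place.GetType():

// Illustrative sketch of a phi::VisitPlace visitor (not from this PR).
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/utils/visit_place.h"

struct IsGpuPlaceVisitor {
  // VisitPlace returns typename Visitor::result_type.
  using result_type = bool;

  // Exact-match overload chosen when VisitPlace passes a phi::GPUPlace.
  bool operator()(const phi::GPUPlace&) const { return true; }

  // Fallback for every other place type (CPUPlace, XPUPlace, IPUPlace, ...).
  template <typename PlaceT>
  bool operator()(const PlaceT&) const { return false; }
};

bool PlaceIsGpu(const phi::Place& place) {
  // Dispatches on place.GetType(); unhandled types fall back to CPUPlace.
  return phi::VisitPlace(place, IsGpuPlaceVisitor());
}

As the header comment notes, callers of phi::VisitPlace need a build dependency on phi_place; that is what lets math_function.cc below drop paddle::platform::VisitPlace from fluid.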
18 changes: 7 additions & 11 deletions paddle/phi/kernels/cpu/repeat_interleave_grad_kernel.cc
@@ -45,29 +45,25 @@ void RepeatInterleaveWithTensorIndexGradKernel(
repeats_tensor.dims()[0],
x_grad->dims()[dim]));

const auto& index_type =
paddle::framework::TransToProtoVarType(repeats_tensor.dtype());
const auto& index_type = repeats_tensor.dtype();

bool index_type_match =
index_type == paddle::framework::proto::VarType::INT32 ||
index_type == paddle::framework::proto::VarType::INT64;
index_type == phi::DataType::INT32 || index_type == phi::DataType::INT64;
PADDLE_ENFORCE_EQ(index_type_match,
true,
phi::errors::InvalidArgument(
"Input(Repeats) holds the wrong type, it holds %s, but "
"desires to be %s or %s",
paddle::framework::DataTypeToString(index_type),
paddle::framework::DataTypeToString(
paddle::framework::proto::VarType::INT32),
paddle::framework::DataTypeToString(
paddle::framework::proto::VarType::INT64)));
phi::DataTypeToString(index_type),
phi::DataTypeToString(phi::DataType::INT32),
phi::DataTypeToString(phi::DataType::INT64)));

phi::DeviceContextPool::Instance().Get(repeats_tensor.place());
if (index_type == paddle::framework::proto::VarType::INT32) {
if (index_type == phi::DataType::INT32) {
phi::funcs::RepeatsTensor2IndexTensor<Context, int>(
ctx, repeats_tensor, &index);
IndexSelectGradInner<Context, T, int>(ctx, out_grad, index, x_grad, dim);
} else if (index_type == paddle::framework::proto::VarType::INT64) {
} else if (index_type == phi::DataType::INT64) {
phi::funcs::RepeatsTensor2IndexTensor<Context, int64_t>(
ctx, repeats_tensor, &index);
IndexSelectGradInner<Context, T, int64_t>(
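The change above, comparing repeats_tensor.dtype() against phi::DataType values instead of converting through paddle::framework::TransToProtoVarType, is the same pattern applied to the sparse_weight_embedding and take_along_axis kernels below. A condensed sketch of the dispatch (VisitIndexType is a hypothetical helper written for illustration, not code from this PR):

// Hypothetical helper mirroring the phi::DataType checks used above;
// only the helper name is invented.
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/utils/data_type.h"

template <typename Int32Fn, typename Int64Fn>
void VisitIndexType(const phi::DenseTensor& index,
                    Int32Fn&& int32_fn,
                    Int64Fn&& int64_fn) {
  // Compare phi::DataType directly; no proto::VarType round trip.
  const phi::DataType index_type = index.dtype();
  PADDLE_ENFORCE_EQ(
      index_type == phi::DataType::INT32 || index_type == phi::DataType::INT64,
      true,
      phi::errors::InvalidArgument(
          "Index holds the wrong type, it holds %s, but desires to be %s or %s",
          phi::DataTypeToString(index_type),
          phi::DataTypeToString(phi::DataType::INT32),
          phi::DataTypeToString(phi::DataType::INT64)));
  if (index_type == phi::DataType::INT32) {
    int32_fn();
  } else {
    int64_fn();
  }
}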
5 changes: 2 additions & 3 deletions paddle/phi/kernels/cpu/sparse_weight_embedding_kernel.cc
@@ -45,8 +45,7 @@ struct EmbeddingCPUSparseFunctor {
int64_t row_width = table_t.value().dims()[1];
const auto* table = table_t.value().template data<T>();
auto* output = dev_ctx_.template Alloc<T>(output_t);
auto input_data_type =
paddle::framework::TransToProtoVarType(table_t.value().dtype());
auto input_data_type = table_t.value().dtype();

for (int64_t i = 0; i < ids_numel; ++i) {
if (padding_idx_ != kNoPadding && ids[i] == padding_idx_) {
@@ -66,7 +65,7 @@ struct EmbeddingCPUSparseFunctor {
phi::errors::InvalidArgument(
"the input key should be exists. But received %d.", id_index));

if (input_data_type == paddle::framework::proto::VarType::BF16) {
if (input_data_type == phi::DataType::BFLOAT16) {
memcpy(output + i * row_width,
table + id_index * row_width,
row_width * sizeof(T));
7 changes: 3 additions & 4 deletions paddle/phi/kernels/cpu/take_along_axis_grad_kernel.cc
@@ -43,16 +43,15 @@ void TakeAlongAxisGradKernel(const Context& dev_ctx,
phi::funcs::SetConstant<Context, T> functor;
functor(dev_ctx, x_grad, static_cast<T>(0));

const auto& index_type =
paddle::framework::TransToProtoVarType(index.dtype());
if (index_type == paddle::framework::proto::VarType::INT32) {
const auto& index_type = index.dtype();
if (index_type == phi::DataType::INT32) {
phi::funcs::cpu_scatter_add_kernel<T, int32_t>(
*x_grad,
axis,
index,
out_grad,
dev_ctx); // the gradient of gather is scatter
} else if (index_type == paddle::framework::proto::VarType::INT64) {
} else if (index_type == phi::DataType::INT64) {
phi::funcs::cpu_scatter_add_kernel<T, int64_t>(
*x_grad, axis, index, out_grad, dev_ctx);
}
4 changes: 3 additions & 1 deletion paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
@@ -12,8 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/unique_consecutive_kernel.h"
#include <climits>

#include "paddle/phi/kernels/cpu/unique_consecutive_functor.h"
#include "paddle/phi/kernels/unique_consecutive_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/errors.h"
2 changes: 2 additions & 0 deletions paddle/phi/kernels/cpu/unique_kernel.cc
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <climits>

#include "paddle/phi/kernels/unique_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
3 changes: 2 additions & 1 deletion paddle/phi/kernels/funcs/math_function.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/core/utils/visit_place.h"

#ifdef PADDLE_WITH_MKLML
#include "paddle/phi/backends/dynload/mklml.h"
@@ -236,7 +237,7 @@ void set_constant(const phi::DeviceContext& context,
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
// tensor->place().apply_visitor(func);
paddle::platform::VisitPlace(tensor->place(), func);
phi::VisitPlace(tensor->place(), func);
#elif defined(PADDLE_WITH_XPU)
func(phi::XPUPlace());
#else
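This hunk is where the new visit_place.h is consumed: in CUDA/HIP builds, set_constant resolves the device-specific implementation from tensor->place() through phi::VisitPlace instead of fluid's paddle::platform::VisitPlace. A call-side sketch (the trailing DenseTensor*/float parameters are assumed from current usage; the hunk only shows the first parameter):

// Call-side sketch only; the float value parameter is an assumption.
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/math_function.h"

void ZeroOut(const phi::DeviceContext& ctx, phi::DenseTensor* t) {
  // On CUDA/HIP builds this dispatches on t->place() via phi::VisitPlace;
  // XPU and CPU-only builds call the functor for a fixed place instead.
  phi::funcs::set_constant(ctx, t, 0.0f);
}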
4 changes: 1 addition & 3 deletions paddle/phi/kernels/funcs/math_function.h
@@ -17,12 +17,10 @@ limitations under the License. */
#include <memory>
#include <vector>

#include "paddle/fluid/framework/operator.h"
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/utils/data_type.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"

namespace phi {
namespace funcs {
1 change: 1 addition & 0 deletions paddle/phi/kernels/funcs/math_function_impl.h
@@ -17,6 +17,7 @@ limitations under the License. */
#include <vector>

#include "paddle/phi/common/data_type.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"

namespace phi {
1 change: 1 addition & 0 deletions paddle/phi/kernels/funcs/segment_pooling.cu
@@ -17,6 +17,7 @@ limitations under the License. */
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/gather.cu.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/segment_pooling.h"
2 changes: 2 additions & 0 deletions paddle/phi/kernels/funcs/unique_functor.h
@@ -13,6 +13,8 @@
// limitations under the License.

#pragma once
#include <set>

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/utils/data_type.h"
#include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
3 changes: 0 additions & 3 deletions paddle/phi/kernels/gpu/bincount_kernel.cu
@@ -109,9 +109,6 @@ void BincountCUDAInner(const Context& dev_ctx,
<<<GET_BLOCKS(input_numel), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
input_data, input_numel, has_weights, weights_data, output_data);
} else {
const auto& weights_type =
paddle::framework::TransToProtoVarType(weights->dtype());

if (weights->dtype() == DataType::FLOAT32) {
float* output_data = dev_ctx.template Alloc<float>(output);
phi::funcs::SetConstant<Context, float>()(
4 changes: 1 addition & 3 deletions paddle/phi/kernels/gpu/class_center_sample_kernel.cu
@@ -375,9 +375,7 @@ void ClassCenterSampleKernel(const Context& dev_ctx,
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(
num_classes_per_device.dtype())),
phi::ToNCCLDataType(num_classes_per_device.dtype()),
ncclSum,
comm->comm(),
calcu_stream));