5 changes: 0 additions & 5 deletions paddle/fluid/operators/math/CMakeLists.txt
@@ -28,7 +28,6 @@ math_library(sampler DEPS generator)

# math_library(math_function DEPS blas dense_tensor tensor)

math_library(sequence_padding DEPS lod_tensor)
math_library(sequence_pooling DEPS math_function jit_kernel_helper)
if(WITH_ASCEND_CL)
math_library(beam_search DEPS math_function beam_search_npu)
@@ -55,10 +54,6 @@ cc_test(
vol2col_test
SRCS vol2col_test.cc
DEPS vol2col)
cc_test(
sequence_padding_test
SRCS sequence_padding_test.cc
DEPS sequence_padding)
cc_test(
sequence_pooling_test
SRCS sequence_pooling_test.cc
2 changes: 1 addition & 1 deletion paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
@@ -94,7 +94,7 @@ class SequencePadOp : public framework::OperatorWithKernel {
static_cast<int64_t>(x_lod_0.back())));

int seq_num = x_lod_0.size() - 1;
int max_seq_len = math::MaximumSequenceLength(x_lod_0);
int max_seq_len = phi::funcs::MaximumSequenceLength(x_lod_0);
if (padded_length == -1) {
padded_length = max_seq_len;
}
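Aside (not part of the patch): `phi::funcs::MaximumSequenceLength` is used above to derive the default `padded_length` from the level-0 LoD offsets. A minimal standalone sketch of that computation, assuming absolute offsets such as `{0, 3, 7, 9}` (sequence lengths 3, 4, 2); the helper name is hypothetical and this is not the Paddle implementation:

```cpp
// Hypothetical helper: the maximum sequence length is the largest gap
// between consecutive absolute LoD offsets.
#include <algorithm>
#include <cstdint>
#include <vector>

int64_t MaxSeqLenFromOffsets(const std::vector<uint64_t>& offsets) {
  int64_t max_len = 0;
  for (size_t i = 1; i < offsets.size(); ++i) {
    max_len = std::max<int64_t>(
        max_len, static_cast<int64_t>(offsets[i] - offsets[i - 1]));
  }
  return max_len;
}
```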
10 changes: 5 additions & 5 deletions paddle/fluid/operators/sequence_ops/sequence_pad_op.h
@@ -18,8 +18,8 @@ limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sequence_padding.h"

namespace paddle {
namespace operators {
@@ -45,15 +45,15 @@ class SequencePadOpKernel : public framework::OpKernel<T> {

int padded_length = ctx.Attr<int>("padded_length");

math::PaddingLoDTensorFunctor<DeviceContext, T>()(
phi::funcs::PaddingLoDTensorFunctor<DeviceContext, T>()(
ctx.template device_context<DeviceContext>(),
*x,
out,
*pad_value,
padded_length,
0,
false,
math::kBatchLengthWidth);
phi::funcs::kBatchLengthWidth);

phi::DenseTensor seq_len;
seq_len.Resize(len_t->dims());
@@ -80,14 +80,14 @@ class SequencePadGradOpKernel : public framework::OpKernel<T> {

int padded_length = ctx.Attr<int>("padded_length");

math::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
phi::funcs::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
ctx.template device_context<DeviceContext>(),
*d_out,
d_x,
padded_length,
0,
false,
math::kBatchLengthWidth);
phi::funcs::kBatchLengthWidth);
}
}
};
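Aside (not part of the patch): the `kBatchLengthWidth` layout passed above means the padded output is laid out as `[batch, max_len, width]`, with each sequence copied into its own slot and the tail filled with `pad_value`. A simplified standalone sketch of that layout, assuming row-major `[len, width]` data per sequence; it mirrors `PaddingLoDTensorFunctor` only conceptually:

```cpp
// Illustrative only: pad variable-length sequences into a dense
// [batch, max_len, width] buffer (the kBatchLengthWidth convention).
#include <vector>

std::vector<float> PadBatchLengthWidth(
    const std::vector<std::vector<float>>& seqs,  // each: len_i * width values
    int max_len, int width, float pad_value) {
  std::vector<float> out(seqs.size() * max_len * width, pad_value);
  for (size_t b = 0; b < seqs.size(); ++b) {
    const int len = static_cast<int>(seqs[b].size()) / width;
    for (int t = 0; t < len; ++t) {
      for (int w = 0; w < width; ++w) {
        out[(b * max_len + t) * width + w] = seqs[b][t * width + w];
      }
    }
  }
  return out;
}
```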
16 changes: 11 additions & 5 deletions paddle/fluid/operators/sequence_ops/sequence_unpad_op.h
@@ -18,8 +18,8 @@ limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sequence_padding.h"

namespace paddle {
namespace operators {
@@ -70,8 +70,14 @@ class SequenceUnpadOpKernel : public framework::OpKernel<T> {
out_t->mutable_data<T>(ctx.GetPlace());

int64_t padded_length = x_t->dims()[1];
math::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
dev_ctx, *x_t, out_t, padded_length, 0, false, math::kBatchLengthWidth);
phi::funcs::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
dev_ctx,
*x_t,
out_t,
padded_length,
0,
false,
phi::funcs::kBatchLengthWidth);
}
};

@@ -93,15 +99,15 @@ class SequenceUnpadGradOpKernel : public framework::OpKernel<T> {
auto& dev_ctx = ctx.template device_context<DeviceContext>();
set_zero(dev_ctx, &zero_pads, static_cast<T>(0));

math::PaddingLoDTensorFunctor<DeviceContext, T>()(
phi::funcs::PaddingLoDTensorFunctor<DeviceContext, T>()(
ctx.template device_context<DeviceContext>(),
*d_out,
d_x,
zero_pads,
padded_length,
0,
false,
math::kBatchLengthWidth);
phi::funcs::kBatchLengthWidth);
}
}
};
5 changes: 2 additions & 3 deletions paddle/fluid/operators/sequence_ops/sequence_unpad_op_xpu.cc
@@ -17,8 +17,7 @@ limitations under the License. */
#include "paddle/fluid/operators/sequence_ops/sequence_unpad_op.h"

namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
sequence_unpad,
ops::SequenceUnpadOpKernel<paddle::platform::XPUDeviceContext, float>);
REGISTER_OP_XPU_KERNEL(sequence_unpad,
ops::SequenceUnpadOpKernel<phi::XPUContext, float>);

#endif
1 change: 1 addition & 0 deletions paddle/phi/kernels/funcs/CMakeLists.txt
@@ -23,6 +23,7 @@ math_library(softmax DEPS math_function)
math_library(maxouting)
math_library(matrix_bit_code)
math_library(sequence_scale)
math_library(sequence_padding DEPS lod_utils)

cc_library(
phi_data_layout_transform
paddle/fluid/operators/math/sequence_padding.cc → paddle/phi/kernels/funcs/sequence_padding.cc
@@ -1,4 +1,4 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,18 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/phi/kernels/funcs/sequence_padding.h"

#include "paddle/phi/backends/cpu/cpu_context.h"

namespace phi {
class DenseTensor;
} // namespace phi
#ifdef PADDLE_WITH_XPU
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#endif

namespace paddle {
namespace operators {
namespace math {
namespace phi {
namespace funcs {

template <typename T>
void CopyValidData(phi::DenseTensor* dst_tensor,
@@ -46,7 +44,7 @@ void CopyValidData(phi::DenseTensor* dst_tensor,
PADDLE_ENFORCE_GE(
pad_seq_len,
valid_seq_len,
platform::errors::InvalidArgument(
phi::errors::InvalidArgument(
"The padded sequence length can not "
"be less than its original length. Expected %ld >= %ld, but got "
"%ld < %ld. Please check input value.",
@@ -107,7 +105,7 @@ class PaddingLoDTensorFunctor<phi::CPUContext, T> {
bool norm_by_times = false,
const PadLayout layout = kBatchLengthWidth) {
auto seq_lod = seq_tensor.lod();
const auto seq_offsets = framework::ToAbsOffset(seq_lod)[lod_level];
const auto seq_offsets = phi::ToAbsOffset(seq_lod)[lod_level];
const auto& seq_tensor_dims = seq_tensor.dims();
const auto& pad_tensor_dims = pad_tensor->dims();
if (pad_seq_len == -1) {
@@ -125,7 +123,7 @@ class PaddingLoDTensorFunctor<phi::CPUContext, T> {
PADDLE_ENFORCE_EQ(
pad_value.numel() == 1 || pad_value.numel() == step_width,
true,
platform::errors::InvalidArgument(
phi::errors::InvalidArgument(
"The numel of 'pad_value' can only be 1 or be equal to the "
"'step_width', but got %ld != 1 and %ld. Please check the input "
"value.",
@@ -165,7 +163,7 @@ class UnpaddingLoDTensorFunctor<phi::CPUContext, T> {
int lod_level = 0,
bool norm_by_times = false,
const PadLayout layout = kBatchLengthWidth) {
auto seq_offsets = framework::ToAbsOffset(seq_tensor->lod())[lod_level];
auto seq_offsets = phi::ToAbsOffset(seq_tensor->lod())[lod_level];
const auto& seq_tensor_dims = seq_tensor->dims();
const auto& pad_tensor_dims = pad_tensor.dims();
if (pad_seq_len == -1) {
@@ -193,16 +191,16 @@ class UnpaddingLoDTensorFunctor<phi::CPUContext, T> {

#ifdef PADDLE_WITH_XPU
template <typename T>
class UnpaddingLoDTensorFunctor<platform::XPUDeviceContext, T> {
class UnpaddingLoDTensorFunctor<phi::XPUContext, T> {
public:
void operator()(const platform::XPUDeviceContext& context,
void operator()(const phi::XPUContext& context,
const phi::DenseTensor& pad_tensor,
phi::DenseTensor* seq_tensor,
int pad_seq_len = -1,
int lod_level = 0,
bool norm_by_times = false,
const PadLayout layout = kBatchLengthWidth) {
auto seq_offsets = framework::ToAbsOffset(seq_tensor->lod())[lod_level];
auto seq_offsets = phi::ToAbsOffset(seq_tensor->lod())[lod_level];
const auto& seq_tensor_dims = seq_tensor->dims();
const auto& pad_tensor_dims = pad_tensor.dims();
if (pad_seq_len == -1) {
@@ -246,9 +244,8 @@ template class UnpaddingLoDTensorFunctor<phi::CPUContext, float>;
template class UnpaddingLoDTensorFunctor<phi::CPUContext, double>;

#ifdef PADDLE_WITH_XPU
template class UnpaddingLoDTensorFunctor<platform::XPUDeviceContext, float>;
template class UnpaddingLoDTensorFunctor<phi::XPUContext, float>;
#endif

} // namespace math
} // namespace operators
} // namespace paddle
} // namespace funcs
} // namespace phi
paddle/fluid/operators/math/sequence_padding.cu → paddle/phi/kernels/funcs/sequence_padding.cu
@@ -1,4 +1,4 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,12 +14,11 @@ limitations under the License. */

#include <algorithm>

#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/sequence_padding.h"

namespace paddle {
namespace operators {
namespace math {
namespace phi {
namespace funcs {

template <typename T, CopyType Type>
__global__ void SequencePaddingKernel(T* dst,
@@ -69,7 +68,7 @@ class PaddingLoDTensorFunctor<phi::GPUContext, T> {
bool norm_by_times = false,
const PadLayout layout = kBatchLengthWidth) {
auto seq_lod = seq_tensor.lod();
auto seq_offsets = framework::ToAbsOffset(seq_lod)[lod_level];
auto seq_offsets = phi::ToAbsOffset(seq_lod)[lod_level];
const auto& seq_tensor_dims = seq_tensor.dims();
const auto& pad_tensor_dims = pad_tensor->dims();
int max_seq_len = MaximumSequenceLength(seq_offsets);
@@ -79,7 +78,7 @@ class PaddingLoDTensorFunctor<phi::GPUContext, T> {
PADDLE_ENFORCE_GE(
pad_seq_len,
max_seq_len,
platform::errors::InvalidArgument(
phi::errors::InvalidArgument(
"The pad_seq_len must be equal to or greater than the "
"original max sequence length. Expected %ld >= %ld, but got %ld < "
"%ld. Please check the input value.",
@@ -99,7 +98,7 @@ class PaddingLoDTensorFunctor<phi::GPUContext, T> {
PADDLE_ENFORCE_EQ(
pad_value.numel() == 1 || pad_value.numel() == step_width,
true,
platform::errors::InvalidArgument(
phi::errors::InvalidArgument(
"The numel of 'pad_value' can only be 1 or be equal to "
"the 'step_width', but got %ld != 1 and %ld. Please check the "
"input value.",
@@ -149,7 +148,7 @@ class UnpaddingLoDTensorFunctor<phi::GPUContext, T> {
int lod_level = 0,
bool norm_by_times = false,
const PadLayout layout = kBatchLengthWidth) {
auto seq_offsets = framework::ToAbsOffset(seq_tensor->lod())[lod_level];
auto seq_offsets = phi::ToAbsOffset(seq_tensor->lod())[lod_level];
const auto& seq_tensor_dims = seq_tensor->dims();
const auto& pad_tensor_dims = pad_tensor.dims();
int max_seq_len = MaximumSequenceLength(seq_offsets);
@@ -216,6 +215,5 @@ template class UnpaddingLoDTensorFunctor<phi::GPUContext, int64_t>;
template class UnpaddingLoDTensorFunctor<phi::GPUContext, float>;
template class UnpaddingLoDTensorFunctor<phi::GPUContext, double>;

} // namespace math
} // namespace operators
} // namespace paddle
} // namespace funcs
} // namespace phi
paddle/fluid/operators/math/sequence_padding.h → paddle/phi/kernels/funcs/sequence_padding.h
@@ -1,4 +1,4 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,12 +17,13 @@ limitations under the License. */
#include <algorithm>
#include <vector>

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/lod_utils.h"
#include "paddle/phi/core/mixed_vector.h"

namespace paddle {
namespace operators {
namespace math {
namespace phi {
namespace funcs {

enum PadLayout { kBatchLengthWidth = 0, kLengthBatchWidth };

@@ -130,6 +131,5 @@ class UnpaddingLoDTensorFunctor {
const PadLayout layout = kBatchLengthWidth);
};

} // namespace math
} // namespace operators
} // namespace paddle
} // namespace funcs
} // namespace phi
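Aside (not part of the patch): a hedged usage sketch of the relocated interface, based only on the declarations and call sites visible in this diff; the wrapper name and tensor arguments are placeholders:

```cpp
// Sketch of how an operator calls the functor after the move to phi::funcs.
#include "paddle/phi/kernels/funcs/sequence_padding.h"

template <typename DeviceContext, typename T>
void PadExample(const DeviceContext& dev_ctx,
                const phi::DenseTensor& seq,        // LoD input tensor
                const phi::DenseTensor& pad_value,  // 1 element or step_width
                phi::DenseTensor* padded) {         // pre-sized output
  phi::funcs::PaddingLoDTensorFunctor<DeviceContext, T>()(
      dev_ctx, seq, padded, pad_value,
      /*pad_seq_len=*/-1,   // -1: pad to the longest sequence
      /*lod_level=*/0,
      /*norm_by_times=*/false,
      phi::funcs::kBatchLengthWidth);
}
```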
6 changes: 3 additions & 3 deletions paddle/phi/kernels/impl/warpctc_grad_kernel_impl.h
@@ -16,12 +16,12 @@

#include <vector>

#include "paddle/fluid/operators/math/sequence_padding.h"
#include "paddle/phi/backends/dynload/warpctc.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sequence_padding.h"
#include "paddle/phi/kernels/funcs/sequence_scale.h"
#include "paddle/utils/optional.h"

@@ -69,14 +69,14 @@ void WarpctcGradKernel(const Context& dev_ctx,
logits_grad_e.device(*place) = logits_g;
}
} else {
paddle::operators::math::UnpaddingLoDTensorFunctor<Context, T>()(
phi::funcs::UnpaddingLoDTensorFunctor<Context, T>()(
dev_ctx,
warpctcgrad,
logits_grad,
-1,
0,
norm_by_times,
paddle::operators::math::kLengthBatchWidth);
phi::funcs::kLengthBatchWidth);

const T* loss_grad_data = loss_grad.data<T>();
phi::funcs::ScaleLoDTensorFunctor<Context, T>()(