Commit dcb3da5 ("refine code")
Parent: 05239b6

4 files changed: 56 additions, 117 deletions

paddle/operators/math/sequence_project.h

Lines changed: 2 additions & 2 deletions
@@ -90,8 +90,8 @@ template <typename Place, typename T>
 class SequenceProjectFunctor {
  public:
   void operator()(const platform::DeviceContext& context,
-                  framework::LoDTensor& in, framework::LoDTensor& padding_data,
-                  framework::LoDTensor& col, bool padding_trainable,
+                  framework::LoDTensor& in, framework::Tensor& padding_data,
+                  framework::Tensor& col, bool padding_trainable,
                   int context_start, int context_length, int context_stride,
                   int up_pad, int down_pad, bool gradient, bool input_grad,
                   bool pad_grad) {
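
Note: after this change only `in` still carries LoD (sequence offset) information; `padding_data` and the im2col buffer `col` are dense, so plain framework::Tensor suffices for them. A minimal, hedged sketch of a forward-pass call under the new signature (variable declarations omitted; the flag values are illustrative assumptions, not taken from the commit):

    // Sketch: forward pass through the projection functor.
    // "in" is a LoDTensor (keeps sequence offsets); padding_data and col
    // are plain Tensors after this commit.
    paddle::operators::math::SequenceProjectFunctor<Place, T> seq_project_functor;
    seq_project_functor(context.device_context(), in, padding_data, col,
                        padding_trainable, context_start, context_length,
                        /*context_stride=*/1, up_pad, down_pad,
                        /*gradient=*/false, /*input_grad=*/false,
                        /*pad_grad=*/false);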

paddle/operators/sequence_conv_op.cc

Lines changed: 7 additions & 7 deletions
@@ -29,10 +29,6 @@ class SequenceConvOp : public framework::OperatorWithKernel {
                    "Input(Filter) of SequenceConvOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of SequenceConvOp should not be null.");
-    // PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() >
-    // 0 failed, 0 <= 0)
-    PADDLE_ENFORCE(ctx->HasInput("PaddingData"),
-                   "Input(PaddingData) of SequenceConvOp should not be null.");

     int context_length = ctx->Attrs().Get<int>("context_length");
     bool padding_trainable = ctx->Attrs().Get<bool>("padding_trainable");
@@ -48,6 +44,9 @@ class SequenceConvOp : public framework::OperatorWithKernel {
                    "number_of_input_features).");

     if (padding_trainable) {
+      PADDLE_ENFORCE(
+          ctx->HasInput("PaddingData"),
+          "Input(PaddingData) of SequenceConvOp should not be null.");
       framework::DDim padding_dim = ctx->GetInputDim("PaddingData");
       int up_pad = std::max(0, -context_start);
       int down_pad = std::max(0, context_start + context_length - 1);
@@ -106,11 +105,12 @@ class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker {
              "(A float LoDTensor) the input of SequenceConvOp, a vector of "
              "2-D matrix of size (minibatch, number_of_input_features).");
     AddInput("PaddingData",
-             "(A float LoDTensor) the input of SequenceConvOp, a vector of "
+             "(Tensor) the input of SequenceConvOp, a vector of "
              "2-D matrix of size (up_pad + down_pad, "
-             "number_of_input_features). ");
+             "number_of_input_features). ")
+        .AsDispensable();
     AddInput("Filter",
-             "(A float LoDTensor) the input of SequenceConvOp, a vector of "
+             "(Tensor) the input of SequenceConvOp, a vector of "
              "2-D matrix of size (context_length x number_of_input_features).");
     AddOutput("Out",
               "(A float LoDTensor) the output of SequenceConvOp, a vector "

paddle/operators/sequence_conv_op.h

Lines changed: 13 additions & 14 deletions
@@ -36,7 +36,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in = context.Input<LoDTensor>("X");
     auto* out = context.Output<LoDTensor>("Out");
-    auto filter = *context.Input<LoDTensor>("Filter");
+    auto filter = *context.Input<Tensor>("Filter");

     out->mutable_data<T>(context.GetPlace());
     // out->set_lod(in->lod());
@@ -50,9 +50,9 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
                       "Only support one level sequence now.");

-    const LoDTensor* padding_data = nullptr;
+    const Tensor* padding_data = nullptr;
     if (padding_trainable) {
-      padding_data = context.Input<LoDTensor>("PaddingData");
+      padding_data = context.Input<Tensor>("PaddingData");
     }

     int up_pad = std::max(0, -context_start);
@@ -63,7 +63,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     // use col_shape in the im2col calculation
     framework::DDim col_shape = {in->dims()[0],
                                  sequence_width * context_length};
-    LoDTensor col;
+    Tensor col;
     col.mutable_data<T>(col_shape, context.GetPlace());
     // Because if padding_trainable is false, padding data should be zeros.
     auto temp = framework::EigenVector<T>::Flatten(col);
@@ -73,7 +73,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     paddle::operators::math::SequenceProjectFunctor<Place, T>
         seq_project_functor;
     LoDTensor* input = const_cast<LoDTensor*>(in);
-    LoDTensor* pad_data = const_cast<LoDTensor*>(padding_data);
+    Tensor* pad_data = const_cast<Tensor*>(padding_data);

     seq_project_functor(context.device_context(), *input, *pad_data, col,
                         padding_trainable, context_start, context_length,
@@ -91,12 +91,11 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
     auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
-    auto* filter_g =
-        context.Output<LoDTensor>(framework::GradVarName("Filter"));
+    auto* filter_g = context.Output<Tensor>(framework::GradVarName("Filter"));
     auto* padding_data_g =
-        context.Output<LoDTensor>(framework::GradVarName("PaddingData"));
+        context.Output<Tensor>(framework::GradVarName("PaddingData"));
     auto* in = context.Input<LoDTensor>("X");
-    auto* filter = context.Input<LoDTensor>("Filter");
+    auto* filter = context.Input<Tensor>("Filter");

     int context_start = context.Attr<int>("context_start");
     int context_length = context.Attr<int>("context_length");
@@ -115,7 +114,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     // use col_shape in the im2col calculation
     framework::DDim col_shape = {in->dims()[0],
                                  sequence_width * context_length};
-    LoDTensor col;
+    Tensor col;

     if (in_g || filter_g || (padding_trainable && padding_data_g)) {
       col.mutable_data<T>(col_shape, context.GetPlace());
@@ -161,17 +160,17 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
       functor(context.device_context(), filter_g, 0);

       Tensor filter_grad_ = *filter_g;
-      Tensor out_grad_ = *out_g;
+      LoDTensor out_grad_ = *out_g;

-      const LoDTensor* padding_data = nullptr;
+      const Tensor* padding_data = nullptr;
       if (padding_trainable) {
-        padding_data = context.Input<LoDTensor>("PaddingData");
+        padding_data = context.Input<Tensor>("PaddingData");
       }

       sequence_width = static_cast<int>(in->dims()[1]);

       LoDTensor* input = const_cast<LoDTensor*>(in);
-      LoDTensor* pad_data = const_cast<LoDTensor*>(padding_data);
+      Tensor* pad_data = const_cast<Tensor*>(padding_data);

       seq_project_functor(context.device_context(), *input, *pad_data, col,
                           padding_trainable, context_start, context_length,
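
Both kernels now handle the dispensable input with the same guard: PaddingData is fetched only when padding_trainable is true, and the pointer stays null otherwise (the code comments note that padding contributes zeros when it is not trainable). The shared pattern, extracted from the kernels above:

    // Optional input: read PaddingData only when the attribute requires it;
    // otherwise leave it null and rely on the zeroed col buffer.
    const Tensor* padding_data = nullptr;
    if (padding_trainable) {
      padding_data = context.Input<Tensor>("PaddingData");
    }
    Tensor* pad_data = const_cast<Tensor*>(padding_data);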

python/paddle/v2/framework/tests/test_seq_conv.py

Lines changed: 34 additions & 94 deletions
@@ -20,24 +20,29 @@ def setUp(self):
         # one level, batch size
         x = np.random.uniform(0.1, 1, [self.input_size[0],
                                        self.input_size[1]]).astype('float32')
-
-        self.begin_pad = np.max([0, -self.context_start])
-        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
-        self.total_pad = self.begin_pad + self.end_pad
-        if self.total_pad == 0:
-            self.total_pad = 1
-
-        # PaddingData mast be not empty.
-        # Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
-        padding_data = np.random.uniform(
-            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
         w = np.random.uniform(
             0.1, 1, [self.context_length, self.input_size[1]]).astype('float32')
+
+        begin_pad = np.max([0, -self.context_start])
+        end_pad = np.max([0, self.context_start + self.context_length - 1])
+        total_pad = begin_pad + end_pad
+        padding_data = np.random.uniform(
+            0.1, 1, [total_pad, self.input_size[1]]).astype('float32')
+        self.pad_data = padding_data
         self.inputs = {
             'X': (x, self.lod),
-            'PaddingData': (padding_data, [[0, self.total_pad]]),
-            'Filter': (w, [[0, self.context_length]])
+            'Filter': w,
         }
+        self.inputs_val = ['X', 'Filter']
+        self.inputs_val_no_x = ['Filter']
+        self.inputs_val_no_f = ['X']
+
+        if total_pad != 0:
+            self.inputs['PaddingData'] = padding_data
+            self.inputs_val = ['X', 'PaddingData', 'Filter']
+            self.inputs_val_no_x = ['PaddingData', 'Filter']
+            self.inputs_val_no_f = ['PaddingData', 'X']
+
         self.attrs = {
             'context_start': self.context_start,
             'context_length': self.context_length,
@@ -51,7 +56,7 @@ def setUp(self):
     def compute(self):
         x, lod = self.inputs['X']
         filter = self.inputs['Filter']
-        pading_data, _ = self.inputs['PaddingData']
+        pading_data = self.pad_data
         out = np.zeros((self.input_size[0], self.context_length *
                         self.input_size[1])).astype('float32')
         lod = lod[0]
@@ -90,12 +95,12 @@ def compute(self):
                 out[out_begin:out_end, j * self.input_size[1]:(j + 1) *
                     self.input_size[1]] += in_sub

-        filter_dim = filter[0].shape
+        filter_dim = filter.shape
         output_dim = self.outputs['Out'].shape
-        filter[0].shape = filter_dim[0] * filter_dim[1]
+        filter.shape = filter_dim[0] * filter_dim[1]
         self.outputs['Out'].shape = (output_dim[0], )
-        np.dot(out, filter[0], out=self.outputs['Out'])
-        filter[0].shape = filter_dim
+        np.dot(out, filter, out=self.outputs['Out'])
+        filter.shape = filter_dim
         self.outputs['Out'].shape = output_dim

     def test_check_output(self):
@@ -104,16 +109,14 @@ def test_check_output(self):
     def test_check_grad(self):
         if self.padding_trainable:
             self.check_grad(
-                set(['X', 'PaddingData', 'Filter']),
-                'Out',
-                max_relative_error=0.05)
+                set(self.inputs_val), 'Out', max_relative_error=0.05)

     def test_check_grad_input(self):
         self.check_grad(
             ['X'],
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['PaddingData', 'Filter']))
+            no_grad_set=set(self.inputs_val_no_x))

     def test_check_grad_padding_data(self):
         if self.padding_trainable:
@@ -128,27 +131,28 @@ def test_check_grad_Filter(self):
             ['Filter'],
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['X', 'PaddingData']))
+            no_grad_set=set(self.inputs_val_no_f))

     def test_check_grad_input_filter(self):
-        self.check_grad(
-            ['X', 'Filter'],
-            'Out',
-            max_relative_error=0.05,
-            no_grad_set=set(['PaddingData']))
+        if self.padding_trainable:
+            self.check_grad(
+                ['X', 'Filter'],
+                'Out',
+                max_relative_error=0.05,
+                no_grad_set=set(['PaddingData']))

     def test_check_grad_padding_input(self):
         if self.padding_trainable:
             self.check_grad(
-                ['X', 'PaddingData'],
+                self.inputs_val_no_f,
                 'Out',
                 max_relative_error=0.05,
                 no_grad_set=set(['Filter']))

     def test_check_grad_padding_filter(self):
         if self.padding_trainable:
             self.check_grad(
-                ['PaddingData', 'Filter'],
+                self.inputs_val_no_x,
                 'Out',
                 max_relative_error=0.05,
                 no_grad_set=set(['X']))
@@ -191,69 +195,5 @@ def init_test_case(self):
                     [self.input_size[0]]]


-'''
-class TestSeqProjectCases(TestSeqProject):
-    def setUp(self):
-        self.init_test_case()
-        self.op_type = 'sequence_project'
-
-        num = 0
-        for context_start in [-5, -3, -1, 0, 3]:
-            for context_length in [1, 2, 5, 7]:
-                for batch_size in [1, 2, 5, 7]:
-                    for padding_trainable in [False, True]:
-
-                        if context_length == 1 and context_start == 0 and padding_trainable:
-                            continue
-
-                        self.context_start = context_start
-                        self.context_length = context_length
-                        self.padding_trainable = padding_trainable
-                        self.input_size = [batch_size, 23]
-                        x = np.random.uniform(0.1, 1,
-                                              self.input_size).astype('float32')
-                        self.lod = [[0, self.input_size[0]]]
-                        if self.input_size[0] > 2:
-                            idx = range(self.input_size[0])
-                            del idx[0]
-                            self.lod = [
-                                [0] + np.sort(random.sample(idx, 2)).tolist() +
-                                [self.input_size[0]]
-                            ]
-
-                        self.begin_pad = np.max([0, -self.context_start])
-                        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
-                        self.total_pad = self.begin_pad + self.end_pad
-                        if self.total_pad == 0:
-                            self.total_pad = 1
-                        # PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
-                        padding_data = np.random.uniform(
-                            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
-
-                        self.inputs = {
-                            'X': (x, self.lod),
-                            'PaddingData': (padding_data, [[0, self.total_pad]])
-                        }
-                        self.attrs = {
-                            'context_start': self.context_start,
-                            'context_length': self.context_length,
-                            'padding_trainable': self.padding_trainable,
-                            'context_stride': self.context_stride
-                        }
-                        out = np.zeros((self.input_size[0], self.input_size[1] *
-                                        self.context_length)).astype('float32')
-                        self.outputs = {'Out': out}
-                        print num
-                        print self.attrs
-                        print batch_size
-                        print padding_trainable
-                        print "$$$$$$$$$$$$$"
-
-                        self.compute()
-                        self.test_check_output()
-
-                        num += 1
-'''
-
 if __name__ == '__main__':
     unittest.main()
