@@ -20,24 +20,29 @@ def setUp(self):
         # one level, batch size
         x = np.random.uniform(0.1, 1, [self.input_size[0],
                                        self.input_size[1]]).astype('float32')
-
-        self.begin_pad = np.max([0, -self.context_start])
-        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
-        self.total_pad = self.begin_pad + self.end_pad
-        if self.total_pad == 0:
-            self.total_pad = 1
-
-        # PaddingData mast be not empty.
-        # Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
-        padding_data = np.random.uniform(
-            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
         w = np.random.uniform(
             0.1, 1, [self.context_length, self.input_size[1]]).astype('float32')
+
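+        # How many padding rows the context window needs on each side of a
+        # sequence: front rows for a negative start, back rows for overhang.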
+        begin_pad = np.max([0, -self.context_start])
+        end_pad = np.max([0, self.context_start + self.context_length - 1])
+        total_pad = begin_pad + end_pad
+        padding_data = np.random.uniform(
+            0.1, 1, [total_pad, self.input_size[1]]).astype('float32')
+        self.pad_data = padding_data
         self.inputs = {
             'X': (x, self.lod),
-            'PaddingData': (padding_data, [[0, self.total_pad]]),
-            'Filter': (w, [[0, self.context_length]])
+            'Filter': w,
         }
+        self.inputs_val = ['X', 'Filter']
+        self.inputs_val_no_x = ['Filter']
+        self.inputs_val_no_f = ['X']
+
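+        # 'PaddingData' is only wired up as an input (and into the gradient
+        # check lists) when the window actually crosses a sequence boundary.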
+        if total_pad != 0:
+            self.inputs['PaddingData'] = padding_data
+            self.inputs_val = ['X', 'PaddingData', 'Filter']
+            self.inputs_val_no_x = ['PaddingData', 'Filter']
+            self.inputs_val_no_f = ['PaddingData', 'X']
+
         self.attrs = {
             'context_start': self.context_start,
             'context_length': self.context_length,
@@ -51,7 +56,7 @@ def setUp(self):
     def compute(self):
         x, lod = self.inputs['X']
         filter = self.inputs['Filter']
-        pading_data, _ = self.inputs['PaddingData']
+        pading_data = self.pad_data
         out = np.zeros((self.input_size[0], self.context_length *
                         self.input_size[1])).astype('float32')
         lod = lod[0]
@@ -90,12 +95,12 @@ def compute(self):
                 out[out_begin:out_end, j * self.input_size[1]:(j + 1) *
                     self.input_size[1]] += in_sub

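+        # Flatten the filter to a vector in place so np.dot collapses each
+        # row of `out` to a scalar; the original shapes are restored below.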
-        filter_dim = filter[0].shape
+        filter_dim = filter.shape
         output_dim = self.outputs['Out'].shape
-        filter[0].shape = filter_dim[0] * filter_dim[1]
+        filter.shape = filter_dim[0] * filter_dim[1]
         self.outputs['Out'].shape = (output_dim[0], )
-        np.dot(out, filter[0], out=self.outputs['Out'])
-        filter[0].shape = filter_dim
+        np.dot(out, filter, out=self.outputs['Out'])
+        filter.shape = filter_dim
         self.outputs['Out'].shape = output_dim

     def test_check_output(self):
@@ -104,16 +109,14 @@ def test_check_output(self):
     def test_check_grad(self):
         if self.padding_trainable:
             self.check_grad(
-                set(['X', 'PaddingData', 'Filter']),
-                'Out',
-                max_relative_error=0.05)
+                set(self.inputs_val), 'Out', max_relative_error=0.05)

     def test_check_grad_input(self):
         self.check_grad(
             ['X'],
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['PaddingData', 'Filter']))
+            no_grad_set=set(self.inputs_val_no_x))

     def test_check_grad_padding_data(self):
         if self.padding_trainable:
@@ -128,27 +131,28 @@ def test_check_grad_Filter(self):
             ['Filter'],
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['X', 'PaddingData']))
+            no_grad_set=set(self.inputs_val_no_f))

     def test_check_grad_input_filter(self):
-        self.check_grad(
-            ['X', 'Filter'],
-            'Out',
-            max_relative_error=0.05,
-            no_grad_set=set(['PaddingData']))
+        if self.padding_trainable:
+            self.check_grad(
+                ['X', 'Filter'],
+                'Out',
+                max_relative_error=0.05,
+                no_grad_set=set(['PaddingData']))

     def test_check_grad_padding_input(self):
         if self.padding_trainable:
             self.check_grad(
-                ['X', 'PaddingData'],
+                self.inputs_val_no_f,
                 'Out',
                 max_relative_error=0.05,
                 no_grad_set=set(['Filter']))

     def test_check_grad_padding_filter(self):
         if self.padding_trainable:
             self.check_grad(
-                ['PaddingData', 'Filter'],
+                self.inputs_val_no_x,
                 'Out',
                 max_relative_error=0.05,
                 no_grad_set=set(['X']))
@@ -191,69 +195,5 @@ def init_test_case(self):
                     [self.input_size[0]]]


-'''
-class TestSeqProjectCases(TestSeqProject):
-    def setUp(self):
-        self.init_test_case()
-        self.op_type = 'sequence_project'
-
-        num = 0
-        for context_start in [-5, -3, -1, 0, 3]:
-            for context_length in [1, 2, 5, 7]:
-                for batch_size in [1, 2, 5, 7]:
-                    for padding_trainable in [False, True]:
-
-                        if context_length == 1 and context_start == 0 and padding_trainable:
-                            continue
-
-                        self.context_start = context_start
-                        self.context_length = context_length
-                        self.padding_trainable = padding_trainable
-                        self.input_size = [batch_size, 23]
-                        x = np.random.uniform(0.1, 1,
-                                              self.input_size).astype('float32')
-                        self.lod = [[0, self.input_size[0]]]
-                        if self.input_size[0] > 2:
-                            idx = range(self.input_size[0])
-                            del idx[0]
-                            self.lod = [
-                                [0] + np.sort(random.sample(idx, 2)).tolist() +
-                                [self.input_size[0]]
-                            ]
-
-                        self.begin_pad = np.max([0, -self.context_start])
-                        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
-                        self.total_pad = self.begin_pad + self.end_pad
-                        if self.total_pad == 0:
-                            self.total_pad = 1
-                        # PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
-                        padding_data = np.random.uniform(
-                            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
-
-                        self.inputs = {
-                            'X': (x, self.lod),
-                            'PaddingData': (padding_data, [[0, self.total_pad]])
-                        }
-                        self.attrs = {
-                            'context_start': self.context_start,
-                            'context_length': self.context_length,
-                            'padding_trainable': self.padding_trainable,
-                            'context_stride': self.context_stride
-                        }
-                        out = np.zeros((self.input_size[0], self.input_size[1] *
-                                        self.context_length)).astype('float32')
-                        self.outputs = {'Out': out}
-                        print num
-                        print self.attrs
-                        print batch_size
-                        print padding_trainable
-                        print "$$$$$$$$$$$$$"
-
-                        self.compute()
-                        self.test_check_output()
-
-                        num += 1
-'''
-
 if __name__ == '__main__':
     unittest.main()
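
For readers skimming the diff, here is a minimal standalone sketch of the padding-size rule the rewritten setUp relies on. This is an illustration only, not part of the commit; the names mirror the test's attributes:

import numpy as np

def padding_rows(context_start, context_length):
    """Total rows of out-of-bounds context a sliding window can touch."""
    # Positions the window reaches before index 0 (only if it starts early).
    begin_pad = np.max([0, -context_start])
    # Positions the window reaches past the last index.
    end_pad = np.max([0, context_start + context_length - 1])
    return begin_pad + end_pad

# A length-3 window starting 2 steps back needs 2 front rows and no back
# rows; a window starting at 0 overhangs only at the end.
assert padding_rows(-2, 3) == 2
assert padding_rows(0, 3) == 2
assert padding_rows(0, 1) == 0  # no padding rows at all

When this total is zero, the test now omits 'PaddingData' from self.inputs entirely instead of feeding a dummy one-row tensor, and the inputs_val / inputs_val_no_x / inputs_val_no_f lists keep the gradient checks consistent with whichever inputs are actually present.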