@@ -147,28 +147,6 @@ def decoder_decode(context, is_sparse):
     return translation_ids, translation_scores


-def set_init_lod(data, lod, place):
-    res = fluid.LoDTensor()
-    res.set(data, place)
-    res.set_lod(lod)
-    return res
-
-
-def to_lodtensor(data, place):
-    seq_lens = [len(seq) for seq in data]
-    cur_len = 0
-    lod = [cur_len]
-    for l in seq_lens:
-        cur_len += l
-        lod.append(cur_len)
-    flattened_data = np.concatenate(data, axis=0).astype("int64")
-    flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = fluid.LoDTensor()
-    res.set(flattened_data, place)
-    res.set_lod([lod])
-    return res
-
-
 def train_main(use_cuda, is_sparse, is_local=True):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
@@ -192,23 +170,25 @@ def train_main(use_cuda, is_sparse, is_local=True):
             paddle.dataset.wmt14.train(dict_size), buf_size=1000),
         batch_size=batch_size)

+    feed_order = [
+        'src_word_id', 'target_language_word', 'target_language_next_word'
+    ]
+
     exe = Executor(place)

     def train_loop(main_program):
         exe.run(framework.default_startup_program())

+        feed_list = [
+            main_program.global_block().var(var_name) for var_name in feed_order
+        ]
+        feeder = fluid.DataFeeder(feed_list, place)
+
         batch_id = 0
         for pass_id in xrange(1):
             for data in train_data():
-                word_data = to_lodtensor(map(lambda x: x[0], data), place)
-                trg_word = to_lodtensor(map(lambda x: x[1], data), place)
-                trg_word_next = to_lodtensor(map(lambda x: x[2], data), place)
                 outs = exe.run(main_program,
-                               feed={
-                                   'src_word_id': word_data,
-                                   'target_language_word': trg_word,
-                                   'target_language_next_word': trg_word_next
-                               },
+                               feed=feeder.feed(data),
                                fetch_list=[avg_cost])
                 avg_cost_val = np.array(outs[0])
                 print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
@@ -258,26 +238,32 @@ def decode_main(use_cuda, is_sparse):
         [1. for _ in range(batch_size)], dtype='float32')
     init_ids_data = init_ids_data.reshape((batch_size, 1))
     init_scores_data = init_scores_data.reshape((batch_size, 1))
-    init_lod = [i for i in range(batch_size)] + [batch_size]
+    init_lod = [1] * batch_size
     init_lod = [init_lod, init_lod]

+    init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place)
+    init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place)
+
     train_data = paddle.batch(
         paddle.reader.shuffle(
             paddle.dataset.wmt14.train(dict_size), buf_size=1000),
         batch_size=batch_size)
-    for _, data in enumerate(train_data()):
-        init_ids = set_init_lod(init_ids_data, init_lod, place)
-        init_scores = set_init_lod(init_scores_data, init_lod, place)

-        src_word_data = to_lodtensor(map(lambda x: x[0], data), place)
+    feed_order = ['src_word_id']
+    feed_list = [
+        framework.default_main_program().global_block().var(var_name)
+        for var_name in feed_order
+    ]
+    feeder = fluid.DataFeeder(feed_list, place)
+
+    for data in train_data():
+        feed_dict = feeder.feed(map(lambda x: [x[0]], data))
+        feed_dict['init_ids'] = init_ids
+        feed_dict['init_scores'] = init_scores

         result_ids, result_scores = exe.run(
             framework.default_main_program(),
-            feed={
-                'src_word_id': src_word_data,
-                'init_ids': init_ids,
-                'init_scores': init_scores
-            },
+            feed=feed_dict,
             fetch_list=[translation_ids, translation_scores],
             return_numpy=False)
         print result_ids.lod()
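
Background on the pattern this change adopts: the removed `set_init_lod`/`to_lodtensor` helpers built offset-based LoD lists by hand and called `set_lod()` on a raw `fluid.LoDTensor`, whereas the replacement uses `fluid.create_lod_tensor`, which takes length-based LoD (so `[1] * batch_size` reads as "each of the batch_size sequences has length 1"), and `fluid.DataFeeder`, which converts a reader batch into LoD tensors keyed by the variables listed in `feed_order`. Below is a minimal sketch of both calls, assuming an old-style Fluid environment; the `src_word_id` data layer is only an illustrative stand-in, not the model defined in this file.

```python
import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()

# DataFeeder: one toy sequence input standing in for the real feed_order.
src = fluid.layers.data(
    name='src_word_id', shape=[1], dtype='int64', lod_level=1)
feeder = fluid.DataFeeder(feed_list=[src], place=place)

# Each reader record is a tuple whose columns line up with feed_list;
# feeder.feed() flattens the sequences and attaches the LoD for us.
batch = [([1, 2, 3], ), ([4, 5], )]
feed_dict = feeder.feed(batch)          # {'src_word_id': <LoDTensor>}

# create_lod_tensor: length-based LoD, two levels as in decode_main.
batch_size = 2
init_ids_data = np.zeros((batch_size, 1), dtype='int64')
init_lod = [[1] * batch_size, [1] * batch_size]
init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place)
print(init_ids.lod())                   # inspect the resulting LoD
```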