#!/bin/bash

# Copyright 2012 Johns Hopkins University (author: Daniel Povey)  Tony Robinson
#           2017 Hainan Xu
#           2017 Ke Li
#           2017 Yiming Wang

# This script is similar to rnnlm_lstm_tdnn_b.sh, except that it adds backstitch training.
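#
# Backstitch (Wang et al., 2017) modifies each SGD update: the model first
# takes a step of scale -alpha in the update direction, then a step of scale
# 1+alpha, which counteracts finite-sample bias and tends to improve
# generalization. Roughly, per minibatch (loss-minimization convention):
#   theta' = theta + alpha * lr * grad(theta)          # backstitch step
#   theta  = theta' - (1 + alpha) * lr * grad(theta')  # scaled normal step
# This sketch is only for orientation; the exact update is implemented inside
# rnnlm/train_rnnlm.sh and the underlying nnet3 training binaries.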

# rnnlm/train_rnnlm.sh: best iteration (out of 18) was 17, linking it to final iteration.
# rnnlm/train_rnnlm.sh: train/dev perplexity was 45.6 / 68.7.
# Train objf: -651.50 -4.44 -4.26 -4.15 -4.08 -4.03 -4.00 -3.97 -3.94 -3.92 -3.90 -3.89 -3.88 -3.86 -3.85 -3.84 -3.83 -3.82
# Dev objf:   -10.76 -4.68 -4.47 -4.38 -4.33 -4.29 -4.28 -4.27 -4.26 -4.26 -4.25 -4.24 -4.24 -4.24 -4.23 -4.23 -4.23 -4.23

# Begin configuration section.
cmd=run.pl
affix=1a
embedding_dim=200
embedding_l2=0.005 # l2 regularization on the embedding layer
comp_l2=0.005      # component-level l2 regularization
output_l2=0.005    # l2 regularization on the output layer
epochs=90
mic=sdm1
stage=-10
train_stage=0
alpha=0.8
back_interval=1

. utils/parse_options.sh
train=data/$mic/train/text
dev=data/$mic/dev/text
wordlist=data/lang/words.txt
text_dir=data/rnnlm/text
dir=exp/rnnlm_lstm_tdnn_bs_$affix
mkdir -p $dir/config
set -e

for f in $train $dev $wordlist; do
  [ ! -f $f ] && \
    echo "$0: expected file $f to exist; make sure you have run the data preparation in run.sh (including utils/prepare_lang.sh)" && exit 1
done

if [ $stage -le 0 ]; then
  mkdir -p $text_dir
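  # Each line of a Kaldi 'text' file is "<utterance-id> <word1> <word2> ...";
  # strip the utterance ids to keep only the word sequences for LM training.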
  cat $train | cut -d ' ' -f2- > $text_dir/ami.txt
  cat $dev | cut -d ' ' -f2- > $text_dir/dev.txt
fi

if [ $stage -le 1 ]; then
  cp $wordlist $dir/config/
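  # Word ids in words.txt are zero-based, so the current line count is the
  # next unused id; append the <brk> (sentence-break) symbol with that id.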
  n=$(wc -l <$dir/config/words.txt)
  echo "<brk> $n" >> $dir/config/words.txt

  # Words that occur in the training or dev data but are not present in
  # words.txt will be mapped to <unk> during training.
  echo "<unk>" >$dir/config/oov.txt

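  # data_weights.txt has one line per training corpus, in the format
  # "<corpus-name> <repetitions> <weight>"; the ami data is used once per
  # epoch with weight 1.0.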
  cat > $dir/config/data_weights.txt <<EOF
ami 1 1.0
EOF

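  # Estimate unigram probabilities over the vocabulary; these are used for
  # feature selection below and, via the sampling LM prepared in stage 2,
  # for importance sampling during training.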
  rnnlm/get_unigram_probs.py --vocab-file=$dir/config/words.txt \
    --unk-word="<unk>" \
    --data-weights-file=$dir/config/data_weights.txt \
    $text_dir | awk 'NF==2' >$dir/config/unigram_probs.txt

  # Choose sparse word features; the listed special words are each given
  # their own feature, regardless of frequency.
  rnnlm/choose_features.py --unigram-probs=$dir/config/unigram_probs.txt \
    --use-constant-feature=true \
    --top-word-features 10000 \
    --min-frequency 1.0e-03 \
    --special-words='<s>,</s>,<brk>,<unk>,[noise],[laughter]' \
    $dir/config/words.txt > $dir/config/features.txt

  lstm_opts="l2-regularize=$comp_l2"
  tdnn_opts="l2-regularize=$comp_l2"
  output_opts="l2-regularize=$output_l2"

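  # Network: embedding -> lstm1 -> tdnn -> lstm2 -> output. In the tdnn layer,
  # Append(0, IfDefined(-1)) splices the current frame of lstm1's output with
  # the previous frame, where one exists. include-log-softmax=false is used
  # because the RNNLM training code computes the objective (with sampling)
  # itself.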
  cat >$dir/config/xconfig <<EOF
input dim=$embedding_dim name=input
lstm-layer name=lstm1 cell-dim=$embedding_dim $lstm_opts
relu-renorm-layer name=tdnn dim=$embedding_dim $tdnn_opts input=Append(0, IfDefined(-1))
lstm-layer name=lstm2 cell-dim=$embedding_dim $lstm_opts
output-layer name=output $output_opts include-log-softmax=false dim=$embedding_dim
EOF
  rnnlm/validate_config_dir.sh $text_dir $dir/config
fi

if [ $stage -le 2 ]; then
  # The --unigram-factor option is set larger than the default (100)
  # in order to reduce the size of the sampling LM, because rnnlm-get-egs
  # was taking up too much CPU (as much as 10 cores).
  rnnlm/prepare_rnnlm_dir.sh --unigram-factor 200 \
    $text_dir $dir/config $dir
fi

if [ $stage -le 3 ]; then
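  # alpha is the backstitch scale and back_interval is the number of
  # minibatches between backstitch updates (1 = every minibatch); the same
  # settings are applied to both the RNNLM core and the embedding matrix.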
  backstitch_opt="--rnnlm.backstitch-scale $alpha \
    --rnnlm.backstitch-interval $back_interval \
    --embedding.backstitch-scale $alpha \
    --embedding.backstitch-interval $back_interval"
  rnnlm/train_rnnlm.sh --embedding-l2 $embedding_l2 \
    --stage $train_stage \
    --num-epochs $epochs --cmd "$cmd" $backstitch_opt $dir
fi

exit 0