#!/bin/bash

# This script is based on run_tdnn_7h.sh in the swbd chain recipe.

set -e

nj=10
# configs for 'chain'
affix=
stage=0
train_stage=-10
get_egs_stage=-10
dir=exp/chain_nnet3/tdnn_1b
decode_iter=

# training options
num_epochs=6
initial_effective_lrate=0.001
final_effective_lrate=0.0001
max_param_change=2.0
final_layer_normalize_target=0.5
num_jobs_initial=2
num_jobs_final=12
minibatch_size=128
frames_per_eg=150,110,90
remove_egs=true
common_egs_dir=
xent_regularize=0.1
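# xent_regularize is the weight on the auxiliary cross-entropy output that
# regularizes the main chain (LF-MMI) objective; see the xent branch in the
# xconfig below.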

# End configuration section.
echo "$0 $@"  # Print the command line for logging

. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
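
# Any variable in the configuration section above can be overridden on the
# command line, e.g. (illustrative invocation; adjust the path to wherever
# this script lives):
#   local/chain/run_tdnn.sh --stage 4 --nj 20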

if ! cuda-compiled; then
  echo "This script is intended to be used with GPUs"
  echo "but you have not compiled Kaldi with CUDA."
  echo "If you want to use GPUs (and have them), go to src/,"
  echo "and configure and make on a machine where 'nvcc' is installed."
  exit 1
fi

dir=${dir}${affix:+_$affix}
train_set=train
ali_dir=exp/tri5a_ali
lat_dir=exp/tri5a_lats
treedir=exp/chain_nnet3/tri5_tree
lang=data/lang_chain_nnet3
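
# $ali_dir and $lat_dir are assumed to already exist, produced by an earlier
# GMM stage (tri5a here); the lattices are typically generated with
# steps/align_fmllr_lats.sh before this script is run.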

if [[ $stage -le 0 ]]; then
  for datadir in train dev test; do
    dst_dir=data/fbank_pitch/$datadir
    if [[ ! -f $dst_dir/feats.scp ]]; then
      utils/copy_data_dir.sh data/$datadir $dst_dir
      echo "making fbank-pitch features for LF-MMI training"
      steps/make_fbank_pitch.sh --cmd "$train_cmd" --nj $nj $dst_dir || exit 1
      steps/compute_cmvn_stats.sh $dst_dir || exit 1
      utils/fix_data_dir.sh $dst_dir
    else
      echo "$dst_dir/feats.scp already exists."
      echo "The Kaldi Pybind LF-MMI recipe (local/run_chain.sh) may have generated it."
      echo "Skipping $dst_dir."
    fi
  done
fi

if [[ $stage -le 1 ]]; then
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file. [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
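  # This reduced topology lets a phone be traversed in as little as one
  # frame, which is what makes the 3-fold frame subsampling used below
  # possible.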
  rm -rf $lang
  cp -r data/lang $lang
  silphonelist=$(cat $lang/phones/silence.csl) || exit 1
  nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1
  # Use our special topology... note that we may have to tune this
  # topology later on.
  steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
fi

if [[ $stage -le 2 ]]; then
  # Build a tree using our new topology. This is the critically different
  # step compared with other recipes.
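  # --frame-subsampling-factor 3 builds the tree at one third of the input
  # frame rate (the rate at which the chain model emits output); 5000 is
  # the target number of tree leaves.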
  steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
    --context-opts "--context-width=2 --central-position=1" \
    --cmd "$train_cmd" 5000 data/train $lang $ali_dir $treedir
fi

if [[ $stage -le 3 ]]; then
  echo "creating neural net configs using the xconfig parser"

  num_targets=$(tree-info $treedir/tree | grep num-pdfs | awk '{print $2}')
  learning_rate_factor=$(echo "print(0.5/$xent_regularize)" | python3)
  feat_dim=$(feat-to-dim scp:data/fbank_pitch/train/feats.scp -)

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=$feat_dim name=input

  # please note that it is important to have the input layer with the name=input
  # as the layer immediately preceding the fixed-affine-layer to enable
  # the use of short notation for the descriptor
  fixed-affine-layer name=lda input=Append(-1,0,1) affine-transform-file=$dir/configs/lda.mat

  # the first splicing is moved before the lda layer, so no splicing here
  relu-batchnorm-layer name=tdnn1 dim=625
  relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=625
  relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=625
  relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=625
  relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=625
  relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=625

  ## adding the layers for chain branch
  relu-batchnorm-layer name=prefinal-chain input=tdnn6 dim=625 target-rms=0.5
  output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5

  # adding the layers for xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model. We use
  # 0.5 / args.xent_regularize as the learning-rate factor; that factor
  # means the xent final layer learns at a rate independent of the
  # regularization constant, and the 0.5 was tuned so as to make the
  # relative progress similar in the xent and regular final layers.
  relu-batchnorm-layer name=prefinal-xent input=tdnn6 dim=625 target-rms=0.5
  output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5

EOF
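  # xconfig_to_configs.py expands the xconfig above into the actual nnet3
  # config files (e.g. init.config and final.config) under $dir/configs.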
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi

if [[ $stage -le 4 ]]; then
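  # Notes on a few options below: --egs.chunk-width $frames_per_eg
  # (150,110,90) generates training examples with several chunk widths,
  # and --use-gpu "wait" makes each training job wait for a free GPU
  # instead of failing when none is immediately available.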
  steps/nnet3/chain/train.py --stage $train_stage \
    --cmd "$cuda_cmd" \
    --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient 0.1 \
    --chain.l2-regularize 0.00005 \
    --chain.apply-deriv-weights false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --egs.dir "$common_egs_dir" \
    --egs.stage $get_egs_stage \
    --egs.opts "--frames-overlap-per-eg 0" \
    --egs.chunk-width $frames_per_eg \
    --trainer.num-chunk-per-minibatch $minibatch_size \
    --trainer.frames-per-iter 1500000 \
    --trainer.num-epochs $num_epochs \
    --trainer.optimization.num-jobs-initial $num_jobs_initial \
    --trainer.optimization.num-jobs-final $num_jobs_final \
    --trainer.optimization.initial-effective-lrate $initial_effective_lrate \
    --trainer.optimization.final-effective-lrate $final_effective_lrate \
    --trainer.max-param-change $max_param_change \
    --cleanup.remove-egs $remove_egs \
    --cleanup.preserve-model-interval 1 \
    --feat-dir data/fbank_pitch/train \
    --tree-dir $treedir \
    --use-gpu "wait" \
    --lat-dir $lat_dir \
    --dir $dir || exit 1
fi

if [[ $stage -le 5 ]]; then
  # Note: it might appear that this data/lang_test directory is mismatched,
  # and it is as far as the 'topo' is concerned, but this script doesn't
  # read the 'topo' from the lang directory, so this is OK.
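  # --self-loop-scale 1.0 matches the scale used when training the chain
  # model; decoding graphs for chain models must be built this way.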
  utils/mkgraph.sh --self-loop-scale 1.0 data/lang_test $dir $dir/graph
fi

graph_dir=$dir/graph
if [[ $stage -le 6 ]]; then
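  # Chain models are decoded with --acwt 1.0; --post-decode-acwt 10.0 scales
  # the acoustic scores in the output lattices so that the usual LM-weight
  # range in the scoring scripts still applies.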
  for test_set in dev test; do
    steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
      --nj $nj --cmd "$decode_cmd" \
      $graph_dir data/fbank_pitch/${test_set} $dir/decode_${test_set} || exit 1
  done
fi
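
# steps/nnet3/decode.sh invokes the scoring script (local/score.sh) by
# default, so WERs should appear under $dir/decode_{dev,test}/scoring*
# once decoding finishes.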