#! /bin/bash

- # This factorized TDNN (TDNN-F) script is adapted from s5b chime5 recipe
- # backported to s5, because the original script was failing to train in recent
- # versions of kaldi due to some instabilities and using superceded model
- # It uses resnet-style skip connections.
- # For details, refer to the paper:
- # "Semi-Orthogonal Low-Rank Matrix Factorization for Deep Neural Networks", Daniel Povey, Gaofeng Cheng, Yiming Wang, Ke Li, Hainan Xu, Mahsa Yarmohamadi, Sanjeev Khudanpur, Interspeech 2018
+ # Set -e here so that we catch if any executable fails immediately
+ set -euo pipefail

- # %WER 73.03 [ 43001 / 58881, 4433 ins, 22250 del, 16318 sub ] exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_beamformit_ref/wer_10_0.0
- # %WER 38.88 [ 22895 / 58881, 1882 ins, 8235 del, 12778 sub ] exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_worn/wer_10_0.0
-
- # steps/info/chain_dir_info.pl exp/chain_train_worn_u100k_cleaned/tdnn1a_sp
- # exp/chain_train_worn_u100k_cleaned/tdnn1a_sp: num-iters=96 nj=3..16 num-params=17.1M dim=40+100->2928 combine=-0.125->-0.125 (over 2) xent:train/valid[63,95,final]=(-2.12,-1.81,-1.82/-2.20,-1.96,-1.96) logprob:train/valid[63,95,final]=(-0.190,-0.126,-0.125/-0.218,-0.183,-0.183)
-
- set -e
-
- # configs for 'chain'
+ # First the options that are passed through to run_ivector_common.sh
+ # (some of which are also used in this script directly).
stage=0
nj=96
train_set=train_worn_u100k
@@ -32,13 +21,11 @@ train_stage=-10
get_egs_stage=-10
decode_iter=

- num_epochs=4
- common_egs_dir=
# training options
# training chunk-options
chunk_width=140,100,160
+ common_egs_dir=
xent_regularize=0.1
-

# training options
srand=0
@@ -67,13 +54,14 @@
# run those things.
local/nnet3/run_ivector_common.sh --stage $stage \
--train-set $train_set \
- --test-sets "$test_sets" \
+ --test-sets "$test_sets" \
--gmm $gmm \
--nnet3-affix "$nnet3_affix" || exit 1;

# Problem: We have removed the "train_" prefix of our training set in
# the alignment directory names! Bad!
gmm_dir=exp/$gmm
+ ali_dir=exp/${gmm}_ali_${train_set}_sp
tree_dir=exp/chain${nnet3_affix}/tree_sp${tree_affix:+_$tree_affix}
lang=data/lang_chain
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
@@ -83,7 +71,7 @@ lores_train_data_dir=data/${train_set}_sp
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires

for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
- $lores_train_data_dir/feats.scp; do
+ $lores_train_data_dir/feats.scp $ali_dir/ali.1.gz; do
[ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done

@@ -113,8 +101,7 @@
if [ $stage -le 11 ]; then
# Get the alignments as lattices (gives the chain training more freedom).
# use the same num-jobs as the alignments
- steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \
- ${lores_train_data_dir} \
+ steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \
data/lang $gmm_dir $lat_dir
rm $lat_dir/fsts.*.gz # save space
fi
@@ -124,27 +111,26 @@ if [ $stage -le 12 ]; then
# speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
# those. The num-leaves is always somewhat less than the num-leaves from
# the GMM baseline.
- if [ -f $tree_dir/final.mdl ]; then
+ if [ -f $tree_dir/final.mdl ]; then
echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
exit 1;
fi
steps/nnet3/chain/build_tree.sh \
--frame-subsampling-factor 3 \
+ --context-opts "--context-width=2 --central-position=1" \
--cmd "$train_cmd" 3500 ${lores_train_data_dir} \
- $lang $lat_dir $tree_dir
+ $lang $ali_dir $tree_dir
fi

+
if [ $stage -le 13 ]; then
mkdir -p $dir
echo "$0: creating neural net configs using the xconfig parser";

num_targets=$(tree-info $tree_dir/tree | grep num-pdfs | awk '{print $2}')
- learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
- affine_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true"
- tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66"
- linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0"
- prefinal_opts="l2-regularize=0.01"
- output_opts="l2-regularize=0.002"
+ learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python)
+ opts="l2-regularize=0.05"
+ output_opts="l2-regularize=0.01 bottleneck-dim=320"

mkdir -p $dir/configs
cat << EOF > $dir/configs/network.xconfig
@@ -154,31 +140,33 @@ if [ $stage -le 13 ]; then
# please note that it is important to have input layer with the name=input
# as the layer immediately preceding the fixed-affine-layer to enable
# the use of short notation for the descriptor
- fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
+ fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

# the first splicing is moved before the lda layer, so no splicing here
- relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=1536
- tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
- tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
- tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
- tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=0
- tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf14 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- tdnnf-layer name=tdnnf15 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
- linear-component name=prefinal-l dim=256 $linear_opts
-
- prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256
- output-layer name=output include-log-softmax=false dim=$num_targets $output_opts
-
- prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256
- output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts
+ relu-batchnorm-layer name=tdnn1 $opts dim=512
+ relu-batchnorm-layer name=tdnn2 $opts dim=512 input=Append(-1,0,1)
+ relu-batchnorm-layer name=tdnn3 $opts dim=512
+ relu-batchnorm-layer name=tdnn4 $opts dim=512 input=Append(-1,0,1)
+ relu-batchnorm-layer name=tdnn5 $opts dim=512
+ relu-batchnorm-layer name=tdnn6 $opts dim=512 input=Append(-3,0,3)
+ relu-batchnorm-layer name=tdnn7 $opts dim=512 input=Append(-3,0,3)
+ relu-batchnorm-layer name=tdnn8 $opts dim=512 input=Append(-6,-3,0)
+
+ ## adding the layers for chain branch
+ relu-batchnorm-layer name=prefinal-chain $opts dim=512 target-rms=0.5
+ output-layer name=output include-log-softmax=false $output_opts dim=$num_targets max-change=1.5
+
+ # adding the layers for xent branch
+ # This block prints the configs for a separate output that will be
+ # trained with a cross-entropy objective in the 'chain' models... this
+ # has the effect of regularizing the hidden parts of the model. we use
+ # 0.5 / args.xent_regularize as the learning rate factor- the factor of
+ # 0.5 / args.xent_regularize is suitable as it means the xent
+ # final-layer learns at a rate independent of the regularization
+ # constant; and the 0.5 was tuned so as to make the relative progress
+ # similar in the xent and regular final layers.
+ relu-batchnorm-layer name=prefinal-xent input=tdnn8 $opts dim=512 target-rms=0.5
+ output-layer name=output-xent $output_opts dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
@@ -189,30 +177,31 @@ if [ $stage -le 14 ]; then
/export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
fi

- steps/nnet3/chain/train.py --stage $train_stage \
- --cmd "$train_cmd --mem 4G" \
+ steps/nnet3/chain/train.py --stage=$train_stage \
+ --cmd="$decode_cmd" \
--feat.online-ivector-dir=$train_ivector_dir \
- --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
+ --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
--chain.xent-regularize $xent_regularize \
--chain.leaky-hmm-coefficient=0.1 \
- --chain.l2-regularize=0.0 \
+ --chain.l2-regularize=0.00005 \
--chain.apply-deriv-weights=false \
--chain.lm-opts="--num-extra-lm-states=2000" \
- --trainer.dropout-schedule="$dropout_schedule" \
- --trainer.add-option="--optimization.memory-compression-level=2" \
+ --trainer.srand=$srand \
--trainer.max-param-change=2.0 \
- --trainer.num-epochs $num_epochs \
- --trainer.frames-per-iter=1500000 \
- --trainer.optimization.num-jobs-initial=3 \
- --trainer.optimization.num-jobs-final=16 \
- --trainer.optimization.initial-effective-lrate=0.00025 \
- --trainer.optimization.final-effective-lrate=0.000025 \
- --trainer.num-chunk-per-minibatch=64 \
- --egs.stage $get_egs_stage \
+ --trainer.num-epochs=10 \
+ --trainer.frames-per-iter=3000000 \
+ --trainer.optimization.num-jobs-initial=2 \
+ --trainer.optimization.num-jobs-final=4 \
+ --trainer.optimization.initial-effective-lrate=0.001 \
+ --trainer.optimization.final-effective-lrate=0.0001 \
+ --trainer.optimization.shrink-value=1.0 \
+ --trainer.num-chunk-per-minibatch=256,128,64 \
+ --trainer.optimization.momentum=0.0 \
--egs.chunk-width=$chunk_width \
--egs.dir="$common_egs_dir" \
--egs.opts="--frames-overlap-per-eg 0" \
--cleanup.remove-egs=$remove_egs \
+ --use-gpu=true \
--feat-dir=$train_data_dir \
--tree-dir=$tree_dir \
--lat-dir=$lat_dir \
@@ -257,19 +246,21 @@ if $test_online_decoding && [ $stage -le 17 ]; then
$lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online

rm $dir/.error 2>/dev/null || true
+
for data in $test_sets; do
(
- nspk=$(wc -l <data/${data}/spk2utt)
+ nspk=$(wc -l <data/${data}_hires/spk2utt)
# note: we just give it "data/${data}" as it only uses the wav.scp, the
# feature type does not matter.
steps/online/nnet3/decode.sh \
--acwt 1.0 --post-decode-acwt 10.0 \
- --nj $nspk --cmd "$decode_cmd" \
+ --nj 8 --cmd "$decode_cmd" \
$tree_dir/graph${lm_suffix} data/${data} ${dir}_online/decode${lm_suffix}_${data} || exit 1
) || touch $dir/.error &
done
wait
[ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
fi

+
exit 0;
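
Side note on the xent-branch comments in the xconfig block above: the learning-rate factor is simply 0.5 divided by the regularization constant, so with this script's default xent_regularize=0.1 the output-xent layer ends up with a factor of 5.0. A minimal standalone sketch of that arithmetic follows; it is not part of the recipe, and it assumes python3 is on the PATH (the script itself pipes through python):

# Worked example of the output-xent learning-rate factor (assumes python3 is available).
xent_regularize=0.1
learning_rate_factor=$(python3 -c "print(0.5 / ${xent_regularize})")
echo "output-xent learning-rate-factor = ${learning_rate_factor}"   # prints 5.0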