Commit 1d84da9

moving the new recipe as 1b and recovering the 1a
1 parent e736421 commit 1d84da9

3 files changed: 327 additions, 70 deletions

egs/chime5/s5/local/chain/run_tdnn.sh

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-tuning/run_tdnn_1a.sh
+tuning/run_tdnn_1b.sh
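
In Kaldi recipes, local/chain/run_tdnn.sh is normally a symlink whose repository content is just the target path, so this one-line diff retargets the default recipe from 1a to 1b. A minimal sketch of the equivalent manual change (hypothetical command, not part of the commit):

    cd egs/chime5/s5/local/chain
    ln -sf tuning/run_tdnn_1b.sh run_tdnn.sh   # make 1b the default chain recipe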

egs/chime5/s5/local/chain/tuning/run_tdnn_1a.sh

Lines changed: 60 additions & 69 deletions
@@ -1,21 +1,10 @@
 #!/bin/bash

-# This factorized TDNN (TDNN-F) script is adapted from s5b chime5 recipe
-# backported to s5, because the original script was failing to train in recent
-# versions of kaldi due to some instabilities and using superceded model
-# It uses resnet-style skip connections.
-# For details, refer to the paper:
-# "Semi-Orthogonal Low-Rank Matrix Factorization for Deep Neural Networks", Daniel Povey, Gaofeng Cheng, Yiming Wang, Ke Li, Hainan Xu, Mahsa Yarmohamadi, Sanjeev Khudanpur, Interspeech 2018
+# Set -e here so that we catch if any executable fails immediately
+set -euo pipefail

-# %WER 73.03 [ 43001 / 58881, 4433 ins, 22250 del, 16318 sub ] exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_beamformit_ref/wer_10_0.0
-# %WER 38.88 [ 22895 / 58881, 1882 ins, 8235 del, 12778 sub ] exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_worn/wer_10_0.0
-
-# steps/info/chain_dir_info.pl exp/chain_train_worn_u100k_cleaned/tdnn1a_sp
-# exp/chain_train_worn_u100k_cleaned/tdnn1a_sp: num-iters=96 nj=3..16 num-params=17.1M dim=40+100->2928 combine=-0.125->-0.125 (over 2) xent:train/valid[63,95,final]=(-2.12,-1.81,-1.82/-2.20,-1.96,-1.96) logprob:train/valid[63,95,final]=(-0.190,-0.126,-0.125/-0.218,-0.183,-0.183)
-
-set -e
-
-# configs for 'chain'
+# First the options that are passed through to run_ivector_common.sh
+# (some of which are also used in this script directly).
 stage=0
 nj=96
 train_set=train_worn_u100k
@@ -32,13 +21,11 @@ train_stage=-10
 get_egs_stage=-10
 decode_iter=

-num_epochs=4
-common_egs_dir=
 # training options
 # training chunk-options
 chunk_width=140,100,160
+common_egs_dir=
 xent_regularize=0.1
-dropout_schedule='0,0@0.20,0.5@0.50,0'

 # training options
 srand=0
@@ -67,13 +54,14 @@ fi
 # run those things.
 local/nnet3/run_ivector_common.sh --stage $stage \
                                   --train-set $train_set \
-                                  --test-sets "$test_sets" \
+                                  --test-sets "$test_sets" \
                                   --gmm $gmm \
                                   --nnet3-affix "$nnet3_affix" || exit 1;

 # Problem: We have removed the "train_" prefix of our training set in
 # the alignment directory names! Bad!
 gmm_dir=exp/$gmm
+ali_dir=exp/${gmm}_ali_${train_set}_sp
 tree_dir=exp/chain${nnet3_affix}/tree_sp${tree_affix:+_$tree_affix}
 lang=data/lang_chain
 lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
@@ -83,7 +71,7 @@ lores_train_data_dir=data/${train_set}_sp
 train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires

 for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
-    $lores_train_data_dir/feats.scp; do
+    $lores_train_data_dir/feats.scp $ali_dir/ali.1.gz; do
   [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
 done

@@ -113,8 +101,7 @@ fi
 if [ $stage -le 11 ]; then
   # Get the alignments as lattices (gives the chain training more freedom).
   # use the same num-jobs as the alignments
-  steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \
-    ${lores_train_data_dir} \
+  steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \
     data/lang $gmm_dir $lat_dir
   rm $lat_dir/fsts.*.gz # save space
 fi
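
Note that the recovered 1a flow drops --generate-ali-from-lats and instead expects a pre-built alignment directory ($ali_dir, re-introduced and checked in the hunks above); build_tree.sh in the next hunk is accordingly pointed at $ali_dir rather than $lat_dir. A sketch of how such an alignment directory is typically produced earlier in the pipeline (illustrative only; the actual stage lives outside this diff and may differ):

    # assumes the speed-perturbed low-resolution data and the GMM dir used by this script
    steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" \
      data/${train_set}_sp data/lang exp/$gmm exp/${gmm}_ali_${train_set}_sp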
@@ -124,27 +111,26 @@ if [ $stage -le 12 ]; then
   # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
   # those. The num-leaves is always somewhat less than the num-leaves from
   # the GMM baseline.
-  if [ -f $tree_dir/final.mdl ]; then
+  if [ -f $tree_dir/final.mdl ]; then
     echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
     exit 1;
   fi
   steps/nnet3/chain/build_tree.sh \
     --frame-subsampling-factor 3 \
+    --context-opts "--context-width=2 --central-position=1" \
     --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
-    $lang $lat_dir $tree_dir
+    $lang $ali_dir $tree_dir
 fi

+
 if [ $stage -le 13 ]; then
   mkdir -p $dir
   echo "$0: creating neural net configs using the xconfig parser";

   num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
-  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
-  affine_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true"
-  tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66"
-  linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0"
-  prefinal_opts="l2-regularize=0.01"
-  output_opts="l2-regularize=0.002"
+  learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python)
+  opts="l2-regularize=0.05"
+  output_opts="l2-regularize=0.01 bottleneck-dim=320"

   mkdir -p $dir/configs
   cat <<EOF > $dir/configs/network.xconfig
@@ -154,31 +140,33 @@ if [ $stage -le 13 ]; then
   # please note that it is important to have input layer with the name=input
   # as the layer immediately preceding the fixed-affine-layer to enable
   # the use of short notation for the descriptor
-  fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
+  fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

   # the first splicing is moved before the lda layer, so no splicing here
-  relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=1536
-  tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
-  tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
-  tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1
-  tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=0
-  tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf14 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  tdnnf-layer name=tdnnf15 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3
-  linear-component name=prefinal-l dim=256 $linear_opts
-
-  prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256
-  output-layer name=output include-log-softmax=false dim=$num_targets $output_opts
-
-  prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256
-  output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts
+  relu-batchnorm-layer name=tdnn1 $opts dim=512
+  relu-batchnorm-layer name=tdnn2 $opts dim=512 input=Append(-1,0,1)
+  relu-batchnorm-layer name=tdnn3 $opts dim=512
+  relu-batchnorm-layer name=tdnn4 $opts dim=512 input=Append(-1,0,1)
+  relu-batchnorm-layer name=tdnn5 $opts dim=512
+  relu-batchnorm-layer name=tdnn6 $opts dim=512 input=Append(-3,0,3)
+  relu-batchnorm-layer name=tdnn7 $opts dim=512 input=Append(-3,0,3)
+  relu-batchnorm-layer name=tdnn8 $opts dim=512 input=Append(-6,-3,0)
+
+  ## adding the layers for chain branch
+  relu-batchnorm-layer name=prefinal-chain $opts dim=512 target-rms=0.5
+  output-layer name=output include-log-softmax=false $output_opts dim=$num_targets max-change=1.5
+
+  # adding the layers for xent branch
+  # This block prints the configs for a separate output that will be
+  # trained with a cross-entropy objective in the 'chain' models... this
+  # has the effect of regularizing the hidden parts of the model. we use
+  # 0.5 / args.xent_regularize as the learning rate factor- the factor of
+  # 0.5 / args.xent_regularize is suitable as it means the xent
+  # final-layer learns at a rate independent of the regularization
+  # constant; and the 0.5 was tuned so as to make the relative progress
+  # similar in the xent and regular final layers.
+  relu-batchnorm-layer name=prefinal-xent input=tdnn8 $opts dim=512 target-rms=0.5
+  output-layer name=output-xent $output_opts dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
 EOF
   steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
 fi
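
One detail in the stage-13 hunk: the learning-rate factor is computed by piping a print statement into python, and the recovered 1a wraps the expression in parentheses, which parses under both Python 2 and Python 3 (the unparenthesized form removed above is a syntax error under Python 3). A quick illustrative check with the script's default xent_regularize=0.1 (standalone snippet, not part of the commit):

    xent_regularize=0.1
    learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python)
    echo "$learning_rate_factor"   # 5.0 -> the xent output layer trains with a 5x learning-rate factor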
@@ -189,30 +177,31 @@ if [ $stage -le 14 ]; then
     /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
   fi

-  steps/nnet3/chain/train.py --stage $train_stage \
-    --cmd "$train_cmd --mem 4G" \
+  steps/nnet3/chain/train.py --stage=$train_stage \
+    --cmd="$decode_cmd" \
     --feat.online-ivector-dir=$train_ivector_dir \
-    --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
+    --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
     --chain.xent-regularize $xent_regularize \
     --chain.leaky-hmm-coefficient=0.1 \
-    --chain.l2-regularize=0.0 \
+    --chain.l2-regularize=0.00005 \
     --chain.apply-deriv-weights=false \
     --chain.lm-opts="--num-extra-lm-states=2000" \
-    --trainer.dropout-schedule="$dropout_schedule" \
-    --trainer.add-option="--optimization.memory-compression-level=2" \
+    --trainer.srand=$srand \
     --trainer.max-param-change=2.0 \
-    --trainer.num-epochs $num_epochs \
-    --trainer.frames-per-iter=1500000 \
-    --trainer.optimization.num-jobs-initial=3 \
-    --trainer.optimization.num-jobs-final=16 \
-    --trainer.optimization.initial-effective-lrate=0.00025 \
-    --trainer.optimization.final-effective-lrate=0.000025 \
-    --trainer.num-chunk-per-minibatch=64 \
-    --egs.stage $get_egs_stage \
+    --trainer.num-epochs=10 \
+    --trainer.frames-per-iter=3000000 \
+    --trainer.optimization.num-jobs-initial=2 \
+    --trainer.optimization.num-jobs-final=4 \
+    --trainer.optimization.initial-effective-lrate=0.001 \
+    --trainer.optimization.final-effective-lrate=0.0001 \
+    --trainer.optimization.shrink-value=1.0 \
+    --trainer.num-chunk-per-minibatch=256,128,64 \
+    --trainer.optimization.momentum=0.0 \
     --egs.chunk-width=$chunk_width \
     --egs.dir="$common_egs_dir" \
     --egs.opts="--frames-overlap-per-eg 0" \
     --cleanup.remove-egs=$remove_egs \
+    --use-gpu=true \
     --feat-dir=$train_data_dir \
     --tree-dir=$tree_dir \
     --lat-dir=$lat_dir \
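
The header comments removed from 1a used steps/info/chain_dir_info.pl to summarize a finished run (iterations, parameter count, train/valid objectives). The same tool can be pointed at the directory this training stage produces; an illustrative call, assuming training has completed:

    # prints num-iters, num-params, and objective summaries for the chain model dir
    steps/info/chain_dir_info.pl $dir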
@@ -257,19 +246,21 @@ if $test_online_decoding && [ $stage -le 17 ]; then
     $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online

   rm $dir/.error 2>/dev/null || true
+
   for data in $test_sets; do
     (
-      nspk=$(wc -l <data/${data}/spk2utt)
+      nspk=$(wc -l <data/${data}_hires/spk2utt)
       # note: we just give it "data/${data}" as it only uses the wav.scp, the
       # feature type does not matter.
       steps/online/nnet3/decode.sh \
         --acwt 1.0 --post-decode-acwt 10.0 \
-        --nj $nspk --cmd "$decode_cmd" \
+        --nj 8 --cmd "$decode_cmd" \
         $tree_dir/graph${lm_suffix} data/${data} ${dir}_online/decode${lm_suffix}_${data} || exit 1
     ) || touch $dir/.error &
   done
   wait
   [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
 fi

+
 exit 0;
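
Taken together with the symlink change at the top, a hedged sketch of how the two recipes sit after this commit (assuming the usual Kaldi chime5 s5 layout, and assuming the third changed file, not shown above, is the new tuning/run_tdnn_1b.sh):

    cd egs/chime5/s5
    local/chain/run_tdnn.sh             # default entry point, now resolving to tuning/run_tdnn_1b.sh
    local/chain/tuning/run_tdnn_1a.sh   # the recovered pre-TDNN-F 1a recipe, still runnable directly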
