249 changes: 249 additions & 0 deletions egs/tedlium/s5_r3/local/chain/run_tdnn_1c.sh
@@ -0,0 +1,249 @@
#!/bin/bash

# 1g is as 1f, but moves to a factorized TDNN (TDNN-F) model, re-tunes it, and
# switches to unconstrained egs (the last of which gives around a 0.1%
# improvement). (Note: I don't believe the Tedlium TDNN models were previously
# very well tuned.)

# local/chain/compare_wer_general.sh exp/chain_cleaned/tdnn1f_sp_bi exp/chain_cleaned/tdnn1g_sp
# System                    tdnn1f_sp_bi  tdnn1g_sp
# WER on dev(orig)                   8.9        7.9
# WER on dev(rescored)               8.1        7.3
# WER on test(orig)                  9.1        8.0
# WER on test(rescored)              8.6        7.6
# Final train prob               -0.1026    -0.0637
# Final valid prob               -0.1031    -0.0750
# Final train prob (xent)        -1.4370    -0.9792
# Final valid prob (xent)        -1.4670    -0.9951
# Num-params                     6994800    9431072

# steps/info/chain_dir_info.pl exp/chain_cleaned/tdnn1g_sp
# exp/chain_cleaned/tdnn1g_sp: num-iters=108 nj=3..12 num-params=9.4M dim=40+100->3600 combine=-0.060->-0.060 (over 2) xent:train/valid[71,107,final]=(-1.30,-0.985,-0.979/-1.29,-1.00,-0.995) logprob:train/valid[71,107,final]=(-0.098,-0.065,-0.064/-0.100,-0.075,-0.075)

## how you run this (note: this assumes that the run_tdnn.sh soft link points here;
## otherwise call it directly in its location).
# by default, with cleanup:
# local/chain/run_tdnn.sh

# without cleanup:
# local/chain/run_tdnn.sh --train-set train --gmm tri3 --nnet3-affix "" &

set -e -o pipefail

# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
nj=15
decode_nj=15
xent_regularize=0.1
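# Note on the dropout schedule below: as I understand the format, these are
# proportion[@data-fraction] points interpolated linearly, i.e. dropout is 0 for
# the first 20% of training, rises to 0.5 at the halfway point, then decays back to 0.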
dropout_schedule='0,0@0.20,0.5@0.50,0'

train_set=train_cleaned
gmm=tri3_cleaned # the gmm for the target data
num_threads_ubm=1
nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned

# The rest are configs specific to this script. Most of the parameters
# are just hardcoded at this level, in the commands below.
train_stage=-10
tree_affix= # affix for tree directory, e.g. "a" or "b", in case we change the configuration.
tdnn_affix=1g #affix for TDNN directory, e.g. "a" or "b", in case we change the configuration.
common_egs_dir= # you can set this to use previously dumped egs.
remove_egs=true

# End configuration section.
echo "$0 $@" # Print the command line for logging

. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh


if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

#local/nnet3/run_ivector_common.sh --stage $stage \
# --nj $nj \
# --train-set $train_set \
# --gmm $gmm \
# --num-threads-ubm $num_threads_ubm \
# --nnet3-affix "$nnet3_affix"


gmm_dir=exp/$gmm
ali_dir=exp/${gmm}_ali_${train_set}_sp
tree_dir=exp/chain${nnet3_affix}/tree_bi${tree_affix}
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
dir=exp/chain${nnet3_affix}/tdnn${tdnn_affix}_sp_bi
train_data_dir=data/${train_set}_sp_hires
lores_train_data_dir=data/${train_set}_sp
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires


for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
    $lores_train_data_dir/feats.scp $ali_dir/ali.1.gz; do
[ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done

if [ $stage -le 14 ]; then
echo "$0: creating lang directory with one state per phone."
# Create a version of the lang/ directory that has one state per phone in the
# topo file. [Note: it really has two states; the first occurs exactly once,
# and the second has zero or more repeats.]
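# (This topology lets a phone be traversed in as little as one output frame,
# which is what makes the 3-fold frame subsampling of 'chain' models workable.)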
if [ -d data/lang_chain ]; then
if [ data/lang_chain/L.fst -nt data/lang/L.fst ]; then
echo "$0: data/lang_chain already exists, not overwriting it; continuing"
else
echo "$0: data/lang_chain already exists and seems to be older than data/lang..."
echo " ... not sure what to do. Exiting."
exit 1;
fi
else
cp -r data/lang data/lang_chain
silphonelist=$(cat data/lang_chain/phones/silence.csl) || exit 1;
nonsilphonelist=$(cat data/lang_chain/phones/nonsilence.csl) || exit 1;
# Use our special topology... note that we may have to tune this
# topology later on.
steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >data/lang_chain/topo
fi
fi

if [ $stage -le 15 ]; then
# Get the alignments as lattices (gives the chain training more freedom).
# use the same num-jobs as the alignments
steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
data/lang $gmm_dir $lat_dir
rm $lat_dir/fsts.*.gz # save space
fi

if [ $stage -le 16 ]; then
# Build a tree using our new topology. We know we have alignments for the
# speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
# those.
if [ -f $tree_dir/final.mdl ]; then
echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
exit 1;
fi
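# --frame-subsampling-factor 3 (below) matches the 3-fold reduction in output
# frame rate used by 'chain' models; 4000 is the target number of tree leaves.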
steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
--context-opts "--context-width=2 --central-position=1" \
--cmd "$train_cmd" 4000 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir
fi

if [ $stage -le 17 ]; then
mkdir -p $dir

echo "$0: creating neural net configs using the xconfig parser";

num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
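# learning_rate_factor is applied to the xent output head below; with
# xent_regularize=0.1 it works out to 0.5/0.1 = 5.0.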
learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
affine_opts="l2-regularize=0.008 dropout-proportion=0.0 dropout-per-dim-continuous=true"
tdnnf_opts="l2-regularize=0.008 dropout-proportion=0.0 bypass-scale=0.66"
linear_opts="l2-regularize=0.008 orthonormal-constraint=-1.0"
prefinal_opts="l2-regularize=0.008"
output_opts="l2-regularize=0.002"

mkdir -p $dir/configs
cat <<EOF > $dir/configs/network.xconfig
input dim=100 name=ivector
input dim=40 name=input

# Please note that it is important to have the input layer with name=input
# as the layer immediately preceding the fixed-affine-layer, to enable
# the use of short notation for the descriptor.
fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

# the first splicing is moved before the lda layer, so no splicing here
relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=1024
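# The tdnnf layers below form the factorized TDNN (TDNN-F) stack: each layer
# factors its weight matrix through a 128-dim bottleneck kept semi-orthogonal,
# and bypass-scale=0.66 adds a scaled skip connection from the previous layer.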
tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=1
tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=1
tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=1
tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=0
tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1024 bottleneck-dim=128 time-stride=3
linear-component name=prefinal-l dim=256 $linear_opts

prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1024 small-dim=256
output-layer name=output include-log-softmax=false dim=$num_targets $output_opts

prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1024 small-dim=256
output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts
EOF
steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/

fi

if [ $stage -le 18 ]; then
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
utils/create_split_dir.pl \
/export/b0{5,6,7,8}/$USER/kaldi-data/egs/tedlium-$(date +'%m_%d_%H_%M')/s5_r3/$dir/egs/storage $dir/egs/storage
fi

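# Chain training. Note: --egs.opts "--constrained false" gives the 'unconstrained
# egs' mentioned at the top of this script (worth around 0.1% here).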
steps/nnet3/chain/train.py --stage $train_stage \
--cmd "$decode_cmd" \
--feat.online-ivector-dir $train_ivector_dir \
--feat.cmvn-opts "--norm-means=false --norm-vars=false" \
--chain.xent-regularize $xent_regularize \
--chain.leaky-hmm-coefficient 0.1 \
--chain.l2-regularize 0.0 \
--chain.apply-deriv-weights false \
--chain.lm-opts="--num-extra-lm-states=2000" \
--trainer.dropout-schedule $dropout_schedule \
--trainer.add-option="--optimization.memory-compression-level=2" \
--egs.dir "$common_egs_dir" \
--egs.opts "--frames-overlap-per-eg 0 --constrained false" \
--egs.chunk-width 150,110,100 \
--trainer.num-chunk-per-minibatch 64 \
--trainer.frames-per-iter 5000000 \
--trainer.num-epochs 6 \
--trainer.optimization.num-jobs-initial 3 \
--trainer.optimization.num-jobs-final 12 \
--trainer.optimization.initial-effective-lrate 0.00025 \
--trainer.optimization.final-effective-lrate 0.000025 \
--trainer.max-param-change 2.0 \
--cleanup.remove-egs $remove_egs \
--feat-dir $train_data_dir \
--tree-dir $tree_dir \
--lat-dir $lat_dir \
--dir $dir
fi



if [ $stage -le 19 ]; then
# Note: the 'topo' in data/lang does not match the chain model, but that does
# not matter here, because this script does not read the 'topo' from the lang
# directory; the topology comes from the model.
utils/mkgraph.sh --self-loop-scale 1.0 data/lang $dir $dir/graph
fi

if [ $stage -le 20 ]; then
rm $dir/.error 2>/dev/null || true
for dset in dev test; do
(
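# Chain models are decoded with --acwt 1.0; --post-decode-acwt 10.0 scales the
# acoustic scores in the lattice back up so the usual LM-weight range applies in scoring.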
steps/nnet3/decode.sh --num-threads 4 --nj $decode_nj --cmd "$decode_cmd" \
--acwt 1.0 --post-decode-acwt 10.0 \
--online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${dset}_hires \
--scoring-opts "--min-lmwt 5 " \
$dir/graph data/${dset}_hires $dir/decode_${dset} || exit 1;
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" data/lang data/lang_rescore \
data/${dset}_hires ${dir}/decode_${dset} ${dir}/decode_${dset}_rescore || exit 1
) || touch $dir/.error &
done
wait
if [ -f $dir/.error ]; then
echo "$0: something went wrong in decoding"
exit 1
fi
fi
exit 0
2 changes: 1 addition & 1 deletion egs/wsj/s5/steps/cleanup/internal/align_ctm_ref.py
@@ -127,7 +127,7 @@ def read_text(text_file):
"Did not get enough columns; line {0} in {1}"
"".format(line, text_file.name))
elif len(parts) == 1:
logger.warn("Empty transcript for utterance %s in %s",
logger.warn("Empty transcript for utterance %s in %s",
parts[0], text_file.name)
yield parts[0], []
else:
10 changes: 5 additions & 5 deletions egs/wsj/s5/steps/cleanup/internal/get_ctm_edits.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
@@ -116,17 +116,17 @@
def OpenFiles():
global ctm_edits_out, edits_in, ctm_in, symbol_table, oov_word
try:
-ctm_edits_out = open(args.ctm_edits_out, 'w')
+ctm_edits_out = open(args.ctm_edits_out, 'w', encoding='utf-8')
except:
sys.exit("get_ctm_edits.py: error opening ctm-edits file {0} for output".format(
args.ctm_edits_out))
try:
-edits_in = open(args.edits_in)
+edits_in = open(args.edits_in, encoding='utf-8')
except:
sys.exit("get_ctm_edits.py: error opening edits file {0} for input".format(
args.edits_in))
try:
-ctm_in = open(args.ctm_in)
+ctm_in = open(args.ctm_in, encoding='utf-8')
except:
sys.exit("get_ctm_edits.py: error opening ctm file {0} for input".format(
args.ctm_in))
@@ -138,7 +138,7 @@ def OpenFiles():
print("get_ctm_edits.py: error: if you set the the --symbol-table option "
"you must also set the --oov option", file = sys.stderr)
try:
-f = open(args.symbol_table, 'r')
+f = open(args.symbol_table, 'r', encoding='utf-8')
for line in f.readlines():
[ word, integer ] = line.split()
if int(integer) == args.oov:
4 changes: 2 additions & 2 deletions egs/wsj/s5/steps/cleanup/internal/get_non_scored_words.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
@@ -90,7 +90,7 @@ def read_lang(lang_dir):
raise

try:
-for line in open(lang_dir + '/words.txt').readlines():
+for line in open(lang_dir + '/words.txt', encoding='utf-8').readlines():
[ word, integer ] = line.split()
if int(integer) in silence_word_ints:
non_scored_words.add(word)