From e96b257f6cd404feb750b67ddf64979ea03f383d Mon Sep 17 00:00:00 2001 From: David Snyder Date: Fri, 24 Mar 2017 19:33:54 -0400 Subject: [PATCH 1/3] [scripts,egs] Adding options for using PCA instead of LDA+MLLT for ivectors used in ASR. Results are reported in the default TDNN recipe in AMI. Updating steps/online/nnet2/{train_diag_ubm.sh,train_ivector_extractor.sh} so that they now backup the contents of their destination directory if it already exists. --- egs/ami/s5b/RESULTS_ihm | 21 +- egs/ami/s5b/local/chain/run_tdnn.sh | 2 +- egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh | 269 ++++++++++++++++++ egs/ami/s5b/local/nnet3/run_ivector_common.sh | 44 ++- .../steps/online/nnet2/get_pca_transform.sh | 67 +++++ .../s5/steps/online/nnet2/train_diag_ubm.sh | 35 ++- .../online/nnet2/train_ivector_extractor.sh | 29 +- 7 files changed, 425 insertions(+), 42 deletions(-) create mode 100755 egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh create mode 100755 egs/wsj/s5/steps/online/nnet2/get_pca_transform.sh diff --git a/egs/ami/s5b/RESULTS_ihm b/egs/ami/s5b/RESULTS_ihm index 44234fc3fd9..25a60d24cfb 100644 --- a/egs/ami/s5b/RESULTS_ihm +++ b/egs/ami/s5b/RESULTS_ihm @@ -40,7 +40,6 @@ %WER 24.0 | 13098 94470 | 79.4 12.1 8.5 3.4 24.0 57.1 | -0.153 | exp/ihm/nnet3_cleaned/tdnn_sp/decode_dev/ascore_12/dev_hires.ctm.filt.sys %WER 25.5 | 12643 89984 | 77.7 14.2 8.2 3.2 25.5 56.4 | -0.139 | exp/ihm/nnet3_cleaned/tdnn_sp/decode_eval/ascore_11/eval_hires.ctm.filt.sys - # local/nnet3/run_tdnn.sh --mic ihm --train-set train --gmm tri3 --nnet3-affix "" # nnet3 xent TDNN without data cleaning [cleaning makes very small and # inconsistent difference on this dat] @@ -55,17 +54,21 @@ %WER 22.4 | 12643 89977 | 80.3 12.5 7.2 2.7 22.4 53.6 | -0.503 | exp/ihm/nnet3_cleaned/lstm_bidirectional_sp/decode_eval/ascore_10/eval_hires.ctm.filt.sys ############################################ - -# local/chain/run_tdnn.sh --mic ihm --stage 12 & -# cleanup + chain TDNN model -# for d in 
exp/ihm/chain_cleaned/tdnn_sp_bi/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done -%WER 22.5 | 13098 94490 | 80.6 10.8 8.6 3.1 22.5 55.0 | 0.072 | exp/ihm/chain_cleaned/tdnn_sp_bi/decode_dev/ascore_10/dev_hires.ctm.filt.sys -%WER 22.5 | 12643 89978 | 80.3 12.5 7.2 2.7 22.5 53.1 | 0.149 | exp/ihm/chain_cleaned/tdnn_sp_bi/decode_eval/ascore_10/eval_hires.ctm.filt.sys - +# cleanup + chain TDNN model. +# local/chain/run_tdnn.sh --mic ihm --stage 4 & +# for d in exp/ihm/chain_cleaned/tdnn1d_sp_bi/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +%WER 21.7 | 13098 94488 | 81.1 10.4 8.4 2.8 21.7 54.4 | 0.096 | exp/ihm/chain_cleaned/tdnn1d_sp_bi/decode_dev/ascore_10/dev_hires.ctm.filt.sys +%WER 22.1 | 12643 89979 | 80.5 12.1 7.4 2.6 22.1 52.8 | 0.185 | exp/ihm/chain_cleaned/tdnn1d_sp_bi/decode_eval/ascore_10/eval_hires.ctm.filt.sys + +# cleanup + chain TDNN model. Uses LDA instead of PCA for ivector features. +# local/chain/tuning/run_tdnn_1b.sh --mic ihm --stage 4 & +# for d in exp/ihm/chain_cleaned/tdnn1b_sp_bi/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +%WER 22.0 | 13098 94488 | 80.8 10.2 9.0 2.8 22.0 54.7 | 0.102 | exp/ihm/chain_cleaned/tdnn1b_sp_bi/decode_dev/ascore_10/dev_hires.ctm.filt.sys +%WER 22.2 | 12643 89968 | 80.3 12.1 7.6 2.6 22.2 52.9 | 0.170 | exp/ihm/chain_cleaned/tdnn1b_sp_bi/decode_eval/ascore_10/eval_hires.ctm.filt.sys # local/chain/run_tdnn.sh --mic ihm --train-set train --gmm tri3 --nnet3-affix "" --stage 12 # chain TDNN model without cleanup [note: cleanup helps very little on this IHM data.] 
-for d in exp/ihm/chain/tdnn_sp_bi/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +# for d in exp/ihm/chain/tdnn_sp_bi/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done %WER 22.4 | 13098 94476 | 80.4 10.4 9.2 2.8 22.4 54.6 | 0.069 | exp/ihm/chain/tdnn_sp_bi/decode_dev/ascore_10/dev_hires.ctm.filt.sys %WER 22.5 | 12643 89974 | 80.0 12.1 7.9 2.6 22.5 52.8 | 0.157 | exp/ihm/chain/tdnn_sp_bi/decode_eval/ascore_10/eval_hires.ctm.filt.sys diff --git a/egs/ami/s5b/local/chain/run_tdnn.sh b/egs/ami/s5b/local/chain/run_tdnn.sh index 61f8f499182..e1adaa9346d 120000 --- a/egs/ami/s5b/local/chain/run_tdnn.sh +++ b/egs/ami/s5b/local/chain/run_tdnn.sh @@ -1 +1 @@ -tuning/run_tdnn_1b.sh \ No newline at end of file +tuning/run_tdnn_1d.sh \ No newline at end of file diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh new file mode 100755 index 00000000000..a9f228cb55d --- /dev/null +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh @@ -0,0 +1,269 @@ +#!/bin/bash + +# same as 1b but uses PCA instead of +# LDA features for the ivector extractor. + +# Results on 03/27/2017: +# local/chain/compare_wer_general.sh ihm tdnn1b_sp_bi tdnn1d_sp_bi +# System tdnn1b_sp_bi tdnn1d_sp_bi +# WER on dev 22.0 21.9 +# WER on eval 22.2 22.3 +# Final train prob -0.0813472 -0.0807054 +# Final valid prob -0.132032 -0.133564 +# Final train prob (xent) -1.41543 -1.41951 +# Final valid prob (xent) -1.62316 -1.63021 + +set -e -o pipefail +# First the options that are passed through to run_ivector_common.sh +# (some of which are also used in this script directly). +stage=0 +mic=ihm +nj=30 +min_seg_len=1.55 +use_ihm_ali=false +train_set=train_cleaned +gmm=tri3_cleaned # the gmm for the target data +ihm_gmm=tri3 # the gmm for the IHM system (if --use-ihm-ali true). +num_threads_ubm=32 +ivector_transform_type=pca +nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. 
_cleaned + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. +train_stage=-10 +tree_affix= # affix for tree directory, e.g. "a" or "b", in case we change the configuration. +tdnn_affix=1d #affix for TDNN directory, e.g. "a" or "b", in case we change the configuration. +common_egs_dir= # you can set this to use previously dumped egs. + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + + +if ! cuda-compiled; then + cat <data/lang_chain/topo + fi +fi + +if [ $stage -le 13 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 14 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --leftmost-questions-truncate -1 \ + --cmd "$train_cmd" 4200 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir +fi + +xent_regularize=0.1 + +if [ $stage -le 15 ]; then + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-renorm-layer name=tdnn1 dim=450 + relu-renorm-layer name=tdnn2 input=Append(-1,0,1) dim=450 + relu-renorm-layer name=tdnn3 input=Append(-1,0,1) dim=450 + relu-renorm-layer name=tdnn4 input=Append(-3,0,3) dim=450 + relu-renorm-layer name=tdnn5 input=Append(-3,0,3) dim=450 + relu-renorm-layer name=tdnn6 input=Append(-3,0,3) dim=450 + relu-renorm-layer name=tdnn7 input=Append(-3,0,3) dim=450 + + ## adding the layers for chain branch + relu-renorm-layer name=prefinal-chain input=tdnn7 dim=450 target-rms=0.5 + output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. 
we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + relu-renorm-layer name=prefinal-xent input=tdnn7 dim=450 target-rms=0.5 + output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 16 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage + fi + + touch $dir/egs/.nodelete # keep egs around when that run dies. + + steps/nnet3/chain/train.py --stage $train_stage \ + --cmd "$decode_cmd" \ + --feat.online-ivector-dir $train_ivector_dir \ + --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient 0.1 \ + --chain.l2-regularize 0.00005 \ + --chain.apply-deriv-weights false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --egs.dir "$common_egs_dir" \ + --egs.opts "--frames-overlap-per-eg 0" \ + --egs.chunk-width 150 \ + --trainer.num-chunk-per-minibatch 128 \ + --trainer.frames-per-iter 1500000 \ + --trainer.num-epochs 4 \ + --trainer.optimization.num-jobs-initial 2 \ + --trainer.optimization.num-jobs-final 12 \ + --trainer.optimization.initial-effective-lrate 0.001 \ + --trainer.optimization.final-effective-lrate 0.0001 \ + --trainer.max-param-change 2.0 \ + --cleanup.remove-egs true \ + --feat-dir $train_data_dir \ + --tree-dir $tree_dir \ + --lat-dir $lat_dir \ + --dir $dir +fi + + +graph_dir=$dir/graph_${LM} +if [ $stage -le 17 ]; then + # Note: it might 
appear that this data/lang_chain directory is mismatched, and it is as + # far as the 'topo' is concerned, but this script doesn't read the 'topo' from + # the lang directory. + utils/mkgraph.sh --self-loop-scale 1.0 data/lang_${LM} $dir $graph_dir +fi + +if [ $stage -le 18 ]; then + rm $dir/.error 2>/dev/null || true + for decode_set in dev eval; do + ( + steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \ + --nj $nj --cmd "$decode_cmd" \ + --online-ivector-dir exp/$mic/nnet3${nnet3_affix}/ivectors_${decode_set}_hires \ + --scoring-opts "--min-lmwt 5 " \ + $graph_dir data/$mic/${decode_set}_hires $dir/decode_${decode_set} || exit 1; + ) || touch $dir/.error & + done + wait + if [ -f $dir/.error ]; then + echo "$0: something went wrong in decoding" + exit 1 + fi +fi +exit 0 diff --git a/egs/ami/s5b/local/nnet3/run_ivector_common.sh b/egs/ami/s5b/local/nnet3/run_ivector_common.sh index bccbb42494c..860009c5ef5 100755 --- a/egs/ami/s5b/local/nnet3/run_ivector_common.sh +++ b/egs/ami/s5b/local/nnet3/run_ivector_common.sh @@ -17,8 +17,8 @@ train_set=train # you might set this to e.g. train_cleaned. gmm=tri3 # This specifies a GMM-dir from the features of the type you're training the system on; # it should contain alignments for 'train_set'. - num_threads_ubm=32 +ivector_transform_type=lda nnet3_affix=_cleaned # affix for exp/$mic/nnet3 directory to put iVector stuff in, so it # becomes exp/$mic/nnet3_cleaned or whatever. @@ -30,7 +30,7 @@ nnet3_affix=_cleaned # affix for exp/$mic/nnet3 directory to put iVector stu gmmdir=exp/${mic}/${gmm} -for f in data/${mic}/${train_set}/feats.scp ${gmmdir}/final.mdl; do +for f in data/${mic}/${train_set}/feats.scp ; do if [ ! -f $f ]; then echo "$0: expected file $f to exist" exit 1 @@ -110,20 +110,36 @@ if [ $stage -le 4 ]; then echo "$0: warning: number of feats $n1 != $n2, if these are very different it could be bad." 
fi - echo "$0: training a system on the hires data for its LDA+MLLT transform, in order to produce the diagonal GMM." - if [ -e exp/$mic/nnet3${nnet3_affix}/tri5/final.mdl ]; then - # we don't want to overwrite old stuff, ask the user to delete it. - echo "$0: exp/$mic/nnet3${nnet3_affix}/tri5/final.mdl already exists: " - echo " ... please delete and then rerun, or use a later --stage option." - exit 1; - fi - steps/train_lda_mllt.sh --cmd "$train_cmd" --num-iters 7 --mllt-iters "2 4 6" \ - --splice-opts "--left-context=3 --right-context=3" \ - 3000 10000 $temp_data_root/${train_set}_hires data/lang \ - $gmmdir exp/$mic/nnet3${nnet3_affix}/tri5 + case $ivector_transform_type in + lda) + if [ ! -f ${gmmdir}/final.mdl ]; then + echo "$0: expected file ${gmmdir}/final.mdl to exist" + exit 1; + fi + echo "$0: training a system on the hires data for its LDA+MLLT transform, in order to produce the diagonal GMM." + if [ -e exp/$mic/nnet3${nnet3_affix}/tri5/final.mdl ]; then + # we don't want to overwrite old stuff, ask the user to delete it. + echo "$0: exp/$mic/nnet3${nnet3_affix}/tri5/final.mdl already exists: " + echo " ... please delete and then rerun, or use a later --stage option." + exit 1; + fi + steps/train_lda_mllt.sh --cmd "$train_cmd" --num-iters 7 --mllt-iters "2 4 6" \ + --splice-opts "--left-context=3 --right-context=3" \ + 3000 10000 $temp_data_root/${train_set}_hires data/lang \ + $gmmdir exp/$mic/nnet3${nnet3_affix}/tri5 + ;; + pca) + echo "$0: computing a PCA transform from the hires data." + steps/online/nnet2/get_pca_transform.sh --cmd "$train_cmd" \ + --splice-opts "--left-context=3 --right-context=3" \ + --max-utts 10000 --subsample 2 \ + $temp_data_root/${train_set}_hires \ + exp/$mic/nnet3${nnet3_affix}/tri5 + ;; + *) echo "$0: invalid iVector transform type $ivector_transform_type" && exit 1; + esac fi - if [ $stage -le 5 ]; then echo "$0: computing a subset of data to train the diagonal UBM." 
diff --git a/egs/wsj/s5/steps/online/nnet2/get_pca_transform.sh b/egs/wsj/s5/steps/online/nnet2/get_pca_transform.sh new file mode 100755 index 00000000000..e0b704f8852 --- /dev/null +++ b/egs/wsj/s5/steps/online/nnet2/get_pca_transform.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# Copyright 2016 David Snyder +# +# This script computes a PCA transform on top of spliced features processed with +# apply-cmvn-online. +# +# +# Apache 2.0. + +# Begin configuration. +cmd=run.pl +config= +stage=0 +dim=40 # The dim after applying PCA +normalize_variance=true # If the PCA transform normalizes the variance +normalize_mean=true # If the PCA transform centers +splice_opts= +online_cmvn_opts= +max_utts=5000 # maximum number of files to use +subsample=5 # subsample features with this periodicity + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# != 2 ]; then + echo "Usage: steps/nnet2/get_pca_transform.sh [options] " + echo " e.g.: steps/train_pca_transform.sh data/train_si84 exp/tri2b" + echo "Main options (for others, see top of script file)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --config # config containing options" + echo " --stage # stage to do partial re-run from." + exit 1; +fi + +data=$1 +dir=$2 + +for f in $data/feats.scp ; do + [ ! -f "$f" ] && echo "$0: expecting file $f to exist" && exit 1 +done + +mkdir -p $dir/log + +echo "$splice_opts" >$dir/splice_opts # keep track of frame-splicing options + # so that later stages of system building can know what they were. +echo $online_cmvn_opts > $dir/online_cmvn.conf # keep track of options to CMVN. + +# create global_cmvn.stats +if ! 
matrix-sum --binary=false scp:$data/cmvn.scp - >$dir/global_cmvn.stats 2>/dev/null; then + echo "$0: Error summing cmvn stats" + exit 1 +fi + +feats="ark,s,cs:utils/subset_scp.pl --quiet $max_utts $data/feats.scp | apply-cmvn-online $online_cmvn_opts $dir/global_cmvn.stats scp:- ark:- | splice-feats $splice_opts ark:- ark:- | subsample-feats --n=$subsample ark:- ark:- |" + +if [ $stage -le 0 ]; then + $cmd $dir/log/pca_est.log \ + est-pca --dim=$dim --normalize-variance=$normalize_variance \ + --normalize-mean=$normalize_mean "$feats" $dir/final.mat || exit 1; +fi + +echo "Done estimating PCA transform in $dir" + +exit 0 diff --git a/egs/wsj/s5/steps/online/nnet2/train_diag_ubm.sh b/egs/wsj/s5/steps/online/nnet2/train_diag_ubm.sh index 22250ae9ee3..80a023fed8a 100755 --- a/egs/wsj/s5/steps/online/nnet2/train_diag_ubm.sh +++ b/egs/wsj/s5/steps/online/nnet2/train_diag_ubm.sh @@ -10,15 +10,15 @@ # This script was modified from ../../sre08/v1/sid/train_diag_ubm.sh. It trains # a diagonal UBM on top of features processed with apply-cmvn-online and then -# transformed with an LDA+MLLT matrix (obtained from the source directory). -# This script does not use the trained model from the source directory to -# initialize the diagonal GMM; instead, we initialize the GMM using +# transformed with an LDA+MLLT or PCA matrix (obtained from the source +# directory). This script does not use the trained model from the source +# directory to initialize the diagonal GMM; instead, we initialize the GMM using # gmm-global-init-from-feats, which sets the means to random data points and # then does some iterations of E-M in memory. After the in-memory -# initialization we train for a few iterations in parallel. -# Note that there is a slight mismatch in that the source LDA+MLLT matrix -# (final.mat) will have been estimated using standard CMVN, and we're using -# online CMVN. We don't think this will have much effect. +# initialization we train for a few iterations in parallel. 
Note that if an +# LDA+MLLT transform matrix is used, there will be a slight mismatch in that the +# source LDA+MLLT matrix (final.mat) will have been estimated using standard +# CMVN, and we're using online CMVN. We don't think this will have much effect. # Begin configuration section. @@ -58,7 +58,7 @@ if [ $# != 4 ]; then echo " --stage # stage to do partial re-run from." echo " --num-gselect # Number of Gaussians per frame to" echo " # limit computation to, for speed" - echo " --subsample # In main E-M phase, use every n" + echo " --subsample # In main E-M phase, use every n" echo " # frames (a speedup)" echo " --num-frames # Maximum num-frames to keep in memory" echo " # for model initialization" @@ -89,6 +89,15 @@ for f in $data/feats.scp "$online_cmvn_config" $srcdir/splice_opts $srcdir/final [ ! -f "$f" ] && echo "$0: expecting file $f to exist" && exit 1 done +if [ -d "$dir" ]; then + bak_dir=$(mktemp -d ${dir}/backup.XXX); + echo "$0: Directory $dir already exists. Backing up diagonal UBM in ${bak_dir}"; + for f in $dir/final.mat $dir/final.dubm $dir/online_cmvn.conf $dir/global_cmvn.stats; do + [ -f "$f" ] && mv $f ${bak_dir}/ + done + [ -d "$dir/log" ] && mv $dir/log ${bak_dir}/ +fi + splice_opts=$(cat $srcdir/splice_opts) cp $srcdir/splice_opts $dir/ || exit 1; cp $srcdir/final.mat $dir/ || exit 1; @@ -146,10 +155,16 @@ for x in `seq 0 $[$num_iters-1]`; do $cmd $dir/log/update.$x.log \ gmm-global-est $opt --min-gaussian-weight=$min_gaussian_weight $dir/$x.dubm "gmm-global-sum-accs - $dir/$x.*.acc|" \ $dir/$[$x+1].dubm || exit 1; - rm $dir/$x.*.acc $dir/$x.dubm + + if $cleanup; then + rm $dir/$x.*.acc $dir/$x.dubm + fi fi done -rm $dir/gselect.*.gz +if $cleanup; then + rm $dir/gselect.*.gz +fi + mv $dir/$num_iters.dubm $dir/final.dubm || exit 1; exit 0; diff --git a/egs/wsj/s5/steps/online/nnet2/train_ivector_extractor.sh b/egs/wsj/s5/steps/online/nnet2/train_ivector_extractor.sh index 67845b01c8a..5dbda1780f4 100755 --- 
a/egs/wsj/s5/steps/online/nnet2/train_ivector_extractor.sh +++ b/egs/wsj/s5/steps/online/nnet2/train_ivector_extractor.sh @@ -21,7 +21,7 @@ # - Set num_threads to the minimum of (4, or how many virtual cores your machine has). # (because of needing to lock various global quantities, the program can't # use many more than 4 threads with good CPU utilization). -# - Set num_processes to the number of virtual cores on each machine you have, divided by +# - Set num_processes to the number of virtual cores on each machine you have, divided by # num_threads. E.g. 4, if you have 16 virtual cores. If you're on a shared queue # that's busy with other people's jobs, it may be wise to set it to rather less # than this maximum though, or your jobs won't get scheduled. And if memory is @@ -32,8 +32,8 @@ # may want more jobs, though. # Begin configuration section. -nj=10 # this is the number of separate queue jobs we run, but each one - # contains num_processes sub-jobs.. the real number of threads we +nj=10 # this is the number of separate queue jobs we run, but each one + # contains num_processes sub-jobs.. the real number of threads we # run is nj * num_processes * num_threads, and the number of # separate pieces of data is nj * num_processes. num_threads=4 @@ -88,6 +88,17 @@ for f in $srcdir/final.dubm $srcdir/final.mat $srcdir/global_cmvn.stats $srcdir/ [ ! -f $f ] && echo "No such file $f" && exit 1; done + +if [ -d "$dir" ]; then + bak_dir=$(mktemp -d ${dir}/backup.XXX); + echo "$0: Directory $dir already exists. Backing up iVector extractor in ${bak_dir}"; + for f in $dir/final.ie $dir/*.ie $dir/final.mat $dir/final.dubm \ + $dir/online_cmvn.conf $dir/global_cmvn.stats; do + [ -f "$f" ] && mv $f ${bak_dir}/ + done + [ -d "$dir/log" ] && mv $dir/log ${bak_dir}/ +fi + # Set various variables. 
mkdir -p $dir/log nj_full=$[$nj*$num_processes] @@ -105,7 +116,6 @@ gmm_feats="ark,s,cs:apply-cmvn-online --config=$dir/online_cmvn.conf $dir/global feats="ark,s,cs:splice-feats $splice_opts scp:$sdata/JOB/feats.scp ark:- | transform-feats $dir/final.mat ark:- ark:- | subsample-feats --n=$subsample ark:- ark:- |" - # Initialize the i-vector extractor using the input GMM, which is converted to # full because that's what the i-vector extractor expects. Note: we have to do # --use-weights=false to disable regression of the log weights on the ivector, @@ -115,7 +125,7 @@ if [ $stage -le -2 ]; then $cmd $dir/log/init.log \ ivector-extractor-init --ivector-dim=$ivector_dim --use-weights=false \ "gmm-global-to-fgmm $dir/final.dubm -|" $dir/0.ie || exit 1 -fi +fi # Do Gaussian selection and posterior extracion @@ -168,20 +178,23 @@ while [ $x -lt $num_iters ]; do # each accumulation process uses, since we # can be sure the queue will support this many. # - # The parallel-opts was either specified by + # The parallel-opts was either specified by # the user or we computed it correctly in # tge previous stages $cmd --num-threads $[$num_threads*$num_processes] $dir/log/update.$x.log \ ivector-extractor-est --num-threads=$nt $dir/$x.ie $dir/acc.$x $dir/$[$x+1].ie || exit 1; rm $dir/acc.$x.* if $cleanup; then - rm $dir/acc.$x - # rm $dir/$x.ie + rm $dir/acc.$x $dir/$x.ie fi fi x=$[$x+1] done +if $cleanup; then + rm $dir/post.*.gz +fi + rm $dir/final.ie 2>/dev/null ln -s $x.ie $dir/final.ie From 0e56d0aeda147dccbee80b1ebb6e2318a61dfeaf Mon Sep 17 00:00:00 2001 From: David Snyder Date: Fri, 14 Apr 2017 11:49:58 -0400 Subject: [PATCH 2/3] [egs,scripts] Updating AMI TDNN results to reflect the current recipe (tdnn1d). Fixing minor bug in egs/ami/s5b/local/chain/tuning/run_tdnn_*.sh scripts. 
--- egs/ami/s5b/README.txt | 2 +- egs/ami/s5b/RESULTS_ihm | 8 ++++---- egs/ami/s5b/RESULTS_mdm | 15 +++++++-------- egs/ami/s5b/RESULTS_sdm | 16 ++++++++-------- egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh | 4 ++-- egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh | 4 ++-- egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh | 4 ++-- egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh | 4 ++-- egs/ami/s5b/run.sh | 2 +- 9 files changed, 29 insertions(+), 30 deletions(-) diff --git a/egs/ami/s5b/README.txt b/egs/ami/s5b/README.txt index 032a2533e5b..2d5a522e228 100644 --- a/egs/ami/s5b/README.txt +++ b/egs/ami/s5b/README.txt @@ -5,7 +5,7 @@ many components removed. Before running run.sh, please run run_prepare_shared.sh. Afterwards, you can run: - run.sh --mic ihm # builds system for independent headset microphone + run.sh --mic ihm # builds system for independent headset microphone run.sh --mic sdm1 # single distant micropophone run.sh --mic mdm8 # multiple distant microphones + beamforming. diff --git a/egs/ami/s5b/RESULTS_ihm b/egs/ami/s5b/RESULTS_ihm index 25a60d24cfb..1003197701e 100644 --- a/egs/ami/s5b/RESULTS_ihm +++ b/egs/ami/s5b/RESULTS_ihm @@ -66,11 +66,11 @@ %WER 22.0 | 13098 94488 | 80.8 10.2 9.0 2.8 22.0 54.7 | 0.102 | exp/ihm/chain_cleaned/tdnn1b_sp_bi/decode_dev/ascore_10/dev_hires.ctm.filt.sys %WER 22.2 | 12643 89968 | 80.3 12.1 7.6 2.6 22.2 52.9 | 0.170 | exp/ihm/chain_cleaned/tdnn1b_sp_bi/decode_eval/ascore_10/eval_hires.ctm.filt.sys -# local/chain/run_tdnn.sh --mic ihm --train-set train --gmm tri3 --nnet3-affix "" --stage 12 +# local/chain/run_tdnn.sh --mic ihm --train-set train --gmm tri3 --nnet3-affix "" --stage 4 # chain TDNN model without cleanup [note: cleanup helps very little on this IHM data.] 
-# for d in exp/ihm/chain/tdnn_sp_bi/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done -%WER 22.4 | 13098 94476 | 80.4 10.4 9.2 2.8 22.4 54.6 | 0.069 | exp/ihm/chain/tdnn_sp_bi/decode_dev/ascore_10/dev_hires.ctm.filt.sys -%WER 22.5 | 12643 89974 | 80.0 12.1 7.9 2.6 22.5 52.8 | 0.157 | exp/ihm/chain/tdnn_sp_bi/decode_eval/ascore_10/eval_hires.ctm.filt.sys +# for d in exp/ihm/chain/tdnn1d_sp_bi/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +%WER 21.8 | 13098 94484 | 80.7 9.7 9.6 2.5 21.8 54.2 | 0.114 | exp/ihm/chain/tdnn1d_sp_bi/decode_dev/ascore_10/dev_hires.ctm.filt.sys +%WER 22.1 | 12643 89965 | 80.2 11.5 8.3 2.3 22.1 52.5 | 0.203 | exp/ihm/chain/tdnn1d_sp_bi/decode_eval/ascore_10/eval_hires.ctm.filt.sys # local/chain/multi_condition/run_tdnn.sh --mic ihm # cleanup + chain TDNN model + IHM reverberated data diff --git a/egs/ami/s5b/RESULTS_mdm b/egs/ami/s5b/RESULTS_mdm index f27da5773ac..d9155eca507 100644 --- a/egs/ami/s5b/RESULTS_mdm +++ b/egs/ami/s5b/RESULTS_mdm @@ -65,17 +65,16 @@ # cleanup + chain TDNN model, alignments from IHM data (IHM alignments help).
# local/chain/run_tdnn.sh --mic mdm8 --use-ihm-ali true --stage 12 & -# for d in exp/mdm8/chain_cleaned/tdnn_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done -%WER 37.4 | 15286 94509 | 66.6 18.0 15.5 3.9 37.4 62.8 | 0.624 | exp/mdm8/chain_cleaned/tdnn_sp_bi_ihmali/decode_dev/ascore_9/dev_hires_o4.ctm.filt.sys -%WER 40.6 | 13381 89982 | 62.7 18.9 18.3 3.3 40.6 67.6 | 0.594 | exp/mdm8/chain_cleaned/tdnn_sp_bi_ihmali/decode_eval/ascore_9/eval_hires_o4.ctm.filt.sys - +# for d in exp/mdm8/chain_cleaned/tdnn1d_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +%WER 36.4 | 15140 94513 | 67.3 17.5 15.2 3.6 36.4 63.2 | 0.613 | exp/mdm8/chain_cleaned/tdnn1d_sp_bi_ihmali/decode_dev/ascore_9/dev_hires_o4.ctm.filt.sys +%WER 39.7 | 13835 89969 | 63.2 18.4 18.4 3.0 39.7 65.7 | 0.584 | exp/mdm8/chain_cleaned/tdnn1d_sp_bi_ihmali/decode_eval/ascore_9/eval_hires_o4.ctm.filt.sys # local/chain/run_tdnn.sh --use-ihm-ali true --mic mdm8 --train-set train --gmm tri3 --nnet3-affix "" --stage 12 & # chain TDNN model-- no cleanup, but IHM alignments. -# note, this system is worse by [0.8, 1.3] than the system without cleanup. -# for d in exp/mdm8/chain/tdnn_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done -%WER 37.9 | 15635 94514 | 66.5 19.1 14.4 4.4 37.9 61.2 | 0.646 | exp/mdm8/chain/tdnn_sp_bi_ihmali/decode_dev/ascore_8/dev_hires_o4.ctm.filt.sys -%WER 41.5 | 13884 89975 | 62.3 20.3 17.4 3.8 41.5 66.0 | 0.621 | exp/mdm8/chain/tdnn_sp_bi_ihmali/decode_eval/ascore_8/eval_hires_o4.ctm.filt.sys +# note, this system is worse by [0.5, 0.5] than the system without cleanup. 
+# for d in exp/mdm8/chain/tdnn1d_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +%WER 36.9 | 15282 94502 | 67.1 18.5 14.4 4.1 36.9 62.5 | 0.635 | exp/mdm8/chain/tdnn1d_sp_bi_ihmali/decode_dev/ascore_8/dev_hires_o4.ctm.filt.sys +%WER 40.2 | 13729 89992 | 63.3 19.8 17.0 3.5 40.2 66.4 | 0.608 | exp/mdm8/chain/tdnn1d_sp_bi_ihmali/decode_eval/ascore_8/eval_hires_o4.ctm.filt.sys # local/chain/multi_condition/run_tdnn.sh --mic mdm8 --use-ihm-ali true --train-set train_cleaned --gmm tri3_cleaned # cleanup + chain TDNN model, MDM original + IHM reverberated data, alignments from IHM data diff --git a/egs/ami/s5b/RESULTS_sdm b/egs/ami/s5b/RESULTS_sdm index 05b68e5e780..737f8f6dc09 100644 --- a/egs/ami/s5b/RESULTS_sdm +++ b/egs/ami/s5b/RESULTS_sdm @@ -67,17 +67,17 @@ # cleanup + chain TDNN model, alignments from IHM data (IHM alignments help). # local/chain/run_tdnn.sh --mic sdm1 --use-ihm-ali true --stage 12 & # cleanup + chain TDNN model, cleaned data and alignments from ihm data. -# for d in exp/sdm1/chain_cleaned/tdnn_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done -%WER 40.7 | 14321 94501 | 63.0 19.6 17.4 3.7 40.7 67.7 | 0.592 | exp/sdm1/chain_cleaned/tdnn_sp_bi_ihmali/decode_dev/ascore_9/dev_hires_o4.ctm.filt.sys -%WER 44.8 | 14293 89976 | 58.6 21.3 20.1 3.3 44.8 64.2 | 0.559 | exp/sdm1/chain_cleaned/tdnn_sp_bi_ihmali/decode_eval/ascore_9/eval_hires_o4.ctm.filt.sys +# for d in exp/sdm1/chain_cleaned/tdnn1d_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +%WER 39.5 | 14280 94503 | 64.0 19.3 16.7 3.5 39.5 67.7 | 0.582 | exp/sdm1/chain_cleaned/tdnn1d_sp_bi_ihmali/decode_dev/ascore_9/dev_hires_o4.ctm.filt.sys +%WER 43.9 | 13566 89961 | 59.3 20.9 19.9 3.1 43.9 67.9 | 0.547 | exp/sdm1/chain_cleaned/tdnn1d_sp_bi_ihmali/decode_eval/ascore_9/eval_hires_o4.ctm.filt.sys # no-cleanup + chain TDNN model, IHM alignments. -# A bit worse than with cleanup [+0.1, +0.4]. 
-# local/chain/run_tdnn.sh --use-ihm-ali true --mic sdm1 --train-set train --gmm tri3 --nnet3-affix "" --stage 17 - for d in exp/sdm1/chain/tdnn_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done -%WER 40.7 | 14549 94520 | 63.6 21.4 15.0 4.3 40.7 66.2 | 0.617 | exp/sdm1/chain/tdnn_sp_bi_ihmali/decode_dev/ascore_8/dev_hires_o4.ctm.filt.sys -%WER 45.1 | 13296 89971 | 59.1 23.4 17.6 4.2 45.1 69.5 | 0.591 | exp/sdm1/chain/tdnn_sp_bi_ihmali/decode_eval/ascore_8/eval_hires_o4.ctm.filt.sys +# A bit worse than with cleanup [+0.3, +0.4]. +# local/chain/run_tdnn.sh --use-ihm-ali true --mic sdm1 --train-set train --gmm tri3 --nnet3-affix "" --stage 12 +# for d in exp/sdm1/chain/tdnn1d_sp_bi_ihmali/decode_*; do grep Sum $d/*sc*/*ys | utils/best_wer.sh; done +%WER 39.8 | 15384 94535 | 64.4 21.0 14.6 4.2 39.8 62.8 | 0.610 | exp/sdm1/chain/tdnn1d_sp_bi_ihmali/decode_dev/ascore_8/dev_hires_o4.ctm.filt.sys +%WER 44.3 | 14046 90002 | 59.6 23.1 17.3 3.9 44.3 65.6 | 0.571 | exp/sdm1/chain/tdnn1d_sp_bi_ihmali/decode_eval/ascore_8/eval_hires_o4.ctm.filt.sys # local/chain/multi_condition/run_tdnn.sh --mic sdm1 --use-ihm-ali true --train-set train_cleaned --gmm tri3_cleaned # cleanup + chain TDNN model, SDM original + IHM reverberated data, alignments from ihm data. diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh index b3a645c0c11..bdc3fbfb663 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh @@ -184,9 +184,9 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - touch $dir/egs/.nodelete # keep egs around when that run dies. + mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. 
- steps/nnet3/chain/train.py --stage $train_stage \ + steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh index 0644d624606..63c1623a57a 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh @@ -212,9 +212,9 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - touch $dir/egs/.nodelete # keep egs around when that run dies. + mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. - steps/nnet3/chain/train.py --stage $train_stage \ + steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh index 0a49575ebb0..2cfbf165401 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh @@ -199,9 +199,9 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - touch $dir/egs/.nodelete # keep egs around when that run dies. + mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. 
- steps/nnet3/chain/train.py --stage $train_stage \ + steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh index a9f228cb55d..16b89c857c1 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh @@ -211,9 +211,9 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - touch $dir/egs/.nodelete # keep egs around when that run dies. + mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. - steps/nnet3/chain/train.py --stage $train_stage \ + steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ diff --git a/egs/ami/s5b/run.sh b/egs/ami/s5b/run.sh index 56cdd29e311..0a630a87a5b 100755 --- a/egs/ami/s5b/run.sh +++ b/egs/ami/s5b/run.sh @@ -56,7 +56,7 @@ if [ "$base_mic" == "mdm" ]; then PROCESSED_AMI_DIR=$AMI_DIR/beamformed if [ $stage -le 1 ]; then # for MDM data, do beamforming - ! hash BeamformIt && echo "Missing BeamformIt, run 'cd ../../../tools/; make beamformit;'" && exit 1 + ! 
hash BeamformIt && echo "Missing BeamformIt, run 'cd ../../../tools/; extras/install_beamformit.sh; cd -;'" && exit 1 local/ami_beamform.sh --cmd "$train_cmd" --nj 20 $nmics $AMI_DIR $PROCESSED_AMI_DIR fi else From 10ec7172167ae793fa2f68d24976459ab593e12a Mon Sep 17 00:00:00 2001 From: David Snyder Date: Fri, 14 Apr 2017 15:42:35 -0400 Subject: [PATCH 3/3] [egs] Updating chain scripts in AMI so that they do not default to keeping egs --- egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh | 2 -- egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh | 2 -- egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh | 2 -- egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh | 2 -- 4 files changed, 8 deletions(-) diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh index bdc3fbfb663..86587d6d830 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1a.sh @@ -184,8 +184,6 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. - steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh index 63c1623a57a..98dc95e59a2 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1b.sh @@ -212,8 +212,6 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. 
- steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh index 2cfbf165401..f87e1a12d36 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1c.sh @@ -199,8 +199,6 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. - steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \ diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh index 16b89c857c1..eb84a1cd876 100755 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_1d.sh @@ -211,8 +211,6 @@ if [ $stage -le 16 ]; then /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage fi - mkdir -p $dir/egs && touch $dir/egs/.nodelete # keep egs around when that run dies. - steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir $train_ivector_dir \