84 changes: 63 additions & 21 deletions egs/tedlium/s5_r2/RESULTS
@@ -1,3 +1,7 @@
# Results based on the TED-LIUM release 2 paper, using the original LM provided by the LIUM team
# Paper results (WER): 10.1 / 11.1
# http://www.lrec-conf.org/proceedings/lrec2014/pdf/1104_Paper.pdf


# steps/info/gmm_dir_info.pl exp/mono exp/tri{1,2,3,3_cleaned}
# exp/mono: nj=20 align prob=-96.77 over 4.65h [retry=1.1%, fail=0.1%] states=127 gauss=1001
@@ -17,50 +21,78 @@
######### tri1 results ########
for d in exp/tri1/decode_*; do grep Sum $d/*ore*/*ys | utils/best_wer.sh ; done
# small LM
%WER 28.3 | 507 17783 | 75.3 17.5 7.2 3.6 28.3 97.2 | 0.053 | exp/tri1/decode_nosp_dev/score_11_0.0/ctm.filt.filt.sys
%WER 27.5 | 1155 27500 | 75.1 18.2 6.7 2.6 27.5 93.9 | 0.103 | exp/tri1/decode_nosp_test/score_12_0.0/ctm.filt.filt.sys
%WER 27.8 | 507 17783 | 75.7 17.5 6.8 3.4 27.8 96.6 | 0.071 | exp/tri1/decode_nosp_dev/score_10_0.0/ctm.filt.filt.sys
%WER 27.3 | 1155 27500 | 75.3 18.4 6.3 2.7 27.3 93.0 | 0.119 | exp/tri1/decode_nosp_test/score_11_0.0/ctm.filt.filt.sys
# big LM
%WER 27.1 | 507 17783 | 76.3 16.6 7.0 3.4 27.1 96.1 | 0.018 | exp/tri1/decode_nosp_dev_rescore/score_11_0.0/ctm.filt.filt.sys
%WER 26.4 | 1155 27500 | 76.4 17.5 6.1 2.8 26.4 92.9 | 0.046 | exp/tri1/decode_nosp_test_rescore/score_11_0.0/ctm.filt.filt.sys
%WER 26.3 | 507 17783 | 76.8 16.1 7.1 3.1 26.3 95.9 | 0.080 | exp/tri1/decode_nosp_dev_rescore/score_11_0.0/ctm.filt.filt.sys
%WER 26.2 | 1155 27500 | 76.6 17.3 6.1 2.8 26.2 92.6 | 0.081 | exp/tri1/decode_nosp_test_rescore/score_11_0.0/ctm.filt.filt.sys

####### tri2 results ##########
#for d in exp/tri2/decode_*; do grep Sum $d/score*/*ys | utils/best_wer.sh ; done

# small LM
%WER 24.0 | 507 17783 | 79.1 14.5 6.3 3.1 24.0 94.3 | 0.003 | exp/tri2/decode_nosp_dev/score_14_0.0/ctm.filt.filt.sys
%WER 23.0 | 1155 27500 | 79.6 15.3 5.1 2.5 23.0 91.5 | 0.049 | exp/tri2/decode_nosp_test/score_13_0.0/ctm.filt.filt.sys
%WER 23.6 | 507 17783 | 79.6 14.8 5.6 3.2 23.6 95.1 | 0.024 | exp/tri2/decode_nosp_dev/score_12_0.0/ctm.filt.filt.sys
%WER 23.2 | 1155 27500 | 79.5 15.5 5.0 2.7 23.2 91.1 | 0.070 | exp/tri2/decode_nosp_test/score_12_0.0/ctm.filt.filt.sys
# big LM
%WER 23.1 | 507 17783 | 80.4 13.9 5.7 3.5 23.1 93.7 | -0.046 | exp/tri2/decode_nosp_dev_rescore/score_12_0.0/ctm.filt.filt.sys
%WER 22.0 | 1155 27500 | 80.5 14.7 4.9 2.5 22.0 91.1 | 0.009 | exp/tri2/decode_nosp_test_rescore/score_13_0.0/ctm.filt.filt.sys
%WER 22.3 | 507 17783 | 80.7 13.5 5.8 3.0 22.3 93.7 | -0.002 | exp/tri2/decode_nosp_dev_rescore/score_13_0.0/ctm.filt.filt.sys
%WER 21.9 | 1155 27500 | 80.7 14.6 4.7 2.6 21.9 90.2 | 0.026 | exp/tri2/decode_nosp_test_rescore/score_12_0.0/ctm.filt.filt.sys

# small LM with silence and pronunciation probs.
%WER 23.3 | 507 17783 | 80.1 14.5 5.4 3.4 23.3 93.7 | 0.031 | exp/tri2/decode_dev/score_14_0.0/ctm.filt.filt.sys
%WER 22.1 | 1155 27500 | 80.8 15.1 4.1 3.0 22.1 90.8 | 0.058 | exp/tri2/decode_test/score_13_0.0/ctm.filt.filt.sys
%WER 22.5 | 507 17783 | 80.5 14.0 5.5 3.1 22.5 94.7 | 0.092 | exp/tri2/decode_dev/score_15_0.0/ctm.filt.filt.sys
%WER 22.1 | 1155 27500 | 80.7 14.9 4.3 2.8 22.1 90.6 | 0.089 | exp/tri2/decode_test/score_13_0.0/ctm.filt.filt.sys

# big LM with silence and pronunciation probs.
%WER 22.3 | 507 17783 | 81.2 13.7 5.0 3.5 22.3 93.5 | 0.009 | exp/tri2/decode_dev_rescore/score_13_0.0/ctm.filt.filt.sys
%WER 21.2 | 1155 27500 | 81.6 14.3 4.0 2.8 21.2 90.5 | 0.021 | exp/tri2/decode_test_rescore/score_13_0.0/ctm.filt.filt.sys
%WER 21.3 | 507 17783 | 81.8 13.1 5.1 3.1 21.3 93.7 | 0.038 | exp/tri2/decode_dev_rescore/score_14_0.0/ctm.filt.filt.sys
%WER 20.9 | 1155 27500 | 81.9 14.0 4.1 2.8 20.9 90.5 | 0.046 | exp/tri2/decode_test_rescore/score_13_0.0/ctm.filt.filt.sys

####### tri3 results ##########
# small LM
%WER 19.4 | 507 17783 | 83.8 11.6 4.6 3.2 19.4 92.7 | -0.066 | exp/tri3/decode_dev/score_16_0.0/ctm.filt.filt.sys
%WER 17.4 | 1155 27500 | 84.9 11.6 3.5 2.4 17.4 87.1 | -0.018 | exp/tri3/decode_test/score_15_0.0/ctm.filt.filt.sys
%WER 18.7 | 507 17783 | 83.9 11.4 4.7 2.6 18.7 92.3 | -0.006 | exp/tri3/decode_dev/score_17_0.0/ctm.filt.filt.sys
%WER 17.6 | 1155 27500 | 84.7 11.6 3.7 2.4 17.6 87.2 | 0.013 | exp/tri3/decode_test/score_15_0.0/ctm.filt.filt.sys

# big LM
%WER 18.3 | 507 17783 | 84.7 10.8 4.5 3.0 18.3 91.3 | -0.112 | exp/tri3/decode_dev_rescore/score_16_0.0/ctm.filt.filt.sys
%WER 16.5 | 1155 27500 | 85.8 11.0 3.2 2.4 16.5 86.3 | -0.083 | exp/tri3/decode_test_rescore/score_14_0.0/ctm.filt.filt.sys

%WER 17.6 | 507 17783 | 85.0 10.5 4.4 2.6 17.6 90.5 | -0.030 | exp/tri3/decode_dev_rescore/score_16_0.0/ctm.filt.filt.sys
%WER 16.7 | 1155 27500 | 85.7 10.9 3.4 2.4 16.7 86.4 | -0.044 | exp/tri3/decode_test_rescore/score_14_0.0/ctm.filt.filt.sys


for d in exp/tri3_cleaned/decode_*; do grep Sum $d/score*/*ys | utils/best_wer.sh ; done
# tri3 after cleaning, small LM.
%WER 19.8 | 507 17783 | 83.7 11.9 4.4 3.5 19.8 93.9 | -0.114 | exp/tri3_cleaned/decode_dev/score_14_0.0/ctm.filt.filt.sys
%WER 17.5 | 1155 27500 | 84.9 11.6 3.5 2.4 17.5 88.1 | -0.024 | exp/tri3_cleaned/decode_test/score_15_0.0/ctm.filt.filt.sys
#
%WER 19.0 | 507 17783 | 83.9 11.4 4.7 2.9 19.0 92.1 | -0.054 | exp/tri3_cleaned/decode_dev/score_13_0.5/ctm.filt.filt.sys
%WER 17.6 | 1155 27500 | 84.8 11.7 3.5 2.4 17.6 87.6 | 0.001 | exp/tri3_cleaned/decode_test/score_15_0.0/ctm.filt.filt.sys

# tri3 after cleaning, large LM.
%WER 18.8 | 507 17783 | 84.5 11.1 4.4 3.2 18.8 91.5 | -0.137 | exp/tri3_cleaned/decode_dev_rescore/score_15_0.0/ctm.filt.filt.sys
%WER 16.7 | 1155 27500 | 85.6 10.9 3.4 2.3 16.7 87.0 | -0.066 | exp/tri3_cleaned/decode_test_rescore/score_15_0.0/ctm.filt.filt.sys
%WER 17.9 | 507 17783 | 85.1 10.5 4.4 3.0 17.9 90.9 | -0.055 | exp/tri3_cleaned/decode_dev_rescore/score_15_0.0/ctm.filt.filt.sys
%WER 16.6 | 1155 27500 | 85.8 10.9 3.4 2.4 16.6 86.4 | -0.058 | exp/tri3_cleaned/decode_test_rescore/score_15_0.0/ctm.filt.filt.sys


########## nnet3+chain systems
#
# chain+TDNN, small LM
%WER 9.7 | 507 17783 | 91.7 5.8 2.5 1.4 9.7 78.7 | 0.097 | exp/chain_cleaned/tdnn_sp_bi/decode_dev/score_10_0.0/ctm.filt.filt.sys
%WER 9.5 | 1155 27500 | 91.7 5.8 2.5 1.2 9.5 72.5 | 0.079 | exp/chain_cleaned/tdnn_sp_bi/decode_test/score_10_0.0/ctm.filt.filt.sys

# chain+TDNN, large LM
%WER 9.0 | 507 17783 | 92.3 5.3 2.4 1.3 9.0 76.7 | 0.067 | exp/chain_cleaned/tdnn_sp_bi/decode_dev_rescore/score_10_0.0/ctm.filt.filt.sys
%WER 9.0 | 1155 27500 | 92.2 5.3 2.5 1.2 9.0 71.3 | 0.064 | exp/chain_cleaned/tdnn_sp_bi/decode_test_rescore/score_10_0.0/ctm.filt.filt.sys

# chain+TDNN systems ran without cleanup, using the command:
# local/chain/run_tdnn.sh --train-set train --gmm tri3 --nnet3-affix ""
# for d in exp/chain/tdnn_sp_bi/decode_*; do grep Sum $d/*/*ys | utils/best_wer.sh; done
# This is about 0.1% (dev) / 0.4% (test) worse than the corresponding results with cleanup.
%WER 9.8 | 507 17783 | 91.6 6.0 2.4 1.5 9.8 80.1 | -0.038 | exp/chain/tdnn_sp_bi/decode_dev/score_8_0.0/ctm.filt.filt.sys
%WER 9.9 | 1155 27500 | 91.4 5.7 2.9 1.3 9.9 74.9 | 0.083 | exp/chain/tdnn_sp_bi/decode_test/score_9_0.0/ctm.filt.filt.sys
%WER 9.1 | 507 17783 | 92.3 5.5 2.3 1.4 9.1 77.5 | 0.011 | exp/chain/tdnn_sp_bi/decode_dev_rescore/score_8_0.0/ctm.filt.filt.sys
%WER 9.4 | 1155 27500 | 91.9 5.6 2.5 1.4 9.4 72.7 | 0.018 | exp/chain/tdnn_sp_bi/decode_test_rescore/score_8_0.0/ctm.filt.filt.sys
####################################################################################################################
For the record, results with unpruned LM:
%WER 8.2 | 507 17783 | 92.8 4.5 2.6 1.1 8.2 70.8 | -0.036 | exp/chain/tdnn_sp_bi/decode_dev_1848_rescore/score_9_0.0/ctm.filt.filt.sys
%WER 9.3 | 1155 27500 | 91.8 5.1 3.0 1.2 9.3 71.7 | -0.008 | exp/chain/tdnn_sp_bi/decode_test_1848_rescore/score_9_0.0/ctm.filt.filt.sys


#####################################################################################################################
# BELOW FOR REFERENCE, old results with the Cantab LM -- including nnet3 TDNN and BLSTM results
#####################################################################################################################

####### nnet3 results #####

@@ -73,6 +105,16 @@ for x in exp/nnet3_cleaned/tdnn_sp/decode_*; do grep Sum $x/*ore*/*ys | utils/best_wer.sh; done
%WER 11.9 | 507 17783 | 90.0 7.0 3.0 1.9 11.9 81.9 | -0.072 | exp/nnet3_cleaned/tdnn_sp/decode_dev_rescore/score_11_0.0/ctm.filt.filt.sys
%WER 10.8 | 1155 27500 | 90.6 6.7 2.7 1.4 10.8 76.6 | -0.101 | exp/nnet3_cleaned/tdnn_sp/decode_test_rescore/score_11_0.0/ctm.filt.filt.sys

# BLSTM small LM
# These results are with ClipGradientComponent and without the deriv_time fix, so they may not reflect the latest changes
# for x in exp/nnet3_cleaned/lstm_bidirectional_sp/decode_*; do grep Sum $x/*ore*/*ys | utils/best_wer.sh; done
%WER 11.1 | 507 17783 | 90.5 6.8 2.7 1.6 11.1 80.7 | -0.251 | exp/nnet3_cleaned/lstm_bidirectional_sp/decode_dev/score_10_0.0/ctm.filt.filt.sys
%WER 10.2 | 1155 27500 | 91.0 6.4 2.6 1.2 10.2 75.5 | -0.278 | exp/nnet3_cleaned/lstm_bidirectional_sp/decode_test/score_10_0.0/ctm.filt.filt.sys

# BLSTM large LM
%WER 10.6 | 507 17783 | 91.0 6.5 2.5 1.6 10.6 79.3 | -0.275 | exp/nnet3_cleaned/lstm_bidirectional_sp/decode_dev_rescore/score_10_0.0/ctm.filt.filt.sys
%WER 9.9 | 1155 27500 | 91.3 6.1 2.6 1.2 9.9 74.1 | -0.306 | exp/nnet3_cleaned/lstm_bidirectional_sp/decode_test_rescore/score_10_0.0/ctm.filt.filt.sys

# nnet3 results without cleanup, run with:
# local/nnet3/run_tdnn.sh --train-set train --gmm tri3 --nnet3-affix ""
# This is only about 0.1% worse than the baseline with cleanup... the cleanup helps
4 changes: 2 additions & 2 deletions egs/tedlium/s5_r2/local/chain/run_tdnn.sh
@@ -28,7 +28,7 @@ nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned
train_stage=-10
tree_affix= # affix for tree directory, e.g. "a" or "b", in case we change the configuration.
tdnn_affix= #affix for TDNN directory, e.g. "a" or "b", in case we change the configuration.
common_egs_dir= # you can set this to use previously dumped egs.
common_egs_dir= #exp/chain/tdnn_sp_bi/egs # you can set this to use previously dumped egs.

# End configuration section.
echo "$0 $@" # Print the command line for logging
@@ -196,4 +196,4 @@ if [ $stage -le 20 ]; then
exit 1
fi
fi
exit 0
exit 0
13 changes: 0 additions & 13 deletions egs/tedlium/s5_r2/local/download_data.sh
@@ -34,18 +34,5 @@ if [ "$num_sph" != 1514 ]; then
exit 1
fi

# Language models (Cantab Research):
if [ ! -e cantab-TEDLIUM ]; then
echo "$0: Downloading \"http://www.openslr.org/resources/27/cantab-TEDLIUM-partial.tar.bz2\". "
wget --no-verbose --output-document=- http://www.openslr.org/resources/27/cantab-TEDLIUM-partial.tar.bz2 | bzcat | tar --extract --file=- || exit 1
else
echo "$0: directory cantab-TEDLIUM already exists, not re-downloading."
fi

if [ ! -s cantab-TEDLIUM/cantab-TEDLIUM.dct ]; then
echo "$0: expected file db/cantab-TEDLIUM/cantab-TEDLIUM.dct to exist and be nonempty."
exit 1
fi

exit 0

3 changes: 2 additions & 1 deletion egs/tedlium/s5_r2/local/prepare_dict.sh
@@ -3,13 +3,14 @@
# Copyright 2014 Nickolay V. Shmyrev
# 2014 Brno University of Technology (Author: Karel Vesely)
# 2016 Daniel Galvez
# 2016 Vincent Nguyen
# Apache 2.0
#

dir=data/local/dict_nosp
mkdir -p $dir

srcdict=db/cantab-TEDLIUM/cantab-TEDLIUM.dct
srcdict=db/TEDLIUM_release2/TEDLIUM.152k.dic

[ ! -r $srcdict ] && echo "Missing $srcdict" && exit 1

29 changes: 9 additions & 20 deletions egs/tedlium/s5_r2/local/ted_train_lm.sh
@@ -44,7 +44,7 @@ num_dev_sentences=10000
# These example metaparameter values are for a 4-gram model (with min-counts),
# run with train_lm.py.
# The dev perplexity should be close to that of the non-bypassed model (see the check sketched below).
bypass_metaparam_optim_opt="--bypass-metaparameter-optimization=0.837,0.023,0.761,0.065,0.029,0.015,0.999,0.361,0.157,0.080,0.999,0.625,0.2164,0.2162"
bypass_metaparam_optim_opt="--bypass-metaparameter-optimization=0.854,0.0722,0.5808,0.338,0.166,0.015,0.999,0.6228,0.340,0.172,0.999,0.788,0.501,0.406"
# Note: to use these example parameters, you may need to remove the .done files
# so that make_lm_dir.py is called again, and train only a 3-gram model:
#for order in 3; do
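# A sketch of that perplexity check: reuse the get_data_prob.py call this script already runs
# at later stages, once for the bypassed and once for the fully optimized model, and compare:
#   get_data_prob.py ${dir}/data/real_dev_set.txt ${unpruned_lm_dir} 2>&1 | grep -F '[perplexity'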
@@ -58,8 +58,8 @@ if [ $stage -le 0 ]; then

rm ${dir}/data/text/* 2>/dev/null || true

# cantab-TEDLIUM is the larger data source. gzip it.
sed 's/ <\/s>//g' < db/cantab-TEDLIUM/cantab-TEDLIUM.txt | gzip -c > ${dir}/data/text/train.txt.gz
# Uncompress the 6 TEDLIUM LM data sources, join apostrophe+suffix tokens onto the previous word, and gzip the result.
gunzip -c db/TEDLIUM_release2/LM/*.en.gz | sed 's/ <\/s>//g' | local/join_suffix.py | gzip -c > ${dir}/data/text/train.txt.gz
# Use a subset of the annotated training data as the dev set.
# Note: the name 'dev' is treated specially by pocolm; it automatically
# becomes the dev set.
@@ -76,7 +76,7 @@ if [ $stage -le 0 ]; then
cut -d " " -f 2- < data/dev/text > ${dir}/data/real_dev_set.txt

# get wordlist
awk '{print $1}' db/cantab-TEDLIUM/cantab-TEDLIUM.dct | sort | uniq > ${dir}/data/wordlist
awk '{print $1}' db/TEDLIUM_release2/TEDLIUM.152k.dic | sed 's:([0-9])::g' | sort | uniq > ${dir}/data/wordlist
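# (the sed above strips pronunciation-variant markers such as "(2)" from the dictionary
#  entries, so that 'sort | uniq' leaves a single wordlist entry per word)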
fi

order=4
@@ -103,13 +103,7 @@ if [ $stage -le 1 ]; then
${dir}/data/text ${order} ${lm_dir}/work ${unpruned_lm_dir}

get_data_prob.py ${dir}/data/real_dev_set.txt ${unpruned_lm_dir} 2>&1 | grep -F '[perplexity'

# current results, after adding --limit-unk-history=true:
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/wordlist_4.pocolm was -5.13486225358 per word [perplexity = 169.840923284] over 18290.0 words.
# older results (after adding min-counts):
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/wordlist_4.pocolm was -5.13902242865 per word [perplexity = 170.514153159] over 18290.0 words.
# even older results, before adding min-counts:
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4 was -5.10576291033 per word [perplexity = 164.969879761] over 18290.0 words.
#[perplexity = 157.87] over 18290.0 words
Reviewer comment (Contributor): The deleted lines appear to have provided a more detailed comment, about the log-prob and the model being used. Is it possible to do the same with your updates?
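
A minimal sketch of one way to keep that detail, reusing the get_data_prob.py call and perplexity filter already in this script (the tee output path is only illustrative):

get_data_prob.py ${dir}/data/real_dev_set.txt ${unpruned_lm_dir} 2>&1 \
  | grep -F '[perplexity' \
  | tee ${dir}/data/unpruned_dev_logprob.txt   # paste the full log-prob/perplexity line into the comment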

fi

if [ $stage -le 2 ]; then
@@ -121,10 +115,8 @@ if [ $stage -le 2 ]; then
get_data_prob.py ${dir}/data/real_dev_set.txt ${dir}/data/lm_${order}_prune_big 2>&1 | grep -F '[perplexity'

# current results, after adding --limit-unk-history=true:
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4_prune_big was -5.17558740241 per word [perplexity = 176.90049554] over 18290.0 words.
# older results, after adding min-counts:
# get_data_prob.py ${dir}/data/real_dev_set.txt ${dir}/data/lm_${order}_prune_big 2>&1 | grep -F '[perplexity'
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4_prune_big was -5.17638942756 per word [perplexity = 177.006688203] over 18290.0 words.
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4_prune_big was -5.16562818753 per word [perplexity = 175.147449465] over 18290.0 words.


mkdir -p ${dir}/data/arpa
format_arpa_lm.py ${dir}/data/lm_${order}_prune_big | gzip -c > ${dir}/data/arpa/${order}gram_big.arpa.gz
@@ -140,11 +132,8 @@ if [ $stage -le 3 ]; then
get_data_prob.py ${dir}/data/real_dev_set.txt ${dir}/data/lm_${order}_prune_small 2>&1 | grep -F '[perplexity'

# current results, after adding --limit-unk-history=true (needed for modeling OOVs and not blowing up LG.fst):
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4_prune_small was -5.28036622198 per word [perplexity = 196.441803486] over 18290.0 words.
# older results, after adding min-counts:
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4_prune_small was -5.28346290049 per word [perplexity = 197.123843355] over 18290.0 words.
# even older results, before adding min-counts:
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4_prune_small was -5.27623197813 per word [perplexity = 195.631341646] over 18290.0 words.
# get_data_prob.py: log-prob of data/local/local_lm/data/real_dev_set.txt given model data/local/local_lm/data/lm_4_prune_small was -5.29432352378 per word [perplexity = 199.202824404] over 18290.0 words.


format_arpa_lm.py ${dir}/data/lm_${order}_prune_small | gzip -c > ${dir}/data/arpa/${order}gram_small.arpa.gz
fi
2 changes: 1 addition & 1 deletion egs/tedlium/s5_r2/run.sh
@@ -185,7 +185,7 @@ fi
if [ $stage -le 17 ]; then
# This will only work if you have GPUs on your system (and note that it requires
# you to have the queue set up the right way... see kaldi-asr.org/doc/queue.html)
local/chain/run_tdnn.sh
local/chain/run_tdnn.sh --train-set train --gmm tri3 --nnet3-affix ""
Reviewer comment (Contributor): In your RESULTS file, you say that
  "This is about 0.6% worse than the corresponding results with cleanup."
If that's the case, shouldn't the version with cleanup be the default here (like it was before)?

fi

# The nnet3 TDNN recipe: