diff --git a/egs/chime5/s5/cmd.sh b/egs/chime5/s5/cmd.sh index a697a22cda3..9702501f1a7 100644 --- a/egs/chime5/s5/cmd.sh +++ b/egs/chime5/s5/cmd.sh @@ -10,6 +10,6 @@ # conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information, # or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl. -export train_cmd="queue.pl --mem 2G" +export train_cmd="retry.pl queue.pl --mem 2G" export decode_cmd="queue.pl --mem 4G" diff --git a/egs/chime5/s5/local/chain/tuning/run_tdnn_1a.sh b/egs/chime5/s5/local/chain/tuning/run_tdnn_1a.sh index 5418ecf2b4f..d60e6a4aa04 100755 --- a/egs/chime5/s5/local/chain/tuning/run_tdnn_1a.sh +++ b/egs/chime5/s5/local/chain/tuning/run_tdnn_1a.sh @@ -24,21 +24,16 @@ decode_iter= # training options # training chunk-options chunk_width=140,100,160 -# we don't need extra left/right context for TDNN systems. -chunk_left_context=0 -chunk_right_context=0 common_egs_dir= xent_regularize=0.1 # training options srand=0 remove_egs=true -reporting_email= #decode options test_online_decoding=false # if true, it will run the last decoding stage. - # End configuration section. echo "$0 $@" # Print the command line for logging @@ -176,7 +171,6 @@ EOF steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ fi - if [ $stage -le 14 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! 
-d $dir/egs/storage ]; then utils/create_split_dir.pl \ @@ -204,15 +198,10 @@ if [ $stage -le 14 ]; then --trainer.num-chunk-per-minibatch=256,128,64 \ --trainer.optimization.momentum=0.0 \ --egs.chunk-width=$chunk_width \ - --egs.chunk-left-context=$chunk_left_context \ - --egs.chunk-right-context=$chunk_right_context \ - --egs.chunk-left-context-initial=0 \ - --egs.chunk-right-context-final=0 \ --egs.dir="$common_egs_dir" \ --egs.opts="--frames-overlap-per-eg 0" \ --cleanup.remove-egs=$remove_egs \ --use-gpu=true \ - --reporting.email="$reporting_email" \ --feat-dir=$train_data_dir \ --tree-dir=$tree_dir \ --lat-dir=$lat_dir \ @@ -235,10 +224,6 @@ if [ $stage -le 16 ]; then ( steps/nnet3/decode.sh \ --acwt 1.0 --post-decode-acwt 10.0 \ - --extra-left-context $chunk_left_context \ - --extra-right-context $chunk_right_context \ - --extra-left-context-initial 0 \ - --extra-right-context-final 0 \ --frames-per-chunk $frames_per_chunk \ --nj 8 --cmd "$decode_cmd" --num-threads 4 \ --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ diff --git a/egs/chime5/s5/local/nnet3/run_ivector_common.sh b/egs/chime5/s5/local/nnet3/run_ivector_common.sh index e28e5ce996d..2b672063be7 100755 --- a/egs/chime5/s5/local/nnet3/run_ivector_common.sh +++ b/egs/chime5/s5/local/nnet3/run_ivector_common.sh @@ -23,7 +23,7 @@ nnet3_affix=_train_worn_u100k gmm_dir=exp/${gmm} ali_dir=exp/${gmm}_ali_${train_set}_sp -for f in data/${train_set}/feats.scp ${gmm_dir}/final.mdl; do +for f in data/${train_set}/utt2spk ${gmm_dir}/final.mdl; do if [ ! 
-f $f ]; then echo "$0: expected file $f to exist" exit 1 diff --git a/egs/chime5/s5/local/run_wpe.py b/egs/chime5/s5/local/run_wpe.py new file mode 100644 index 00000000000..cc9cd41927a --- /dev/null +++ b/egs/chime5/s5/local/run_wpe.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# Copyright 2018 Johns Hopkins University (Author: Aswin Shanmugam Subramanian) +# Apache 2.0 +# Works with both python2 and python3 + +import numpy as np +import soundfile as sf +import time +import os, errno +from tqdm import tqdm +import argparse + +from nara_wpe.wpe import wpe +from nara_wpe.utils import stft, istft +from nara_wpe import project_root + +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', nargs='+') +args = parser.parse_args() + +input_files = args.files[:len(args.files)//2] +output_files = args.files[len(args.files)//2:] +out_dir = os.path.dirname(output_files[0]) +try: + os.makedirs(out_dir) +except OSError as e: + if e.errno != errno.EEXIST: + raise + +stft_options = dict( + size=512, + shift=128, + window_length=None, + fading=True, + pad=True, + symmetric_window=False +) + +sampling_rate = 16000 +delay = 3 +iterations = 5 +taps = 10 + +signal_list = [ + sf.read(f)[0] + for f in input_files +] +y = np.stack(signal_list, axis=0) +Y = stft(y, **stft_options).transpose(2, 0, 1) +Z = wpe(Y, iterations=iterations, statistics_mode='full').transpose(1, 2, 0) +z = istft(Z, size=stft_options['size'], shift=stft_options['shift']) + +for d in range(len(signal_list)): + sf.write(output_files[d], z[d,:], sampling_rate) diff --git a/egs/chime5/s5/local/run_wpe.sh b/egs/chime5/s5/local/run_wpe.sh new file mode 100755 index 00000000000..8ecbbd6182a --- /dev/null +++ b/egs/chime5/s5/local/run_wpe.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# Copyright 2018 Johns Hopkins University (Author: Aswin Shanmugam Subramanian) +# Apache 2.0 + +. ./cmd.sh +. ./path.sh + +# Config: +nj=4 +cmd=run.pl + +. 
utils/parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: local/run_wpe.sh [options] " + echo "main options (for others, see top of script file)" + echo " --cmd # Command to run in parallel with" + echo " --nj 50 # number of jobs for parallel processing" + exit 1; +fi + +sdir=$1 +odir=$2 +array=$3 +task=`basename $sdir` +expdir=exp/wpe/${task}_${array} +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +miniconda_dir=$HOME/miniconda3/ +if [ ! -d $miniconda_dir ]; then + echo "$miniconda_dir does not exist. Please run '../../../tools/extras/install_miniconda.sh' and '../../../tools/extras/install_wpe.sh';" +fi + +# check if WPE is installed +result=`$HOME/miniconda3/bin/python -c "\ +try: + import nara_wpe + print('1') +except ImportError: + print('0')"` + +if [ "$result" == "1" ]; then + echo "WPE is installed" +else + echo "WPE is not installed. 
Please run ../../../tools/extras/install_wpe.sh" + exit 1 +fi + +mkdir -p $odir +mkdir -p $expdir/log + +# wavfiles.list can be used as the name of the output files +output_wavfiles=$expdir/wavfiles.list +find -L ${sdir} | grep -i ${array} > $expdir/channels_input +cat $expdir/channels_input | awk -F '/' '{print $NF}' | sed "s@S@$odir\/S@g" > $expdir/channels_output +paste -d" " $expdir/channels_input $expdir/channels_output > $output_wavfiles + +# split the list for parallel processing +split_wavfiles="" +for n in `seq $nj`; do + split_wavfiles="$split_wavfiles $output_wavfiles.$n" +done +utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1; + +echo -e "Dereverberation - $task - $array\n" +# making a shell script for each job +for n in `seq $nj`; do +cat <<-EOF > $expdir/log/wpe.$n.sh +while read line; do + $HOME/miniconda3/bin/python local/run_wpe.py \ + --file \$line +done < $output_wavfiles.$n +EOF +done + +chmod a+x $expdir/log/wpe.*.sh +$cmd JOB=1:$nj $expdir/log/wpe.JOB.log \ + $expdir/log/wpe.JOB.sh + +echo "`basename $0` Done." 
diff --git a/egs/chime5/s5b/RESULTS b/egs/chime5/s5b/RESULTS new file mode 100644 index 00000000000..0dcea1f0031 --- /dev/null +++ b/egs/chime5/s5b/RESULTS @@ -0,0 +1,33 @@ + +# tri2 +%WER 76.40 [ 44985 / 58881, 3496 ins, 17652 del, 23837 sub ] exp/tri2/decode_dev_worn/wer_13_1.0 +%WER 93.56 [ 55091 / 58881, 2132 ins, 35555 del, 17404 sub ] exp/tri2/decode_dev_beamformit_ref/wer_17_1.0 + +# tri3 +%WER 72.81 [ 42869 / 58881, 3629 ins, 15998 del, 23242 sub ] exp/tri3/decode_dev_worn/wer_15_1.0 +%WER 91.73 [ 54013 / 58881, 3519 ins, 27098 del, 23396 sub ] exp/tri3/decode_dev_beamformit_ref/wer_17_1.0 + +# nnet3 tdnn+chain +%WER 47.91 [ 28212 / 58881, 2843 ins, 8957 del, 16412 sub ] exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_worn/wer_9_0.0 +%WER 81.28 [ 47859 / 58881, 4210 ins, 27511 del, 16138 sub ] exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_beamformit_ref/wer_9_0.5 + +# result with the challenge submission format (July 9, 2018) +# before the fix of speaker ID across arrays +session S02 room DINING: #words 8288, #errors 6593, wer 79.54 % +session S02 room KITCHEN: #words 12696, #errors 11096, wer 87.39 % +session S02 room LIVING: #words 15460, #errors 12219, wer 79.03 % +session S09 room DINING: #words 5766, #errors 4651, wer 80.66 % +session S09 room KITCHEN: #words 8911, #errors 7277, wer 81.66 % +session S09 room LIVING: #words 7760, #errors 6023, wer 77.61 % +overall: #words 58881, #errors 47859, wer 81.28 % + +# result with the challenge submission format (July 9, 2018) +# after the fix of speaker ID across arrays +==== development set ==== +session S02 room DINING: #words 8288, #errors 6556, wer 79.10 % +session S02 room KITCHEN: #words 12696, #errors 11096, wer 87.39 % +session S02 room LIVING: #words 15460, #errors 12182, wer 78.79 % +session S09 room DINING: #words 5766, #errors 4648, wer 80.61 % +session S09 room KITCHEN: #words 8911, #errors 7277, wer 81.66 % +session S09 room LIVING: #words 7760, #errors 6022, wer 77.60 % 
+overall: #words 58881, #errors 47781, wer 81.14 % diff --git a/egs/chime5/s5b/cmd.sh b/egs/chime5/s5b/cmd.sh new file mode 100644 index 00000000000..9702501f1a7 --- /dev/null +++ b/egs/chime5/s5b/cmd.sh @@ -0,0 +1,15 @@ +# you can change cmd.sh depending on what type of queue you are using. +# If you have no queueing system and want to run on a local machine, you +# can change all instances 'queue.pl' to run.pl (but be careful and run +# commands one by one: most recipes will exhaust the memory on your +# machine). queue.pl works with GridEngine (qsub). slurm.pl works +# with slurm. Different queues are configured differently, with different +# queue names and different ways of specifying things like memory; +# to account for these differences you can create and edit the file +# conf/queue.conf to match your queue's configuration. Search for +# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information, +# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl. 
+ +export train_cmd="retry.pl queue.pl --mem 2G" +export decode_cmd="queue.pl --mem 4G" + diff --git a/egs/chime5/s5b/conf/beamformit.cfg b/egs/chime5/s5b/conf/beamformit.cfg new file mode 100755 index 00000000000..70fdd858651 --- /dev/null +++ b/egs/chime5/s5b/conf/beamformit.cfg @@ -0,0 +1,50 @@ +#BeamformIt sample configuration file for AMI data (http://groups.inf.ed.ac.uk/ami/download/) + +# scrolling size to compute the delays +scroll_size = 250 + +# cross correlation computation window size +window_size = 500 + +#amount of maximum points for the xcorrelation taken into account +nbest_amount = 4 + +#flag wether to apply an automatic noise thresholding +do_noise_threshold = 1 + +#Percentage of frames with lower xcorr taken as noisy +noise_percent = 10 + +######## acoustic modelling parameters + +#transition probabilities weight for multichannel decoding +trans_weight_multi = 25 +trans_weight_nbest = 25 + +### + +#flag wether to print the feaures after setting them, or not +print_features = 1 + +#flag wether to use the bad frames in the sum process +do_avoid_bad_frames = 1 + +#flag to use the best channel (SNR) as a reference +#defined from command line +do_compute_reference = 1 + +#flag wether to use a uem file or not(process all the file) +do_use_uem_file = 0 + +#flag wether to use an adaptative weights scheme or fixed weights +do_adapt_weights = 1 + +#flag wether to output the sph files or just run the system to create the auxiliary files +do_write_sph_files = 1 + +####directories where to store/retrieve info#### +#channels_file = ./cfg-files/channels + +#show needs to be passed as argument normally, here a default one is given just in case +#show_id = Ttmp + diff --git a/egs/chime5/s5b/conf/mfcc.conf b/egs/chime5/s5b/conf/mfcc.conf new file mode 100644 index 00000000000..32988403b00 --- /dev/null +++ b/egs/chime5/s5b/conf/mfcc.conf @@ -0,0 +1,2 @@ +--use-energy=false +--sample-frequency=16000 diff --git a/egs/chime5/s5b/conf/mfcc_hires.conf 
b/egs/chime5/s5b/conf/mfcc_hires.conf new file mode 100644 index 00000000000..fd64b62eb16 --- /dev/null +++ b/egs/chime5/s5b/conf/mfcc_hires.conf @@ -0,0 +1,10 @@ +# config for high-resolution MFCC features, intended for neural network training. +# Note: we keep all cepstra, so it has the same info as filterbank features, +# but MFCC is more easily compressible (because less correlated) which is why +# we prefer this method. +--use-energy=false # use average of log energy, not energy. +--sample-frequency=16000 +--num-mel-bins=40 +--num-ceps=40 +--low-freq=40 +--high-freq=-400 diff --git a/egs/chime5/s5b/conf/online_cmvn.conf b/egs/chime5/s5b/conf/online_cmvn.conf new file mode 100644 index 00000000000..7748a4a4dd3 --- /dev/null +++ b/egs/chime5/s5b/conf/online_cmvn.conf @@ -0,0 +1 @@ +# configuration file for apply-cmvn-online, used in the script ../local/run_online_decoding.sh diff --git a/egs/chime5/s5b/local/chain/run_tdnn.sh b/egs/chime5/s5b/local/chain/run_tdnn.sh new file mode 120000 index 00000000000..34499362831 --- /dev/null +++ b/egs/chime5/s5b/local/chain/run_tdnn.sh @@ -0,0 +1 @@ +tuning/run_tdnn_1a.sh \ No newline at end of file diff --git a/egs/chime5/s5b/local/chain/tuning/run_cnn_tdnn_lstm_1a.sh b/egs/chime5/s5b/local/chain/tuning/run_cnn_tdnn_lstm_1a.sh new file mode 100755 index 00000000000..95e9d934bd3 --- /dev/null +++ b/egs/chime5/s5b/local/chain/tuning/run_cnn_tdnn_lstm_1a.sh @@ -0,0 +1,304 @@ +#!/bin/bash + +# Set -e here so that we catch if any executable fails immediately +set -euo pipefail + +# First the options that are passed through to run_ivector_common.sh +# (some of which are also used in this script directly). +stage=0 +nj=96 +train_set=train_worn_u400k_cleaned +test_sets="dev_beamformit_ref" +gmm=tri3_cleaned +nnet3_affix=_train_worn_u400k_cleaned +lm_suffix= + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. 
+affix=_1a # affix for the TDNN directory name +tree_affix= +train_stage=-10 +get_egs_stage=-10 +decode_iter= + +common_egs_dir= + +hidden_dim=1024 +cell_dim=1024 +projection_dim=256 + +# training options +num_epochs=2 # 2 works better than 4 +chunk_width=140,100,160 +chunk_left_context=40 +chunk_right_context=0 +dropout_schedule='0,0@0.20,0.3@0.50,0' +xent_regularize=0.025 +label_delay=5 + +# decode options +extra_left_context=50 +extra_right_context=0 + +# training options +srand=0 +remove_egs=true + +#decode options +test_online_decoding=false # if true, it will run the last decoding stage. + + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + +if ! cuda-compiled; then + cat <$lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $ali_dir $tree_dir +fi + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + lstm_opts="decay-time=40" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + idct-layer name=idct input=input dim=40 cepstral-lifter=22 affine-transform-file=$dir/configs/idct.mat + + conv-relu-batchnorm-layer name=cnn1 input=idct height-in=40 height-out=20 height-subsample-out=2 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=256 learning-rate-factor=0.333 max-change=0.25 + conv-relu-batchnorm-layer name=cnn2 input=cnn1 height-in=20 height-out=20 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=128 + + relu-batchnorm-layer name=affine1 input=lda dim=512 + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 input=cnn2 dim=1024 + relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1,affine1) dim=1024 + relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024 + + # check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults + fast-lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn4 
input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024 + fast-lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=1024 + fast-lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + + ## adding the layers for chain branch + output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! 
-d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule $dropout_schedule \ + --trainer.num-chunk-per-minibatch 64,32 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.srand=$srand \ + --trainer.optimization.shrink-value 0.99 \ + --trainer.optimization.num-jobs-initial=3 \ + --trainer.optimization.num-jobs-final=16 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.momentum=0.0 \ + --trainer.deriv-truncate-margin 8 \ + --egs.stage $get_egs_stage \ + --egs.opts="--frames-overlap-per-eg 0" \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --cleanup.remove-egs=$remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). 
+ utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --extra-left-context $chunk_left_context \ + --extra-right-context $chunk_right_context \ + --extra-left-context-initial 0 \ + --extra-right-context-final 0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 17 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. + steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l $lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. 
The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." + exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $ali_dir $tree_dir +fi + + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + opts="l2-regularize=0.05" + output_opts="l2-regularize=0.01 bottleneck-dim=320" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 $opts dim=512 + relu-batchnorm-layer name=tdnn2 $opts dim=512 input=Append(-1,0,1) + relu-batchnorm-layer name=tdnn3 $opts dim=512 + relu-batchnorm-layer name=tdnn4 $opts dim=512 input=Append(-1,0,1) + relu-batchnorm-layer name=tdnn5 $opts dim=512 + relu-batchnorm-layer name=tdnn6 $opts dim=512 input=Append(-3,0,3) + relu-batchnorm-layer name=tdnn7 $opts dim=512 input=Append(-3,0,3) + relu-batchnorm-layer name=tdnn8 $opts dim=512 input=Append(-6,-3,0) + + ## adding the layers for chain branch + relu-batchnorm-layer name=prefinal-chain $opts dim=512 target-rms=0.5 + output-layer name=output include-log-softmax=false $output_opts dim=$num_targets max-change=1.5 + + # 
adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + relu-batchnorm-layer name=prefinal-xent input=tdnn8 $opts dim=512 target-rms=0.5 + output-layer name=output-xent $output_opts dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$decode_cmd" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.srand=$srand \ + --trainer.max-param-change=2.0 \ + --trainer.num-epochs=10 \ + --trainer.frames-per-iter=3000000 \ + --trainer.optimization.num-jobs-initial=2 \ + --trainer.optimization.num-jobs-final=4 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.shrink-value=1.0 \ + --trainer.num-chunk-per-minibatch=256,128,64 \ + 
--trainer.optimization.momentum=0.0 \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --egs.opts="--frames-overlap-per-eg 0" \ + --cleanup.remove-egs=$remove_egs \ + --use-gpu=true \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 17 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. 
+ steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l 2792 combine=-0.149->-0.149 (over 2) xent:train/valid[210,316,final]=(-2.50,-1.99,-2.00/-2.36,-1.95,-1.95) logprob:train/valid[210,316,final]=(-0.228,-0.136,-0.136/-0.223,-0.156,-0.155) + +set -e + +# configs for 'chain' +stage=0 +nj=96 +train_set=train_worn_u400k +test_sets="dev_worn dev_beamformit_ref" +gmm=tri3 +nnet3_affix=_train_worn_u400k +lm_suffix= + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. +affix=1b # affix for the TDNN directory name +tree_affix= +train_stage=-10 +get_egs_stage=-10 +decode_iter= + +num_epochs=4 +common_egs_dir= +# training options +# training chunk-options +chunk_width=140,100,160 +xent_regularize=0.1 +dropout_schedule='0,0@0.20,0.5@0.50,0' + +# training options +srand=0 +remove_egs=true + +#decode options +test_online_decoding=false # if true, it will run the last decoding stage. + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + +if ! cuda-compiled; then + cat <$lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. 
+ if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." + exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $lat_dir $tree_dir +fi + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + affine_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true" + tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66" + linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0" + prefinal_opts="l2-regularize=0.01" + output_opts="l2-regularize=0.002" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=1536 + tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=0 + tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 
+ tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf14 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf15 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + linear-component name=prefinal-l dim=256 $linear_opts + + prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 + output-layer name=output include-log-softmax=false dim=$num_targets $output_opts + + prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 + output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! 
-d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + steps/nnet3/chain/train.py --stage $train_stage \ + --cmd "$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient 0.1 \ + --chain.l2-regularize 0.0 \ + --chain.apply-deriv-weights false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule "$dropout_schedule" \ + --trainer.add-option="--optimization.memory-compression-level=2" \ + --egs.dir "$common_egs_dir" \ + --egs.stage $get_egs_stage \ + --egs.opts "--frames-overlap-per-eg 0" \ + --egs.chunk-width $chunk_width \ + --trainer.num-chunk-per-minibatch 64 \ + --trainer.frames-per-iter 1500000 \ + --trainer.num-epochs $num_epochs \ + --trainer.optimization.num-jobs-initial 3 \ + --trainer.optimization.num-jobs-final 16 \ + --trainer.optimization.initial-effective-lrate 0.00025 \ + --trainer.optimization.final-effective-lrate 0.000025 \ + --trainer.max-param-change 2.0 \ + --cleanup.remove-egs $remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir $dir || exit 1; + +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). 
+ utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +exit 0; diff --git a/egs/chime5/s5b/local/chain/tuning/run_tdnn_lstm_1a.sh b/egs/chime5/s5b/local/chain/tuning/run_tdnn_lstm_1a.sh new file mode 100755 index 00000000000..e3d8e6ac4dc --- /dev/null +++ b/egs/chime5/s5b/local/chain/tuning/run_tdnn_lstm_1a.sh @@ -0,0 +1,297 @@ +#!/bin/bash + +# Set -e here so that we catch if any executable fails immediately +set -euo pipefail + +# First the options that are passed through to run_ivector_common.sh +# (some of which are also used in this script directly). +stage=0 +nj=96 +train_set=train_worn_u400k_cleaned +test_sets="dev_worn dev_beamformit_ref" +gmm=tri3_cleaned +nnet3_affix=_train_worn_u400k_cleaned +lm_suffix= + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. 
+affix=_1a # affix for the TDNN directory name +tree_affix= +train_stage=-10 +get_egs_stage=-10 +decode_iter= + +common_egs_dir= + +hidden_dim=1024 +cell_dim=1024 +projection_dim=256 + +# training options +num_epochs=2 # 2 works better than 4 +chunk_width=140,100,160 +chunk_left_context=40 +chunk_right_context=0 +dropout_schedule='0,0@0.20,0.3@0.50,0' +xent_regularize=0.025 +label_delay=5 + +# decode options +extra_left_context=50 +extra_right_context=0 + +# training options +srand=0 +remove_egs=true + +#decode options +test_online_decoding=false # if true, it will run the last decoding stage. + + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + +if ! cuda-compiled; then + cat <$lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $ali_dir $tree_dir +fi + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + lstm_opts="decay-time=40" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 dim=$hidden_dim + relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=$hidden_dim + relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=$hidden_dim + + fast-lstmp-layer name=lstm1 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm2 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm3 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim 
non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm4 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + + ## adding the layers for chain branch + output-layer name=output input=lstm4 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + output-layer name=output-xent input=lstm4 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. 
+ + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule $dropout_schedule \ + --trainer.num-chunk-per-minibatch 64,32 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.srand=$srand \ + --trainer.optimization.shrink-value 0.99 \ + --trainer.optimization.num-jobs-initial=3 \ + --trainer.optimization.num-jobs-final=16 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.momentum=0.0 \ + --trainer.deriv-truncate-margin 8 \ + --egs.stage $get_egs_stage \ + --egs.opts="--frames-overlap-per-eg 0" \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --cleanup.remove-egs=$remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). 
+ utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --extra-left-context $chunk_left_context \ + --extra-right-context $chunk_right_context \ + --extra-left-context-initial 0 \ + --extra-right-context-final 0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 17 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. + steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l ) + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. 
+# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + +[ -f ./path.sh ] && . ./path.sh + +command -v uconv &>/dev/null \ + || { echo >&2 "uconv not found on PATH. You will have to install ICU4C"; exit 1; } + +command -v ngram &>/dev/null \ + || { echo >&2 "srilm not found on PATH. Please use the script $KALDI_ROOT/tools/extras/install_srilm.sh to install it"; exit 1; } + +if [ -z ${LIBLBFGS} ]; then + echo >&2 "SRILM is not compiled with the support of MaxEnt models." + echo >&2 "You should use the script in \$KALDI_ROOT/tools/install_srilm.sh" + echo >&2 "which will take care of compiling the SRILM with MaxEnt support" + exit 1; +fi + +sox=`command -v sox 2>/dev/null` \ + || { echo >&2 "sox not found on PATH. Please install it manually (you will need version 14.4.0 and higher)."; exit 1; } + +# If sox is found on path, check if the version is correct +if [ ! -z "$sox" ]; then + sox_version=`$sox --version 2>&1| head -1 | sed -e 's?.*: ??' -e 's?.* ??'` + if [[ ! $sox_version =~ v14.4.* ]]; then + echo "Unsupported sox version $sox_version found on path. You will need version v14.4.0 and higher." + exit 1 + fi +fi + +command -v phonetisaurus-align &>/dev/null \ + || { echo >&2 "Phonetisaurus not found on PATH. Please use the script $KALDI_ROOT/tools/extras/install_phonetisaurus.sh to install it"; exit 1; } + +command -v BeamformIt &>/dev/null \ + || { echo >&2 "BeamformIt not found on PATH. Please use the script $KALDI_ROOT/tools/extras/install_beamformit.sh to install it"; exit 1; } + +miniconda_dir=$HOME/miniconda3/ +if [ ! -d $miniconda_dir ]; then + echo "$miniconda_dir does not exist. Please run '../../../tools/extras/install_miniconda.sh'" +fi + +# check if WPE is installed +result=`$miniconda_dir/bin/python -c "\ +try: + import nara_wpe + print('1') +except ImportError: + print('0')"` + +if [ "$result" == "1" ]; then + echo "WPE is installed" +else + echo "WPE is not installed. 
Please run ../../../tools/extras/install_wpe.sh" + exit 1 +fi + +exit 0 diff --git a/egs/chime5/s5b/local/copy_lat_dir_parallel.sh b/egs/chime5/s5b/local/copy_lat_dir_parallel.sh new file mode 100755 index 00000000000..82839604c9e --- /dev/null +++ b/egs/chime5/s5b/local/copy_lat_dir_parallel.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +cmd=queue.pl +nj=40 +stage=0 +speed_perturb=true + +. ./path.sh +. utils/parse_options.sh + +if [ $# -ne 4 ]; then + echo "Usage: $0 " + exit 1 +fi + +utt_map=$1 +data=$2 +srcdir=$3 +dir=$4 + +mkdir -p $dir + +cp $srcdir/{phones.txt,tree,final.mdl} $dir || exit 1 +cp $srcdir/{final.alimdl,final.occs,splice_opts,cmvn_opts,delta_opts,final.mat,full.mat} 2>/dev/null || true + +nj_src=$(cat $srcdir/num_jobs) || exit 1 + +if [ $stage -le 1 ]; then + $cmd JOB=1:$nj_src $dir/log/copy_lats_orig.JOB.log \ + lattice-copy "ark:gunzip -c $srcdir/lat.JOB.gz |" \ + ark,scp:$dir/lat_orig.JOB.ark,$dir/lat_orig.JOB.scp || exit 1 +fi + +for n in $(seq $nj_src); do + cat $dir/lat_orig.$n.scp +done > $dir/lat_orig.scp || exit 1 + +if $speed_perturb; then + for s in 0.9 1.1; do + awk -v s=$s '{print "sp"s"-"$1" sp"s"-"$2}' $utt_map + done | cat - $utt_map | sort -k1,1 > $dir/utt_map + utt_map=$dir/utt_map +fi + +if [ $stage -le 2 ]; then + utils/filter_scp.pl -f 2 $dir/lat_orig.scp < $utt_map | \ + utils/apply_map.pl -f 2 $dir/lat_orig.scp > \ + $dir/lat.scp || exit 1 + + if [ ! -s $dir/lat.scp ]; then + echo "$0: $dir/lat.scp is empty. Something went wrong!" 
+ exit 1 + fi +fi + +utils/split_data.sh $data $nj + +if [ $stage -le 3 ]; then + $cmd JOB=1:$nj $dir/log/copy_lats.JOB.log \ + lattice-copy "scp:utils/filter_scp.pl $data/split$nj/JOB/utt2spk $dir/lat.scp |" \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1 +fi + +echo $nj > $dir/num_jobs + +if [ -f $srcdir/ali.1.gz ]; then + if [ $stage -le 4 ]; then + $cmd JOB=1:$nj_src $dir/log/copy_ali_orig.JOB.log \ + copy-int-vector "ark:gunzip -c $srcdir/ali.JOB.gz |" \ + ark,scp:$dir/ali_orig.JOB.ark,$dir/ali_orig.JOB.scp || exit 1 + fi + + for n in $(seq $nj_src); do + cat $dir/ali_orig.$n.scp + done > $dir/ali_orig.scp || exit 1 + + if [ $stage -le 5 ]; then + utils/filter_scp.pl -f 2 $dir/ali_orig.scp < $utt_map | \ + utils/apply_map.pl -f 2 $dir/ali_orig.scp > \ + $dir/ali.scp || exit 1 + + if [ ! -s $dir/ali.scp ]; then + echo "$0: $dir/ali.scp is empty. Something went wrong!" + exit 1 + fi + fi + + utils/split_data.sh $data $nj + + if [ $stage -le 6 ]; then + $cmd JOB=1:$nj $dir/log/copy_ali.JOB.log \ + copy-int-vector "scp:utils/filter_scp.pl $data/split$nj/JOB/utt2spk $dir/ali.scp |" \ + "ark:|gzip -c > $dir/ali.JOB.gz" || exit 1 + fi +fi + +rm $dir/lat_orig.*.{ark,scp} $dir/ali_orig.*.{ark,scp} 2>/dev/null || true diff --git a/egs/chime5/s5b/local/distant_audio_list b/egs/chime5/s5b/local/distant_audio_list new file mode 100644 index 00000000000..fc7aff15cd0 --- /dev/null +++ b/egs/chime5/s5b/local/distant_audio_list @@ -0,0 +1,376 @@ +S03_U01.CH1 +S03_U01.CH2 +S03_U01.CH3 +S03_U01.CH4 +S03_U02.CH1 +S03_U02.CH2 +S03_U02.CH3 +S03_U02.CH4 +S03_U03.CH1 +S03_U03.CH2 +S03_U03.CH3 +S03_U03.CH4 +S03_U04.CH1 +S03_U04.CH2 +S03_U04.CH3 +S03_U04.CH4 +S03_U05.CH1 +S03_U05.CH2 +S03_U05.CH3 +S03_U05.CH4 +S03_U06.CH1 +S03_U06.CH2 +S03_U06.CH3 +S03_U06.CH4 +S04_U01.CH1 +S04_U01.CH2 +S04_U01.CH3 +S04_U01.CH4 +S04_U02.CH1 +S04_U02.CH2 +S04_U02.CH3 +S04_U02.CH4 +S04_U03.CH1 +S04_U03.CH2 +S04_U03.CH3 +S04_U03.CH4 +S04_U04.CH1 +S04_U04.CH2 +S04_U04.CH3 +S04_U04.CH4 +S04_U05.CH1 
+S04_U05.CH2 +S04_U05.CH3 +S04_U05.CH4 +S04_U06.CH1 +S04_U06.CH2 +S04_U06.CH3 +S04_U06.CH4 +S05_U01.CH1 +S05_U01.CH2 +S05_U01.CH3 +S05_U01.CH4 +S05_U02.CH1 +S05_U02.CH2 +S05_U02.CH3 +S05_U02.CH4 +S05_U04.CH1 +S05_U04.CH2 +S05_U04.CH3 +S05_U04.CH4 +S05_U05.CH1 +S05_U05.CH2 +S05_U05.CH3 +S05_U05.CH4 +S05_U06.CH1 +S05_U06.CH2 +S05_U06.CH3 +S05_U06.CH4 +S06_U01.CH1 +S06_U01.CH2 +S06_U01.CH3 +S06_U01.CH4 +S06_U02.CH1 +S06_U02.CH2 +S06_U02.CH3 +S06_U02.CH4 +S06_U03.CH1 +S06_U03.CH2 +S06_U03.CH3 +S06_U03.CH4 +S06_U04.CH1 +S06_U04.CH2 +S06_U04.CH3 +S06_U04.CH4 +S06_U05.CH1 +S06_U05.CH2 +S06_U05.CH3 +S06_U05.CH4 +S06_U06.CH1 +S06_U06.CH2 +S06_U06.CH3 +S06_U06.CH4 +S07_U01.CH1 +S07_U01.CH2 +S07_U01.CH3 +S07_U01.CH4 +S07_U02.CH1 +S07_U02.CH2 +S07_U02.CH3 +S07_U02.CH4 +S07_U03.CH1 +S07_U03.CH2 +S07_U03.CH3 +S07_U03.CH4 +S07_U04.CH1 +S07_U04.CH2 +S07_U04.CH3 +S07_U04.CH4 +S07_U05.CH1 +S07_U05.CH2 +S07_U05.CH3 +S07_U05.CH4 +S07_U06.CH1 +S07_U06.CH2 +S07_U06.CH3 +S07_U06.CH4 +S08_U01.CH1 +S08_U01.CH2 +S08_U01.CH3 +S08_U01.CH4 +S08_U02.CH1 +S08_U02.CH2 +S08_U02.CH3 +S08_U02.CH4 +S08_U03.CH1 +S08_U03.CH2 +S08_U03.CH3 +S08_U03.CH4 +S08_U04.CH1 +S08_U04.CH2 +S08_U04.CH3 +S08_U04.CH4 +S08_U05.CH1 +S08_U05.CH2 +S08_U05.CH3 +S08_U05.CH4 +S08_U06.CH1 +S08_U06.CH2 +S08_U06.CH3 +S08_U06.CH4 +S12_U01.CH1 +S12_U01.CH2 +S12_U01.CH3 +S12_U01.CH4 +S12_U02.CH1 +S12_U02.CH2 +S12_U02.CH3 +S12_U02.CH4 +S12_U03.CH1 +S12_U03.CH2 +S12_U03.CH3 +S12_U03.CH4 +S12_U04.CH1 +S12_U04.CH2 +S12_U04.CH3 +S12_U04.CH4 +S12_U05.CH1 +S12_U05.CH2 +S12_U05.CH3 +S12_U05.CH4 +S12_U06.CH1 +S12_U06.CH2 +S12_U06.CH3 +S12_U06.CH4 +S13_U01.CH1 +S13_U01.CH2 +S13_U01.CH3 +S13_U01.CH4 +S13_U02.CH1 +S13_U02.CH2 +S13_U02.CH3 +S13_U02.CH4 +S13_U03.CH1 +S13_U03.CH2 +S13_U03.CH3 +S13_U03.CH4 +S13_U04.CH1 +S13_U04.CH2 +S13_U04.CH3 +S13_U04.CH4 +S13_U05.CH1 +S13_U05.CH2 +S13_U05.CH3 +S13_U05.CH4 +S13_U06.CH1 +S13_U06.CH2 +S13_U06.CH3 +S13_U06.CH4 +S16_U01.CH1 +S16_U01.CH2 +S16_U01.CH3 +S16_U01.CH4 +S16_U02.CH1 +S16_U02.CH2 
+S16_U02.CH3 +S16_U02.CH4 +S16_U03.CH1 +S16_U03.CH2 +S16_U03.CH3 +S16_U03.CH4 +S16_U04.CH1 +S16_U04.CH2 +S16_U04.CH3 +S16_U04.CH4 +S16_U05.CH1 +S16_U05.CH2 +S16_U05.CH3 +S16_U05.CH4 +S16_U06.CH1 +S16_U06.CH2 +S16_U06.CH3 +S16_U06.CH4 +S17_U01.CH1 +S17_U01.CH2 +S17_U01.CH3 +S17_U01.CH4 +S17_U02.CH1 +S17_U02.CH2 +S17_U02.CH3 +S17_U02.CH4 +S17_U03.CH1 +S17_U03.CH2 +S17_U03.CH3 +S17_U03.CH4 +S17_U04.CH1 +S17_U04.CH2 +S17_U04.CH3 +S17_U04.CH4 +S17_U05.CH1 +S17_U05.CH2 +S17_U05.CH3 +S17_U05.CH4 +S17_U06.CH1 +S17_U06.CH2 +S17_U06.CH3 +S17_U06.CH4 +S18_U01.CH1 +S18_U01.CH2 +S18_U01.CH3 +S18_U01.CH4 +S18_U02.CH1 +S18_U02.CH2 +S18_U02.CH3 +S18_U02.CH4 +S18_U03.CH1 +S18_U03.CH2 +S18_U03.CH3 +S18_U03.CH4 +S18_U04.CH1 +S18_U04.CH2 +S18_U04.CH3 +S18_U04.CH4 +S18_U05.CH1 +S18_U05.CH2 +S18_U05.CH3 +S18_U05.CH4 +S18_U06.CH1 +S18_U06.CH2 +S18_U06.CH3 +S18_U06.CH4 +S19_U01.CH1 +S19_U01.CH2 +S19_U01.CH3 +S19_U01.CH4 +S19_U02.CH1 +S19_U02.CH2 +S19_U02.CH3 +S19_U02.CH4 +S19_U03.CH1 +S19_U03.CH2 +S19_U03.CH3 +S19_U03.CH4 +S19_U04.CH1 +S19_U04.CH2 +S19_U04.CH3 +S19_U04.CH4 +S19_U05.CH1 +S19_U05.CH2 +S19_U05.CH3 +S19_U05.CH4 +S19_U06.CH1 +S19_U06.CH2 +S19_U06.CH3 +S19_U06.CH4 +S20_U01.CH1 +S20_U01.CH2 +S20_U01.CH3 +S20_U01.CH4 +S20_U02.CH1 +S20_U02.CH2 +S20_U02.CH3 +S20_U02.CH4 +S20_U03.CH1 +S20_U03.CH2 +S20_U03.CH3 +S20_U03.CH4 +S20_U04.CH1 +S20_U04.CH2 +S20_U04.CH3 +S20_U04.CH4 +S20_U05.CH1 +S20_U05.CH2 +S20_U05.CH3 +S20_U05.CH4 +S20_U06.CH1 +S20_U06.CH2 +S20_U06.CH3 +S20_U06.CH4 +S22_U01.CH1 +S22_U01.CH2 +S22_U01.CH3 +S22_U01.CH4 +S22_U02.CH1 +S22_U02.CH2 +S22_U02.CH3 +S22_U02.CH4 +S22_U04.CH1 +S22_U04.CH2 +S22_U04.CH3 +S22_U04.CH4 +S22_U05.CH1 +S22_U05.CH2 +S22_U05.CH3 +S22_U05.CH4 +S22_U06.CH1 +S22_U06.CH2 +S22_U06.CH3 +S22_U06.CH4 +S23_U01.CH1 +S23_U01.CH2 +S23_U01.CH3 +S23_U01.CH4 +S23_U02.CH1 +S23_U02.CH2 +S23_U02.CH3 +S23_U02.CH4 +S23_U03.CH1 +S23_U03.CH2 +S23_U03.CH3 +S23_U03.CH4 +S23_U04.CH1 +S23_U04.CH2 +S23_U04.CH3 +S23_U04.CH4 +S23_U05.CH1 +S23_U05.CH2 +S23_U05.CH3 
+S23_U05.CH4 +S23_U06.CH1 +S23_U06.CH2 +S23_U06.CH3 +S23_U06.CH4 +S24_U01.CH1 +S24_U01.CH2 +S24_U01.CH3 +S24_U01.CH4 +S24_U02.CH1 +S24_U02.CH2 +S24_U02.CH3 +S24_U02.CH4 +S24_U03.CH1 +S24_U03.CH2 +S24_U03.CH3 +S24_U03.CH4 +S24_U04.CH1 +S24_U04.CH2 +S24_U04.CH3 +S24_U04.CH4 +S24_U05.CH1 +S24_U05.CH2 +S24_U05.CH3 +S24_U05.CH4 +S24_U06.CH1 +S24_U06.CH2 +S24_U06.CH3 +S24_U06.CH4 diff --git a/egs/chime5/s5b/local/extract_noises.py b/egs/chime5/s5b/local/extract_noises.py new file mode 100755 index 00000000000..f7b7f752d9e --- /dev/null +++ b/egs/chime5/s5b/local/extract_noises.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +import argparse +import json +import logging +import os +import sys +import scipy.io.wavfile as siw +import math +import numpy as np + + +def get_args(): + parser = argparse.ArgumentParser( + """Extract noises from the corpus based on the non-speech regions. + e.g. {} /export/corpora4/CHiME5/audio/train/ \\ + /export/corpora4/CHiME5/transcriptions/train/ \\ + /export/b05/zhiqiw/noise/""".format(sys.argv[0])) + + parser.add_argument("--segment-length", default=20) + parser.add_argument("audio_dir", help="""Location of the CHiME5 Audio files. e.g. /export/corpora4/CHiME5/audio/train/""") + parser.add_argument("trans_dir", help="""Location of the CHiME5 Transcriptions. e.g. /export/corpora4/CHiME5/transcriptions/train/""") + parser.add_argument("audio_list", help="""List of ids of the CHiME5 recordings from which noise is extracted. e.g. local/distant_audio_list""") + parser.add_argument("out_dir", help="Output directory to write noise files. e.g. 
/export/b05/zhiqiw/noise/") + + args = parser.parse_args() + return args + + +def Trans_time(time, fs): + units = time.split(':') + time_second = float(units[0]) * 3600 + float(units[1]) * 60 + float(units[2]) + return int(time_second*fs) + + +def Get_time(conf, tag, mic, fs): + for i in conf: + st = Trans_time(i['start_time'][mic], fs) + ed = Trans_time(i['end_time'][mic], fs) + tag[st:ed] = 0 + return tag + + +def write_noise(out_dir, seg, audio, sig, tag, fs, cnt): + sig_noise = sig[np.nonzero(tag)] + for i in range(math.floor(len(sig_noise)/(seg*fs))): + siw.write(out_dir +'/noise'+str(cnt)+'.wav', fs, sig_noise[i*seg*fs:(i+1)*seg*fs]) + cnt += 1 + return cnt + + +def main(): + args = get_args() + + if not os.path.exists(args.out_dir): + os.makedirs(args.out_dir) + + wav_list = open(args.audio_list).readlines() + + cnt = 1 + for i, audio in enumerate(wav_list): + parts = audio.strip().split('.') + if len(parts) == 2: + # Assuming distant mic with name like S03_U01.CH1 + session, mic = parts[0].split('_') + channel = parts[1] + base_name = session + "_" + mic + "." + channel + else: + # Assuming close talk mic with name like S03_P09 + session, mic = audio.strip().split('_') + base_name = session + "_" + mic + fs, sig = siw.read(args.audio_dir + "/" + base_name + '.wav') + tag = np.ones(len(sig)) + if i == 0 or session != session_p: + with open(args.trans_dir + "/" + session + '.json') as f: + conf = json.load(f) + tag = Get_time(conf, tag, mic, fs) + cnt = write_noise(args.out_dir, args.segment_length, audio, sig, tag, fs, cnt) + session_p = session + + +if __name__ == '__main__': + main() diff --git a/egs/chime5/s5b/local/extract_vad_weights.sh b/egs/chime5/s5b/local/extract_vad_weights.sh new file mode 100755 index 00000000000..250b021bd8f --- /dev/null +++ b/egs/chime5/s5b/local/extract_vad_weights.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# Copyright 2016 Johns Hopkins University (Author: Daniel Povey, Vijayaditya Peddinti) +# 2019 Vimal Manohar +# Apache 2.0. 
+ +# This script converts lattices available from a first pass decode into a per-frame weights file +# The ctms generated from the lattices are filtered. Silence frames are assigned a low weight (e.g.0.00001) +# and voiced frames have a weight of 1. + +set -e + +stage=1 +cmd=run.pl +silence_weight=0.00001 +#end configuration section. + +. ./cmd.sh + +[ -f ./path.sh ] && . ./path.sh +. utils/parse_options.sh || exit 1; +if [ $# -ne 4 ]; then + echo "Usage: $0 [--cmd (run.pl|queue.pl...)] " + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + exit 1; +fi + +data_dir=$1 +lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied. +decode_dir=$3 +output_wts_file_gz=$4 + +if [ $stage -le 1 ]; then + echo "$0: generating CTM from input lattices" + steps/get_ctm_conf.sh --cmd "$cmd" \ + --use-segments false \ + $data_dir \ + $lang \ + $decode_dir +fi + +if [ $stage -le 2 ]; then + name=`basename $data_dir` + # we just take the ctm from LMWT 10, it doesn't seem to affect the results a lot + ctm=$decode_dir/score_10/$name.ctm + echo "$0: generating weights file from ctm $ctm" + + pad_frames=0 # this did not seem to be helpful but leaving it as an option. + feat-to-len scp:$data_dir/feats.scp ark,t:- >$decode_dir/utt.lengths + if [ ! 
-f $ctm ]; then echo "$0: expected ctm to exist: $ctm"; exit 1; fi + + cat $ctm | awk '$6 == 1.0 && $4 < 1.0' | \ + grep -v -w mm | grep -v -w mhm | grep -v -F '[noise]' | \ + grep -v -F '[laughter]' | grep -v -F '' | \ + perl -e ' $lengths=shift @ARGV; $pad_frames=shift @ARGV; $silence_weight=shift @ARGV; + $pad_frames >= 0 || die "bad pad-frames value $pad_frames"; + open(L, "<$lengths") || die "opening lengths file"; + @all_utts = (); + $utt2ref = { }; + while () { + ($utt, $len) = split(" ", $_); + push @all_utts, $utt; + $array_ref = [ ]; + for ($n = 0; $n < $len; $n++) { ${$array_ref}[$n] = $silence_weight; } + $utt2ref{$utt} = $array_ref; + } + while () { + @A = split(" ", $_); + @A == 6 || die "bad ctm line $_"; + $utt = $A[0]; $beg = $A[2]; $len = $A[3]; + $beg_int = int($beg * 100) - $pad_frames; + $len_int = int($len * 100) + 2*$pad_frames; + $array_ref = $utt2ref{$utt}; + !defined $array_ref && die "No length info for utterance $utt"; + for ($t = $beg_int; $t < $beg_int + $len_int; $t++) { + if ($t >= 0 && $t < @$array_ref) { + ${$array_ref}[$t] = 1; + } + } + } + foreach $utt (@all_utts) { $array_ref = $utt2ref{$utt}; + print $utt, " [ ", join(" ", @$array_ref), " ]\n"; + } ' $decode_dir/utt.lengths $pad_frames $silence_weight | \ + gzip -c > $output_wts_file_gz +fi diff --git a/egs/chime5/s5b/local/json2text.py b/egs/chime5/s5b/local/json2text.py new file mode 100755 index 00000000000..4df0160efb6 --- /dev/null +++ b/egs/chime5/s5b/local/json2text.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 + +# Copyright 2017 Johns Hopkins University (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +import json +import argparse +import logging +import sys + + +def hms_to_seconds(hms): + hour = hms.split(':')[0] + minute = hms.split(':')[1] + second = hms.split(':')[2].split('.')[0] + + # .xx (10 ms order) + ms10 = hms.split(':')[2].split('.')[1] + + # total seconds + seconds = int(hour) * 3600 + int(minute) * 60 + int(second) + + return 
'{:07d}'.format(int(str(seconds) + ms10)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('json', type=str, help='JSON transcription file') + parser.add_argument('--mictype', type=str, + choices=['ref', 'worn', 'u01', 'u02', 'u03', 'u04', 'u05', 'u06'], + help='Type of microphones') + args = parser.parse_args() + + # logging info + log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s:%(message)s" + logging.basicConfig(level=logging.INFO, format=log_format) + + logging.debug("reading %s", args.json) + with open(args.json, 'rt', encoding="utf-8") as f: + j = json.load(f) + + for x in j: + if '[redacted]' not in x['words']: + session_id = x['session_id'] + speaker_id = x['speaker'] + if args.mictype == 'ref': + mictype = x['ref'] + elif args.mictype == 'worn': + mictype = 'original' + else: + mictype = args.mictype.upper() # convert from u01 to U01 + + # add location tag for scoring (only for dev and eval sets) + if 'location' in x.keys(): + location = x['location'].upper() + else: + location = 'NOLOCATION' + + start_time = x['start_time'][mictype] + end_time = x['end_time'][mictype] + + # remove meta chars and convert to lower + words = x['words'].replace('"', '')\ + .replace('.', '')\ + .replace('?', '')\ + .replace(',', '')\ + .replace(':', '')\ + .replace(';', '')\ + .replace('!', '').lower() + + # remove multiple spaces + words = " ".join(words.split()) + + # convert to seconds, e.g., 1:10:05.55 -> 3600 + 600 + 5.55 = 4205.55 + start_time = hms_to_seconds(start_time) + end_time = hms_to_seconds(end_time) + + uttid = speaker_id + '_' + session_id + if not args.mictype == 'worn': + uttid += '_' + mictype + uttid += '_' + location + '-' + start_time + '-' + end_time + + if end_time > start_time: + sys.stdout.buffer.write((uttid + ' ' + words + '\n').encode("utf-8")) diff --git a/egs/chime5/s5b/local/make_noise_list.py b/egs/chime5/s5b/local/make_noise_list.py new file mode 100755 index 00000000000..5aaf7fa4062 
--- /dev/null +++ b/egs/chime5/s5b/local/make_noise_list.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +import glob +import os +import sys + + +if len(sys.argv) != 2: + print ("Usage: {} ".format(sys.argv[0])) + raise SystemExit(1) + + +for line in glob.glob("{}/*.wav".format(sys.argv[1])): + fname = os.path.basename(line.strip()) + + print ("--noise-id {} --noise-type point-source " + "--bg-fg-type foreground {}".format(fname, line.strip())) diff --git a/egs/chime5/s5/local/chain/compare_wer.sh b/egs/chime5/s5b/local/nnet3/compare_wer.sh similarity index 84% rename from egs/chime5/s5/local/chain/compare_wer.sh rename to egs/chime5/s5b/local/nnet3/compare_wer.sh index cd6be14ed88..095e85cc338 100755 --- a/egs/chime5/s5/local/chain/compare_wer.sh +++ b/egs/chime5/s5b/local/nnet3/compare_wer.sh @@ -101,31 +101,32 @@ if $used_epochs; then exit 0; # the diagnostics aren't comparable between regular and discriminatively trained systems. fi - echo -n "# Final train prob " for x in $*; do - prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -v xent | awk '{printf("%.4f", $8)}') + prob=$(grep Overall $x/log/compute_prob_train.{final,combined}.log 2>/dev/null | grep log-like | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo echo -n "# Final valid prob " for x in $*; do - prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -v xent | awk '{printf("%.4f", $8)}') + prob=$(grep Overall $x/log/compute_prob_valid.{final,combined}.log 2>/dev/null | grep log-like | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo -echo -n "# Final train prob (xent)" +echo -n "# Final train acc " for x in $*; do - prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -w xent | awk '{printf("%.4f", $8)}') + prob=$(grep Overall $x/log/compute_prob_train.{final,combined}.log 2>/dev/null | grep accuracy | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo -echo -n "# Final valid prob (xent)" +echo -n "# Final valid acc " for x in $*; do - 
prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -w xent | awk '{printf("%.4f", $8)}') + prob=$(grep Overall $x/log/compute_prob_valid.{final,combined}.log 2>/dev/null | grep accuracy | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo + +echo diff --git a/egs/chime5/s5b/local/nnet3/decode.sh b/egs/chime5/s5b/local/nnet3/decode.sh new file mode 100755 index 00000000000..7af09f36a13 --- /dev/null +++ b/egs/chime5/s5b/local/nnet3/decode.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +# Copyright 2016 Johns Hopkins University (Author: Daniel Povey, Vijayaditya Peddinti) +# 2019 Vimal Manohar +# Apache 2.0. + +# This script does 2-stage decoding where the first stage is used to get +# reliable frames for i-vector extraction. + +set -e + +# general opts +iter= +stage=0 +nj=30 +affix= # affix for decode directory + +# ivector opts +max_count=75 # parameter for extract_ivectors.sh +sub_speaker_frames=6000 +ivector_scale=0.75 +get_weights_from_ctm=true +weights_file= # use weights from this archive (must be compressed using gunzip) +silence_weight=0.00001 # apply this weight to silence frames during i-vector extraction +ivector_dir=exp/nnet3 + +# decode opts +pass2_decode_opts="--min-active 1000" +lattice_beam=8 +extra_left_context=0 # change for (B)LSTM +extra_right_context=0 # change for BLSTM +frames_per_chunk=50 # change for (B)LSTM +acwt=0.1 # important to change this when using chain models +post_decode_acwt=1.0 # important to change this when using chain models +extra_left_context_initial=0 +extra_right_context_final=0 + +score_opts="--min-lmwt 6 --max-lmwt 13" + +. ./cmd.sh +[ -f ./path.sh ] && . ./path.sh +. utils/parse_options.sh || exit 1; + +if [ $# -ne 4 ]; then + echo "Usage: $0 [options] " + echo " Options:" + echo " --stage (0|1|2) # start scoring script from part-way through." 
+ echo "e.g.:" + echo "$0 data/dev data/lang exp/tri5a/graph_pp exp/nnet3/tdnn" + exit 1; +fi + +data=$1 # data directory +lang=$2 # data/lang +graph=$3 #exp/tri5a/graph_pp +dir=$4 # exp/nnet3/tdnn + +model_affix=`basename $dir` +ivector_affix=${affix:+_$affix}_chain_${model_affix}${iter:+_iter$iter} +affix=${affix:+_${affix}}${iter:+_iter${iter}} + +if [ $stage -le 1 ]; then + if [ ! -s ${data}_hires/feats.scp ]; then + utils/copy_data_dir.sh $data ${data}_hires + steps/make_mfcc.sh --mfcc-config conf/mfcc_hires.conf --nj $nj --cmd "$train_cmd" ${data}_hires + steps/compute_cmvn_stats.sh ${data}_hires + utils/fix_data_dir.sh ${data}_hires + fi +fi + +data_set=$(basename $data) +if [ $stage -le 2 ]; then + echo "Extracting i-vectors, stage 1" + steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj $nj \ + --max-count $max_count \ + ${data}_hires $ivector_dir/extractor \ + $ivector_dir/ivectors_${data_set}${ivector_affix}_stage1; + # float comparisons are hard in bash + if [ `bc <<< "$ivector_scale != 1"` -eq 1 ]; then + ivector_scale_affix=_scale$ivector_scale + else + ivector_scale_affix= + fi + + if [ ! 
-z "$ivector_scale_affix" ]; then + echo "$0: Scaling iVectors, stage 1" + srcdir=$ivector_dir/ivectors_${data_set}${ivector_affix}_stage1 + outdir=$ivector_dir/ivectors_${data_set}${ivector_affix}${ivector_scale_affix}_stage1 + mkdir -p $outdir + $train_cmd $outdir/log/scale_ivectors.log \ + copy-matrix --scale=$ivector_scale scp:$srcdir/ivector_online.scp ark:- \| \ + copy-feats --compress=true ark:- ark,scp:$outdir/ivector_online.ark,$outdir/ivector_online.scp; + cp $srcdir/ivector_period $outdir/ivector_period + fi +fi + +decode_dir=$dir/decode_${data_set}${affix} +# generate the lattices +if [ $stage -le 3 ]; then + echo "Generating lattices, stage 1" + steps/nnet3/decode.sh --nj $nj --cmd "$decode_cmd" \ + --acwt $acwt --post-decode-acwt $post_decode_acwt \ + --extra-left-context $extra_left_context \ + --extra-right-context $extra_right_context \ + --extra-left-context-initial $extra_left_context_initial \ + --extra-right-context-final $extra_right_context_final \ + --frames-per-chunk "$frames_per_chunk" \ + --online-ivector-dir $ivector_dir/ivectors_${data_set}${ivector_affix}${ivector_scale_affix}_stage1 \ + --skip-scoring true ${iter:+--iter $iter} \ + $graph ${data}_hires ${decode_dir}_stage1; +fi + +if [ $stage -le 4 ]; then + if $get_weights_from_ctm; then + if [ ! 
-z $weights_file ]; then + echo "$0: Using provided vad weights file $weights_file" + ivector_extractor_weights=$weights_file + else + echo "$0 : Generating vad weights file" + ivector_extractor_weights=${decode_dir}_stage1/weights${affix}.gz + local/extract_vad_weights.sh --silence-weight $silence_weight \ + --cmd "$decode_cmd" ${iter:+--iter $iter} \ + ${data}_hires $lang \ + ${decode_dir}_stage1 $ivector_extractor_weights + fi + else + # get weights from best path decoding + ivector_extractor_weights=${decode_dir}_stage1 + fi +fi + +if [ $stage -le 5 ]; then + echo "Extracting i-vectors, stage 2 with weights from $ivector_extractor_weights" + # this does offline decoding, except we estimate the iVectors per + # speaker, excluding silence (based on alignments from a DNN decoding), with a + # different script. This is just to demonstrate that script. + # the --sub-speaker-frames is optional; if provided, it will divide each speaker + # up into "sub-speakers" of at least that many frames... can be useful if + # acoustic conditions drift over time within the speaker's data. 
+ steps/online/nnet2/extract_ivectors.sh --cmd "$train_cmd" --nj $nj \ + --silence-weight $silence_weight \ + --sub-speaker-frames $sub_speaker_frames --max-count $max_count \ + ${data}_hires $lang $ivector_dir/extractor \ + $ivector_extractor_weights $ivector_dir/ivectors_${data_set}${ivector_affix}; +fi + +if [ $stage -le 6 ]; then + echo "Generating lattices, stage 2 with --acwt $acwt" + rm -f ${decode_dir}/.error + steps/nnet3/decode.sh --nj $nj --cmd "$decode_cmd" $pass2_decode_opts \ + --acwt $acwt --post-decode-acwt $post_decode_acwt \ + --extra-left-context $extra_left_context \ + --extra-right-context $extra_right_context \ + --extra-left-context-initial $extra_left_context_initial \ + --extra-right-context-final $extra_right_context_final \ + --frames-per-chunk "$frames_per_chunk" \ + --skip-scoring false ${iter:+--iter $iter} --lattice-beam $lattice_beam \ + --online-ivector-dir $ivector_dir/ivectors_${data_set}${ivector_affix} \ + $graph ${data}_hires ${decode_dir} || touch ${decode_dir}/.error + [ -f ${decode_dir}/.error ] && echo "$0: Error decoding" && exit 1; +fi +exit 0 diff --git a/egs/chime5/s5b/local/nnet3/run_ivector_common.sh b/egs/chime5/s5b/local/nnet3/run_ivector_common.sh new file mode 100755 index 00000000000..3910e1812a3 --- /dev/null +++ b/egs/chime5/s5b/local/nnet3/run_ivector_common.sh @@ -0,0 +1,151 @@ +#!/bin/bash + +set -euo pipefail + +# This script is called from local/nnet3/run_tdnn.sh and +# local/chain/run_tdnn.sh (and may eventually be called by more +# scripts). It contains the common feature preparation and +# iVector-related parts of the script. See those scripts for examples +# of usage. + +stage=0 +train_set=train_worn_u100k +test_sets="dev_worn dev_beamformit_ref" +gmm=tri3 +nj=96 + +nnet3_affix=_train_worn_u100k + +. ./cmd.sh +. ./path.sh +. utils/parse_options.sh + +gmm_dir=exp/${gmm} +ali_dir=exp/${gmm}_ali_${train_set}_sp + +for f in ${gmm_dir}/final.mdl; do + if [ ! 
-f $f ]; then + echo "$0: expected file $f to exist" + exit 1 + fi +done + +if [ $stage -le 1 ]; then + # Although the nnet will be trained by high resolution data, we still have to + # perturb the normal data to get the alignment _sp stands for speed-perturbed + echo "$0: preparing directory for low-resolution speed-perturbed data (for alignment)" + utils/data/perturb_data_dir_speed_3way.sh data/${train_set} data/${train_set}_sp + echo "$0: making MFCC features for low-resolution speed-perturbed data" + steps/make_mfcc.sh --cmd "$train_cmd" --nj 20 data/${train_set}_sp || exit 1; + steps/compute_cmvn_stats.sh data/${train_set}_sp || exit 1; + utils/fix_data_dir.sh data/${train_set}_sp +fi + +if [ $stage -le 2 ]; then + echo "$0: aligning with the perturbed low-resolution data" + steps/align_fmllr.sh --nj ${nj} --cmd "$train_cmd" \ + data/${train_set}_sp data/lang $gmm_dir $ali_dir || exit 1 +fi + +if [ $stage -le 3 ]; then + # Create high-resolution MFCC features (with 40 cepstra instead of 13). + # this shows how you can split across multiple file-systems. + echo "$0: creating high-resolution MFCC features" + mfccdir=data/${train_set}_sp_hires/data + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then + utils/create_split_dir.pl /export/b1{5,6,8,9}/$USER/kaldi-data/mfcc/chime5-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage + fi + + for datadir in ${train_set}_sp ${test_sets}; do + utils/copy_data_dir.sh data/$datadir data/${datadir}_hires + done + + # do volume-perturbation on the training data prior to extracting hires + # features; this helps make trained nnets more invariant to test data volume. 
+ utils/data/perturb_data_dir_volume.sh data/${train_set}_sp_hires || exit 1; + + for datadir in ${train_set}_sp ${test_sets}; do + steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf \ + --cmd "$train_cmd" data/${datadir}_hires || exit 1; + steps/compute_cmvn_stats.sh data/${datadir}_hires || exit 1; + utils/fix_data_dir.sh data/${datadir}_hires || exit 1; + done +fi + +if [ $stage -le 4 ]; then + echo "$0: computing a subset of data to train the diagonal UBM." + # We'll use about a quarter of the data. + mkdir -p exp/nnet3${nnet3_affix}/diag_ubm + temp_data_root=exp/nnet3${nnet3_affix}/diag_ubm + + num_utts_total=$(wc -l &2 "$0" "$@" +if [ $# -ne 3 ] ; then + echo >&2 "$0" "$@" + echo >&2 "$0: Error: wrong number of arguments" + echo -e >&2 "Usage:\n $0 [opts] " + echo -e >&2 "eg:\n $0 /corpora/chime5/audio/train /corpora/chime5/transcriptions/train data/train" + exit 1 +fi + +set -e -o pipefail + +adir=$1 +jdir=$2 +dir=$3 + +json_count=$(find -L $jdir -name "*.json" | wc -l) +wav_count=$(find -L $adir -name "*.wav" | wc -l) + +if [ "$json_count" -eq 0 ]; then + echo >&2 "We expect that the directory $jdir will contain json files." + echo >&2 "That implies you have supplied a wrong path to the data." + exit 1 +fi +if [ "$wav_count" -eq 0 ]; then + echo >&2 "We expect that the directory $adir will contain wav files." + echo >&2 "That implies you have supplied a wrong path to the data." + exit 1 +fi + +echo "$0: Converting transcription to text" + +mkdir -p $dir +for file in $jdir/*json; do + ./local/json2text.py --mictype $mictype $file +done | \ + sed -e "s/\[inaudible[- 0-9]*\]/[inaudible]/g" |\ + sed -e 's/ - / /g' |\ + sed -e 's/mm-/mm/g' > $dir/text.orig + +echo "$0: Creating datadir $dir for type=\"$mictype\"" + +if [ $mictype == "worn" ]; then + # convert the filenames to wav.scp format, use the basename of the file + # as a the wav.scp key, add .L and .R for left and right channel + # i.e. 
each file will have two entries (left and right channel) + find -L $adir -name "S[0-9]*_P[0-9]*.wav" | \ + perl -ne '{ + chomp; + $path = $_; + next unless $path; + @F = split "/", $path; + ($f = $F[@F-1]) =~ s/.wav//; + @F = split "_", $f; + print "${F[1]}_${F[0]}.L sox $path -t wav - remix 1 |\n"; + print "${F[1]}_${F[0]}.R sox $path -t wav - remix 2 |\n"; + }' | sort > $dir/wav.scp + + # generate the transcripts for both left and right channel + # from the original transcript in the form + # P09_S03-0006072-0006147 gimme the baker + # create left and right channel transcript + # P09_S03.L-0006072-0006147 gimme the baker + # P09_S03.R-0006072-0006147 gimme the baker + sed -n 's/ *$//; h; s/-/\.L-/p; g; s/-/\.R-/p' $dir/text.orig | sort > $dir/text +elif [ $mictype == "ref" ]; then + # fixed reference array + + # first get a text, which will be used to extract reference arrays + perl -ne 's/-/.ENH-/;print;' $dir/text.orig | sort > $dir/text + + find -L $adir | grep "\.wav" | sort > $dir/wav.flist + # following command provide the argument for grep to extract only reference arrays + grep `cut -f 1 -d"-" $dir/text | awk -F"_" '{print $2 "_" $3}' | sed -e "s/\.ENH//" | sort | uniq | sed -e "s/^/ -e /" | tr "\n" " "` $dir/wav.flist > $dir/wav.flist2 + paste -d" " \ + <(awk -F "/" '{print $NF}' $dir/wav.flist2 | sed -e "s/\.wav/.ENH/") \ + $dir/wav.flist2 | sort > $dir/wav.scp +else + # array mic case + # convert the filenames to wav.scp format, use the basename of the file + # as a the wav.scp key + find -L $adir -name "*.wav" -ipath "*${mictype}*" |\ + perl -ne '$p=$_;chomp $_;@F=split "/";$F[$#F]=~s/\.wav//;print "$F[$#F] $p";' |\ + sort -u > $dir/wav.scp + + # convert the transcripts from + # P09_S03-0006072-0006147 gimme the baker + # to the per-channel transcripts + # P09_S03_U01_NOLOCATION.CH1-0006072-0006147 gimme the baker + # P09_S03_U01_NOLOCATION.CH2-0006072-0006147 gimme the baker + # P09_S03_U01_NOLOCATION.CH3-0006072-0006147 gimme the baker + # 
P09_S03_U01_NOLOCATION.CH4-0006072-0006147 gimme the baker + perl -ne '$l=$_; + for($i=1; $i<=4; $i++) { + ($x=$l)=~ s/-/.CH\Q$i\E-/; + print $x;}' $dir/text.orig | sort > $dir/text + +fi +$cleanup && rm -f $dir/text.* $dir/wav.scp.* $dir/wav.flist + +# Prepare 'segments', 'utt2spk', 'spk2utt' +if [ $mictype == "worn" ]; then + cut -d" " -f 1 $dir/text | \ + awk -F"-" '{printf("%s %s %08.2f %08.2f\n", $0, $1, $2/100.0, $3/100.0)}' |\ + sed -e "s/_[A-Z]*\././2" \ + > $dir/segments +elif [ $mictype == "ref" ]; then + cut -d" " -f 1 $dir/text | \ + awk -F"-" '{printf("%s %s %08.2f %08.2f\n", $0, $1, $2/100.0, $3/100.0)}' |\ + sed -e "s/_[A-Z]*\././2" |\ + sed -e "s/ P.._/ /" > $dir/segments +else + cut -d" " -f 1 $dir/text | \ + awk -F"-" '{printf("%s %s %08.2f %08.2f\n", $0, $1, $2/100.0, $3/100.0)}' |\ + sed -e "s/_[A-Z]*\././2" |\ + sed -e 's/ P.._/ /' > $dir/segments +fi +cut -f 1 -d ' ' $dir/segments | \ + perl -ne 'chomp;$utt=$_;s/_.*//;print "$utt $_\n";' > $dir/utt2spk + +utils/utt2spk_to_spk2utt.pl $dir/utt2spk > $dir/spk2utt + +# Check that data dirs are okay! +utils/validate_data_dir.sh --no-feats $dir || exit 1 diff --git a/egs/chime5/s5b/local/prepare_dict.sh b/egs/chime5/s5b/local/prepare_dict.sh new file mode 100755 index 00000000000..09083d0e795 --- /dev/null +++ b/egs/chime5/s5b/local/prepare_dict.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# Copyright (c) 2018, Johns Hopkins University (Jan "Yenda" Trmal) +# License: Apache 2.0 + +# Begin configuration section. +# End configuration section +. ./utils/parse_options.sh + +. ./path.sh + +set -e -o pipefail +set -o nounset # Treat unset variables as an error + + +# The parts of the output of this that will be needed are +# [in data/local/dict/ ] +# lexicon.txt +# extra_questions.txt +# nonsilence_phones.txt +# optional_silence.txt +# silence_phones.txt + + +# check existing directories +[ $# != 0 ] && echo "Usage: $0" && exit 1; + +dir=data/local/dict + +mkdir -p $dir +echo "$0: Getting CMU dictionary" +if [ ! 
-f $dir/cmudict.done ]; then + [ -d $dir/cmudict ] && rm -rf $dir/cmudict + svn co https://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict $dir/cmudict + touch $dir/cmudict.done +fi + +# silence phones, one per line. +for w in sil spn inaudible laughs noise; do + echo $w; +done > $dir/silence_phones.txt +echo sil > $dir/optional_silence.txt + +# For this setup we're discarding stress. +cat $dir/cmudict/cmudict-0.7b.symbols | \ + perl -ne 's:[0-9]::g; s:\r::; print lc($_)' | \ + sort -u > $dir/nonsilence_phones.txt + +# An extra question will be added by including the silence phones in one class. +paste -d ' ' -s $dir/silence_phones.txt > $dir/extra_questions.txt + +grep -v ';;;' $dir/cmudict/cmudict-0.7b |\ + uconv -f latin1 -t utf-8 -x Any-Lower |\ + perl -ne 's:(\S+)\(\d+\) :$1 :; s: : :; print;' |\ + perl -ne '@F = split " ",$_,2; $F[1] =~ s/[0-9]//g; print "$F[0] $F[1]";' \ + > $dir/lexicon1_raw_nosil.txt || exit 1; + +# Add prons for laughter, noise, oov +for w in `grep -v sil $dir/silence_phones.txt`; do + echo "[$w] $w" +done | cat - $dir/lexicon1_raw_nosil.txt > $dir/lexicon2_raw.txt || exit 1; + +# we keep all words from the cmudict in the lexicon +# might reduce OOV rate on dev and eval +cat $dir/lexicon2_raw.txt \ + <( echo "mm m" + echo " spn" + echo "cuz k aa z" + echo "cuz k ah z" + echo "cuz k ao z" + echo "mmm m"; \ + echo "hmm hh m"; \ + ) | sort -u | sed 's/[\t ]/\t/' > $dir/iv_lexicon.txt + + +cat data/train*/text | \ + awk '{for (n=2;n<=NF;n++){ count[$n]++; } } END { for(n in count) { print count[n], n; }}' | \ + sort -nr > $dir/word_counts + +cat $dir/word_counts | awk '{print $2}' > $dir/word_list + +awk '{print $1}' $dir/iv_lexicon.txt | \ + perl -e '($word_counts)=@ARGV; + open(W, "<$word_counts")||die "opening word-counts $word_counts"; + while() { chop; $seen{$_}=1; } + while() { + ($c,$w) = split; + if (!defined $seen{$w}) { print; } + } ' $dir/word_counts > $dir/oov_counts.txt + +echo "*Highest-count OOVs (including fragments) are:" 
+head -n 10 $dir/oov_counts.txt +echo "*Highest-count OOVs (excluding fragments) are:" +grep -v -E '^-|-$' $dir/oov_counts.txt | head -n 10 || true + +echo "*Training a G2P and generating missing pronunciations" +mkdir -p $dir/g2p/ +phonetisaurus-align --input=$dir/iv_lexicon.txt --ofile=$dir/g2p/aligned_lexicon.corpus +ngram-count -order 4 -kn-modify-counts-at-end -ukndiscount\ + -gt1min 0 -gt2min 0 -gt3min 0 -gt4min 0 \ + -text $dir/g2p/aligned_lexicon.corpus -lm $dir/g2p/aligned_lexicon.arpa +phonetisaurus-arpa2wfst --lm=$dir/g2p/aligned_lexicon.arpa --ofile=$dir/g2p/g2p.fst +awk '{print $2}' $dir/oov_counts.txt > $dir/oov_words.txt +phonetisaurus-apply --nbest 2 --model $dir/g2p/g2p.fst --thresh 5 --accumulate \ + --word_list $dir/oov_words.txt > $dir/oov_lexicon.txt + +## The next section is again just for debug purposes +## to show words for which the G2P failed +cat $dir/oov_lexicon.txt $dir/iv_lexicon.txt | sort -u > $dir/lexicon.txt +rm -f $dir/lexiconp.txt 2>/dev/null; # can confuse later script if this exists. +awk '{print $1}' $dir/lexicon.txt | \ + perl -e '($word_counts)=@ARGV; + open(W, "<$word_counts")||die "opening word-counts $word_counts"; + while() { chop; $seen{$_}=1; } + while() { + ($c,$w) = split; + if (!defined $seen{$w}) { print; } + } ' $dir/word_counts > $dir/oov_counts.g2p.txt + +echo "*Highest-count OOVs (including fragments) after G2P are:" +head -n 10 $dir/oov_counts.g2p.txt + +utils/validate_dict_dir.pl $dir +exit 0; + diff --git a/egs/chime5/s5b/local/reverberate_lat_dir.sh b/egs/chime5/s5b/local/reverberate_lat_dir.sh new file mode 100755 index 00000000000..f601a37c0e1 --- /dev/null +++ b/egs/chime5/s5b/local/reverberate_lat_dir.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# Copyright 2018 Vimal Manohar +# Apache 2.0 + +num_data_reps=1 +cmd=run.pl +nj=20 +include_clean=false + +. utils/parse_options.sh +. 
./path.sh + +if [ $# -ne 4 ]; then + echo "Usage: $0 <train-data-dir> <noisy-lat-dir> <clean-lat-dir> <out-dir>" + exit 1 +fi + +train_data_dir=$1 +noisy_latdir=$2 +clean_latdir=$3 +dir=$4 + +clean_nj=$(cat $clean_latdir/num_jobs) + +$cmd JOB=1:$clean_nj $dir/copy_clean_lattices.JOB.log \ + lattice-copy "ark:gunzip -c $clean_latdir/lat.JOB.gz |" \ + ark,scp:$dir/lats_clean.JOB.ark,$dir/lats_clean.JOB.scp || exit 1 + +for n in $(seq $clean_nj); do + cat $dir/lats_clean.$n.scp +done > $dir/lats_clean.scp + +for i in $(seq $num_data_reps); do + cat $dir/lats_clean.scp | awk -vi=$i '{print "rev"i"_"$0}' +done > $dir/lats_rvb.scp + +noisy_nj=$(cat $noisy_latdir/num_jobs) +$cmd JOB=1:$noisy_nj $dir/copy_noisy_lattices.JOB.log \ + lattice-copy "ark:gunzip -c $noisy_latdir/lat.JOB.gz |" \ + ark,scp:$dir/lats_noisy.JOB.ark,$dir/lats_noisy.JOB.scp || exit 1 + +optional_clean= +if $include_clean; then + optional_clean=$dir/lats_clean.scp +fi + +for n in $(seq $noisy_nj); do + cat $dir/lats_noisy.$n.scp +done | cat - $dir/lats_rvb.scp ${optional_clean} | sort -k1,1 > $dir/lats.scp + +utils/split_data.sh $train_data_dir $nj +$cmd JOB=1:$nj $dir/copy_lattices.JOB.log \ + lattice-copy "scp:utils/filter_scp.pl $train_data_dir/split$nj/JOB/utt2spk $dir/lats.scp |" \ + "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1 + +echo $nj > $dir/num_jobs + +if [ -f $clean_latdir/ali.1.gz ]; then + $cmd JOB=1:$clean_nj $dir/copy_clean_alignments.JOB.log \ + copy-int-vector "ark:gunzip -c $clean_latdir/ali.JOB.gz |" \ + ark,scp:$dir/ali_clean.JOB.ark,$dir/ali_clean.JOB.scp + + for n in $(seq $clean_nj); do + cat $dir/ali_clean.$n.scp + done > $dir/ali_clean.scp + + for i in $(seq $num_data_reps); do + cat $dir/ali_clean.scp | awk -vi=$i '{print "rev"i"_"$0}' + done > $dir/ali_rvb.scp + + optional_clean= + if $include_clean; then + optional_clean=$dir/ali_clean.scp + fi + + $cmd JOB=1:$noisy_nj $dir/copy_noisy_alignments.JOB.log \ + copy-int-vector "ark:gunzip -c $noisy_latdir/ali.JOB.gz |" \ + ark,scp:$dir/ali_noisy.JOB.ark,$dir/ali_noisy.JOB.scp + + for n 
in $(seq $noisy_nj); do + cat $dir/ali_noisy.$n.scp + done | cat - $dir/ali_rvb.scp $optional_clean | sort -k1,1 > $dir/ali.scp + + utils/split_data.sh $train_data_dir $nj || exit 1 + $cmd JOB=1:$nj $dir/copy_rvb_alignments.JOB.log \ + copy-int-vector "scp:utils/filter_scp.pl $train_data_dir/split$nj/JOB/utt2spk $dir/ali.scp |" \ + "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1 +fi + +cp $clean_latdir/{final.*,tree,*.mat,*opts,*.txt} $dir || true + +rm $dir/lats_{clean,noisy}.*.{ark,scp} $dir/ali_{clean,noisy}.*.{ark,scp} || true # save space diff --git a/egs/chime5/s5b/local/run_beamformit.sh b/egs/chime5/s5b/local/run_beamformit.sh new file mode 100755 index 00000000000..aa3badd90d8 --- /dev/null +++ b/egs/chime5/s5b/local/run_beamformit.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Shinji Watanabe) + +. ./cmd.sh +. ./path.sh + +# Config: +cmd=run.pl +bmf="1 2 3 4" + +. utils/parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: local/run_beamformit.sh [options] " + echo "main options (for others, see top of script file)" + echo " --cmd # Command to run in parallel with" + echo " --bmf \"1 2 3 4\" # microphones used for beamforming" + exit 1; +fi + +sdir=$1 +odir=$2 +array=$3 +expdir=exp/enhan/`echo $odir | awk -F '/' '{print $NF}'`_`echo $bmf | tr ' ' '_'` + +if ! command -v BeamformIt &>/dev/null ; then + echo "Missing BeamformIt, run 'cd $KALDI_ROOT/tools/; ./extras/install_beamformit.sh; cd -;'" && exit 1 +fi + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 
'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +mkdir -p $odir +mkdir -p $expdir/log + +echo "Will use the following channels: $bmf" +# number of channels +numch=`echo $bmf | tr ' ' '\n' | wc -l` +echo "the number of channels: $numch" + +# wavfiles.list can be used as the name of the output files +output_wavfiles=$expdir/wavfiles.list +find -L ${sdir} | grep -i ${array} | awk -F "/" '{print $NF}' | sed -e "s/\.CH.\.wav//" | sort | uniq > $expdir/wavfiles.list + +# this is an input file list of the microphones +# format: 1st_wav 2nd_wav ... nth_wav +input_arrays=$expdir/channels_$numch +for x in `cat $output_wavfiles`; do + echo -n "$x" + for ch in $bmf; do + echo -n " $x.CH$ch.wav" + done + echo "" +done > $input_arrays + +# split the list for parallel processing +# number of jobs are set by the number of WAV files +nj=`wc -l $expdir/wavfiles.list | awk '{print $1}'` +split_wavfiles="" +for n in `seq $nj`; do + split_wavfiles="$split_wavfiles $output_wavfiles.$n" +done +utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1; + +echo -e "Beamforming\n" +# making a shell script for each job +for n in `seq $nj`; do +cat << EOF > $expdir/log/beamform.$n.sh +while read line; do + $BEAMFORMIT/BeamformIt -s \$line -c $input_arrays \ + --config_file `pwd`/conf/beamformit.cfg \ + --source_dir $sdir \ + --result_dir $odir +done < $output_wavfiles.$n +EOF +done + +chmod a+x $expdir/log/beamform.*.sh +$cmd JOB=1:$nj $expdir/log/beamform.JOB.log \ + $expdir/log/beamform.JOB.sh + +echo "`basename $0` Done." 
diff --git a/egs/chime5/s5b/local/run_recog.sh b/egs/chime5/s5b/local/run_recog.sh new file mode 100755 index 00000000000..5c74c9ff242 --- /dev/null +++ b/egs/chime5/s5b/local/run_recog.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# +# Based mostly on the TED-LIUM and Switchboard recipe +# +# Copyright 2017 Johns Hopkins University (Author: Shinji Watanabe and Yenda Trmal) +# Apache 2.0 +# +# This is a subset of run.sh to only perform recognition experiments with evaluation data + +# Begin configuration section. +decode_nj=20 +stage=0 +enhancement=beamformit # for a new enhancement method, + # change this variable and stage 4 +# End configuration section +. ./utils/parse_options.sh + +. ./cmd.sh +. ./path.sh + + +set -e # exit on error + +# chime5 main directory path +# please change the path accordingly +chime5_corpus=/export/corpora4/CHiME5 +json_dir=${chime5_corpus}/transcriptions +audio_dir=${chime5_corpus}/audio + +# training and test data +train_set=train_worn_u100k +test_sets="eval_${enhancement}_ref" + +# This script also needs the phonetisaurus g2p, srilm, beamformit +./local/check_tools.sh || exit 1 + +if [ $stage -le 4 ]; then + # Beamforming using reference arrays + # enhanced WAV directory + enhandir=enhan + for dset in eval; do + for mictype in u01 u02 u03 u04 u05 u06; do + local/run_beamformit.sh --cmd "$train_cmd" \ + ${audio_dir}/${dset} \ + ${enhandir}/${dset}_${enhancement}_${mictype} \ + ${mictype} + done + done + + for dset in eval; do + local/prepare_data.sh --mictype ref "$PWD/${enhandir}/${dset}_${enhancement}_u0*" \ + ${json_dir}/${dset} data/${dset}_${enhancement}_ref + done +fi + +if [ $stage -le 6 ]; then + # fix speaker ID issue (thanks to Dr. 
Naoyuki Kanda) + # add array ID to the speaker ID to avoid the use of other array information to meet regulations + # Before this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01 + # After this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit_fix/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01_U02 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01_U02 + for dset in ${test_sets}; do + utils/copy_data_dir.sh data/${dset} data/${dset}_nosplit + mkdir -p data/${dset}_nosplit_fix + cp data/${dset}_nosplit/{segments,text,wav.scp} data/${dset}_nosplit_fix/ + awk -F "_" '{print $0 "_" $3}' data/${dset}_nosplit/utt2spk > data/${dset}_nosplit_fix/utt2spk + utils/utt2spk_to_spk2utt.pl data/${dset}_nosplit_fix/utt2spk > data/${dset}_nosplit_fix/spk2utt + done + + # Split speakers up into 3-minute chunks. This doesn't hurt adaptation, and + # lets us use more jobs for decoding etc. + for dset in ${test_sets}; do + utils/data/modify_speaker_info.sh --seconds-per-spk-max 180 data/${dset}_nosplit_fix data/${dset} + done +fi + +if [ $stage -le 7 ]; then + # Now make MFCC features. + # mfccdir should be some place with a largish disk where you + # want to store MFCC features. 
+ mfccdir=mfcc + for x in ${test_sets}; do + steps/make_mfcc.sh --nj 20 --cmd "$train_cmd" \ + data/$x exp/make_mfcc/$x $mfccdir + steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir + utils/fix_data_dir.sh data/$x + done +fi + +if [ $stage -le 17 ]; then + nnet3_affix=_${train_set}_cleaned + for datadir in ${test_sets}; do + utils/copy_data_dir.sh data/$datadir data/${datadir}_hires + done + for datadir in ${test_sets}; do + steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf \ + --cmd "$train_cmd" data/${datadir}_hires || exit 1; + steps/compute_cmvn_stats.sh data/${datadir}_hires || exit 1; + utils/fix_data_dir.sh data/${datadir}_hires || exit 1; + done + for data in $test_sets; do + steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj 20 \ + data/${data}_hires exp/nnet3${nnet3_affix}/extractor \ + exp/nnet3${nnet3_affix}/ivectors_${data}_hires + done +fi + +if [ $stage -le 18 ]; then + # First the options that are passed through to run_ivector_common.sh + # (some of which are also used in this script directly). + lm_suffix= + + # The rest are configs specific to this script. Most of the parameters + # are just hardcoded at this level, in the commands below. + affix=1a # affix for the TDNN directory name + tree_affix= + tree_dir=exp/chain${nnet3_affix}/tree_sp${tree_affix:+_$tree_affix} + dir=exp/chain${nnet3_affix}/tdnn${affix}_sp + + # training options + # training chunk-options + chunk_width=140,100,160 + # we don't need extra left/right context for TDNN systems. 
+ chunk_left_context=0 + chunk_right_context=0 + + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; + + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --extra-left-context $chunk_left_context \ + --extra-right-context $chunk_right_context \ + --extra-left-context-initial 0 \ + --extra-right-context-final 0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +if [ $stage -le 20 ]; then + # final scoring to get the official challenge result + # please specify both dev and eval set directories so that the search parameters + # (insertion penalty and language model weight) will be tuned using the dev set + local/score_for_submit.sh \ + --dev exp/chain_${train_set}_cleaned/tdnn1a_sp/decode_dev_${enhancement}_ref \ + --eval exp/chain_${train_set}_cleaned/tdnn1a_sp/decode_eval_${enhancement}_ref +fi diff --git a/egs/chime5/s5b/local/run_wpe.py b/egs/chime5/s5b/local/run_wpe.py new file mode 100755 index 00000000000..2f3818f9c42 --- /dev/null +++ b/egs/chime5/s5b/local/run_wpe.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# Copyright 2018 Johns Hopkins University (Author: Aswin Shanmugam Subramanian) +# Apache 2.0 +# Works with both python2 and python3 +# This script assumes that WPE (nara_wpe) is installed locally using miniconda. +# ../../../tools/extras/install_miniconda.sh and ../../../tools/extras/install_wpe.sh +# needs to be run and this script needs to be launched run with that version of +# python. 
+# See local/run_wpe.sh for example. + +import numpy as np +import soundfile as sf +import time +import os, errno +from tqdm import tqdm +import argparse + +from nara_wpe.wpe import wpe +from nara_wpe.utils import stft, istft +from nara_wpe import project_root + +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', nargs='+') +args = parser.parse_args() + +input_files = args.files[:len(args.files)//2] +output_files = args.files[len(args.files)//2:] +out_dir = os.path.dirname(output_files[0]) +try: + os.makedirs(out_dir) +except OSError as e: + if e.errno != errno.EEXIST: + raise + +stft_options = dict( + size=512, + shift=128, + window_length=None, + fading=True, + pad=True, + symmetric_window=False +) + +sampling_rate = 16000 +delay = 3 +iterations = 5 +taps = 10 + +signal_list = [ + sf.read(f)[0] + for f in input_files +] +y = np.stack(signal_list, axis=0) +Y = stft(y, **stft_options).transpose(2, 0, 1) +Z = wpe(Y, iterations=iterations, statistics_mode='full').transpose(1, 2, 0) +z = istft(Z, size=stft_options['size'], shift=stft_options['shift']) + +for d in range(len(signal_list)): + sf.write(output_files[d], z[d,:], sampling_rate) diff --git a/egs/chime5/s5b/local/run_wpe.sh b/egs/chime5/s5b/local/run_wpe.sh new file mode 100755 index 00000000000..1c4b1c80291 --- /dev/null +++ b/egs/chime5/s5b/local/run_wpe.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# Copyright 2018 Johns Hopkins University (Author: Aswin Shanmugam Subramanian) +# Apache 2.0 + +. ./cmd.sh +. ./path.sh + +# Config: +nj=4 +cmd=run.pl + +. 
utils/parse_options.sh || exit 1;
+
+if [ $# != 3 ]; then
+  echo "Wrong #arguments ($#, expected 3)"
+  echo "Usage: local/run_wpe.sh [options] <wav-in-dir> <wav-out-dir> <array-id>"
+  echo "main options (for others, see top of script file)"
+  echo "  --cmd <cmd>  # Command to run in parallel with"
+  echo "  --nj 50  # number of jobs for parallel processing"
+  exit 1;
+fi
+
+sdir=$1
+odir=$2
+array=$3
+task=`basename $sdir`
+expdir=exp/wpe/${task}_${array}
+# Set bash to 'debug' mode, it will exit on :
+# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+set -e
+set -u
+set -o pipefail
+
+miniconda_dir=$HOME/miniconda3/
+if [ ! -d $miniconda_dir ]; then
+  echo "$miniconda_dir does not exist. Please run '../../../tools/extras/install_miniconda.sh' and '../../../tools/extras/install_wpe.sh';"
+  exit 1
+fi
+
+# check if WPE is installed
+result=`$miniconda_dir/bin/python -c "\
+try:
+    import nara_wpe
+    print('1')
+except ImportError:
+    print('0')"`
+
+if [ "$result" == "1" ]; then
+  echo "WPE is installed"
+else
+  echo "WPE is not installed. 
Please run ../../../tools/extras/install_wpe.sh" + exit 1 +fi + +mkdir -p $odir +mkdir -p $expdir/log + +# wavfiles.list can be used as the name of the output files +output_wavfiles=$expdir/wavfiles.list +find -L ${sdir} | grep -i ${array} > $expdir/channels_input +cat $expdir/channels_input | awk -F '/' '{print $NF}' | sed "s@S@$odir\/S@g" > $expdir/channels_output +paste -d" " $expdir/channels_input $expdir/channels_output > $output_wavfiles + +# split the list for parallel processing +split_wavfiles="" +for n in `seq $nj`; do + split_wavfiles="$split_wavfiles $output_wavfiles.$n" +done +utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1; + +echo -e "Dereverberation - $task - $array\n" +# making a shell script for each job +for n in `seq $nj`; do +cat <<-EOF > $expdir/log/wpe.$n.sh +while read line; do + $miniconda_dir/bin/python local/run_wpe.py \ + --file \$line +done < $output_wavfiles.$n +EOF +done + +chmod a+x $expdir/log/wpe.*.sh +$cmd JOB=1:$nj $expdir/log/wpe.JOB.log \ + $expdir/log/wpe.JOB.sh + +echo "`basename $0` Done." diff --git a/egs/chime5/s5b/local/score.sh b/egs/chime5/s5b/local/score.sh new file mode 120000 index 00000000000..6a200b42ed3 --- /dev/null +++ b/egs/chime5/s5b/local/score.sh @@ -0,0 +1 @@ +../steps/scoring/score_kaldi_wer.sh \ No newline at end of file diff --git a/egs/chime5/s5b/local/score_for_submit.sh b/egs/chime5/s5b/local/score_for_submit.sh new file mode 100755 index 00000000000..23121d68b93 --- /dev/null +++ b/egs/chime5/s5b/local/score_for_submit.sh @@ -0,0 +1,119 @@ +#!/bin/bash +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey, Yenda Trmal) +# Apache 2.0 +# +# This script provides official CHiME-5 challenge submission scores per room and session. +# It first calculates the best search parameter configurations by using the dev set +# and also create the transcriptions for dev and eval sets to be submitted. 
+# The default setup does not calculate scores of the evaluation set since +# the evaluation transcription is not distributed (July 9 2018) + +cmd=run.pl +dev=exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_beamformit_ref +eval=exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_eval_beamformit_ref +do_eval=false + +echo "$0 $@" # Print the command line for logging +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 0 ]; then + echo "Usage: $0 [--cmd (run.pl|queue.pl...)]" + echo "This script provides official CHiME-5 challenge submission scores" + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + echo " --dev # dev set decoding directory" + echo " --eval # eval set decoding directory" + exit 1; +fi + +# get language model weight and word insertion penalty from the dev set +best_lmwt=`cat $dev/scoring_kaldi/wer_details/lmwt` +best_wip=`cat $dev/scoring_kaldi/wer_details/wip` + +echo "best LM weight: $best_lmwt" +echo "insertion penalty weight: $best_wip" + +echo "==== development set ====" +# development set +# get the scoring result per utterance +score_result=$dev/scoring_kaldi/wer_details/per_utt +for session in S02 S09; do + for room in DINING KITCHEN LIVING; do + # get nerror + nerr=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$4+$5+$6} END {print sum}'` + # get nwords from references (NF-2 means to exclude utterance id and " ref ") + nwrd=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$3+$4+$6} END {print sum}'` + # compute wer with scale=2 + wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` + + # report the results + echo -n "session $session " + echo -n "room $room: " + echo -n "#words $nwrd, " + echo -n "#errors $nerr, " + echo "wer $wer %" + done +done +echo -n "overall: " +# get nerror +nerr=`grep "\#csid" $score_result | awk '{sum+=$4+$5+$6} END {print sum}'` +# get nwords from references (NF-2 means to exclude utterance id and 
" ref ") +nwrd=`grep "\#csid" $score_result | awk '{sum+=$3+$4+$6} END {print sum}'` +# compute wer with scale=2 +wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` +echo -n "#words $nwrd, " +echo -n "#errors $nerr, " +echo "wer $wer %" + +echo "==== evaluation set ====" +# evaluation set +# get the scoring result per utterance. Copied from local/score.sh +mkdir -p $eval/scoring_kaldi/wer_details_devbest +$cmd $eval/scoring_kaldi/log/stats1.log \ + cat $eval/scoring_kaldi/penalty_$best_wip/$best_lmwt.txt \| \ + align-text --special-symbol="'***'" ark:$eval/scoring_kaldi/test_filt.txt ark:- ark,t:- \| \ + utils/scoring/wer_per_utt_details.pl --special-symbol "'***'" \> $eval/scoring_kaldi/wer_details_devbest/per_utt +score_result=$eval/scoring_kaldi/wer_details_devbest/per_utt +for session in S01 S21; do + for room in DINING KITCHEN LIVING; do + if $do_eval; then + # get nerror + nerr=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$4+$5+$6} END {print sum}'` + # get nwords from references (NF-2 means to exclude utterance id and " ref ") + nwrd=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$3+$4+$6} END {print sum}'` + # compute wer with scale=2 + wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` + + # report the results + echo -n "session $session " + echo -n "room $room: " + echo -n "#words $nwrd, " + echo -n "#errors $nerr, " + echo "wer $wer %" + fi + done +done +if $do_eval; then + # get nerror + nerr=`grep "\#csid" $score_result | awk '{sum+=$4+$5+$6} END {print sum}'` + # get nwords from references (NF-2 means to exclude utterance id and " ref ") + nwrd=`grep "\#csid" $score_result | awk '{sum+=$3+$4+$6} END {print sum}'` + # compute wer with scale=2 + wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` + echo -n "overall: " + echo -n "#words $nwrd, " + echo -n "#errors $nerr, " + echo "wer $wer %" +else + echo "skip evaluation scoring" + echo "" + echo "==== when you submit your result to the CHiME-5 challenge ====" + echo 
"Please rename your recognition results of "
+  echo "$dev/scoring_kaldi/penalty_$best_wip/$best_lmwt.txt"
+  echo "$eval/scoring_kaldi/penalty_$best_wip/$best_lmwt.txt"
+  echo "with {dev,eval}_<name>_<affiliation>.txt, e.g., dev_watanabe_jhu.txt and eval_watanabe_jhu.txt, "
+  echo "and submit both of them as your final challenge result"
+  echo "=================================================================="
+fi
+
diff --git a/egs/chime5/s5b/local/train_lms_srilm.sh b/egs/chime5/s5b/local/train_lms_srilm.sh
new file mode 100755
index 00000000000..5a1d56d24b3
--- /dev/null
+++ b/egs/chime5/s5b/local/train_lms_srilm.sh
@@ -0,0 +1,261 @@
+#!/bin/bash
+# Copyright (c) 2017 Johns Hopkins University (Author: Yenda Trmal, Shinji Watanabe)
+# Apache 2.0
+
+export LC_ALL=C
+
+# Begin configuration section.
+words_file=
+train_text=
+dev_text=
+oov_symbol="<unk>"
+# End configuration section
+
+echo "$0 $@"
+
+[ -f path.sh ] && . ./path.sh
+. ./utils/parse_options.sh || exit 1
+
+echo "-------------------------------------"
+echo "Building an SRILM language model     "
+echo "-------------------------------------"
+
+if [ $# -ne 2 ] ; then
+  echo "Incorrect number of parameters. "
+  echo "Script has to be called like this:"
+  echo "  $0 [switches] <datadir> <tgtdir>"
+  echo "For example: "
+  echo "  $0 data data/srilm"
+  echo "The allowed switches are: "
+  echo "    words_file=<file>  word list file -- data/lang/words.txt by default"
+  echo "    train_text=<file>  data/train/text is used in case when not specified"
+  echo "    dev_text=<file>  last 10 % of the train text is used by default"
+  echo "    oov_symbol=<unk>  symbol to use for oov modeling -- <unk> by default"
+  exit 1
+fi
+
+datadir=$1
+tgtdir=$2
+
+##End of configuration
+loc=`which ngram-count`;
+if [ -z $loc ]; then
+  echo >&2 "You appear to not have SRILM tools installed, either on your path,"
+  echo >&2 "Use the script \$KALDI_ROOT/tools/install_srilm.sh to install it."
+  exit 1
+fi
+
+# Prepare the destination directory
+mkdir -p $tgtdir
+
+for f in $words_file $train_text $dev_text; do
+  [ ! 
-s $f ] && echo "No such file $f" && exit 1; +done + +[ -z $words_file ] && words_file=$datadir/lang/words.txt +if [ ! -z "$train_text" ] && [ -z "$dev_text" ] ; then + nr=`cat $train_text | wc -l` + nr_dev=$(($nr / 10 )) + nr_train=$(( $nr - $nr_dev )) + orig_train_text=$train_text + head -n $nr_train $train_text > $tgtdir/train_text + tail -n $nr_dev $train_text > $tgtdir/dev_text + + train_text=$tgtdir/train_text + dev_text=$tgtdir/dev_text + echo "Using words file: $words_file" + echo "Using train text: 9/10 of $orig_train_text" + echo "Using dev text : 1/10 of $orig_train_text" +elif [ ! -z "$train_text" ] && [ ! -z "$dev_text" ] ; then + echo "Using words file: $words_file" + echo "Using train text: $train_text" + echo "Using dev text : $dev_text" + train_text=$train_text + dev_text=$dev_text +else + train_text=$datadir/train/text + dev_text=$datadir/dev2h/text + echo "Using words file: $words_file" + echo "Using train text: $train_text" + echo "Using dev text : $dev_text" + +fi + +[ ! -f $words_file ] && echo >&2 "File $words_file must exist!" && exit 1 +[ ! -f $train_text ] && echo >&2 "File $train_text must exist!" && exit 1 +[ ! -f $dev_text ] && echo >&2 "File $dev_text must exist!" 
&& exit 1


+# Extract the word list from the training dictionary; exclude special symbols
+sort $words_file | awk '{print $1}' | grep -v '\#0' | grep -v '<eps>' | grep -v -F "$oov_symbol" > $tgtdir/vocab
+if (($?)); then
+  echo "Failed to create vocab from $words_file"
+  exit 1
+else
+  # wc vocab # doesn't work due to some encoding issues
+  echo vocab contains `cat $tgtdir/vocab | perl -ne 'BEGIN{$l=$w=0;}{split; $w+=$#_; $w++; $l++;}END{print "$l lines, $w words\n";}'`
+fi
+
+# Kaldi transcript files contain Utterance_ID as the first word; remove it
+# We also have to avoid skewing the LM by incorporating the same sentences
+# from different channels
+sed -e "s/\.CH.//" -e "s/_.\-./_/" -e "s/NOLOCATION\(\.[LR]\)*-//" -e "s/U[0-9][0-9]_//" $train_text | sort -u | \
+  perl -ane 'print join(" ", @F[1..$#F]) . "\n" if @F > 1' > $tgtdir/train.txt
+if (($?)); then
+  echo "Failed to create $tgtdir/train.txt from $train_text"
+  exit 1
+else
+  echo "Removed first word (uid) from every line of $train_text"
+  # wc text.train train.txt # doesn't work due to some encoding issues
+  echo $train_text contains `cat $train_text | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $w--; $s++;}END{print "$w words, $s sentences\n";}'`
+  echo train.txt contains `cat $tgtdir/train.txt | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $s++;}END{print "$w words, $s sentences\n";}'`
+fi
+
+# Kaldi transcript files contain Utterance_ID as the first word; remove it
+sed -e "s/\.CH.//" -e "s/_.\-./_/" $dev_text | sort -u | \
+  perl -ane 'print join(" ", @F[1..$#F]) . 
"\n" if @F > 1' > $tgtdir/dev.txt +if (($?)); then + echo "Failed to create $tgtdir/dev.txt from $dev_text" + exit 1 +else + echo "Removed first word (uid) from every line of $dev_text" + # wc text.train train.txt # doesn't work due to some encoding issues + echo $dev_text contains `cat $dev_text | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $w--; $s++;}END{print "$w words, $s sentences\n";}'` + echo $tgtdir/dev.txt contains `cat $tgtdir/dev.txt | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $s++;}END{print "$w words, $s sentences\n";}'` +fi + + +echo "-------------------" +echo "Good-Turing 3grams" +echo "-------------------" +ngram-count -lm $tgtdir/3gram.gt011.gz -gt1min 0 -gt2min 1 -gt3min 1 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.gt012.gz -gt1min 0 -gt2min 1 -gt3min 2 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.gt022.gz -gt1min 0 -gt2min 2 -gt3min 2 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.gt023.gz -gt1min 0 -gt2min 2 -gt3min 3 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" + +echo "-------------------" +echo "Kneser-Ney 3grams" +echo "-------------------" +ngram-count -lm $tgtdir/3gram.kn011.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn012.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn022.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk 
"$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn023.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 3 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn111.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn112.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn122.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn123.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 3 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" + + +echo "-------------------" +echo "Good-Turing 4grams" +echo "-------------------" +ngram-count -lm $tgtdir/4gram.gt0111.gz \ + -gt1min 0 -gt2min 1 -gt3min 1 -gt4min 1 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0112.gz \ + -gt1min 0 -gt2min 1 -gt3min 1 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0122.gz \ + -gt1min 0 -gt2min 1 -gt3min 2 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0123.gz \ + -gt1min 0 -gt2min 1 -gt3min 2 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0113.gz \ + -gt1min 0 -gt2min 1 -gt3min 1 -gt4min 3 -order 4 \ + 
-text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0222.gz \ + -gt1min 0 -gt2min 2 -gt3min 2 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0223.gz \ + -gt1min 0 -gt2min 2 -gt3min 2 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" + +echo "-------------------" +echo "Kneser-Ney 4grams" +echo "-------------------" +ngram-count -lm $tgtdir/4gram.kn0111.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -kndiscount4 -gt4min 1 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0112.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -kndiscount4 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0113.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -kndiscount4 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0122.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -kndiscount4 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0123.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -kndiscount4 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0222.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 2 -kndiscount4 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0223.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 2 -kndiscount3 
-gt3min 2 -kndiscount4 -gt4min 3 -order 4 \
+  -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol"
+
+if [ ! -z ${LIBLBFGS} ]; then
+  #please note that if the switch -map-unk "$oov_symbol" is used with -maxent-convert-to-arpa, ngram-count will segfault
+  #instead of that, we simply output the model in the maxent format and convert it using the "ngram"
+  echo "-------------------"
+  echo "Maxent 3grams"
+  echo "-------------------"
+  sed 's/'${oov_symbol}'/<unk>/g' $tgtdir/train.txt | \
+    ngram-count -lm - -order 3 -text - -vocab $tgtdir/vocab -unk -sort -maxent -maxent-convert-to-arpa|\
+    ngram -lm - -order 3 -unk -map-unk "$oov_symbol" -prune-lowprobs -write-lm - |\
+    sed 's/<unk>/'${oov_symbol}'/g' | gzip -c > $tgtdir/3gram.me.gz || exit 1
+
+  echo "-------------------"
+  echo "Maxent 4grams"
+  echo "-------------------"
+  sed 's/'${oov_symbol}'/<unk>/g' $tgtdir/train.txt | \
+    ngram-count -lm - -order 4 -text - -vocab $tgtdir/vocab -unk -sort -maxent -maxent-convert-to-arpa|\
+    ngram -lm - -order 4 -unk -map-unk "$oov_symbol" -prune-lowprobs -write-lm - |\
+    sed 's/<unk>/'${oov_symbol}'/g' | gzip -c > $tgtdir/4gram.me.gz || exit 1
+else
+  echo >&2 "SRILM is not compiled with the support of MaxEnt models." 
+  echo >&2 "You should use the script in \$KALDI_ROOT/tools/install_srilm.sh"
+  echo >&2 "which will take care of compiling the SRILM with MaxEnt support"
+  exit 1;
+fi
+
+
+echo "--------------------"
+echo "Computing perplexity"
+echo "--------------------"
+(
+  for f in $tgtdir/3gram* ; do ( echo $f; ngram -order 3 -lm $f -unk -map-unk "$oov_symbol" -prune-lowprobs -ppl $tgtdir/dev.txt ) | paste -s -d ' ' ; done
+  for f in $tgtdir/4gram* ; do ( echo $f; ngram -order 4 -lm $f -unk -map-unk "$oov_symbol" -prune-lowprobs -ppl $tgtdir/dev.txt ) | paste -s -d ' ' ; done
+) | sort -r -n -k 15,15g | column -t | tee $tgtdir/perplexities.txt
+
+echo "The perplexity scores report is stored in $tgtdir/perplexities.txt "
+echo ""
+
+for best_ngram in {3,4}gram ; do
+  outlm=best_${best_ngram}.gz
+  lmfilename=$(grep "${best_ngram}" $tgtdir/perplexities.txt | head -n 1 | cut -f 1 -d ' ')
+  echo "$outlm -> $lmfilename"
+  (cd $tgtdir; rm -f $outlm; ln -sf $(basename $lmfilename) $outlm )
+done
diff --git a/egs/chime5/s5b/local/wer_output_filter b/egs/chime5/s5b/local/wer_output_filter
new file mode 100755
index 00000000000..6f4b6400716
--- /dev/null
+++ b/egs/chime5/s5b/local/wer_output_filter
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright (c) 2017 Johns Hopkins University (Author: Yenda Trmal <jtrmal@gmail.com>)
+# Apache 2.0
+
+
+## Filter for scoring of the STT results. Convert everything to lowercase
+## and add some ad-hoc fixes for the hesitations
+
+perl -e '
+  while(<STDIN>) {
+    @A = split(" ", $_);
+    $id = shift @A; print "$id ";
+    foreach $a (@A) {
+      print lc($a) . 
" " unless $a =~ /\[.*\]/; + } + print "\n"; + }' | \ +sed -e ' + s/\/hmm/g; + s/\/hmm/g; + s/\/hmm/g; +' + +#| uconv -f utf-8 -t utf-8 -x Latin-ASCII + diff --git a/egs/chime5/s5b/local/worn_audio_list b/egs/chime5/s5b/local/worn_audio_list new file mode 100644 index 00000000000..fc7a44ad77d --- /dev/null +++ b/egs/chime5/s5b/local/worn_audio_list @@ -0,0 +1,64 @@ +/export/corpora4/CHiME5/audio/train/S03_P09.wav +/export/corpora4/CHiME5/audio/train/S03_P10.wav +/export/corpora4/CHiME5/audio/train/S03_P11.wav +/export/corpora4/CHiME5/audio/train/S03_P12.wav +/export/corpora4/CHiME5/audio/train/S04_P09.wav +/export/corpora4/CHiME5/audio/train/S04_P10.wav +/export/corpora4/CHiME5/audio/train/S04_P11.wav +/export/corpora4/CHiME5/audio/train/S04_P12.wav +/export/corpora4/CHiME5/audio/train/S05_P13.wav +/export/corpora4/CHiME5/audio/train/S05_P14.wav +/export/corpora4/CHiME5/audio/train/S05_P15.wav +/export/corpora4/CHiME5/audio/train/S05_P16.wav +/export/corpora4/CHiME5/audio/train/S06_P13.wav +/export/corpora4/CHiME5/audio/train/S06_P14.wav +/export/corpora4/CHiME5/audio/train/S06_P15.wav +/export/corpora4/CHiME5/audio/train/S06_P16.wav +/export/corpora4/CHiME5/audio/train/S07_P17.wav +/export/corpora4/CHiME5/audio/train/S07_P18.wav +/export/corpora4/CHiME5/audio/train/S07_P19.wav +/export/corpora4/CHiME5/audio/train/S07_P20.wav +/export/corpora4/CHiME5/audio/train/S08_P21.wav +/export/corpora4/CHiME5/audio/train/S08_P22.wav +/export/corpora4/CHiME5/audio/train/S08_P23.wav +/export/corpora4/CHiME5/audio/train/S08_P24.wav +/export/corpora4/CHiME5/audio/train/S12_P33.wav +/export/corpora4/CHiME5/audio/train/S12_P34.wav +/export/corpora4/CHiME5/audio/train/S12_P35.wav +/export/corpora4/CHiME5/audio/train/S12_P36.wav +/export/corpora4/CHiME5/audio/train/S13_P33.wav +/export/corpora4/CHiME5/audio/train/S13_P34.wav +/export/corpora4/CHiME5/audio/train/S13_P35.wav +/export/corpora4/CHiME5/audio/train/S13_P36.wav +/export/corpora4/CHiME5/audio/train/S16_P21.wav 
+/export/corpora4/CHiME5/audio/train/S16_P22.wav +/export/corpora4/CHiME5/audio/train/S16_P23.wav +/export/corpora4/CHiME5/audio/train/S16_P24.wav +/export/corpora4/CHiME5/audio/train/S17_P17.wav +/export/corpora4/CHiME5/audio/train/S17_P18.wav +/export/corpora4/CHiME5/audio/train/S17_P19.wav +/export/corpora4/CHiME5/audio/train/S17_P20.wav +/export/corpora4/CHiME5/audio/train/S18_P41.wav +/export/corpora4/CHiME5/audio/train/S18_P42.wav +/export/corpora4/CHiME5/audio/train/S18_P43.wav +/export/corpora4/CHiME5/audio/train/S18_P44.wav +/export/corpora4/CHiME5/audio/train/S19_P49.wav +/export/corpora4/CHiME5/audio/train/S19_P50.wav +/export/corpora4/CHiME5/audio/train/S19_P51.wav +/export/corpora4/CHiME5/audio/train/S19_P52.wav +/export/corpora4/CHiME5/audio/train/S20_P49.wav +/export/corpora4/CHiME5/audio/train/S20_P50.wav +/export/corpora4/CHiME5/audio/train/S20_P51.wav +/export/corpora4/CHiME5/audio/train/S20_P52.wav +/export/corpora4/CHiME5/audio/train/S22_P41.wav +/export/corpora4/CHiME5/audio/train/S22_P42.wav +/export/corpora4/CHiME5/audio/train/S22_P43.wav +/export/corpora4/CHiME5/audio/train/S22_P44.wav +/export/corpora4/CHiME5/audio/train/S23_P53.wav +/export/corpora4/CHiME5/audio/train/S23_P54.wav +/export/corpora4/CHiME5/audio/train/S23_P55.wav +/export/corpora4/CHiME5/audio/train/S23_P56.wav +/export/corpora4/CHiME5/audio/train/S24_P53.wav +/export/corpora4/CHiME5/audio/train/S24_P54.wav +/export/corpora4/CHiME5/audio/train/S24_P55.wav +/export/corpora4/CHiME5/audio/train/S24_P56.wav diff --git a/egs/chime5/s5b/path.sh b/egs/chime5/s5b/path.sh new file mode 100644 index 00000000000..fb1c0489386 --- /dev/null +++ b/egs/chime5/s5b/path.sh @@ -0,0 +1,7 @@ +export KALDI_ROOT=`pwd`/../../.. +[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh +export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH +[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" 
&& exit 1 +. $KALDI_ROOT/tools/config/common_path.sh +export LC_ALL=C + diff --git a/egs/chime5/s5b/run.sh b/egs/chime5/s5b/run.sh new file mode 100755 index 00000000000..37bc5c2c94e --- /dev/null +++ b/egs/chime5/s5b/run.sh @@ -0,0 +1,297 @@ +#!/bin/bash +# +# Based mostly on the TED-LIUM and Switchboard recipe +# +# Copyright 2017 Johns Hopkins University (Author: Shinji Watanabe and Yenda Trmal) +# Apache 2.0 +# + +# Begin configuration section. +nj=96 +decode_nj=20 +stage=0 +nnet_stage=-10 +num_data_reps=4 +snrs="20:10:15:5:0" +foreground_snrs="20:10:15:5:0" +background_snrs="20:10:15:5:0" +enhancement=beamformit # for a new enhancement method, + # change this variable and stage 4 +# End configuration section +. ./utils/parse_options.sh + +. ./cmd.sh +. ./path.sh + + +set -e # exit on error + +# chime5 main directory path +# please change the path accordingly +chime5_corpus=/export/corpora4/CHiME5 +json_dir=${chime5_corpus}/transcriptions +audio_dir=${chime5_corpus}/audio + +# training and test data +train_set=train_worn_simu_u400k +test_sets="dev_${enhancement}_dereverb_ref" #"dev_worn dev_addition_dereverb_ref" +#test_sets="dev_${enhancement}_ref" #"dev_worn dev_addition_dereverb_ref" + +# This script also needs the phonetisaurus g2p, srilm, beamformit +./local/check_tools.sh || exit 1 + +if [ $stage -le 1 ]; then + # skip u03 as they are missing + for mictype in worn u01 u02 u04 u05 u06; do + local/prepare_data.sh --mictype ${mictype} \ + ${audio_dir}/train ${json_dir}/train data/train_${mictype} + done + for dataset in dev; do + for mictype in worn; do + local/prepare_data.sh --mictype ${mictype} \ + ${audio_dir}/${dataset} ${json_dir}/${dataset} \ + data/${dataset}_${mictype} + done + done +fi + +if [ $stage -le 2 ]; then + local/prepare_dict.sh + + utils/prepare_lang.sh \ + data/local/dict "" data/local/lang data/lang + + local/train_lms_srilm.sh \ + --train-text data/train_worn/text --dev-text data/dev_worn/text \ + --oov-symbol "" --words-file 
data/lang/words.txt \ + data/ data/srilm +fi + +LM=data/srilm/best_3gram.gz +if [ $stage -le 3 ]; then + # Compiles G for chime5 trigram LM + utils/format_lm.sh \ + data/lang $LM data/local/dict/lexicon.txt data/lang + +fi + +if [ $stage -le 4 ]; then + # Beamforming using reference arrays + # enhanced WAV directory + enhandir=enhan + dereverb_dir=${PWD}/wav/wpe/ + for dset in dev eval; do + for mictype in u01 u02 u03 u04 u06; do + local/run_wpe.sh --nj 4 --cmd "$train_cmd --mem 120G" \ + ${audio_dir}/${dset} \ + ${dereverb_dir}/${dset} \ + ${mictype} + done + done + + for dset in dev eval; do + for mictype in u01 u02 u03 u04 u06; do + local/run_beamformit.sh --cmd "$train_cmd" \ + ${dereverb_dir}/${dset} \ + ${enhandir}/${dset}_${enhancement}_${mictype} \ + ${mictype} + done + done + + for dset in dev eval; do + local/prepare_data.sh --mictype ref "$PWD/${enhandir}/${dset}_${enhancement}_u0*" \ + ${json_dir}/${dset} data/${dset}_${enhancement}_dereverb_ref + done +fi + +if [ $stage -le 5 ]; then + # remove possibly bad sessions (P11_S03, P52_S19, P53_S24, P54_S24) + # see http://spandh.dcs.shef.ac.uk/chime_challenge/data.html for more details + utils/copy_data_dir.sh data/train_worn data/train_worn_org # back up + grep -v -e "^P11_S03" -e "^P52_S19" -e "^P53_S24" -e "^P54_S24" data/train_worn_org/text > data/train_worn/text + utils/fix_data_dir.sh data/train_worn +fi + +if [ $stage -le 6 ]; then + local/extract_noises.py $chime5_corpus/audio/train $chime5_corpus/transcriptions/train \ + local/distant_audio_list distant_noises + local/make_noise_list.py distant_noises > distant_noise_list + + noise_list=distant_noise_list + + if [ ! 
-d RIRS_NOISES/ ]; then + # Download the package that includes the real RIRs, simulated RIRs, isotropic noises and point-source noises + wget --no-check-certificate http://www.openslr.org/resources/28/rirs_noises.zip + unzip rirs_noises.zip + fi + + # This is the config for the system using simulated RIRs and point-source noises + rvb_opts+=(--rir-set-parameters "0.5, RIRS_NOISES/simulated_rirs/smallroom/rir_list") + rvb_opts+=(--rir-set-parameters "0.5, RIRS_NOISES/simulated_rirs/mediumroom/rir_list") + rvb_opts+=(--noise-set-parameters $noise_list) + + steps/data/reverberate_data_dir.py \ + "${rvb_opts[@]}" \ + --prefix "rev" \ + --foreground-snrs $foreground_snrs \ + --background-snrs $background_snrs \ + --speech-rvb-probability 1 \ + --pointsource-noise-addition-probability 1 \ + --isotropic-noise-addition-probability 1 \ + --num-replications $num_data_reps \ + --max-noises-per-minute 1 \ + --source-sampling-rate 16000 \ + data/train_worn data/train_worn_rvb +fi + +if [ $stage -le 7 ]; then + # combine mix array and worn mics + # randomly extract first 100k utterances from all mics + # if you want to include more training data, you can increase the number of array mic utterances + utils/combine_data.sh data/train_uall data/train_u01 data/train_u02 data/train_u04 data/train_u05 data/train_u06 + utils/subset_data_dir.sh data/train_uall 400000 data/train_u400k + utils/combine_data.sh data/${train_set} data/train_worn data/train_worn_rvb data/train_u400k + + # only use left channel for worn mic recognition + # you can use both left and right channels for training + for dset in train dev; do + utils/copy_data_dir.sh data/${dset}_worn data/${dset}_worn_stereo + grep "\.L-" data/${dset}_worn_stereo/text > data/${dset}_worn/text + utils/fix_data_dir.sh data/${dset}_worn + done +fi + +if [ $stage -le 8 ]; then + # fix speaker ID issue (thanks to Dr. 
Naoyuki Kanda) + # add array ID to the speaker ID to avoid the use of other array information to meet regulations + # Before this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01 + # After this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit_fix/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01_U02 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01_U02 + for dset in dev_${enhancement}_dereverb_ref eval_${enhancement}_dereverb_ref; do + utils/copy_data_dir.sh data/${dset} data/${dset}_nosplit + mkdir -p data/${dset}_nosplit_fix + cp data/${dset}_nosplit/{segments,text,wav.scp} data/${dset}_nosplit_fix/ + awk -F "_" '{print $0 "_" $3}' data/${dset}_nosplit/utt2spk > data/${dset}_nosplit_fix/utt2spk + utils/utt2spk_to_spk2utt.pl data/${dset}_nosplit_fix/utt2spk > data/${dset}_nosplit_fix/spk2utt + done + + # Split speakers up into 3-minute chunks. This doesn't hurt adaptation, and + # lets us use more jobs for decoding etc. + for dset in ${train_set} dev_worn; do + utils/copy_data_dir.sh data/${dset} data/${dset}_nosplit + utils/data/modify_speaker_info.sh --seconds-per-spk-max 180 data/${dset}_nosplit data/${dset} + done + for dset in dev_${enhancement}_dereverb_ref eval_${enhancement}_dereverb_ref; do + utils/data/modify_speaker_info.sh --seconds-per-spk-max 180 data/${dset}_nosplit_fix data/${dset} + done +fi + +if [ $stage -le 8 ]; then + # Now make MFCC features. + # mfccdir should be some place with a largish disk where you + # want to store MFCC features. 
+ mfccdir=mfcc + for x in ${train_set} ${test_sets}; do + steps/make_mfcc.sh --nj 20 --cmd "$train_cmd" \ + data/$x exp/make_mfcc/$x $mfccdir + steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir + utils/fix_data_dir.sh data/$x + done +fi + +if [ $stage -le 9 ]; then + # make a subset for monophone training + utils/subset_data_dir.sh --shortest data/${train_set} 100000 data/${train_set}_100kshort + utils/subset_data_dir.sh data/${train_set}_100kshort 30000 data/${train_set}_30kshort +fi + +if [ $stage -le 10 ]; then + # Starting basic training on MFCC features + steps/train_mono.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set}_30kshort data/lang exp/mono +fi + +if [ $stage -le 11 ]; then + steps/align_si.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set} data/lang exp/mono exp/mono_ali + + steps/train_deltas.sh --cmd "$train_cmd" \ + 2500 30000 data/${train_set} data/lang exp/mono_ali exp/tri1 +fi + +if [ $stage -le 12 ]; then + steps/align_si.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set} data/lang exp/tri1 exp/tri1_ali + + steps/train_lda_mllt.sh --cmd "$train_cmd" \ + 4000 50000 data/${train_set} data/lang exp/tri1_ali exp/tri2 +fi + +if [ $stage -le 13 ]; then + utils/mkgraph.sh data/lang exp/tri2 exp/tri2/graph + for dset in ${test_sets}; do + steps/decode.sh --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + exp/tri2/graph data/${dset} exp/tri2/decode_${dset} & + done + wait +fi + +if [ $stage -le 14 ]; then + steps/align_si.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set} data/lang exp/tri2 exp/tri2_ali + + steps/train_sat.sh --cmd "$train_cmd" \ + 5000 100000 data/${train_set} data/lang exp/tri2_ali exp/tri3 +fi + +if [ $stage -le 15 ]; then + utils/mkgraph.sh data/lang exp/tri3 exp/tri3/graph + for dset in ${test_sets}; do + steps/decode_fmllr.sh --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + exp/tri3/graph data/${dset} exp/tri3/decode_${dset} & + done + wait +fi + +if [ $stage -le 16 ]; then + # The following script 
cleans the data and produces cleaned data + steps/cleanup/clean_and_segment_data.sh --nj ${nj} --cmd "$train_cmd" \ + --segmentation-opts "--min-segment-length 0.3 --min-new-segment-length 0.6" \ + data/${train_set} data/lang exp/tri3 exp/tri3_cleaned data/${train_set}_cleaned +fi + +if [ $stage -le 17 ]; then + # chain TDNN + local/chain/tuning/run_tdnn_1b.sh --nj ${nj} \ + --stage $nnet_stage \ + --train-set ${train_set}_cleaned \ + --test-sets "$test_sets" \ + --gmm tri3_cleaned --nnet3-affix _${train_set}_cleaned_rvb +fi + +if [ $stage -le 18 ]; then + # 2-stage decoding + for test_set in $test_sets; do + local/nnet3/decode.sh --affix 2stage --pass2-decode-opts "--min-active 1000" \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk 150 --nj $decode_nj \ + --ivector-dir exp/nnet3_${train_set}_cleaned_rvb \ + data/${test_set} data/lang_chain \ + exp/chain_${train_set}_cleaned_rvb/tree_sp/graph \ + exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp + done +fi + +if [ $stage -le 19 ]; then + # final scoring to get the official challenge result + # please specify both dev and eval set directories so that the search parameters + # (insertion penalty and language model weight) will be tuned using the dev set + local/score_for_submit.sh \ + --dev exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_dev_${enhancement}_dereverb_ref \ + --eval exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_eval_${enhancement}_dereverb_ref +fi diff --git a/egs/chime5/s5b/steps b/egs/chime5/s5b/steps new file mode 120000 index 00000000000..1b186770dd1 --- /dev/null +++ b/egs/chime5/s5b/steps @@ -0,0 +1 @@ +../../wsj/s5/steps/ \ No newline at end of file diff --git a/egs/chime5/s5b/utils b/egs/chime5/s5b/utils new file mode 120000 index 00000000000..a3279dc8679 --- /dev/null +++ b/egs/chime5/s5b/utils @@ -0,0 +1 @@ +../../wsj/s5/utils/ \ No newline at end of file diff --git a/egs/wsj/s5/steps/conf/get_ctm_conf.sh b/egs/wsj/s5/steps/conf/get_ctm_conf.sh index 8dbc9f449cd..5ce39b1ddb6 
100755 --- a/egs/wsj/s5/steps/conf/get_ctm_conf.sh +++ b/egs/wsj/s5/steps/conf/get_ctm_conf.sh @@ -2,7 +2,8 @@ # Copyright Johns Hopkins University (Author: Daniel Povey) 2012. Apache 2.0. # This script produces CTM files from a decoding directory that has lattices -# present. This version gives you confidence scores. See also steps/get_ctm.sh +# present. This version gives you confidence scores using MBR decoding. +# See also steps/get_ctm.sh # begin configuration section. @@ -13,6 +14,7 @@ max_lmwt=20 use_segments=true # if we have a segments file, use it to convert # the segments to be relative to the original files. iter=final +beam=5 # pruning beam before MBR decoding #end configuration section. echo "$0 $@" # Print the command line for logging @@ -21,6 +23,8 @@ echo "$0 $@" # Print the command line for logging . parse_options.sh || exit 1; if [ $# -ne 3 ]; then + echo "This script produces CTM files from a decoding directory that has lattices " + echo "present. This version gives you confidence scores using MBR decoding." echo "Usage: $0 [options] " echo " Options:" echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." @@ -50,6 +54,7 @@ name=`basename $data`; # e.g. 
eval2000 mkdir -p $dir/scoring/log +frame_shift_opt= if [ -f $dir/../frame_shift ]; then frame_shift_opt="--frame-shift=$(cat $dir/../frame_shift)" echo "$0: $dir/../frame_shift exists, using $frame_shift_opt" @@ -68,10 +73,12 @@ if [ $stage -le 0 ]; then filter_cmd=cat fi + nj=$(cat $dir/num_jobs) + lats=$(for n in $(seq $nj); do echo -n "$dir/lat.$n.gz "; done) if [ -f $lang/phones/word_boundary.int ]; then $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/get_ctm.LMWT.log \ - mkdir -p $dir/score_LMWT/ '&&' \ - lattice-prune --inv-acoustic-scale=LMWT --beam=5 "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \ + set -o pipefail '&&' mkdir -p $dir/score_LMWT/ '&&' \ + lattice-prune --inv-acoustic-scale=LMWT --beam=$beam "ark:gunzip -c $lats|" ark:- \| \ lattice-align-words $lang/phones/word_boundary.int $model ark:- ark:- \| \ lattice-to-ctm-conf $frame_shift_opt --decode-mbr=true --inv-acoustic-scale=LMWT ark:- - \| \ utils/int2sym.pl -f 5 $lang/words.txt \| \ @@ -82,8 +89,8 @@ if [ $stage -le 0 ]; then exit 1; fi $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/get_ctm.LMWT.log \ - mkdir -p $dir/score_LMWT/ '&&' \ - lattice-prune --inv-acoustic-scale=LMWT --beam=5 "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \ + set -o pipefail '&&' mkdir -p $dir/score_LMWT/ '&&' \ + lattice-prune --inv-acoustic-scale=LMWT --beam=$beam "ark:gunzip -c $lats|" ark:- \| \ lattice-align-words-lexicon $lang/phones/align_lexicon.int $model ark:- ark:- \| \ lattice-to-ctm-conf $frame_shift_opt --decode-mbr=true --inv-acoustic-scale=LMWT ark:- - \| \ utils/int2sym.pl -f 5 $lang/words.txt \| \ diff --git a/egs/wsj/s5/steps/online/nnet2/extract_ivectors.sh b/egs/wsj/s5/steps/online/nnet2/extract_ivectors.sh index a423be7aa20..858dd4b6730 100755 --- a/egs/wsj/s5/steps/online/nnet2/extract_ivectors.sh +++ b/egs/wsj/s5/steps/online/nnet2/extract_ivectors.sh @@ -64,6 +64,10 @@ if [ -f path.sh ]; then . 
./path.sh; fi if [ $# != 4 ] && [ $# != 5 ]; then echo "Usage: $0 [options] [||] " echo " e.g.: $0 data/test data/lang exp/nnet2_online/extractor exp/tri3/decode_test exp/nnet2_online/ivectors_test" + echo "If is provided, it is converted to frame-weights " + echo "giving silence frames a weight of --silence-weight (default: 0.0). " + echo "If is provided, it must be a single archive file compressed " + echo "(using gunzip) containing per-frame weights for each utterance." echo "main options (for others, see top of script file)" echo " --config # config containing options" echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." @@ -90,7 +94,7 @@ else # 5 arguments data=$1 lang=$2 srcdir=$3 - ali_or_decode_dir=$4 + ali_or_decode_dir_or_weights=$4 dir=$5 fi @@ -102,23 +106,23 @@ done mkdir -p $dir/log silphonelist=$(cat $lang/phones/silence.csl) || exit 1; -if [ ! -z "$ali_or_decode_dir" ]; then +if [ ! -z "$ali_or_decode_dir_or_weights" ]; then - if [ -f $ali_or_decode_dir/ali.1.gz ]; then - if [ ! -f $ali_or_decode_dir/${mdl}.mdl ]; then - echo "$0: expected $ali_or_decode_dir/${mdl}.mdl to exist." + if [ -f $ali_or_decode_dir_or_weights/ali.1.gz ]; then + if [ ! -f $ali_or_decode_dir_or_weights/${mdl}.mdl ]; then + echo "$0: expected $ali_or_decode_dir_or_weights/${mdl}.mdl to exist." 
exit 1; fi - nj_orig=$(cat $ali_or_decode_dir/num_jobs) || exit 1; + nj_orig=$(cat $ali_or_decode_dir_or_weights/num_jobs) || exit 1; if [ $stage -le 0 ]; then rm $dir/weights.*.gz 2>/dev/null $cmd JOB=1:$nj_orig $dir/log/ali_to_post.JOB.log \ - gunzip -c $ali_or_decode_dir/ali.JOB.gz \| \ + gunzip -c $ali_or_decode_dir_or_weights/ali.JOB.gz \| \ ali-to-post ark:- ark:- \| \ - weight-silence-post $silence_weight $silphonelist $ali_or_decode_dir/final.mdl ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $ali_or_decode_dir_or_weights/final.mdl ark:- ark:- \| \ post-to-weights ark:- "ark:|gzip -c >$dir/weights.JOB.gz" || exit 1; # put all the weights in one archive. @@ -126,10 +130,10 @@ if [ ! -z "$ali_or_decode_dir" ]; then rm $dir/weights.*.gz || exit 1; fi - elif [ -f $ali_or_decode_dir/lat.1.gz ]; then - nj_orig=$(cat $ali_or_decode_dir/num_jobs) || exit 1; - if [ ! -f $ali_or_decode_dir/../${mdl}.mdl ]; then - echo "$0: expected $ali_or_decode_dir/../${mdl}.mdl to exist." + elif [ -f $ali_or_decode_dir_or_weights/lat.1.gz ]; then + nj_orig=$(cat $ali_or_decode_dir_or_weights/num_jobs) || exit 1; + if [ ! -f $ali_or_decode_dir_or_weights/../${mdl}.mdl ]; then + echo "$0: expected $ali_or_decode_dir_or_weights/../${mdl}.mdl to exist." exit 1; fi @@ -138,19 +142,19 @@ if [ ! 
-z "$ali_or_decode_dir" ]; then rm $dir/weights.*.gz 2>/dev/null $cmd JOB=1:$nj_orig $dir/log/lat_to_post.JOB.log \ - lattice-best-path --acoustic-scale=$acwt "ark:gunzip -c $ali_or_decode_dir/lat.JOB.gz|" ark:/dev/null ark:- \| \ + lattice-best-path --acoustic-scale=$acwt "ark:gunzip -c $ali_or_decode_dir_or_weights/lat.JOB.gz|" ark:/dev/null ark:- \| \ ali-to-post ark:- ark:- \| \ - weight-silence-post $silence_weight $silphonelist $ali_or_decode_dir/../${mdl}.mdl ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $ali_or_decode_dir_or_weights/../${mdl}.mdl ark:- ark:- \| \ post-to-weights ark:- "ark:|gzip -c >$dir/weights.JOB.gz" || exit 1; # put all the weights in one archive. for j in $(seq $nj_orig); do gunzip -c $dir/weights.$j.gz; done | gzip -c >$dir/weights.gz || exit 1; rm $dir/weights.*.gz || exit 1; fi - elif [ -f $ali_or_decode_dir ] && gunzip -c $ali_or_decode_dir >/dev/null; then - cp $ali_or_decode_dir $dir/weights.gz || exit 1; + elif [ -f $ali_or_decode_dir_or_weights ] && gunzip -c $ali_or_decode_dir_or_weights >/dev/null; then + cp $ali_or_decode_dir_or_weights $dir/weights.gz || exit 1; else - echo "$0: expected ali.1.gz or lat.1.gz to exist in $ali_or_decode_dir"; + echo "$0: expected ali.1.gz or lat.1.gz to exist in $ali_or_decode_dir_or_weights"; exit 1; fi fi @@ -169,7 +173,7 @@ if [ $sub_speaker_frames -gt 0 ]; then if [ $stage -le 1 ]; then # We work out 'fake' spk2utt files that possibly split each speaker into multiple pieces. - if [ ! -z "$ali_or_decode_dir" ]; then + if [ ! -z "$ali_or_decode_dir_or_weights" ]; then gunzip -c $dir/weights.gz | copy-vector ark:- ark,t:- | \ awk '{ sum=0; for (n=3;n $dir/utt_counts || exit 1; else @@ -230,7 +234,7 @@ else fi if [ $stage -le 2 ]; then - if [ ! -z "$ali_or_decode_dir" ]; then + if [ ! 
-z "$ali_or_decode_dir_or_weights" ]; then $cmd --num-threads $num_threads JOB=1:$nj $dir/log/extract_ivectors.JOB.log \ gmm-global-get-post --n=$num_gselect --min-post=$min_post $srcdir/final.dubm "$gmm_feats" ark:- \| \ weight-post ark:- "ark,s,cs:gunzip -c $dir/weights.gz|" ark:- \| \ diff --git a/egs/wsj/s5/utils/perturb_data_dir_speed.sh b/egs/wsj/s5/utils/perturb_data_dir_speed.sh index 99c9cbdb1f0..924ebdc3473 100755 --- a/egs/wsj/s5/utils/perturb_data_dir_speed.sh +++ b/egs/wsj/s5/utils/perturb_data_dir_speed.sh @@ -73,7 +73,7 @@ if [ -f $srcdir/segments ]; then utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/segments | \ utils/apply_map.pl -f 2 $destdir/reco_map | \ awk -v factor=$factor \ - '{printf("%s %s %.2f %.2f\n", $1, $2, $3/factor, $4/factor);}' >$destdir/segments + '{s=$3/factor; e=$4/factor; if (e > s + 0.01) { printf("%s %s %.2f %.2f\n", $1, $2, $3/factor, $4/factor);} }' >$destdir/segments utils/apply_map.pl -f 1 $destdir/reco_map <$srcdir/wav.scp | sed 's/| *$/ |/' | \ # Handle three cases of rxfilenames appropriately; "input piped command", "file offset" and "filename" diff --git a/egs/wsj/s5/utils/subset_data_dir.sh b/egs/wsj/s5/utils/subset_data_dir.sh index 93ee0971b88..4cd3f9b7711 100755 --- a/egs/wsj/s5/utils/subset_data_dir.sh +++ b/egs/wsj/s5/utils/subset_data_dir.sh @@ -123,6 +123,8 @@ function do_filtering { [ -f $srcdir/wav.scp ] && utils/filter_scp.pl $destdir/reco <$srcdir/wav.scp >$destdir/wav.scp [ -f $srcdir/reco2file_and_channel ] && \ utils/filter_scp.pl $destdir/reco <$srcdir/reco2file_and_channel >$destdir/reco2file_and_channel + [ -f $srcdir/reco2dur ] && \ + utils/filter_scp.pl $destdir/reco <$srcdir/reco2dur >$destdir/reco2dur # Filter the STM file for proper sclite scoring # Copy over the comments from STM file @@ -134,6 +136,8 @@ function do_filtering { awk '{print $1;}' $destdir/wav.scp | sort | uniq > $destdir/reco [ -f $srcdir/reco2file_and_channel ] && \ utils/filter_scp.pl $destdir/reco 
<$srcdir/reco2file_and_channel >$destdir/reco2file_and_channel + [ -f $srcdir/reco2dur ] && \ + utils/filter_scp.pl $destdir/reco <$srcdir/reco2dur >$destdir/reco2dur rm $destdir/reco fi