diff --git a/egs/wsj/s5/steps/libs/nnet3/train/common.py b/egs/wsj/s5/steps/libs/nnet3/train/common.py
index 4c6a37fb837..b20c64ab9ba 100644
--- a/egs/wsj/s5/steps/libs/nnet3/train/common.py
+++ b/egs/wsj/s5/steps/libs/nnet3/train/common.py
@@ -941,9 +941,10 @@ def __init__(self,
                                   action=common_lib.NullstrToNoneAction,
                                   help="Script to launch egs jobs")
          self.parser.add_argument("--use-gpu", type=str,
-                                 action=common_lib.StrToBoolAction,
-                                 choices=["true", "false"],
-                                 help="Use GPU for training", default=True)
+                                 choices=["true", "false", "yes", "no", "wait"],
+                                 help="Use GPU for training. "
+                                 "Note 'true' and 'false' are deprecated.",
+                                 default="yes")
          self.parser.add_argument("--cleanup", type=str,
                                   action=common_lib.StrToBoolAction,
                                   choices=["true", "false"], default=True,
diff --git a/egs/wsj/s5/steps/nnet3/chain/e2e/train_e2e.py b/egs/wsj/s5/steps/nnet3/chain/e2e/train_e2e.py
index 20d9c73eaf0..99f622d79a7 100755
--- a/egs/wsj/s5/steps/nnet3/chain/e2e/train_e2e.py
+++ b/egs/wsj/s5/steps/nnet3/chain/e2e/train_e2e.py
@@ -210,7 +210,9 @@ def process_args(args):
 
     # set the options corresponding to args.use_gpu
     run_opts = common_train_lib.RunOpts()
-    if args.use_gpu:
+    if args.use_gpu in ["true", "false"]:
+        args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
+    if args.use_gpu in ["yes", "wait"]:
         if not common_lib.check_if_cuda_compiled():
             logger.warning(
                 """You are running with one thread but you have not compiled
@@ -219,10 +221,9 @@ def process_args(args):
                    ./configure; make""")
 
         run_opts.train_queue_opt = "--gpu 1"
-        run_opts.parallel_train_opts = ""
+        run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
         run_opts.combine_queue_opt = "--gpu 1"
-        run_opts.combine_gpu_opt = ""
-
+        run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
     else:
         logger.warning("Without using a GPU this will be very slow. "
                        "nnet3 does not yet support multiple threads.")
diff --git a/egs/wsj/s5/steps/nnet3/chain/train.py b/egs/wsj/s5/steps/nnet3/chain/train.py
index 613b70fd192..6a68d9ecb6e 100755
--- a/egs/wsj/s5/steps/nnet3/chain/train.py
+++ b/egs/wsj/s5/steps/nnet3/chain/train.py
@@ -228,7 +228,9 @@ def process_args(args):
         args.transform_dir = args.lat_dir
     # set the options corresponding to args.use_gpu
     run_opts = common_train_lib.RunOpts()
-    if args.use_gpu:
+    if args.use_gpu in ["true", "false"]:
+        args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
+    if args.use_gpu in ["yes", "wait"]:
         if not common_lib.check_if_cuda_compiled():
             logger.warning(
                 """You are running with one thread but you have not compiled
@@ -237,9 +239,9 @@ def process_args(args):
                    ./configure; make""")
 
         run_opts.train_queue_opt = "--gpu 1"
-        run_opts.parallel_train_opts = ""
+        run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
         run_opts.combine_queue_opt = "--gpu 1"
-        run_opts.combine_gpu_opt = ""
+        run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
     else:
         logger.warning("Without using a GPU this will be very slow. "
                        "nnet3 does not yet support multiple threads.")
diff --git a/egs/wsj/s5/steps/nnet3/train_dnn.py b/egs/wsj/s5/steps/nnet3/train_dnn.py
index 2cb314cca61..dd1c97b350d 100755
--- a/egs/wsj/s5/steps/nnet3/train_dnn.py
+++ b/egs/wsj/s5/steps/nnet3/train_dnn.py
@@ -118,7 +118,9 @@ def process_args(args):
 
     # set the options corresponding to args.use_gpu
     run_opts = common_train_lib.RunOpts()
-    if args.use_gpu:
+    if args.use_gpu in ["true", "false"]:
+        args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
+    if args.use_gpu in ["yes", "wait"]:
         if not common_lib.check_if_cuda_compiled():
             logger.warning(
                 """You are running with one thread but you have not compiled
@@ -127,11 +129,12 @@ def process_args(args):
                    ./configure; make""")
 
         run_opts.train_queue_opt = "--gpu 1"
-        run_opts.parallel_train_opts = ""
-        run_opts.combine_gpu_opt = ""
+        run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
+        run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.combine_queue_opt = "--gpu 1"
-        run_opts.prior_gpu_opt = "--use-gpu=yes"
+        run_opts.prior_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.prior_queue_opt = "--gpu 1"
+
     else:
         logger.warning("Without using a GPU this will be very slow. "
                        "nnet3 does not yet support multiple threads.")
diff --git a/egs/wsj/s5/steps/nnet3/train_raw_dnn.py b/egs/wsj/s5/steps/nnet3/train_raw_dnn.py
index 14922247cd3..0e787b0b647 100755
--- a/egs/wsj/s5/steps/nnet3/train_raw_dnn.py
+++ b/egs/wsj/s5/steps/nnet3/train_raw_dnn.py
@@ -126,7 +126,9 @@ def process_args(args):
 
     # set the options corresponding to args.use_gpu
     run_opts = common_train_lib.RunOpts()
-    if args.use_gpu:
+    if args.use_gpu in ["true", "false"]:
+        args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
+    if args.use_gpu in ["yes", "wait"]:
         if not common_lib.check_if_cuda_compiled():
             logger.warning(
                 """You are running with one thread but you have not compiled
@@ -135,10 +137,10 @@ def process_args(args):
                    ./configure; make""")
 
         run_opts.train_queue_opt = "--gpu 1"
-        run_opts.parallel_train_opts = ""
-        run_opts.combine_gpu_opt = ""
+        run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
+        run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.combine_queue_opt = "--gpu 1"
-        run_opts.prior_gpu_opt = "--use-gpu=yes"
+        run_opts.prior_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.prior_queue_opt = "--gpu 1"
 
     else:
diff --git a/egs/wsj/s5/steps/nnet3/train_raw_rnn.py b/egs/wsj/s5/steps/nnet3/train_raw_rnn.py
index 4623756caba..bd94fb7cb94 100755
--- a/egs/wsj/s5/steps/nnet3/train_raw_rnn.py
+++ b/egs/wsj/s5/steps/nnet3/train_raw_rnn.py
@@ -179,7 +179,9 @@ def process_args(args):
 
     # set the options corresponding to args.use_gpu
     run_opts = common_train_lib.RunOpts()
-    if args.use_gpu:
+    if args.use_gpu in ["true", "false"]:
+        args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
+    if args.use_gpu in ["yes", "wait"]:
         if not common_lib.check_if_cuda_compiled():
             logger.warning(
                 """You are running with one thread but you have not compiled
@@ -188,10 +190,10 @@ def process_args(args):
                    ./configure; make""")
 
         run_opts.train_queue_opt = "--gpu 1"
-        run_opts.parallel_train_opts = ""
-        run_opts.combine_gpu_opt = ""
+        run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
+        run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.combine_queue_opt = "--gpu 1"
-        run_opts.prior_gpu_opt = "--use-gpu=yes"
+        run_opts.prior_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.prior_queue_opt = "--gpu 1"
 
     else:
diff --git a/egs/wsj/s5/steps/nnet3/train_rnn.py b/egs/wsj/s5/steps/nnet3/train_rnn.py
index fd74e5c9f44..83a1da8eca1 100755
--- a/egs/wsj/s5/steps/nnet3/train_rnn.py
+++ b/egs/wsj/s5/steps/nnet3/train_rnn.py
@@ -173,7 +173,9 @@ def process_args(args):
 
     # set the options corresponding to args.use_gpu
     run_opts = common_train_lib.RunOpts()
-    if args.use_gpu:
+    if args.use_gpu in ["true", "false"]:
+        args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
+    if args.use_gpu in ["yes", "wait"]:
         if not common_lib.check_if_cuda_compiled():
             logger.warning(
                 """You are running with one thread but you have not compiled
@@ -182,10 +184,10 @@ def process_args(args):
                    ./configure; make""")
 
         run_opts.train_queue_opt = "--gpu 1"
-        run_opts.parallel_train_opts = ""
-        run_opts.combine_gpu_opt = ""
+        run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
+        run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.combine_queue_opt = "--gpu 1"
-        run_opts.prior_gpu_opt = "--use-gpu=yes"
+        run_opts.prior_gpu_opt = "--use-gpu={}".format(args.use_gpu)
         run_opts.prior_queue_opt = "--gpu 1"
 
     else:
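
Note (not part of the patch): every process_args() hunk above repeats the same three-line compatibility shim before branching on the new string values. A minimal standalone sketch of that logic, where the helper name and the asserts are illustrative rather than code from the patch:

def normalize_use_gpu(value):
    """Fold the deprecated 'true'/'false' values into the new string
    options; 'yes', 'no' and 'wait' pass through unchanged."""
    if value in ["true", "false"]:
        return "yes" if value == "true" else "no"
    return value

assert normalize_use_gpu("true") == "yes"    # deprecated alias
assert normalize_use_gpu("false") == "no"    # deprecated alias
assert normalize_use_gpu("wait") == "wait"   # new value, forwarded as-is

With "yes" or "wait", the scripts then forward the normalized value verbatim to the nnet3 binaries via "--use-gpu={}".format(args.use_gpu) in parallel_train_opts, combine_gpu_opt and prior_gpu_opt, replacing the previous hard-coded "" and "--use-gpu=yes".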