diff --git a/examples/seq2seq/test_bash_script.py b/examples/seq2seq/test_bash_script.py
index 9e4ffc221714..53922f2b645b 100644
--- a/examples/seq2seq/test_bash_script.py
+++ b/examples/seq2seq/test_bash_script.py
@@ -13,7 +13,7 @@
 from finetune import SummarizationModule, main
 from transformers import MarianMTModel
 from transformers.file_utils import cached_path
-from transformers.testing_utils import TestCasePlus, require_torch_gpu, require_torch_non_multi_gpu_but_fix_me, slow
+from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
 from utils import load_json
 
 
@@ -32,7 +32,6 @@ def setUp(self):
 
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_model_download(self):
         """This warms up the cache so that we can time the next test without including download time, which varies between machines."""
         MarianMTModel.from_pretrained(MARIAN_MODEL)
@@ -40,7 +39,6 @@ def test_model_download(self):
     # @timeout_decorator.timeout(1200)
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_train_mbart_cc25_enro_script(self):
         env_vars_to_replace = {
             "$MAX_LEN": 64,
@@ -129,7 +127,6 @@ class TestDistilMarianNoTeacher(TestCasePlus):
     @timeout_decorator.timeout(600)
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_opus_mt_distill_script(self):
         data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
         env_vars_to_replace = {
diff --git a/examples/seq2seq/test_fsmt_bleu_score.py b/examples/seq2seq/test_fsmt_bleu_score.py
index 2d5df03c9c17..beb7f2bc9857 100644
--- a/examples/seq2seq/test_fsmt_bleu_score.py
+++ b/examples/seq2seq/test_fsmt_bleu_score.py
@@ -19,13 +19,7 @@
 
 from parameterized import parameterized
 from transformers import FSMTForConditionalGeneration, FSMTTokenizer
-from transformers.testing_utils import (
-    get_tests_dir,
-    require_torch,
-    require_torch_non_multi_gpu_but_fix_me,
-    slow,
-    torch_device,
-)
+from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
 from utils import calculate_bleu
 
 
@@ -54,7 +48,6 @@ def get_model(self, mname):
         ]
     )
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_bleu_scores(self, pair, min_bleu_score):
         # note: this test is not testing the best performance since it only evals a small batch
         # but it should be enough to detect a regression in the output quality
diff --git a/examples/seq2seq/test_seq2seq_examples.py b/examples/seq2seq/test_seq2seq_examples.py
index 497c1942ed14..4793aeba759a 100644
--- a/examples/seq2seq/test_seq2seq_examples.py
+++ b/examples/seq2seq/test_seq2seq_examples.py
@@ -19,14 +19,7 @@
 from run_eval_search import run_search
 from transformers import AutoConfig, AutoModelForSeq2SeqLM
 from transformers.hf_api import HfApi
-from transformers.testing_utils import (
-    CaptureStderr,
-    CaptureStdout,
-    TestCasePlus,
-    require_torch_gpu,
-    require_torch_non_multi_gpu_but_fix_me,
-    slow,
-)
+from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow
 from utils import ROUGE_KEYS, label_smoothed_nll_loss, lmap, load_json
 
 
@@ -135,7 +128,6 @@ def setUpClass(cls):
 
     @slow
     @require_torch_gpu
-    @require_torch_non_multi_gpu_but_fix_me
     def test_hub_configs(self):
         """I put require_torch_gpu cause I only want this to run with self-scheduled."""
 
@@ -153,12 +145,10 @@ def test_hub_configs(self):
                 failures.append(m)
         assert not failures, f"The following models could not be loaded through AutoConfig: {failures}"
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_no_teacher(self):
         updates = dict(student_encoder_layers=2, student_decoder_layers=1, no_teacher=True)
         self._test_distiller_cli(updates)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_checkpointing_with_teacher(self):
         updates = dict(
             student_encoder_layers=2,
@@ -183,7 +173,6 @@ def test_distill_checkpointing_with_teacher(self):
         convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new)
         assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin"))
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_loss_fn(self):
         model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY)
         input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"]
@@ -204,7 +193,6 @@ def test_loss_fn(self):
         # TODO: understand why this breaks
         self.assertEqual(nll_loss, model_computed_loss)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_mbart(self):
         updates = dict(
             student_encoder_layers=2,
@@ -229,7 +217,6 @@ def test_distill_mbart(self):
         assert len(all_files) > 2
         self.assertEqual(len(transformer_ckpts), 2)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_t5(self):
         updates = dict(
             student_encoder_layers=1,
@@ -241,7 +228,6 @@ def test_distill_t5(self):
         )
         self._test_distiller_cli(updates)
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_distill_different_base_models(self):
         updates = dict(
             teacher=T5_TINY,
@@ -321,21 +307,18 @@ def run_eval_tester(self, model):
 
     # test one model to quickly (no-@slow) catch simple problems and do an
     # extensive testing of functionality with multiple models as @slow separately
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval(self):
         self.run_eval_tester(T5_TINY)
 
     # any extra models should go into the list here - can be slow
     @parameterized.expand([BART_TINY, MBART_TINY])
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval_slow(self, model):
         self.run_eval_tester(model)
 
     # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
     @parameterized.expand([T5_TINY, MBART_TINY])
     @slow
-    @require_torch_non_multi_gpu_but_fix_me
     def test_run_eval_search(self, model):
         input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
         output_file_name = input_file_name.parent / "utest_output.txt"
@@ -386,7 +369,6 @@ def test_run_eval_search(self, model):
     @parameterized.expand(
         [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY],
     )
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune(self, model):
         args_d: dict = CHEAP_ARGS.copy()
         task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization"
@@ -438,7 +420,6 @@ def test_finetune(self, model):
         assert isinstance(example_batch, dict)
         assert len(example_batch) >= 4
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune_extra_model_args(self):
         args_d: dict = CHEAP_ARGS.copy()
 
@@ -489,7 +470,6 @@ def test_finetune_extra_model_args(self):
             model = main(args)
         assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute"
 
-    @require_torch_non_multi_gpu_but_fix_me
     def test_finetune_lr_schedulers(self):
         args_d: dict = CHEAP_ARGS.copy()