diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py
index 92c4d2b37936..5f7ba4ca48c3 100755
--- a/examples/pytorch/text-classification/run_glue.py
+++ b/examples/pytorch/text-classification/run_glue.py
@@ -519,11 +519,11 @@ def compute_metrics(p: EvalPrediction):
 
             if task == "mnli-mm":
                 metrics = {k + "_mm": v for k, v in metrics.items()}
-            if "mnli" in task:
+            if task is not None and "mnli" in task:
                 combined.update(metrics)
 
             trainer.log_metrics("eval", metrics)
-            trainer.save_metrics("eval", combined if "mnli" in task else metrics)
+            trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics)
 
     if training_args.do_predict:
         logger.info("*** Predict ***")
diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py
index dae4fa421845..5ad7b4b1f788 100644
--- a/utils/tests_fetcher.py
+++ b/utils/tests_fetcher.py
@@ -465,9 +465,9 @@ def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None):
             test_files_to_run.append(f)
         # Example files are tested separately
         elif f.startswith("examples/pytorch"):
-            test_files_to_run.append("examples/pytorch/test_examples.py")
+            test_files_to_run.append("examples/pytorch/test_pytorch_examples.py")
         elif f.startswith("examples/flax"):
-            test_files_to_run.append("examples/flax/test_examples.py")
+            test_files_to_run.append("examples/flax/test_flax_examples.py")
         else:
             new_tests = module_to_test_file(f)
             if new_tests is not None: