 
 import gc
 import logging
+import os
 import unittest
 
 import pytest
+import torchao
 from executorch.extension.pybindings.portable_lib import ExecuTorchModule
+from packaging.version import parse
 from transformers import AutoConfig, AutoTokenizer
 from transformers.testing_utils import slow
 
 from optimum.executorch import ExecuTorchModelForCausalLM
 
 from ..utils import check_causal_lm_output_quality
 
 
-@pytest.mark.skip(reason="Test Phi-4-mini (3.8B) will require runner to be configured with larger RAM")
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+is_ci = os.environ.get("GITHUB_ACTIONS") == "true"
+
+
 class ExecuTorchModelIntegrationTest(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
     @slow
     @pytest.mark.run_slow
+    @pytest.mark.skipif(is_ci, reason="Phi-4-mini (3.8B) requires a runner configured with larger RAM")
     def test_phi4_text_generation(self):
         model_id = "microsoft/Phi-4-mini-instruct"
         config = AutoConfig.from_pretrained(model_id)
@@ -61,3 +69,92 @@ def test_phi4_text_generation(self):
         gc.collect()
 
         self.assertTrue(check_causal_lm_output_quality(model_id, generated_tokens))
+
+    @slow
+    @pytest.mark.run_slow
+    @pytest.mark.skipif(
+        parse(torchao.__version__) < parse("0.11.0.dev0"),
+        reason="Only available on torchao >= 0.11.0.dev0",
+    )
+    def test_phi4_text_generation_with_quantized_pte_from_hub(self):
+        model_id = "pytorch/Phi-4-mini-instruct-8da4w"
+        config = AutoConfig.from_pretrained(model_id)
+        # NOTE: To make the model exportable we need to set the rope scaling to default to avoid hitting
+        # the data-dependent control flow in _longrope_frequency_update. Alternatively, we can rewrite
+        # that function to avoid the data-dependent control flow.
+        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+            config.rope_scaling["type"] = "default"
+        model = ExecuTorchModelForCausalLM.from_pretrained(
+            model_id, recipe="xnnpack", config=config, file_name="phi4-mini-8da4w.pte"
+        )
+        self.assertIsInstance(model, ExecuTorchModelForCausalLM)
+        self.assertIsInstance(model.model, ExecuTorchModule)
+
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        generated_text = model.text_generation(
+            tokenizer=tokenizer,
+            prompt="My favourite condiment is ",
+            max_seq_len=64,
+        )
+        logging.info(f"\nGenerated text:\n\t{generated_text}")
+
+        if not is_ci:
+            generated_tokens = tokenizer(generated_text, return_tensors="pt").input_ids
+
+            # Free memory before loading eager for quality check
+            del model
+            del tokenizer
+            gc.collect()
+
+            self.assertTrue(
+                check_causal_lm_output_quality(
+                    "microsoft/Phi-4-mini-instruct",
+                    generated_tokens,
+                )
+            )
+
+    @slow
+    @pytest.mark.run_slow
+    @pytest.mark.skipif(
+        parse(torchao.__version__) < parse("0.11.0.dev0"),
+        reason="Only available on torchao >= 0.11.0.dev0",
+    )
+    def test_phi4_text_generation_with_quantized_ckp(self):
+        model_id = "pytorch/Phi-4-mini-instruct-8da4w"
+        config = AutoConfig.from_pretrained(model_id)
+        # NOTE: To make the model exportable we need to set the rope scaling to default to avoid hitting
+        # the data-dependent control flow in _longrope_frequency_update. Alternatively, we can rewrite
+        # that function to avoid the data-dependent control flow.
+        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+            config.rope_scaling["type"] = "default"
+        model = ExecuTorchModelForCausalLM.from_pretrained(
+            model_id,
+            recipe="xnnpack",
+            config=config,
+            export=True,
+        )
+        self.assertIsInstance(model, ExecuTorchModelForCausalLM)
+        self.assertIsInstance(model.model, ExecuTorchModule)
+
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        generated_text = model.text_generation(
+            tokenizer=tokenizer,
+            prompt="My favourite condiment is ",
+            max_seq_len=64,
+        )
+        logging.info(f"\nGenerated text:\n\t{generated_text}")
+
+        if not is_ci:
+            generated_tokens = tokenizer(generated_text, return_tensors="pt").input_ids
+
+            # Free memory before loading eager for quality check
+            del model
+            del tokenizer
+            gc.collect()
+
+            self.assertTrue(
+                check_causal_lm_output_quality(
+                    "microsoft/Phi-4-mini-instruct",
+                    generated_tokens,
+                )
+            )
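
Outside the unittest harness, the new Hub-PTE path added by this diff reduces to a handful of calls. Below is a minimal standalone sketch based only on the API usage visible above (the recipe, config, and file_name arguments to from_pretrained, and text_generation); the exact signatures and the .pte file name are taken from the test and should be treated as assumptions rather than documented guarantees.

# Minimal sketch of the quantized-PTE-from-Hub flow exercised by the new test.
# Assumes optimum-executorch's ExecuTorchModelForCausalLM API as used in the diff;
# the model id and .pte file name are copied from the test above.
from transformers import AutoConfig, AutoTokenizer

from optimum.executorch import ExecuTorchModelForCausalLM

model_id = "pytorch/Phi-4-mini-instruct-8da4w"

config = AutoConfig.from_pretrained(model_id)
# Force default rope scaling so export/load does not hit the data-dependent
# branch in _longrope_frequency_update (same workaround as in the tests).
if getattr(config, "rope_scaling", None) is not None:
    config.rope_scaling["type"] = "default"

# Load the pre-quantized (8da4w) ExecuTorch program published on the Hub.
model = ExecuTorchModelForCausalLM.from_pretrained(
    model_id,
    recipe="xnnpack",
    config=config,
    file_name="phi4-mini-8da4w.pte",  # assumed file name, as in the test
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
print(model.text_generation(tokenizer=tokenizer, prompt="My favourite condiment is ", max_seq_len=64))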