@@ -15,10 +15,13 @@
 
 import gc
 import logging
+import os
 import unittest
 
 import pytest
+import torchao
 from executorch.extension.pybindings.portable_lib import ExecuTorchModule
+from packaging.version import parse
 from transformers import AutoConfig, AutoTokenizer
 from transformers.testing_utils import slow
 
@@ -27,13 +30,21 @@
 from ..utils import check_causal_lm_output_quality
 
 
-@pytest.mark.skip(reason="Test Phi-4-mini (3.8B) will require runner to be configured with larger RAM")
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+is_ci = os.environ.get("GITHUB_ACTIONS") == "true"
+
+
 class ExecuTorchModelIntegrationTest(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
     @slow
     @pytest.mark.run_slow
+    @pytest.mark.skipif(
+        is_ci,
+        reason="Test Phi-4-mini (3.8B) will require runner to be configured with larger RAM",
+    )
     def test_phi4_text_generation(self):
         model_id = "microsoft/Phi-4-mini-instruct"
         config = AutoConfig.from_pretrained(model_id)
@@ -61,3 +72,92 @@ def test_phi4_text_generation(self):
         gc.collect()
 
         self.assertTrue(check_causal_lm_output_quality(model_id, generated_tokens))
+
+    @slow
+    @pytest.mark.run_slow
+    @pytest.mark.skipif(
+        parse(torchao.__version__) < parse("0.11.0.dev0"),
+        reason="Only available on torchao >= 0.11.0.dev0",
+    )
+    def test_phi4_text_generation_with_quantized_pte_from_hub(self):
+        model_id = "pytorch/Phi-4-mini-instruct-8da4w"
+        config = AutoConfig.from_pretrained(model_id)
+        # NOTE: To make the model exportable we need to set the rope scaling to default to avoid hitting
+        # the data-dependent control flow in _longrope_frequency_update. Alternatively, we can rewrite
+        # that function to avoid the data-dependent control flow.
+        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+            config.rope_scaling["type"] = "default"
+        model = ExecuTorchModelForCausalLM.from_pretrained(
+            model_id, recipe="xnnpack", config=config, file_name="phi4-mini-8da4w.pte"
+        )
+        self.assertIsInstance(model, ExecuTorchModelForCausalLM)
+        self.assertIsInstance(model.model, ExecuTorchModule)
+
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        generated_text = model.text_generation(
+            tokenizer=tokenizer,
+            prompt="My favourite condiment is ",
+            max_seq_len=64,
+        )
+        logging.info(f"\nGenerated text:\n\t{generated_text}")
+
+        if not is_ci:
+            generated_tokens = tokenizer(generated_text, return_tensors="pt").input_ids
+
+            # Free memory before loading eager for quality check
+            del model
+            del tokenizer
+            gc.collect()
+
+            self.assertTrue(
+                check_causal_lm_output_quality(
+                    "microsoft/Phi-4-mini-instruct",
+                    generated_tokens,
+                )
+            )
+
+    @slow
+    @pytest.mark.run_slow
+    @pytest.mark.skipif(
+        parse(torchao.__version__) < parse("0.11.0.dev0"),
+        reason="Only available on torchao >= 0.11.0.dev0",
+    )
+    def test_phi4_text_generation_with_quantized_ckp(self):
+        model_id = "pytorch/Phi-4-mini-instruct-8da4w"
+        config = AutoConfig.from_pretrained(model_id)
+        # NOTE: To make the model exportable we need to set the rope scaling to default to avoid hitting
+        # the data-dependent control flow in _longrope_frequency_update. Alternatively, we can rewrite
+        # that function to avoid the data-dependent control flow.
+        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+            config.rope_scaling["type"] = "default"
+        model = ExecuTorchModelForCausalLM.from_pretrained(
+            model_id,
+            recipe="xnnpack",
+            config=config,
+            export=True,
+        )
+        self.assertIsInstance(model, ExecuTorchModelForCausalLM)
+        self.assertIsInstance(model.model, ExecuTorchModule)
+
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        generated_text = model.text_generation(
+            tokenizer=tokenizer,
+            prompt="My favourite condiment is ",
+            max_seq_len=64,
+        )
+        logging.info(f"\nGenerated text:\n\t{generated_text}")
+
+        if not is_ci:
+            generated_tokens = tokenizer(generated_text, return_tensors="pt").input_ids
+
+            # Free memory before loading eager for quality check
+            del model
+            del tokenizer
+            gc.collect()
+
+            self.assertTrue(
+                check_causal_lm_output_quality(
+                    "microsoft/Phi-4-mini-instruct",
+                    generated_tokens,
+                )
+            )
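For reference, a minimal standalone sketch of the usage the new Hub-quantized test exercises, outside the unittest harness. It is a sketch under assumptions: the `optimum.executorch` import path for ExecuTorchModelForCausalLM is not shown in this diff and is assumed; everything else (model id, recipe, `.pte` file name, and the `text_generation` call) is taken directly from the added test.

# Standalone usage sketch (not part of the diff above).
# Assumption: ExecuTorchModelForCausalLM is importable from optimum.executorch.
from optimum.executorch import ExecuTorchModelForCausalLM  # assumed import path
from transformers import AutoConfig, AutoTokenizer

model_id = "pytorch/Phi-4-mini-instruct-8da4w"

# Same workaround as in the tests: force default rope scaling so loading/export does not
# hit the data-dependent control flow in _longrope_frequency_update.
config = AutoConfig.from_pretrained(model_id)
if getattr(config, "rope_scaling", None) is not None:
    config.rope_scaling["type"] = "default"

# Load the pre-exported, 8da4w-quantized PTE published on the Hub with the XNNPACK recipe.
model = ExecuTorchModelForCausalLM.from_pretrained(
    model_id, recipe="xnnpack", config=config, file_name="phi4-mini-8da4w.pte"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
print(model.text_generation(tokenizer=tokenizer, prompt="My favourite condiment is ", max_seq_len=64))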