
Commit 567766b

committed Feb 3, 2025
add test
1 parent e1d0eb4 commit 567766b

File tree

2 files changed: +14 -14 lines changed


optimum/executorch/modeling.py

Lines changed: 0 additions & 2 deletions
@@ -73,7 +73,6 @@ class ExecuTorchModelForCausalLM(OptimizedModel):
 
     def __init__(self, model: "ExecuTorchModule", config: "PretrainedConfig"):
         super().__init__(model, config)
-        # self.model = model
         metadata = self.model.method_names()
         logging.info(f"Load all static methods: {metadata}")
         if "use_kv_cache" in metadata:
@@ -120,7 +119,6 @@ def _from_pretrained(
         force_download: bool = False,
         local_files_only: bool = False,
         **kwargs,
-        # model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
     ) -> "ExecuTorchModelForCausalLM":
         """
         Load a pre-trained ExecuTorch model from a local directory or hosted on the HF hub.
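For reference, the from_pretrained entry point touched above is what the new tests below exercise. A minimal usage sketch, with the model id and recipe borrowed from those tests (this snippet is not part of the commit):

    from optimum.executorch import ExecuTorchModelForCausalLM

    # Export a Hub checkpoint to ExecuTorch on the fly (export=True) and
    # load it; recipe="xnnpack" selects the XNNPACK lowering recipe.
    model = ExecuTorchModelForCausalLM.from_pretrained(
        "optimum-internal-testing/tiny-random-llama", export=True, recipe="xnnpack"
    )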

tests/models/test_modeling.py

Lines changed: 14 additions & 12 deletions
@@ -17,38 +17,40 @@
 import tempfile
 import unittest
 
-import pytest
 from executorch.extension.pybindings.portable_lib import ExecuTorchModule
-from transformers.testing_utils import slow
 
 from optimum.executorch import ExecuTorchModelForCausalLM
+from optimum.exporters.executorch import main_export
 
 
 class ExecuTorchModelIntegrationTest(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    @slow
-    @pytest.mark.run_slow
     def test_load_model_from_hub(self):
-        model = ExecuTorchModelForCausalLM.from_pretrained("NousResearch/Llama-3.2-1B", export=True, recipe="xnnpack")
+        model_id = "optimum-internal-testing/tiny-random-llama"
+
+        model = ExecuTorchModelForCausalLM.from_pretrained(model_id, export=True, recipe="xnnpack")
         self.assertIsInstance(model, ExecuTorchModelForCausalLM)
         self.assertIsInstance(model.model, ExecuTorchModule)
 
-    @slow
-    @pytest.mark.run_slow
-    def test_load_model_from_local_path(self):
-        from optimum.exporters.executorch import main_export
+    def test_load_et_model_from_hub(self):
+        model_id = "optimum-internal-testing/tiny-random-llama"
+
+        model = ExecuTorchModelForCausalLM.from_pretrained(
+            model_id, export=False, revision="executorch", recipe="xnnpack"
+        )
+        self.assertIsInstance(model, ExecuTorchModelForCausalLM)
+        self.assertIsInstance(model.model, ExecuTorchModule)
 
-        model_id = "NousResearch/Llama-3.2-1B"
-        task = "text-generation"
+    def test_load_model_from_local_path(self):
+        model_id = "optimum-internal-testing/tiny-random-llama"
         recipe = "xnnpack"
 
         with tempfile.TemporaryDirectory() as tempdir:
             # Export to a local dir
             main_export(
                 model_name_or_path=model_id,
-                task=task,
                 recipe=recipe,
                 output_dir=tempdir,
             )
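The hunk ends at the main_export(...) call. A plausible continuation, loading the exported artifact back from the local directory, would look roughly like the sketch below; it mirrors test_load_et_model_from_hub with export=False, but the exact lines fall outside this hunk and are an assumption:

    # Hypothetical follow-up inside the same `with` block (not shown in the
    # diff): load the .pte artifact just written to tempdir and repeat the
    # type checks.
    model = ExecuTorchModelForCausalLM.from_pretrained(tempdir, export=False, recipe="xnnpack")
    self.assertIsInstance(model, ExecuTorchModelForCausalLM)
    self.assertIsInstance(model.model, ExecuTorchModule)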
