diff --git a/tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py b/tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py
index f12b0fd3e0a3..dc40a7f64715 100644
--- a/tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py
+++ b/tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py
@@ -15,6 +15,8 @@
 
 import unittest
 
+import pytest
+
 from transformers import HunYuanMoEV1Config, is_torch_available
 from transformers.testing_utils import (
     cleanup,
@@ -26,6 +28,8 @@
 
 if is_torch_available():
     from transformers import (
+        AutoModelForCausalLM,
+        AutoTokenizer,
         HunYuanMoEV1ForCausalLM,
         HunYuanMoEV1ForSequenceClassification,
         HunYuanMoEV1Model,
@@ -78,6 +82,11 @@ def is_pipeline_test_to_skip(
     ):
         return True
 
+    @unittest.skip("torch.compile is not supported for the HunYuan MoE model")
+    @pytest.mark.torch_compile_test
+    def test_generate_compilation_all_outputs(self):
+        pass
+
 
 @require_torch
 class HunYuanMoEV1IntegrationTest(unittest.TestCase):
@@ -90,24 +99,22 @@ def tearDown(self):
     @slow
     def test_model_generation(self):
         # we will compele this when model file change over
-        pass
-        # EXPECTED_ANSWER = "\nRegular exercise offers numerous physical, mental, and emotional benefits. It improves cardiovascular health, strengthens muscles and bones, boosts metabolism, and helps"
-        # prompt = "Write a short summary of the benefits of regular exercise "
-        # tokenizer = AutoTokenizer.from_pretrained(
-        #     "tencent/Hunyuan-A13B-Instruct", use_fast=False, trust_remote_code=True
-        # )
-        # model = HunYuanMoEV1ForCausalLM.from_pretrained("tencent/Hunyuan-A13B-Instruct", device_map="auto")
-        # messages = [
-        #     {"role": "user", "content": prompt},
-        # ]
-        # tokenized_chat = tokenizer.apply_chat_template(
-        #     messages,
-        #     tokenize=True,
-        #     add_generation_prompt=True,
-        #     return_tensors="pt",
-        #     enable_thinking=False,  # Toggle thinking mode (default: True)
-        # )
-        # generated_ids = model.generate(tokenized_chat.to(model.device), max_new_tokens=30, top_k=1)
-        # text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
-        # answer = text.split("")[1]
-        # self.assertEqual(EXPECTED_ANSWER, answer)
+        # pass
+        EXPECTED_ANSWER = "\nRegular exercise offers numerous physical, mental, and emotional benefits. It improves cardiovascular health, strengthens muscles and bones, boosts metabolism, and helps"
+        prompt = "Write a short summary of the benefits of regular exercise "
+        tokenizer = AutoTokenizer.from_pretrained("tencent/Hunyuan-A13B-Instruct")
+        model = AutoModelForCausalLM.from_pretrained("tencent/Hunyuan-A13B-Instruct", device_map="auto")
+        messages = [
+            {"role": "user", "content": prompt},
+        ]
+        tokenized_chat = tokenizer.apply_chat_template(
+            messages,
+            tokenize=True,
+            add_generation_prompt=True,
+            return_tensors="pt",
+            enable_thinking=False,  # Toggle thinking mode (default: True)
+        )
+        generated_ids = model.generate(tokenized_chat.to(model.device), max_new_tokens=30, top_k=1)
+        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+        answer = text.split("")[1]
+        self.assertEqual(EXPECTED_ANSWER, answer)
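
Note on running the re-enabled check: test_model_generation is decorated with @slow, and slow tests in the transformers suite only execute when the RUN_SLOW environment variable is set. A minimal invocation sketch for just this test (the pytest node ID is inferred from the class and method names in the last hunk, and assumes the tencent/Hunyuan-A13B-Instruct checkpoint is reachable):

    RUN_SLOW=1 python -m pytest -v tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py::HunYuanMoEV1IntegrationTest::test_model_generation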