-
-
Notifications
You must be signed in to change notification settings - Fork 3.7k
Description
from unsloth import FastModel
model, tokenizer = FastModel.from_pretrained(
"thuml/timer-base-84m",
trust_remote_code = True,
)
Unsloth: WARNING `trust_remote_code` is True.
Are you certain you want to do remote code execution?
==((====))== Unsloth 2025.9.1: Fast Siglip patching. Transformers: 4.56.1.
\ /| NVIDIA GeForce RTX 3060. Num GPUs = 1. Max memory: 12.0 GB. Platform: Windows.
O^O/ _/ \ Torch: 2.7.0+cu126. CUDA: 8.6. CUDA Toolkit: 12.6. Triton: 3.4.0
\ / Bfloat16 = TRUE. FA [Xformers = 0.0.30. FA2 = False]
"-____-" Free license: https://github.com/unslothai/unsloth
Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!
Unsloth: Siglip does not support SDPA - switching to fast eager.
AttributeError                            Traceback (most recent call last)
File C:\MiniForge3\envs\gpt\Lib\site-packages\transformers\dynamic_module_utils.py:719, in resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code, error_message, upstream_repo)
718 try:
--> 719 prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error)
720 signal.alarm(TIME_OUT_REMOTE_CODE)
AttributeError: module 'signal' has no attribute 'SIGALRM'
During handling of the above exception, another exception occurred:
ValueError                                Traceback (most recent call last)
Cell In[2], line 2
1 from unsloth import FastModel
----> 2 model, tokenizer = FastModel.from_pretrained(
3 "thuml/timer-base-84m",
4 trust_remote_code = True,
5 )
File C:\MiniForge3\envs\gpt\Lib\site-packages\unsloth\models\loader.py:857, in FastModel.from_pretrained(model_name, max_seq_length, dtype, load_in_4bit, load_in_8bit, full_finetuning, token, device_map, rope_scaling, fix_tokenizer, trust_remote_code, use_gradient_checkpointing, resize_model_vocab, revision, return_logits, fullgraph, use_exact_model_name, auto_model, whisper_language, whisper_task, unsloth_force_compile, *args, **kwargs)
854 if auto_model is None:
855 auto_model = AutoModelForVision2Seq if is_vlm else AutoModelForCausalLM
--> 857 model, tokenizer = FastBaseModel.from_pretrained(
858 model_name = model_name,
859 max_seq_length = max_seq_length,
860 dtype = _get_dtype(dtype),
861 load_in_4bit = load_in_4bit,
862 load_in_8bit = load_in_8bit,
863 full_finetuning = full_finetuning,
864 token = token,
865 device_map = device_map,
866 trust_remote_code = trust_remote_code,
867 revision = revision if not is_peft else None,
868 model_types = model_types,
869 tokenizer_name = tokenizer_name,
870 auto_model = auto_model,
871 use_gradient_checkpointing = use_gradient_checkpointing,
872 supports_sdpa = supports_sdpa,
873 whisper_language = whisper_language,
874 whisper_task = whisper_task,
875 *args, **kwargs,
876 )
878 if resize_model_vocab is not None:
879 model.resize_token_embeddings(resize_model_vocab)
File C:\MiniForge3\envs\gpt\Lib\site-packages\unsloth\models\vision.py:498, in FastBaseModel.from_pretrained(model_name, max_seq_length, dtype, load_in_4bit, load_in_8bit, full_finetuning, token, device_map, trust_remote_code, model_types, tokenizer_name, auto_model, use_gradient_checkpointing, supports_sdpa, whisper_language, whisper_task, **kwargs)
490 tokenizer = auto_processor.from_pretrained(
491 tokenizer_name,
492 padding_side = "right",
(...) 495 task = whisper_task,
496 )
497 else:
--> 498 tokenizer = auto_processor.from_pretrained(
499 tokenizer_name,
500 padding_side = "right",
501 token = token,
502 )
503 if hasattr(tokenizer, "tokenizer"):
504 __tokenizer = tokenizer.tokenizer
File C:\MiniForge3\envs\gpt\Lib\site-packages\transformers\models\auto\tokenization_auto.py:1078, in AutoTokenizer.from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs)
1076 config = AutoConfig.for_model(**config_dict)
1077 else:
-> 1078 config = AutoConfig.from_pretrained(
1079 pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
1080 )
1081 config_tokenizer_class = config.tokenizer_class
1082 if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map:
File C:\MiniForge3\envs\gpt\Lib\site-packages\transformers\models\auto\configuration_auto.py:1297, in AutoConfig.from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
1295 else:
1296 upstream_repo = None
-> 1297 trust_remote_code = resolve_trust_remote_code(
1298 trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo
1299 )
1301 if has_remote_code and trust_remote_code:
1302 config_class = get_class_from_dynamic_module(
1303 class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs
1304 )
File C:\MiniForge3\envs\gpt\Lib\site-packages\transformers\dynamic_module_utils.py:734, in resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code, error_message, upstream_repo)
731 signal.alarm(0)
732 except Exception:
733 # OS which does not support signal.SIGALRM
--> 734         raise ValueError(
    735             f"{error_message} You can inspect the repository content at https://hf.co/{model_name}.\n"
    736             f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
    737         )
738 finally:
739 if prev_sig_handler is not None:
ValueError: The repository thuml/timer-base-84m contains custom code which must be executed to correctly load the model.
You can inspect the repository content at https://hf.co/thuml/timer-base-84m.
Please pass the argument `trust_remote_code=True` to allow custom code to be run.