@@ -47,9 +47,9 @@ def main(
     # compilation fails as it does not support torch.complex64 for RoPE
     # compile: bool = False,
     accelerator: str = "auto",
-    adapter_path: Optional[Path] = None,
-    checkpoint_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    adapter_path: Path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth"),
+    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     dtype: str = "float32",
     quantize: Optional[str] = None,
 ) -> None:
@@ -68,13 +68,6 @@ def main(
         ``"llm.int8"``: LLM.int8() mode,
         ``"gptq.int4"``: GPTQ 4-bit mode.
     """
-    if not adapter_path:
-        adapter_path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth")
-    if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
-
     assert adapter_path.is_file()
     assert checkpoint_path.is_file()
     assert tokenizer_path.is_file()
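The change replaces the `Optional[Path] = None` parameters and their in-body fallbacks with the real paths as signature defaults, so the defaults become visible to introspection (and in `--help` output when the function is exposed as a CLI), and the stray `f`-prefix on the old checkpoint fallback goes away with it. A minimal sketch of the resulting pattern, assuming the script is exposed through jsonargparse's `CLI` as elsewhere in lit-llama (the script and flag names here are illustrative):

```python
from pathlib import Path
from typing import Optional


def main(
    # Defaults live in the signature, so `--help` shows the real
    # path instead of `None`, and no Optional narrowing is needed
    # before calling `.is_file()`.
    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
    quantize: Optional[str] = None,
) -> None:
    assert checkpoint_path.is_file(), f"missing checkpoint: {checkpoint_path}"


if __name__ == "__main__":
    from jsonargparse import CLI

    # e.g. python script.py --checkpoint_path other/lit-llama.pth
    CLI(main)
```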