
Commit 333deaa

Set default paths in the signatures (#241)

1 parent: 5d6472e

File tree: 9 files changed (+22, -65 lines)
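
All nine files apply the same pattern: a path argument typed Optional[Path] = None, with an "if not ...:" fallback buried in the function body, becomes a plain Path parameter whose default lives in the signature. A minimal before/after sketch of the pattern (run_before and run_after are hypothetical names, not functions from the scripts below):

from pathlib import Path
from typing import Optional


# Before: the signature advertises Optional[Path], but None is never
# meaningfully handled downstream; the real default hides in the body.
def run_before(checkpoint_path: Optional[Path] = None) -> None:
    if not checkpoint_path:
        checkpoint_path = Path("checkpoints/lit-llama/7B/lit-llama.pth")
    assert checkpoint_path.is_file()


# After: the default is visible in the signature. Path is immutable,
# so reusing one default instance across calls is safe (unlike a
# mutable default such as a list).
def run_after(
    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
) -> None:
    assert checkpoint_path.is_file()

A side benefit: signature-driven CLI wrappers (such as jsonargparse's CLI, which lit-llama uses to expose these functions) can now show the real default in the generated help text instead of None.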

evaluate.py
Lines changed: 2 additions & 6 deletions

@@ -45,8 +45,8 @@ def main(
     # compilation fails as it does not support torch.complex64 for RoPE
     # compile: bool = False,
     accelerator: str = "auto",
-    checkpoint_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     dtype: str = "float32",
     quantize: Optional[str] = None,
 ) -> None:
@@ -63,10 +63,6 @@ def main(
         ``"llm.int8"``: LLM.int8() mode,
         ``"gptq.int4"``: GPTQ 4-bit mode.
     """
-    if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
     assert checkpoint_path.is_file()
     assert tokenizer_path.is_file()

evaluate_adapter.py
Lines changed: 3 additions & 10 deletions

@@ -47,9 +47,9 @@ def main(
     # compilation fails as it does not support torch.complex64 for RoPE
     # compile: bool = False,
     accelerator: str = "auto",
-    adapter_path: Optional[Path] = None,
-    checkpoint_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    adapter_path: Path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth"),
+    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     dtype: str = "float32",
     quantize: Optional[str] = None,
 ) -> None:
@@ -68,13 +68,6 @@ def main(
         ``"llm.int8"``: LLM.int8() mode,
         ``"gptq.int4"``: GPTQ 4-bit mode.
     """
-    if not adapter_path:
-        adapter_path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth")
-    if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
-
     assert adapter_path.is_file()
     assert checkpoint_path.is_file()
     assert tokenizer_path.is_file()

evaluate_full.py
Lines changed: 2 additions & 4 deletions

@@ -46,7 +46,7 @@ def main(
     # compile: bool = False,
     accelerator: str = "auto",
     checkpoint_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     model_size: str = "7B",
     dtype: str = "float32",
     quantize: Optional[str] = None,
@@ -65,9 +65,7 @@
         ``"gptq.int4"``: GPTQ 4-bit mode.
     """
     if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/{model_size}/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+        checkpoint_path = Path(f"checkpoints/lit-llama/{model_size}/lit-llama.pth")
     assert checkpoint_path.is_file()
     assert tokenizer_path.is_file()
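
evaluate_full.py (and generate_full.py below) is the one case where checkpoint_path keeps its Optional[Path] = None annotation: its default depends on the model_size argument, so it cannot be a static signature default and is still computed in the body; the commit only drops the "./" prefix and the tokenizer_path fallback. A sketch of why (run is again a hypothetical name):

from pathlib import Path
from typing import Optional


def run(model_size: str = "7B", checkpoint_path: Optional[Path] = None) -> None:
    # Signature defaults are evaluated once, at definition time, so a
    # default that depends on another argument must stay a runtime fallback.
    if not checkpoint_path:
        checkpoint_path = Path(f"checkpoints/lit-llama/{model_size}/lit-llama.pth")
    assert checkpoint_path.is_file()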

evaluate_lora.py
Lines changed: 3 additions & 9 deletions

@@ -51,9 +51,9 @@ def main(
     # compilation fails as it does not support torch.complex64 for RoPE
     # compile: bool = False,
     accelerator: str = "auto",
-    lora_path: Optional[Path] = None,
-    checkpoint_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    lora_path: Path = Path("out/lora/alpaca/lit-llama-lora-finetuned.pth"),
+    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     dtype: str = "float32",
     quantize: Optional[str] = None,
 ) -> None:
@@ -73,12 +73,6 @@ def main(
         ``"llm.int8"``: LLM.int8() mode,
         ``"gptq.int4"``: GPTQ 4-bit mode.
     """
-    if not lora_path:
-        lora_path = Path("out/lora/alpaca/lit-llama-lora-finetuned.pth")
-    if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
     assert lora_path.is_file()
     assert checkpoint_path.is_file()
     assert tokenizer_path.is_file()

generate.py
Lines changed: 2 additions & 6 deletions

@@ -77,8 +77,8 @@ def main(
     max_new_tokens: int = 50,
     top_k: int = 200,
     temperature: float = 0.8,
-    checkpoint_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     quantize: Optional[str] = None,
 ) -> None:
     """Generates text samples based on a pre-trained LLaMA model and tokenizer.
@@ -96,10 +96,6 @@ def main(
         ``"llm.int8"``: LLM.int8() mode,
         ``"gptq.int4"``: GPTQ 4-bit mode.
     """
-    if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
     assert checkpoint_path.is_file(), checkpoint_path
     assert tokenizer_path.is_file(), tokenizer_path

generate_adapter.py
Lines changed: 3 additions & 10 deletions

@@ -17,9 +17,9 @@
 def main(
     prompt: str = "What food do lamas eat?",
     input: str = "",
-    adapter_path: Optional[Path] = None,
-    pretrained_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    adapter_path: Path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth"),
+    pretrained_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     quantize: Optional[str] = None,
     max_new_tokens: int = 100,
     top_k: int = 200,
@@ -44,13 +44,6 @@ def main(
         temperature: A value controlling the randomness of the sampling process. Higher values result in more random
             samples.
     """
-    if not adapter_path:
-        adapter_path = Path("out/adapter/alpaca/lit-llama-adapter-finetuned.pth")
-    if not pretrained_path:
-        pretrained_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
-
     assert adapter_path.is_file()
     assert pretrained_path.is_file()
     assert tokenizer_path.is_file()

generate_full.py
Lines changed: 2 additions & 4 deletions

@@ -78,7 +78,7 @@ def main(
     top_k: int = 200,
     temperature: float = 0.8,
     checkpoint_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     model_size: str = "7B",
     quantize: Optional[str] = None,
 ) -> None:
@@ -99,9 +99,7 @@
         ``"gptq.int4"``: GPTQ 4-bit mode.
     """
     if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/{model_size}/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
+        checkpoint_path = Path(f"checkpoints/lit-llama/{model_size}/lit-llama.pth")
     assert checkpoint_path.is_file(), checkpoint_path
     assert tokenizer_path.is_file(), tokenizer_path

generate_lora.py
Lines changed: 3 additions & 10 deletions

@@ -21,9 +21,9 @@
 def main(
     prompt: str = "What food do lamas eat?",
     input: str = "",
-    lora_path: Optional[Path] = None,
-    pretrained_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    lora_path: Path = Path("out/lora/alpaca/lit-llama-lora-finetuned.pth"),
+    pretrained_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     quantize: Optional[str] = None,
     dtype: str = "float32",
     max_new_tokens: int = 100,
@@ -50,13 +50,6 @@ def main(
         temperature: A value controlling the randomness of the sampling process. Higher values result in more random
             samples.
     """
-    if not lora_path:
-        lora_path = Path("out/lora/alpaca/lit-llama-lora-finetuned.pth")
-    if not pretrained_path:
-        pretrained_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
-
     assert lora_path.is_file()
     assert pretrained_path.is_file()
     assert tokenizer_path.is_file()

quantize.py
Lines changed: 2 additions & 6 deletions

@@ -136,9 +136,9 @@ def llama_blockwise_quantization(

 def main(
     *,
-    checkpoint_path: Optional[Path] = None,
+    checkpoint_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"),
     output_path: Optional[Path] = None,
-    tokenizer_path: Optional[Path] = None,
+    tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"),
     n_samples: int = 128,
     dtype: str = "float32",
     quantize: Optional[str] = None,
@@ -156,10 +156,6 @@ def main(
         ``"gptq.int4"``: GPTQ 4-bit mode.
         Note that ``"llm.int8"``` does not need a quantization step.
     """
-    if not checkpoint_path:
-        checkpoint_path = Path(f"./checkpoints/lit-llama/7B/lit-llama.pth")
-    if not tokenizer_path:
-        tokenizer_path = Path("./checkpoints/lit-llama/tokenizer.model")
     assert checkpoint_path.is_file()
     assert tokenizer_path.is_file()
     assert output_path.parent.is_dir() and (
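
Note that output_path keeps its Optional[Path] = None annotation here, presumably because it has no single obvious default value, so this commit leaves it untouched.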
