
Commit cfe4d4e

Tidy up project structure
1 parent 333deaa commit cfe4d4e

21 files changed: +71 −161 lines changed

evaluate.py

Lines changed: 0 additions & 141 deletions
This file was deleted.

evaluate_adapter.py renamed to evaluate/adapter.py

Lines changed: 4 additions & 0 deletions
@@ -10,6 +10,10 @@
 import torch
 import tqdm

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from lit_llama import Tokenizer
 from lit_llama.adapter import LLaMA
 from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
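
The recurring change across these files is a three-line bootstrap (plus `import sys` / `from pathlib import Path` where those were missing): resolve the repository root from the script's own location and append it to `sys.path`, so the top-level `lit_llama` package and sibling scripts import cleanly even when the checkout has not been installed with `pip install -e .`. A minimal standalone sketch of the pattern, assuming a script that sits one directory below the root of a lit-llama checkout:

import sys
from pathlib import Path

# __file__ is e.g. <repo>/evaluate/adapter.py; two .parent hops reach <repo>.
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))

# With <repo> on sys.path, project imports resolve without installation.
from lit_llama import Tokenizer  # noqa: E402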

evaluate_full.py renamed to evaluate/full.py

Lines changed: 4 additions & 0 deletions
@@ -10,6 +10,10 @@
 import torch
 import tqdm

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from lit_llama import LLaMA, Tokenizer
 from lit_llama.utils import EmptyInitOnDevice

evaluate_lora.py renamed to evaluate/lora.py

Lines changed: 4 additions & 0 deletions
@@ -10,6 +10,10 @@
 import torch
 import tqdm

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from lit_llama import LLaMA, Tokenizer
 from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
 from lit_llama.lora import lora

finetune_adapter.py renamed to finetune/adapter.py

Lines changed: 5 additions & 0 deletions
@@ -12,6 +12,7 @@
 `torch.backends.cuda.enable_flash_sdp(False)` in the script below (see https://github.com/Lightning-AI/lit-llama/issues/101).
 """
 import os
+import sys
 import time
 from pathlib import Path
 import shutil
@@ -20,6 +21,10 @@
 import numpy as np
 import torch

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from generate import generate
 from lit_llama.adapter import LLaMA, LLaMAConfig, mark_only_adapter_as_trainable, adapter_state_from_state_dict
 from lit_llama.tokenizer import Tokenizer

finetune_full.py renamed to finetune/full.py

Lines changed: 6 additions & 0 deletions
@@ -4,6 +4,8 @@
 Note: If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
 `torch.backends.cuda.enable_flash_sdp(False)` in the script below (see https://github.com/Lightning-AI/lit-llama/issues/101).
 """
+import sys
+from pathlib import Path
 import os
 import time
 from functools import partial
@@ -14,6 +16,10 @@
 import torch
 from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from generate import generate
 from lit_llama.model import Block, LLaMA, LLaMAConfig
 from lit_llama.tokenizer import Tokenizer

finetune_lora.py renamed to finetune/lora.py

Lines changed: 6 additions & 0 deletions
@@ -4,13 +4,19 @@
 Note: If you run into a CUDA error "Expected is_sm80 to be true, but got false", uncomment the line
 `torch.backends.cuda.enable_flash_sdp(False)` in the script below (see https://github.com/Lightning-AI/lit-llama/issues/101).
 """
+import sys
+from pathlib import Path
 import os
 import time

 import lightning as L
 import numpy as np
 import torch

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from generate import generate
 from lit_llama.lora import mark_only_lora_as_trainable, lora, lora_state_dict
 from lit_llama.model import LLaMA, LLaMAConfig
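
Note the placement in each hunk: the bootstrap lands after the third-party imports but before any project import (`generate`, `lit_llama.*`), because it must execute before Python tries to resolve those modules. A sketch of why the ordering matters, assuming a fresh, uninstalled checkout where `finetune/lora.py` is launched as `python finetune/lora.py` (sys.path then starts with `finetune/`, not the repo root):

import sys
from pathlib import Path

# Moving this project import above the sys.path tweak would raise
# ModuleNotFoundError: No module named 'generate'.
sys.path.append(str(Path(__file__).parent.parent.resolve()))
from generate import generate  # noqa: E402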

generate.py

Lines changed: 4 additions & 0 deletions
@@ -7,6 +7,10 @@
 import lightning as L
 import torch

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from lit_llama import LLaMA, Tokenizer
 from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup

generate_adapter.py renamed to generate/adapter.py

Lines changed: 4 additions & 0 deletions
@@ -7,6 +7,10 @@
 import lightning as L
 import torch

+# support running without installing as a package
+wd = Path(__file__).parent.parent.resolve()
+sys.path.append(str(wd))
+
 from generate import generate
 from lit_llama import Tokenizer
 from lit_llama.adapter import LLaMA
File renamed without changes.
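
Net effect of the restructure: the flat `evaluate_*.py`, `finetune_*.py`, and `generate_adapter.py` scripts move into `evaluate/`, `finetune/`, and `generate/` subdirectories (`evaluate.py` is deleted and `generate.py` stays at the root), so an invocation such as `python finetune_lora.py` becomes `python finetune/lora.py`, with the `sys.path` bootstrap keeping every script runnable from a plain checkout.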
