21 changes: 12 additions & 9 deletions modules/exllamav2.py
@@ -51,18 +51,21 @@ def from_pretrained(self, path_to_model):
 
         model = ExLlamaV2(config)
 
-        split = None
-        if shared.args.gpu_split:
-            split = [float(alloc) for alloc in shared.args.gpu_split.split(",")]
-
-        model.load(split)
-
-        tokenizer = ExLlamaV2Tokenizer(config)
         if shared.args.cache_8bit:
-            cache = ExLlamaV2Cache_8bit(model)
+            cache = ExLlamaV2Cache_8bit(model, lazy=True)
         else:
-            cache = ExLlamaV2Cache(model)
+            cache = ExLlamaV2Cache(model, lazy=True)
 
+        if shared.args.autosplit:
+            model.load_autosplit(cache)
+        else:
+            split = None
+            if shared.args.gpu_split:
+                split = [float(alloc) for alloc in shared.args.gpu_split.split(",")]
+
+            model.load(split)
+
+        tokenizer = ExLlamaV2Tokenizer(config)
         generator = ExLlamaV2StreamingGenerator(model, cache, tokenizer)
 
         result = self()
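For context, the `--autosplit` path added above follows the loading pattern documented by the exllamav2 library: the cache is created with `lazy=True` so its tensors are not allocated up front, and `load_autosplit()` then places the model's layers across the available GPUs while reserving room for that cache. A minimal standalone sketch of that pattern (the model path is a placeholder):

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config, ExLlamaV2Tokenizer

config = ExLlamaV2Config()
config.model_dir = "/path/to/exl2-model"  # placeholder path
config.prepare()

model = ExLlamaV2(config)

# The lazy cache defers tensor allocation until layers have been assigned to
# devices; load_autosplit() fills each GPU in turn, leaving room for the cache.
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)

tokenizer = ExLlamaV2Tokenizer(config)
```

When `--autosplit` is not set, the loader keeps the previous behaviour and calls `model.load(split)` with the values parsed from `--gpu-split`.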
20 changes: 12 additions & 8 deletions modules/exllamav2_hf.py
@@ -37,18 +37,22 @@ def __init__(self, config: ExLlamaV2Config):
         super().__init__(PretrainedConfig())
         self.ex_config = config
         self.ex_model = ExLlamaV2(config)
-        split = None
-        if shared.args.gpu_split:
-            split = [float(alloc) for alloc in shared.args.gpu_split.split(",")]
-
-        self.ex_model.load(split)
-        self.generation_config = GenerationConfig()
         self.loras = None
+        self.generation_config = GenerationConfig()
 
         if shared.args.cache_8bit:
-            self.ex_cache = ExLlamaV2Cache_8bit(self.ex_model)
+            self.ex_cache = ExLlamaV2Cache_8bit(self.ex_model, lazy=True)
         else:
-            self.ex_cache = ExLlamaV2Cache(self.ex_model)
+            self.ex_cache = ExLlamaV2Cache(self.ex_model, lazy=True)
 
+        if shared.args.autosplit:
+            self.ex_model.load_autosplit(self.ex_cache)
+        else:
+            split = None
+            if shared.args.gpu_split:
+                split = [float(alloc) for alloc in shared.args.gpu_split.split(",")]
+
+            self.ex_model.load(split)
+
         self.past_seq = None
         if shared.args.cfg_cache:
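The HF wrapper's constructor now branches exactly like modules/exllamav2.py. A rough sketch of the shared logic, written as a hypothetical helper rather than anything in this PR:

```python
# Hypothetical helper mirroring the branch added to both loaders: use
# exllamav2's autosplit when requested, otherwise fall back to an explicit
# per-GPU split parsed from the --gpu-split string (e.g. "20,7,7").
def load_with_split(model, cache, gpu_split, autosplit):
    if autosplit:
        # The lazily created cache is allocated while layers are being placed.
        model.load_autosplit(cache)
    else:
        split = None
        if gpu_split:
            split = [float(alloc) for alloc in gpu_split.split(",")]
        model.load(split)
```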
2 changes: 2 additions & 0 deletions modules/loaders.py
@@ -78,6 +78,7 @@
         'no_flash_attn',
         'num_experts_per_token',
         'cache_8bit',
+        'autosplit',
         'alpha_value',
         'compress_pos_emb',
         'trust_remote_code',
@@ -89,6 +90,7 @@
         'no_flash_attn',
         'num_experts_per_token',
         'cache_8bit',
+        'autosplit',
         'alpha_value',
         'compress_pos_emb',
         'exllamav2_info',
1 change: 1 addition & 0 deletions modules/shared.py
@@ -134,6 +134,7 @@
 # ExLlamaV2
 group = parser.add_argument_group('ExLlamaV2')
 group.add_argument('--gpu-split', type=str, help='Comma-separated list of VRAM (in GB) to use per GPU device for model layers. Example: 20,7,7.')
+group.add_argument('--autosplit', action='store_true', help='Autosplit the model tensors across the available GPUs. This causes --gpu-split to be ignored.')
 group.add_argument('--max_seq_len', type=int, default=2048, help='Maximum sequence length.')
 group.add_argument('--cfg-cache', action='store_true', help='ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.')
 group.add_argument('--no_flash_attn', action='store_true', help='Force flash-attention to not be used.')
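For illustration, the new flag slots into the existing argparse group, and argparse exposes `--gpu-split` under the attribute `gpu_split`. A minimal standalone sketch (not the project's full parser):

```python
import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('ExLlamaV2')
group.add_argument('--gpu-split', type=str, help='Comma-separated list of VRAM (in GB) to use per GPU device for model layers. Example: 20,7,7.')
group.add_argument('--autosplit', action='store_true', help='Autosplit the model tensors across the available GPUs. This causes --gpu-split to be ignored.')

args = parser.parse_args(['--autosplit'])
print(args.autosplit, args.gpu_split)  # True None
```

On the command line this would typically be enabled with something like `python server.py --loader exllamav2 --autosplit`, assuming the project's usual `server.py` entry point.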
1 change: 1 addition & 0 deletions modules/ui.py
@@ -76,6 +76,7 @@ def list_model_elements():
         'no_flash_attn',
         'num_experts_per_token',
         'cache_8bit',
+        'autosplit',
         'threads',
         'threads_batch',
         'n_batch',
1 change: 1 addition & 0 deletions modules/ui_model_menu.py
@@ -132,6 +132,7 @@ def create_ui():
         shared.gradio['disk'] = gr.Checkbox(label="disk", value=shared.args.disk)
         shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)
         shared.gradio['cache_8bit'] = gr.Checkbox(label="cache_8bit", value=shared.args.cache_8bit, info='Use 8-bit cache to save VRAM.')
+        shared.gradio['autosplit'] = gr.Checkbox(label="autosplit", value=shared.args.autosplit, info='Automatically split the model tensors across the available GPUs.')
         shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn, info='Force flash-attention to not be used.')
         shared.gradio['cfg_cache'] = gr.Checkbox(label="cfg-cache", value=shared.args.cfg_cache, info='Necessary to use CFG with this loader.')
         shared.gradio['num_experts_per_token'] = gr.Number(label="Number of experts per token", value=shared.args.num_experts_per_token, info='Only applies to MoE models like Mixtral.')