GPU Inference from IPython #289

Open
Rajmehta123 opened this issue Sep 15, 2023 · 0 comments

Rajmehta123 commented Sep 15, 2023

Hello. Thanks for this amazing work. How do I run multi-GPU inference from IPython rather than through the WebUI?

At present, I am implementing it this way. The model is a 16k-context, 4-bit quantized Vicuna.

config.auto_map = [20.0, 20.0, 20.0, 20.0]  # setting this for multi-GPU

But it still loads the model onto just one GPU and goes OOM during inference.

from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig, ExLlamaDeviceMap
from exllama.tokenizer import ExLlamaTokenizer
from exllama.generator import ExLlamaGenerator
import os, glob
from datetime import datetime

# Directory containing model, tokenizer, generator

model_directory =  "/home/ec2-user/.gccc/acc"

# Locate files we need within that directory

tokenizer_path = os.path.join(model_directory, "tokenizer.model")
model_config_path = os.path.join(model_directory, "config.json")
st_pattern = os.path.join(model_directory, "*.safetensors")
model_path = glob.glob(st_pattern)[0]

# Create config, model, tokenizer and generator

config = ExLlamaConfig(model_config_path)               # create config from config.json
config.model_path = model_path                          # supply path to model weights file
config.max_seq_len = 16384
config.max_input_len = 4096  # Maximum length of input IDs in a single forward pass. Sequences longer than this will be processed in multiple steps
config.max_attention_size = 4096**2  # Sequences will be processed in chunks to keep the size of the attention weights matrix <= this
config.compress_pos_emb = 2.0  # Increase to compress positional embeddings applied to sequence
config.alpha_value = 2.0  # NTK RoPE scaling alpha
config.auto_map = [20.0, 20.0, 20.0, 20.0]  # target VRAM allocation per GPU, in GB

model = ExLlama(config)                                 # create ExLlama instance and load the weights
tokenizer = ExLlamaTokenizer(tokenizer_path)            # create tokenizer from tokenizer model file

cache = ExLlamaCache(model)                             # create cache for inference
generator = ExLlamaGenerator(model, tokenizer, cache)   # create generator

# Configure generator

generator.disallow_tokens([tokenizer.eos_token_id])

generator.settings.token_repetition_penalty_max = 1.2
generator.settings.temperature = 0.05
generator.settings.top_p = 0.1
generator.settings.top_k = 40
generator.settings.typical = 0.0

prompt = f"""A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. The assistant always answer
    the user questions from the input ONLY.
    USER: {14k context TEXT}
    ASSISTANT:"""

print("\n\n*** Generate:")

output = generator.generate_simple(prompt, max_new_tokens = 700)

print(output.split('ASSISTANT')[-1])

end = datetime.now()

print("Time it took: ", str(end - start))
