# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

| 15 | +from typing import Optional |
| 16 | + |
| 17 | +import pytorch_lightning as pl |
| 18 | +import torch |
| 19 | +import torch.distributed |
| 20 | +from megatron.core.inference.common_inference_params import CommonInferenceParams |
| 21 | +from megatron.core.inference.model_inference_wrappers.inference_wrapper_config import InferenceWrapperConfig |
| 22 | + |
| 23 | +import nemo.lightning as nl |
| 24 | +from nemo.collections import vlm |
| 25 | +from nemo.collections.vlm.inference.vlm_inference_wrapper import VLMInferenceWrapper |
| 26 | +from nemo.collections.vlm.inference.vlm_inference_controller import VLMTextGenerationController |
| 27 | +from nemo.collections.vlm.inference.vlm_engine import VLMEngine |
| 28 | + |
def _setup_trainer_and_restore_model(path: str, trainer: nl.Trainer, model: pl.LightningModule):
    """Restore checkpoint weights from ``path`` into ``model`` via the trainer's fabric.

    Returns the model object produced by ``fabric.load_model``.
    """
    return trainer.to_fabric().load_model(path, model)
| 33 | + |
def setup_model_and_tokenizer(
    path: str,
    trainer: Optional[nl.Trainer] = None,
    params_dtype: torch.dtype = torch.bfloat16,
    inference_batch_times_seqlen_threshold: int = 1000,
    model_id: str = "meta-llama/Llama-3.2-11B-Vision-Instruct",
):
    """Build an MLlama model, restore its checkpoint, and wrap it for inference.

    Args:
        path: Path to the NeMo checkpoint to restore weights from.
        trainer: Trainer whose fabric is used to load the checkpoint. Despite the
            ``Optional`` annotation a trainer is required; ``None`` now raises a
            clear ``ValueError`` instead of an opaque ``AttributeError``.
        params_dtype: Dtype the model parameters are cast to before inference.
        inference_batch_times_seqlen_threshold: Threshold forwarded to the
            Megatron ``InferenceWrapperConfig``.
        model_id: HuggingFace model id used to fetch the processor/tokenizer.
            NOTE(review): the model architecture is still fixed to
            ``MLlamaConfig11BInstruct``; pass an id whose tokenizer matches.

    Returns:
        Tuple of ``(VLMInferenceWrapper, AutoProcessor)``.

    Raises:
        ValueError: If ``trainer`` is ``None``.
    """
    if trainer is None:
        raise ValueError("setup_model_and_tokenizer requires a configured nl.Trainer")

    # Local import: transformers is only needed here, and keeping it lazy
    # avoids paying the import cost when this helper is unused.
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained(model_id)
    tokenizer = processor.tokenizer

    config = vlm.MLlamaConfig11BInstruct()
    model = vlm.MLlamaModel(config, tokenizer=tokenizer)
    # Loads checkpoint weights into `model`; the wrapped return value is
    # intentionally unused — presumably the load mutates `model` in place.
    # TODO(review): confirm fabric.load_model's in-place behavior.
    _setup_trainer_and_restore_model(path=path, trainer=trainer, model=model)

    # Move the underlying Megatron-core module to GPU in the requested dtype.
    mcore_model = model.module.cuda()
    mcore_model = mcore_model.to(params_dtype)
    inference_wrapped_model = VLMInferenceWrapper(
        mcore_model,
        InferenceWrapperConfig(
            hidden_size=config.language_model_config.hidden_size,
            params_dtype=params_dtype,
            inference_batch_times_seqlen_threshold=inference_batch_times_seqlen_threshold,
            padded_vocab_size=tokenizer.vocab_size,
        ),
    )

    return inference_wrapped_model, processor
| 64 | + |
| 65 | + |
def generate(
    model: VLMInferenceWrapper,
    processor,
    prompts: list[str],
    images,
    max_batch_size: int = 4,
    random_seed: Optional[int] = None,
    inference_params: Optional[CommonInferenceParams] = None,
) -> dict:
    """Generate text for the given prompts and images with a VLM engine.

    Args:
        model: Inference-wrapped VLM to run generation with.
        processor: Processor handed to the text-generation controller.
        prompts: Text prompts, one per request.
        images: Image inputs forwarded to the engine alongside the prompts.
        max_batch_size: Maximum number of requests batched per engine step.
        random_seed: Seed passed to the engine for reproducible sampling.
        inference_params: Sampling/generation parameters; defaults to
            ``CommonInferenceParams(num_tokens_to_generate=50)`` when omitted.

    Returns:
        The engine's generation results.
    """
    controller = VLMTextGenerationController(inference_wrapped_model=model, processor=processor)
    engine = VLMEngine(
        text_generation_controller=controller,
        max_batch_size=max_batch_size,
        random_seed=random_seed,
    )

    # Fall back to a 50-token generation budget when the caller gave no params.
    params = inference_params if inference_params is not None else CommonInferenceParams(num_tokens_to_generate=50)

    return engine.generate(
        prompts=prompts,
        images=images,
        common_inference_params=params,
    )