forked from PaddlePaddle/PaddleMIX
Commit d1ac2a1 (1 parent: be7c973): 【PPMix No.39】mPLUG-Owl3 inference (PaddlePaddle#865)
Co-authored-by: [email protected] <[email protected]>
Showing 11 changed files with 3,124 additions and 4 deletions.
@@ -0,0 +1,42 @@

# mPLUG-Owl3

## 1. Model Introduction

**Model weights supported by this repository:**

| Model |
|--------------------|
| mPLUG/mPLUG-Owl3-7B-241101 |

Note: the name matches the Hugging Face weights, but the weights are Tensors in the Paddle framework. Calling `xxx.from_pretrained("mPLUG/mPLUG-Owl3-7B-241101")` automatically downloads the weight folder into the cache directory.
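For example, the following minimal sketch (mirroring `run_inference.py` from this commit) triggers the automatic download on first use:

```python
import paddle
from paddlenlp.transformers import Qwen2Tokenizer

from paddlemix.models.mPLUGOwl3.modeling_mplugowl3 import mPLUGOwl3Model

# The first call downloads mPLUG/mPLUG-Owl3-7B-241101 into the local cache.
model = mPLUGOwl3Model.from_pretrained("mPLUG/mPLUG-Owl3-7B-241101", dtype=paddle.bfloat16).eval()
tokenizer = Qwen2Tokenizer.from_pretrained("mPLUG/mPLUG-Owl3-7B-241101")
```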
## 2. Environment Setup

1) [Install the PaddleMIX environment dependencies](https://github.com/PaddlePaddle/PaddleMIX/tree/develop?tab=readme-ov-file#%E5%AE%89%E8%A3%85)

2) `pip install pillow tqdm paddlenlp==3.0.0b2`

Note: Python 3.10 or later is recommended.

## 3. Quick Start

### Inference

```bash
# Image understanding
CUDA_VISIBLE_DEVICES=0 python paddlemix/examples/mPLUG_Owl3/run_inference.py
```
### References

```BibTeX
@misc{ye2024mplugowl3longimagesequenceunderstanding,
      title={mPLUG-Owl3: Towards Long Image-Sequence Understanding in Multi-Modal Large Language Models},
      author={Jiabo Ye and Haiyang Xu and Haowei Liu and Anwen Hu and Ming Yan and Qi Qian and Ji Zhang and Fei Huang and Jingren Zhou},
      year={2024},
      eprint={2408.04840},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2408.04840},
}
```
@@ -0,0 +1,3 @@

pillow
tqdm
paddlenlp==3.0.0b2
@@ -0,0 +1,46 @@

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddlenlp.transformers import Qwen2Tokenizer
from PIL import Image

from paddlemix.models.mPLUGOwl3.configuration_mplugowl3 import mPLUGOwl3Config
from paddlemix.models.mPLUGOwl3.modeling_mplugowl3 import mPLUGOwl3Model

model_path = "mPLUG/mPLUG-Owl3-7B-241101"

# Load the config, model (in bfloat16), tokenizer, and multimodal processor.
config = mPLUGOwl3Config.from_pretrained(model_path)
model = mPLUGOwl3Model.from_pretrained(model_path, dtype=paddle.bfloat16).eval()
tokenizer = Qwen2Tokenizer.from_pretrained(model_path)
processor = model.init_processor(tokenizer)

# image = Image.new('RGB', (500, 500), color='red')
image = Image.open("paddlemix/demo_images/examples_image1.jpg").convert("RGB")

# One <|image|> placeholder in the prompt per image passed to the processor.
messages = [{"role": "user", "content": """<|image|>Describe this image."""}, {"role": "assistant", "content": ""}]

inputs = processor(messages, images=[image], videos=None)
inputs["pixel_values"] = inputs["pixel_values"].cast(paddle.bfloat16)

inputs.update(
    {
        "tokenizer": tokenizer,
        "max_new_tokens": 512,
        "decode_text": True,
    }
)

res = model.generate(**inputs)
print("output:\n", res)
@@ -0,0 +1,19 @@

# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .configuration_hyper_qwen2 import *
from .configuration_mplugowl3 import *
from .modeling_hyper_qwen2 import *
from .modeling_mplugowl3 import *
from .modeling_navit_siglip import *
paddlemix/models/mPLUGOwl3/configuration_hyper_qwen2.py (141 additions, 0 deletions)
@@ -0,0 +1,141 @@

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddlenlp.transformers import PretrainedConfig


class HyperQwen2Config(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
    Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 151936):
            Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Qwen2Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22016):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        use_sliding_window (`bool`, *optional*, defaults to `False`):
            Whether to use sliding window attention.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention (SWA) window size. If not specified, will default to `4096`.
        max_window_layers (`int`, *optional*, defaults to 28):
            The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use
            full attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from paddlenlp.transformers import Qwen2Model, Qwen2Config
    >>> # Initializing a Qwen2 style configuration
    >>> configuration = Qwen2Config()
    >>> # Initializing a model from the Qwen2-7B style configuration
    >>> model = Qwen2Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=151936,
        hidden_size=4096,
        intermediate_size=22016,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        use_sliding_window=False,
        sliding_window=4096,
        max_window_layers=28,
        attention_dropout=0.0,
        hyper_layers=[1, 9, 17, 25],
        vision_batch_size=16,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window if use_sliding_window else None
        self.max_window_layers = max_window_layers
        self.rope_scaling = rope_scaling
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.hyper_layers = hyper_layers
        self.vision_batch_size = vision_batch_size
        self.seq_length = 1  # self.max_length
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
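As a quick illustration of the GQA bookkeeping described in the docstring, instantiating the config makes the head grouping explicit. A minimal sketch using only fields defined in this file:

```python
from paddlemix.models.mPLUGOwl3.configuration_hyper_qwen2 import HyperQwen2Config

config = HyperQwen2Config(num_attention_heads=32, num_key_value_heads=8)
# With GQA, each key/value head serves a group of query heads.
print(config.num_attention_heads // config.num_key_value_heads)  # 4 query heads per KV head
print(config.hyper_layers)  # [1, 9, 17, 25] by default
```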
@@ -0,0 +1,54 @@

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddlemix.utils.log import logger

from .configuration_hyper_qwen2 import HyperQwen2Config
from .modeling_navit_siglip import SigLipVisionConfig


class mPLUGOwl3Config(HyperQwen2Config):
    model_type = "mplugowl3"
    keys_to_ignore_at_inference = ["past_key_values"]

    default_vision_config = {
        "hidden_size": 1152,
        "image_size": 378,
        "intermediate_size": 4304,
        "model_type": "siglip_vision_model",
        "num_attention_heads": 16,
        "num_hidden_layers": 27,
        "patch_size": 14,
    }

    def __init__(
        self,
        use_cache=True,
        vision_config=None,
        **kwargs,
    ):
        self.use_cache = use_cache

        # Same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added.
        if vision_config is None:
            self.vision_config = SigLipVisionConfig(**self.default_vision_config)
            logger.info("vision_config is None, using default vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = SigLipVisionConfig(**vision_config)
        elif isinstance(vision_config, SigLipVisionConfig):
            self.vision_config = vision_config
        self.image_size = 378
        self.patch_size = self.vision_config.patch_size

        super().__init__(**kwargs)
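When `vision_config` is omitted, the default SigLIP settings above apply, and the patch arithmetic follows directly. A minimal sketch, assuming the modules from this commit are importable:

```python
from paddlemix.models.mPLUGOwl3.configuration_mplugowl3 import mPLUGOwl3Config

cfg = mPLUGOwl3Config()  # vision_config=None -> default SigLIP vision settings
print(cfg.vision_config.image_size, cfg.patch_size)  # 378 14
print((cfg.image_size // cfg.patch_size) ** 2)       # 729 patches per image
```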