Commit 7fa3701 (parent 49bd1bc): 6 changed files with 4,157 additions and 0 deletions.
Model_Architecture_Discussions/phi-3/configuration_phi3.py (192 additions, 0 deletions)
"""Phi-3 model configuration""" | ||
|
||
from transformers.configuration_utils import PretrainedConfig | ||
from transformers.utils import logging | ||
|
||
|
||
logger = logging.get_logger(__name__) | ||
|
||
|
||

class Phi3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32064):
            Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Phi3Model`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            The number of key/value heads used to implement Grouped-Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi-Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi-Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be
            constructed by mean-pooling all the original heads within that group. For more details, check out
            [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for the MLP outputs.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/value states (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or
            `yarn`, and `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden
            size divided by the number of attention heads divided by 2.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.

    Example:

    ```python
    >>> from transformers import Phi3Model, Phi3Config

    >>> # Initializing a Phi-3 style configuration
    >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "phi3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # Default to Multi-Head Attention (one KV head per query head) when unspecified.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
        rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["su", "yarn"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        # Each factor list needs one entry per rotary dimension pair, i.e. head_dim // 2.
        if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
            )
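
To make the validation rules concrete: with the default `hidden_size=3072` and `num_attention_heads=32`, each factor list needs 3072 // 32 // 2 = 48 entries. The following is a minimal sketch, not part of this commit, that exercises `_rope_scaling_validation`; it assumes `transformers` is installed, that the file above is importable as `configuration_phi3`, and the factor values themselves are arbitrary placeholders.

# Sketch (assumed import path): exercising Phi3Config's rope_scaling validation.
from configuration_phi3 import Phi3Config

factor_len = 3072 // 32 // 2  # head_dim // 2 = 48 for the default geometry

# A well-formed dict: exactly three keys, a supported type, 48-entry lists.
config = Phi3Config(
    rope_scaling={
        "type": "su",
        "short_factor": [1.0] * factor_len,
        "long_factor": [1.5] * factor_len,
    }
)
print(config.rope_scaling["type"])  # -> su

# A factor list of the wrong length makes __init__ raise via the validator.
try:
    Phi3Config(
        rope_scaling={"type": "su", "short_factor": [1.0] * 10, "long_factor": [1.0] * 10}
    )
except ValueError as err:
    print(err)  # ...short_factor field must have length 48, got 10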
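
Separately, the `num_key_value_heads` docstring mentions building each group's key/value head by mean-pooling the original heads when converting a multi-head checkpoint to GQA. Below is a hedged PyTorch sketch of that pooling step; the `pool_kv_heads` helper and the tensor shapes are illustrative assumptions, not code from this commit.

import torch

def pool_kv_heads(w: torch.Tensor, num_heads: int, num_kv_heads: int) -> torch.Tensor:
    """Hypothetical helper: mean-pool a (num_heads * head_dim, hidden) K or V
    projection weight down to (num_kv_heads * head_dim, hidden), averaging the
    original heads within each group, as the docstring describes."""
    head_dim = w.shape[0] // num_heads
    group_size = num_heads // num_kv_heads
    # Regroup rows as (kv_head, group member, head_dim, hidden), then average each group.
    w = w.view(num_kv_heads, group_size, head_dim, w.shape[1])
    return w.mean(dim=1).reshape(num_kv_heads * head_dim, w.shape[1])

# Example: pool 32 KV heads (head_dim 96, hidden_size 3072) down to 8 GQA heads.
w_k = torch.randn(32 * 96, 3072)
print(pool_kv_heads(w_k, num_heads=32, num_kv_heads=8).shape)  # torch.Size([768, 3072])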