11 changes: 9 additions & 2 deletions vllm/model_executor/models/deepseek_mtp.py
@@ -13,6 +13,7 @@
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe import SharedFusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
+from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
@@ -67,7 +68,13 @@ def __init__(self, vllm_config: VllmConfig, prefix: str) -> None:

self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
+self.eh_proj = ReplicatedLinear(
+    config.hidden_size * 2,
+    config.hidden_size,
+    bias=False,
+    quant_config=quant_config,
+    prefix=maybe_prefix(prefix, "eh_proj"),
+)

self.device = current_platform.device_type

@@ -107,7 +114,7 @@ def forward(
inputs_embeds = self.enorm(inputs_embeds)
previous_hidden_states = self.hnorm(previous_hidden_states)

-hidden_states = self.eh_proj(
+hidden_states, _ = self.eh_proj(
torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
)

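Every file below repeats the two edits shown above for deepseek_mtp.py: `eh_proj` is built with vLLM's `ReplicatedLinear`, so it receives the model's `quant_config` and a weight-loading `prefix` like the other quantizable layers, and its call site unpacks a tuple, since vLLM linear layers return `(output, output_bias)` rather than the bare tensor returned by `nn.Linear`. A minimal standalone sketch of that behaviour follows; it is not part of the PR, and the sizes, the `None` quant config, and the prefix string are illustrative only.

# Standalone sketch (assumes vLLM and torch are importable; values are made up for illustration).
import torch
from vllm.model_executor.layers.linear import ReplicatedLinear

hidden_size = 64
eh_proj = ReplicatedLinear(
    hidden_size * 2,
    hidden_size,
    bias=False,
    quant_config=None,        # the real modules forward the model's quant_config here
    prefix="model.eh_proj",   # illustrative prefix only
)

inputs_embeds = torch.randn(4, hidden_size)
previous_hidden_states = torch.randn(4, hidden_size)

# vLLM linear layers return (output, output_bias), unlike nn.Linear's bare tensor,
# which is why the forward() calls in this PR unpack and discard the second element.
hidden_states, _ = eh_proj(torch.cat([inputs_embeds, previous_hidden_states], dim=-1))
assert hidden_states.shape == (4, hidden_size)

Presumably `ReplicatedLinear` (rather than a tensor-parallel variant) is used because `eh_proj` maps full hidden states to full hidden states on every rank, so the weight is simply replicated while still flowing through vLLM's quantization machinery.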
11 changes: 9 additions & 2 deletions vllm/model_executor/models/glm4_moe_lite_mtp.py
@@ -34,6 +34,7 @@
from vllm.config import VllmConfig
from vllm.model_executor.layers.fused_moe import FusedMoE, SharedFusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
+from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
@@ -87,7 +88,13 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):

self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
+self.eh_proj = ReplicatedLinear(
+    config.hidden_size * 2,
+    config.hidden_size,
+    bias=False,
+    quant_config=quant_config,
+    prefix=maybe_prefix(prefix, "eh_proj"),
+)

self.device = current_platform.device_type

@@ -127,7 +134,7 @@ def forward(
inputs_embeds = self.enorm(inputs_embeds)
previous_hidden_states = self.hnorm(previous_hidden_states)

-hidden_states = self.eh_proj(
+hidden_states, _ = self.eh_proj(
torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
)

11 changes: 9 additions & 2 deletions vllm/model_executor/models/glm4_moe_mtp.py
@@ -33,6 +33,7 @@
from vllm.config import CacheConfig, ParallelConfig, VllmConfig
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
+from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
@@ -83,7 +84,13 @@ def __init__(
super().__init__()
self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
+self.eh_proj = ReplicatedLinear(
+    config.hidden_size * 2,
+    config.hidden_size,
+    bias=False,
+    quant_config=quant_config,
+    prefix=maybe_prefix(prefix, "eh_proj"),
+)
self.shared_head = SharedHead(
config=config, prefix=prefix, quant_config=quant_config
)
@@ -110,7 +117,7 @@ def forward(
inputs_embeds = self.enorm(inputs_embeds)
previous_hidden_states = self.hnorm(previous_hidden_states)

-hidden_states = self.eh_proj(
+hidden_states, _ = self.eh_proj(
torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
)

11 changes: 9 additions & 2 deletions vllm/model_executor/models/glm_ocr_mtp.py
@@ -30,6 +30,7 @@

from vllm.config import VllmConfig
from vllm.model_executor.layers.layernorm import RMSNorm
+from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
VocabParallelEmbedding,
@@ -63,7 +64,13 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):

self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
+self.eh_proj = ReplicatedLinear(
+    config.hidden_size * 2,
+    config.hidden_size,
+    bias=False,
+    quant_config=quant_config,
+    prefix=maybe_prefix(prefix, "eh_proj"),
+)

self.device = current_platform.device_type
self.shared_head = SharedHead(
@@ -88,7 +95,7 @@ def forward(
inputs_embeds = self.enorm(inputs_embeds)
previous_hidden_states = self.hnorm(previous_hidden_states)

-hidden_states = self.eh_proj(
+hidden_states, _ = self.eh_proj(
torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
)

9 changes: 8 additions & 1 deletion vllm/model_executor/models/openpangu_mtp.py
@@ -30,6 +30,7 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
+from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
VocabParallelEmbedding,
@@ -56,7 +57,13 @@ def __init__(self, vllm_config: VllmConfig, prefix: str) -> None:

self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
+self.eh_proj = ReplicatedLinear(
+    config.hidden_size * 2,
+    config.hidden_size,
+    bias=False,
+    quant_config=quant_config,
+    prefix=maybe_prefix(prefix, "eh_proj"),
+)
self.shared_head = SharedHead(
config=config,
quant_config=quant_config,
11 changes: 9 additions & 2 deletions vllm/model_executor/models/step3p5_mtp.py
@@ -9,6 +9,7 @@
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.model_executor.layers.layernorm import GemmaRMSNorm
+from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
@@ -51,7 +52,13 @@ def __init__(
quant_config = vllm_config.quant_config
self.enorm = GemmaRMSNorm(config.hidden_size, config.rms_norm_eps)
self.hnorm = GemmaRMSNorm(config.hidden_size, config.rms_norm_eps)
-self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
+self.eh_proj = ReplicatedLinear(
+    config.hidden_size * 2,
+    config.hidden_size,
+    bias=False,
+    quant_config=quant_config,
+    prefix=maybe_prefix(prefix, "eh_proj"),
+)
self.shared_head = SharedHead(config=config, quant_config=quant_config)
self.mtp_block = Step3p5DecoderLayer(
vllm_config,
@@ -70,7 +77,7 @@ def forward(
inputs_embeds = self.enorm(inputs_embeds)
previous_hidden_states = self.hnorm(previous_hidden_states)

-hidden_states = self.eh_proj(
+hidden_states, _ = self.eh_proj(
torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
)
