10 changes: 10 additions & 0 deletions python/sglang/srt/models/deepseek_v2.py
@@ -227,9 +227,11 @@ def __init__(
prefix: str = "",
tp_rank: Optional[int] = None,
tp_size: Optional[int] = None,
swiglu_limit: Optional[float] = None,
) -> None:
super().__init__()
self.tp_size = tp_size
self.swiglu_limit = swiglu_limit
Contributor
medium

It is better to cast swiglu_limit to a float once during initialization to avoid repeated casting in the forward pass, which is on the hot path.

Suggested change
self.swiglu_limit = swiglu_limit
self.swiglu_limit = float(swiglu_limit) if swiglu_limit is not None else None
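
For context on why the up-front cast matters (illustrative values, not from the PR): limits loaded from config.json can arrive as ints, and normalizing once in __init__ keeps the per-token forward path free of type conversions:

swiglu_limit = 7  # e.g. an integer parsed from config.json
swiglu_limit = float(swiglu_limit) if swiglu_limit is not None else None
assert isinstance(swiglu_limit, float)  # forward() can pass it straight to clamp()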


self.gate_up_proj = MergedColumnParallelLinear(
hidden_size,
@@ -283,6 +285,12 @@ def forward(
x = (x, None, y)

gate_up, _ = self.gate_up_proj(x)
if self.swiglu_limit is not None:
_g, _u = gate_up.chunk(2, dim=-1)
_lim = float(self.swiglu_limit)
gate_up = torch.cat(
[_g.clamp(max=_lim), _u.clamp(min=-_lim, max=_lim)], dim=-1
)
Comment on lines +288 to +293
Contributor
medium

Using torch.cat creates a new tensor and involves extra memory allocation and copying. Since gate_up is a fresh tensor returned by the linear projection, you can perform the clamping in-place on its chunks. This is more efficient and avoids unnecessary overhead in the forward pass.

        if self.swiglu_limit is not None:
            _lim = self.swiglu_limit
            _g, _u = gate_up.chunk(2, dim=-1)
            _g.clamp_(max=_lim)
            _u.clamp_(min=-_lim, max=_lim)
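
For intuition on why the in-place form is safe (a standalone sketch, not part of the diff): chunk returns views into gate_up, so clamp_ writes through to the parent tensor and no new tensor is allocated:

import torch

gate_up = torch.randn(4, 8)          # stand-in for the gate_up_proj output
limit = 7.0

_g, _u = gate_up.chunk(2, dim=-1)    # views, no copy
_g.clamp_(max=limit)                 # writes through to the gate half
_u.clamp_(min=-limit, max=limit)     # writes through to the up half

half = gate_up.shape[-1] // 2
assert gate_up[..., :half].max() <= limit
assert gate_up[..., half:].abs().max() <= limit

This assumes the serving forward pass runs without autograd tracking; in-place edits to a tensor needed for backward would not be safe, but that does not apply on this path.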

x = self.act_fn(gate_up)
x, _ = self.down_proj(
x,
@@ -533,6 +541,7 @@ def __init__(
hidden_act=config.hidden_act,
quant_config=quant_config,
reduce_results=False,
swiglu_limit=getattr(config, "swiglu_limit", None),
prefix=add_prefix("shared_experts", prefix),
**(
dict(tp_rank=0, tp_size=1)
@@ -2594,6 +2603,7 @@ def __init__(
prefix=add_prefix("mlp", prefix),
tp_rank=mlp_tp_rank,
tp_size=mlp_tp_size,
swiglu_limit=getattr(config, "swiglu_limit", None),
)

self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
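
For readers unfamiliar with the feature itself, a rough standalone sketch of the clamped activation this diff enables (not code from the PR; it assumes act_fn is the usual SiLU-and-multiply gate, and clamped_swiglu is an illustrative name):

import torch
import torch.nn.functional as F

def clamped_swiglu(gate_up: torch.Tensor, limit: float) -> torch.Tensor:
    # First half of the merged projection is the gate, second half is the up
    # branch, matching the chunk(2, dim=-1) split in the diff above.
    gate, up = gate_up.chunk(2, dim=-1)
    gate = gate.clamp(max=limit)           # cap the gate from above only
    up = up.clamp(min=-limit, max=limit)   # cap the up branch on both sides
    return F.silu(gate) * up

When a config does not define swiglu_limit, the getattr calls above fall back to None and the clamp branch in forward is skipped, so existing checkpoints behave exactly as before.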