diff --git a/python/sglang/multimodal_gen/runtime/layers/usp.py b/python/sglang/multimodal_gen/runtime/layers/usp.py
index e822350091ae..3794605fde53 100644
--- a/python/sglang/multimodal_gen/runtime/layers/usp.py
+++ b/python/sglang/multimodal_gen/runtime/layers/usp.py
@@ -210,7 +210,7 @@ def attn_callable_adapter(q, k, v, *args, **kwargs):
     q = torch.permute(q, [0, 2, 1, 3])
     k = torch.permute(k, [0, 2, 1, 3])
     v = torch.permute(v, [0, 2, 1, 3])
-    # logger.warning(f"Warning: return_s·oftmax_lse is only supported for FlashAttentionImpl")
+    # logger.warning(f"Warning: return_softmax_lse is only supported for FlashAttentionImpl")
     output, softmax_lse, *rest = attn_impl.forward(
         q,
         k,
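
For context, a minimal sketch (not part of the diff) of what the torch.permute calls in this hunk do: permuting with [0, 2, 1, 3] swaps the second and third axes, e.g. turning an assumed [batch, seq_len, num_heads, head_dim] tensor into the [batch, num_heads, seq_len, head_dim] layout that attn_impl.forward presumably expects. The input layout here is an assumption; only the permutation itself comes from the diff.

    import torch

    # Assumed input layout: [batch, seq_len, num_heads, head_dim]
    # (the layout is hypothetical; the diff only shows the permute).
    q = torch.randn(2, 128, 16, 64)

    # torch.permute(q, [0, 2, 1, 3]) swaps the seq_len and num_heads axes.
    q_bhsd = torch.permute(q, [0, 2, 1, 3])
    assert q_bhsd.shape == (2, 16, 128, 64)  # [batch, num_heads, seq_len, head_dim]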