Commit bccd6f0

ispobock authored and xwu-intel committed
Add draft extend CUDA graph for flashinfer backend (sgl-project#6805)
1 parent: c5b5384 · commit: bccd6f0

File tree

5 files changed: +170 −3 lines
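At a glance: the commit adds an `is_draft_extend()` branch to the CUDA-graph capture and replay paths of both the FlashInfer and FlashInfer-MLA attention backends, pads the draft-extend CUDA-graph runner's `accept_length` buffer to `num_tokens_per_bs`, instantiates a dedicated draft-extend attention backend in `eagle_worker.py` for both the dense and MLA flashinfer paths (the MLA path previously set it to `None`), and adds accept-length regression tests for the flashinfer, triton, and flashinfer-MLA backends.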

python/sglang/srt/layers/attention/flashinfer_backend.py

Lines changed: 40 additions & 0 deletions
@@ -358,6 +358,35 @@ def init_forward_metadata_capture_cuda_graph(
             )
             self.prefill_cuda_graph_metadata[bs] = prefill_wrappers
             self.forward_metadata = PrefillMetadata(prefill_wrappers, False, False)
+        elif forward_mode.is_draft_extend():
+            prefill_wrappers = []
+            for i in range(self.num_wrappers):
+                prefill_wrappers.append(
+                    BatchPrefillWithPagedKVCacheWrapper(
+                        self.workspace_buffer,
+                        "NHD",
+                        backend="fa2",
+                        use_cuda_graph=True,
+                        qo_indptr_buf=self.cuda_graph_qo_indptr[i][: bs + 1],
+                        paged_kv_indptr_buf=self.kv_indptr[i][: bs + 1],
+                        paged_kv_indices_buf=self.cuda_graph_kv_indices[i],
+                        paged_kv_last_page_len_buf=self.kv_last_page_len[:bs],
+                    )
+                )
+
+            seq_lens_sum = seq_lens.sum().item()
+            self.indices_updater_prefill.update(
+                req_pool_indices,
+                seq_lens,
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrappers=prefill_wrappers,
+                use_ragged=False,
+                encoder_lens=encoder_lens,
+                spec_info=spec_info,
+            )
+            self.prefill_cuda_graph_metadata[bs] = prefill_wrappers
+            self.forward_metadata = PrefillMetadata(prefill_wrappers, False, False)
         else:
             raise ValueError(f"Invalid mode: {forward_mode=}")

@@ -392,6 +421,17 @@ def init_forward_metadata_replay_cuda_graph(
                 encoder_lens=encoder_lens[:bs] if encoder_lens is not None else None,
                 spec_info=spec_info,
             )
+        elif forward_mode.is_draft_extend():
+            self.indices_updater_prefill.update(
+                req_pool_indices[:bs],
+                seq_lens[:bs],
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrappers=self.prefill_cuda_graph_metadata[bs],
+                use_ragged=False,
+                encoder_lens=encoder_lens[:bs] if encoder_lens is not None else None,
+                spec_info=spec_info,
+            )
         else:
             raise ValueError("Invalid forward mode")
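Capture builds the `BatchPrefillWithPagedKVCacheWrapper` objects once over fixed-size buffers; replay only refreshes buffer contents through `indices_updater_prefill.update`. Below is a minimal, generic sketch of that capture/replay discipline in plain PyTorch; the `capture_graph` helper and its arguments are illustrative, not part of this commit.

# Sketch (illustrative, not from this commit): capture once with static
# tensors, then replay after rewriting only the tensors' contents.
import torch

def capture_graph(fn, static_args, warmup_iters=3):
    # Warm up on a side stream so kernel autotuning happens outside capture.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        for _ in range(warmup_iters):
            fn(*static_args)
    torch.cuda.current_stream().wait_stream(s)

    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_out = fn(*static_args)  # records fixed device addresses
    return graph, static_out

# Replay: copy fresh inputs into the captured tensors, then graph.replay();
# outputs appear in static_out without re-launching individual kernels.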

python/sglang/srt/layers/attention/flashinfer_mla_backend.py

Lines changed: 32 additions & 0 deletions
@@ -278,6 +278,28 @@ def init_forward_metadata_capture_cuda_graph(
             )
             self.prefill_cuda_graph_metadata[bs] = verify_wrapper
             self.forward_metadata = PrefillMetadata(verify_wrapper, False)
+        elif forward_mode.is_draft_extend():
+            draft_extend_wrapper = BatchMLAPagedAttentionWrapper(
+                self.workspace_buffer,
+                use_cuda_graph=True,
+                qo_indptr=self.cuda_graph_qo_indptr[: bs + 1],
+                kv_indptr=self.cuda_graph_kv_indptr[: bs + 1],
+                kv_indices=self.cuda_graph_kv_indices,
+                kv_len_arr=self.cuda_graph_kv_lens[:bs],
+                backend="auto",
+            )
+            seq_lens_sum = seq_lens.sum().item()
+            self.indices_updater_prefill.update(
+                req_pool_indices,
+                seq_lens,
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrapper_paged=draft_extend_wrapper,
+                use_ragged=False,
+                spec_info=spec_info,
+            )
+            self.prefill_cuda_graph_metadata[bs] = draft_extend_wrapper
+            self.forward_metadata = PrefillMetadata(draft_extend_wrapper, False)
         else:
             raise ValueError(f"Invalid mode: {forward_mode=}")

@@ -325,6 +347,16 @@ def init_forward_metadata_replay_cuda_graph(
                 use_ragged=False,
                 spec_info=spec_info,
             )
+        elif forward_mode.is_draft_extend():
+            self.indices_updater_prefill.update(
+                req_pool_indices[:bs],
+                seq_lens[:bs],
+                seq_lens_sum,
+                prefix_lens=None,
+                prefill_wrapper_paged=self.prefill_cuda_graph_metadata[bs],
+                use_ragged=False,
+                spec_info=spec_info,
+            )
         else:
             raise ValueError(f"Invalid forward mode: {forward_mode=}")
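Note that both backends hand the wrappers slices of preallocated buffers (`self.cuda_graph_qo_indptr[: bs + 1]`, `self.cuda_graph_kv_indices`, ...). A CUDA graph records fixed device addresses, so replay must reuse exactly the memory seen at capture; a slice is a view over the same storage, which preserves that invariant. A small sketch, with made-up tensor names:

# Sketch: slicing a preallocated tensor keeps the device pointer stable
# while limiting the view to the current batch size.
import torch

max_bs = 8
qo_indptr = torch.zeros(max_bs + 1, dtype=torch.int32, device="cuda")

bs = 4
view = qo_indptr[: bs + 1]                 # a view, not a copy
assert view.data_ptr() == qo_indptr.data_ptr()

# Replay-time updates rewrite contents in place; the pointer never moves.
view.copy_(torch.arange(bs + 1, dtype=torch.int32, device="cuda"))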

python/sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py

Lines changed: 3 additions & 1 deletion
@@ -80,7 +80,9 @@ def __init__(self, eagle_worker: EAGLEWorker):

         self.seq_lens = torch.ones((self.max_bs,), dtype=torch.int32)
         self.extend_seq_lens = torch.ones((self.max_bs,), dtype=torch.int32)
-        self.accept_length = torch.ones((self.max_bs,), dtype=torch.int32)
+        self.accept_length = (
+            torch.ones((self.max_bs,), dtype=torch.int32) * self.num_tokens_per_bs
+        )

         # Capture
         try:
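During the dummy capture runs, `accept_length` is now padded to `num_tokens_per_bs` per request instead of 1, so any metadata derived from it spans the full padded token range. A toy illustration of the arithmetic (building a cumulative-sum indptr from the lengths is an assumption about how such buffers are typically consumed, not code from this file):

import torch

max_bs, num_tokens_per_bs = 4, 2
accept_length = torch.ones(max_bs, dtype=torch.int32) * num_tokens_per_bs

# e.g. a query-offset indptr built from per-request accepted lengths
qo_indptr = torch.zeros(max_bs + 1, dtype=torch.int32)
qo_indptr[1:] = torch.cumsum(accept_length, dim=0)
print(qo_indptr)  # tensor([0, 2, 4, 6, 8], dtype=torch.int32)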

python/sglang/srt/speculative/eagle_worker.py

Lines changed: 10 additions & 1 deletion
@@ -156,6 +156,7 @@ def init_attention_backend(self):
         if self.server_args.attention_backend == "flashinfer":
             if not global_server_args_dict["use_mla_backend"]:
                 from sglang.srt.layers.attention.flashinfer_backend import (
+                    FlashInferAttnBackend,
                     FlashInferMultiStepDraftBackend,
                 )

@@ -164,8 +165,13 @@ def init_attention_backend(self):
                     self.topk,
                     self.speculative_num_steps,
                 )
+                self.draft_extend_attn_backend = FlashInferAttnBackend(
+                    self.draft_model_runner,
+                    skip_prefill=False,
+                )
             else:
                 from sglang.srt.layers.attention.flashinfer_mla_backend import (
+                    FlashInferMLAAttnBackend,
                     FlashInferMLAMultiStepDraftBackend,
                 )

@@ -174,7 +180,10 @@ def init_attention_backend(self):
                     self.topk,
                     self.speculative_num_steps,
                 )
-                self.draft_extend_attn_backend = None
+                self.draft_extend_attn_backend = FlashInferMLAAttnBackend(
+                    self.draft_model_runner,
+                    skip_prefill=False,
+                )
             self.padded_static_len = self.speculative_num_steps + 1
             self.has_prefill_wrapper_verify = True
         elif self.server_args.attention_backend == "triton":
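Net effect of this file: the dense flashinfer path gains a dedicated `FlashInferAttnBackend` for the draft-extend pass, and the MLA path, which previously set `self.draft_extend_attn_backend = None`, now gets a `FlashInferMLAAttnBackend` as well. Both are built with `skip_prefill=False`, consistent with the backend diffs above, where draft extend runs through the prefill wrappers.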

test/srt/test_eagle_infer.py

Lines changed: 85 additions & 1 deletion
@@ -19,6 +19,7 @@
 from sglang.test.test_utils import (
     DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
     DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
+    DEFAULT_MODEL_NAME_FOR_TEST_MLA,
     DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
     DEFAULT_URL_FOR_TEST,
     CustomTestCase,
@@ -602,6 +603,7 @@ def setUpClass(cls):
                 "fa3",
             ],
         )
+        cls.accept_len_threshold = 1.50

     @classmethod
     def tearDownClass(cls):
@@ -636,7 +638,89 @@ def test_one_batch_accept_length(self):
             acc_length = 1.0

         print(f"{acc_length=}")
-        self.assertGreater(acc_length, 1.50)
+        self.assertGreater(acc_length, self.accept_len_threshold)
+
+
+class TestEAGLEDraftExtendFlashinfer(TestEAGLEDraftExtend):
+    @classmethod
+    def setUpClass(cls):
+        cls.base_url = DEFAULT_URL_FOR_TEST
+        cls.process = popen_launch_server(
+            DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
+            cls.base_url,
+            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
+            other_args=[
+                "--speculative-algorithm",
+                "EAGLE",
+                "--speculative-draft-model-path",
+                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
+                "--speculative-num-steps",
+                1,
+                "--speculative-eagle-topk",
+                1,
+                "--speculative-num-draft-tokens",
+                2,
+                "--max-running-requests",
+                4,
+                "--attention-backend",
+                "flashinfer",
+            ],
+        )
+        cls.accept_len_threshold = 1.50
+
+
+class TestEAGLEDraftExtendTriton(TestEAGLEDraftExtend):
+    @classmethod
+    def setUpClass(cls):
+        cls.base_url = DEFAULT_URL_FOR_TEST
+        cls.process = popen_launch_server(
+            DEFAULT_EAGLE_TARGET_MODEL_FOR_TEST,
+            cls.base_url,
+            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
+            other_args=[
+                "--speculative-algorithm",
+                "EAGLE",
+                "--speculative-draft-model-path",
+                DEFAULT_EAGLE_DRAFT_MODEL_FOR_TEST,
+                "--speculative-num-steps",
+                1,
+                "--speculative-eagle-topk",
+                1,
+                "--speculative-num-draft-tokens",
+                2,
+                "--max-running-requests",
+                4,
+                "--attention-backend",
+                "triton",
+            ],
+        )
+        cls.accept_len_threshold = 1.50
+
+
+class TestEAGLEDraftExtendFlashinferMLA(TestEAGLEDraftExtend):
+    @classmethod
+    def setUpClass(cls):
+        cls.base_url = DEFAULT_URL_FOR_TEST
+        cls.process = popen_launch_server(
+            DEFAULT_MODEL_NAME_FOR_TEST_MLA,
+            cls.base_url,
+            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
+            other_args=[
+                "--speculative-algorithm",
+                "EAGLE",
+                "--speculative-num-steps",
+                1,
+                "--speculative-eagle-topk",
+                1,
+                "--speculative-num-draft-tokens",
+                2,
+                "--max-running-requests",
+                4,
+                "--attention-backend",
+                "flashinfer",
+            ],
+        )
+        cls.accept_len_threshold = 1.85


 if __name__ == "__main__":
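The three new test classes inherit `test_one_batch_accept_length` from `TestEAGLEDraftExtend` and override only the server launch arguments and `cls.accept_len_threshold` (1.50 for the dense backends, 1.85 for the MLA model). Assuming the file keeps its standard unittest entry point, a single backend can be exercised with, e.g., `python3 test/srt/test_eagle_infer.py TestEAGLEDraftExtendFlashinferMLA`.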
