Skip to content

Commit 0b5b5d7

Browse files
authored
[Frontend] Log the maximum supported concurrency (#8831)
1 parent cdc72e3 commit 0b5b5d7

File tree

2 files changed

+8
-0
lines changed

2 files changed

+8
-0
lines changed

vllm/executor/distributed_gpu_executor.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -56,6 +56,10 @@ def initialize_cache(self, num_gpu_blocks: int,
5656
# have GPUs.
5757
logger.info("# GPU blocks: %d, # CPU blocks: %d", num_gpu_blocks,
5858
num_cpu_blocks)
59+
max_concurrency = (num_gpu_blocks * self.cache_config.block_size /
60+
self.model_config.max_model_len)
61+
logger.info("Maximum concurrency for %s tokens per request: %.2fx",
62+
self.model_config.max_model_len, max_concurrency)
5963

6064
self.cache_config.num_gpu_blocks = num_gpu_blocks
6165
self.cache_config.num_cpu_blocks = num_cpu_blocks

vllm/executor/gpu_executor.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -121,6 +121,10 @@ def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks) -> None:
121121
# remains to abstract away the device for non-GPU configurations.
122122
logger.info("# GPU blocks: %d, # CPU blocks: %d", num_gpu_blocks,
123123
num_cpu_blocks)
124+
max_concurrency = (num_gpu_blocks * self.cache_config.block_size /
125+
self.model_config.max_model_len)
126+
logger.info("Maximum concurrency for %s tokens per request: %.2fx",
127+
self.model_config.max_model_len, max_concurrency)
124128

125129
self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)
126130

0 commit comments

Comments (0)