From e86fe9db6d951a581e6879724f9872bee3c3d137 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Mon, 1 Dec 2025 22:20:13 -0800 Subject: [PATCH 01/73] Squashed merge PR #23624 Signed-off-by: Yifan Qiao Co-authored-by: KuntaiDu --- .../core/test_single_type_kv_cache_manager.py | 12 ++- vllm/v1/core/block_pool.py | 4 + vllm/v1/core/kv_cache_coordinator.py | 22 +++++- vllm/v1/core/kv_cache_manager.py | 77 +++++++++++++++---- vllm/v1/core/sched/scheduler.py | 7 +- vllm/v1/worker/gpu_worker.py | 2 +- vllm/v1/worker/tpu_worker.py | 9 ++- 7 files changed, 107 insertions(+), 26 deletions(-) diff --git a/tests/v1/core/test_single_type_kv_cache_manager.py b/tests/v1/core/test_single_type_kv_cache_manager.py index e6a69dc8a949..9a8af5dcac8e 100644 --- a/tests/v1/core/test_single_type_kv_cache_manager.py +++ b/tests/v1/core/test_single_type_kv_cache_manager.py @@ -332,10 +332,12 @@ def test_get_num_blocks_to_allocate(): ] assert ( - manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1) == 20 + manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1, 0) + == 20 ) assert ( - manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2) == 15 + manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2, 0) + == 15 ) @@ -359,8 +361,10 @@ def test_chunked_local_attention_get_num_blocks_to_allocate(): ] assert ( - manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1) == 20 + manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1, 0) + == 20 ) assert ( - manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2) == 15 + manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2, 0) + == 15 ) diff --git a/vllm/v1/core/block_pool.py b/vllm/v1/core/block_pool.py index c779e3d34b3e..41d99c4f721f 100644 --- a/vllm/v1/core/block_pool.py +++ b/vllm/v1/core/block_pool.py @@ -254,6 +254,10 @@ def cache_full_blocks( [] if self.enable_kv_cache_events else None ) for i, 
blk in enumerate(new_full_blocks): + if blk.is_null: + # May happen when both sparse attention (e.g., sliding + # window) and connector are enabled. + continue assert blk.block_hash is None block_hash = new_block_hashes[i] diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 4b09b76c1c59..32f3c317ffab 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -4,6 +4,7 @@ from collections.abc import Sequence from math import lcm +from vllm.logger import init_logger from vllm.v1.core.block_pool import BlockPool from vllm.v1.core.kv_cache_metrics import KVCacheMetricsCollector from vllm.v1.core.kv_cache_utils import ( @@ -24,6 +25,8 @@ ) from vllm.v1.request import Request +logger = init_logger(__name__) + class KVCacheCoordinator(ABC): """ @@ -73,6 +76,7 @@ def get_num_blocks_to_allocate( num_tokens: int, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], num_encoder_tokens: int, + total_computed_tokens: int, ) -> int: """ Get the number of blocks needed to be allocated for the request. @@ -85,6 +89,7 @@ def get_num_blocks_to_allocate( prefix caching. num_encoder_tokens: The number of encoder tokens for allocating blocks for cross-attention. + total_computed_tokens: Include both local and external tokens. Returns: The number of blocks. @@ -95,11 +100,14 @@ def get_num_blocks_to_allocate( # For cross-attention, we issue a single static allocation # of blocks based on the number of encoder input tokens. 
num_blocks_to_allocate += manager.get_num_blocks_to_allocate( - request_id, num_encoder_tokens, [] + request_id, num_encoder_tokens, [], 0 ) else: num_blocks_to_allocate += manager.get_num_blocks_to_allocate( - request_id, num_tokens, new_computed_blocks[i] + request_id, + num_tokens, + new_computed_blocks[i], + total_computed_tokens, ) return num_blocks_to_allocate @@ -144,6 +152,16 @@ def allocate_new_blocks( for manager in self.single_type_managers ) + def allocate_new_blocks_for_connector( + self, request_id: str, total_computed_tokens: int + ) -> None: + """ + Allocate new blocks for the request to give it at least + `total_computed_tokens` token slots. + """ + for manager in self.single_type_managers: + manager.allocate_new_blocks_for_connector(request_id, total_computed_tokens) + def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: """ Cache the blocks for the request. diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 13086a66f6ea..8d902d7e2daa 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -209,6 +209,7 @@ def allocate_slots( num_new_tokens: int, num_new_computed_tokens: int = 0, new_computed_blocks: KVCacheBlocks | None = None, + num_external_computed_tokens: int = 0, num_lookahead_tokens: int = 0, delay_cache_blocks: bool = False, num_encoder_tokens: int = 0, @@ -217,13 +218,13 @@ def allocate_slots( Args: request: The request to allocate slots. - num_new_tokens: The number of tokens to allocate, including external - tokens. Note that this does not include tokens that have - already been computed locally (i.e. new_computed_blocks). + num_new_tokens: The number of tokens to be computed. num_new_computed_tokens: The number of new computed tokens just hitting the prefix caching, excluding external tokens. new_computed_blocks: The cached blocks for the above new computed tokens. 
+ num_external_computed_tokens: The number of tokens that their + KV caches are not cached by vLLM but cached by the connector. num_lookahead_tokens: The number of speculative tokens to allocate. This is used by spec decode proposers with kv-cache such as eagle. @@ -236,17 +237,55 @@ def allocate_slots( Blocks layout: ``` - ----------------------------------------------------------------------- - | < computed > | < new computed > | < new > | < pre-allocated > | - ----------------------------------------------------------------------- - | < required > | - -------------------------------------------------- - | < full > | - ------------------------------------------------ - | | - -------------- + --------------------------------------------------------------------- + | < comp > | < new_comp > | < connector > | < new > | < lookahead > | + --------------------------------------------------------------------- + | < to be computed > | + --------------------------------------------------------------------- + | < to be allocated > | + --------------------------------------------------------------------- + | < to be cached > | + --------------------------------------------------------------------- + | Prefix-cached tokens from both vLLM | + | and connector. Can be safely removed if | + | they are outside sliding window. 
| + --------------------------------------------------------------------- + | not cached by | + | vLLM, but | + | cached by | + | connector | + --------------------------------------------------------------------- + | < cached by vLLM > | + --------------------------------------------------------------------- + | ref_cnt | + | increased| + --------------------------------------------------------------------- + | ref_cnt not | + | increased yet| + --------------------------------------------------------------------- + + ``` + + Abbrivations: + + ``` + comp = request.num_computed_tokens + new_comp = num_new_computed_tokens + = len(new_computed_blocks) * block_size + connector = num_external_computed_tokens + new = num_new_tokens + lookahead = num_lookahead_tokens ``` - The following *_blocks are illustrated in this layout. + + + The allocation has three stages: + - Free unnecessary blocks in `comp` and check + if we have sufficient free blocks (return None if not). + - Handle prefix tokens (`comp + new_comp + connector`): + - Free unnecessary blocks (e.g. outside sliding window) + - Allocate new blocks for `connector` tokens inside + sliding window + - Allocate new blocks for tokens to be computed (`new + lookahead`) Returns: A list of new allocated blocks. 
@@ -273,7 +312,10 @@ def allocate_slots( # the new prefix caching hits num_computed_tokens = request.num_computed_tokens + num_new_computed_tokens num_tokens_need_slot = min( - num_computed_tokens + num_new_tokens + num_lookahead_tokens, + num_computed_tokens + + num_new_tokens + + num_lookahead_tokens + + num_external_computed_tokens, self.max_model_len, ) @@ -282,6 +324,7 @@ def allocate_slots( num_tokens=num_tokens_need_slot, new_computed_blocks=new_computed_block_list, num_encoder_tokens=num_encoder_tokens, + total_computed_tokens=num_computed_tokens + num_external_computed_tokens, ) if num_blocks_to_allocate > self.block_pool.get_num_free_blocks(): @@ -303,6 +346,12 @@ def allocate_slots( request.request_id, new_computed_block_list ) + if num_external_computed_tokens > 0: + self.coordinator.allocate_new_blocks_for_connector( + request.request_id, num_computed_tokens + num_external_computed_tokens + ) + # TODO: merge the new blocks for connector with new_blocks below + new_blocks = self.coordinator.allocate_new_blocks( request.request_id, num_tokens_need_slot, num_encoder_tokens ) diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 278970ae7ee8..b6daa7fba309 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -582,9 +582,10 @@ def schedule(self) -> SchedulerOutput: new_blocks = self.kv_cache_manager.allocate_slots( request, - num_new_tokens + num_external_computed_tokens, + num_new_tokens, num_new_local_computed_tokens, new_computed_blocks, + num_external_computed_tokens, num_lookahead_tokens=effective_lookahead_tokens, delay_cache_blocks=load_kv_async, num_encoder_tokens=num_encoder_tokens, @@ -601,7 +602,7 @@ def schedule(self) -> SchedulerOutput: if self.connector is not None: self.connector.update_state_after_alloc( request, - new_computed_blocks + new_blocks, + self.kv_cache_manager.get_blocks(request.request_id), num_external_computed_tokens, ) @@ -1560,7 +1561,7 @@ def 
_connector_finished( # Hybrid memory allocator should be already turned off for this # code path, but let's double-check here. assert len(self.kv_cache_config.kv_cache_groups) == 1 - return self.connector.request_finished(request, block_ids[0]) + return self.connector.request_finished(request, block_ids) return self.connector.request_finished_all_groups(request, block_ids) diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index 21a8564f83c4..c767cbf1c2e6 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -623,7 +623,7 @@ def execute_model( output = self.model_runner.execute_model( scheduler_output, intermediate_tensors ) - if isinstance(output, (ModelRunnerOutput, NoneType)): + if isinstance(output, ModelRunnerOutput | NoneType): return output assert isinstance(output, IntermediateTensors) diff --git a/vllm/v1/worker/tpu_worker.py b/vllm/v1/worker/tpu_worker.py index 5f6136b178b4..ab22d0af63a5 100644 --- a/vllm/v1/worker/tpu_worker.py +++ b/vllm/v1/worker/tpu_worker.py @@ -304,6 +304,13 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: def initialize_from_config(self, kv_cache_config: KVCacheConfig) -> None: """Allocate GPU KV cache with the specified kv_cache_config.""" + # Init kv cache connector here, because it requires + # `kv_cache_config`. + # NOTE(Kuntai): This need to be done before `initialize_kv_cache`, + # because `initialize_kv_cache` will inject kv cache groups not + # related to kv cache connector (e.g. kv cache sharing layers). 
+ ensure_kv_transfer_initialized(self.vllm_config, kv_cache_config) + self.model_runner.initialize_kv_cache(kv_cache_config) def check_health(self) -> None: @@ -336,8 +343,6 @@ def _init_tpu_worker_distributed_environment( parallel_config.tensor_parallel_size, parallel_config.pipeline_parallel_size ) - ensure_kv_transfer_initialized(vllm_config) - def shutdown(self) -> None: self.model_runner.ensure_kv_transfer_shutdown() From f645d95e8bfaa9b597aa41d1e621200c9e27705b Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Tue, 2 Dec 2025 00:47:05 -0800 Subject: [PATCH 02/73] feat: for sliding window attention, only allocate tokens within the window, and leading padding with null blocks Signed-off-by: Yifan Qiao fixes Signed-off-by: Yifan Qiao fix get_num_blocks_to_allocate Signed-off-by: Yifan Qiao --- .gitignore | 3 + test_connector_w_hybrid_kv_allocator.py | 64 +++++++++ vllm/config/vllm.py | 25 ++-- .../kv_connector/v1/lmcache_connector.py | 17 ++- vllm/v1/core/kv_cache_coordinator.py | 62 +++++---- vllm/v1/core/kv_cache_manager.py | 61 ++++++--- vllm/v1/core/sched/scheduler.py | 5 +- vllm/v1/core/single_type_kv_cache_manager.py | 123 ++++++++++++++---- 8 files changed, 280 insertions(+), 80 deletions(-) create mode 100644 test_connector_w_hybrid_kv_allocator.py diff --git a/.gitignore b/.gitignore index 7cda86478664..04e9d90e2ff3 100644 --- a/.gitignore +++ b/.gitignore @@ -227,3 +227,6 @@ ep_kernels_workspace/ # Allow tracked library source folders under submodules (e.g., benchmarks/lib) !vllm/benchmarks/lib/ + +csrc/* +vllm/third_party/triton_kernels/* diff --git a/test_connector_w_hybrid_kv_allocator.py b/test_connector_w_hybrid_kv_allocator.py new file mode 100644 index 000000000000..7104c39765b6 --- /dev/null +++ b/test_connector_w_hybrid_kv_allocator.py @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import os + +# Set token chunk size to 256 +os.environ["LMCACHE_CHUNK_SIZE"] = "256" 
+# Enable CPU memory backend +os.environ["LMCACHE_LOCAL_CPU"] = "True" +# Set CPU memory limit to 5GB +os.environ["LMCACHE_MAX_LOCAL_CPU_SIZE"] = "20.0" +os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" +os.environ["LMCACHE_USE_LAYERWISE"] = "True" + + +from vllm import LLM, SamplingParams +from vllm.config import KVTransferConfig + +# Configure KV cache transfer to use LMCache +ktc = KVTransferConfig( + kv_connector="LMCacheConnectorV1", + kv_role="kv_both", +) + +# Initialize LLM with LMCache configuration +# Adjust gpu_memory_utilization based on your GPU memory +llm = LLM( + model="google/gemma-3-4b-it", + kv_transfer_config=ktc, + max_model_len=75000, + gpu_memory_utilization=0.28, + # gpu_memory_utilization=0.4, + # gpu_memory_utilization=0.8, + max_num_seqs=16, + enforce_eager=True, +) + +# Define sampling parameters +sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10) + +# Run inference +outputs = llm.generate("hi" * 70000 + "\nhow are you?", sampling_params) +generated_text = outputs[0].outputs[0].text +print(f"Generated text: {generated_text!r}") + +# This requires loading KV cache and will success +outputs = llm.generate("hi" * 10000 + "\nTell me a story.", sampling_params) +generated_text = outputs[0].outputs[0].text +print(f"Generated text: {generated_text!r}") + +# flush out prefix cache in GPU +outputs = llm.generate("1" + "hi" * 70000 + "\nhow are you?", sampling_params) +generated_text = outputs[0].outputs[0].text +print(f"Generated text: {generated_text!r}") + +print("YIFAN: finish request 2") + +# This requires loading KV cache +# but this request cannot be executed as vLLM cannot allocate for long prefix +# stored by LMCache +outputs = llm.generate("hi" * 70000 + "\nTell me a story.", sampling_params) +generated_text = outputs[0].outputs[0].text +print(f"Generated text: {generated_text!r}") + +print("YIFAN: finished") diff --git a/vllm/config/vllm.py b/vllm/config/vllm.py index b5f8f916de43..72a28cec53b3 100644 --- 
a/vllm/config/vllm.py +++ b/vllm/config/vllm.py @@ -896,18 +896,19 @@ def has_blocked_weights(): self.scheduler_config.disable_hybrid_kv_cache_manager = True if self.kv_transfer_config is not None: # NOTE(Kuntai): turn HMA off for connector for now. - # TODO(Kuntai): have a more elegent solution to check and - # turn off HMA for connector that does not support HMA. - logger.warning( - "Turning off hybrid kv cache manager because " - "`--kv-transfer-config` is set. This will reduce the " - "performance of vLLM on LLMs with sliding window attention " - "or Mamba attention. If you are a developer of kv connector" - ", please consider supporting hybrid kv cache manager for " - "your connector by making sure your connector is a subclass" - " of `SupportsHMA` defined in kv_connector/v1/base.py." - ) - self.scheduler_config.disable_hybrid_kv_cache_manager = True + # # TODO(Kuntai): have a more elegent solution to check and + # # turn off HMA for connector that does not support HMA. + # logger.warning( + # "Turning off hybrid kv cache manager because " + # "`--kv-transfer-config` is set. This will reduce the " + # "performance of vLLM on LLMs with sliding window attention " + # "or Mamba attention. If you are a developer of kv connector" + # ", please consider supporting hybrid kv cache manager for " + # "your connector by making sure your connector is a subclass" + # " of `SupportsHMA` defined in kv_connector/v1/base.py." + # ) + # self.scheduler_config.disable_hybrid_kv_cache_manager = True + pass if self.kv_events_config is not None: # Hybrid KV cache manager is not compatible with KV events. 
self.scheduler_config.disable_hybrid_kv_cache_manager = True diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py index 17d468fe6c30..458dce2abeb4 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py @@ -17,6 +17,7 @@ KVConnectorBase_V1, KVConnectorMetadata, KVConnectorRole, + SupportsHMA, ) from vllm.logger import init_logger from vllm.v1.core.sched.output import SchedulerOutput @@ -69,13 +70,14 @@ def __repr__(self) -> str: return f"" -class LMCacheConnectorV1(KVConnectorBase_V1): +class LMCacheConnectorV1(KVConnectorBase_V1, SupportsHMA): def __init__( self, vllm_config: "VllmConfig", role: KVConnectorRole, kv_cache_config: "KVCacheConfig", ): + vllm_config.kv_cache_config = kv_cache_config super().__init__( vllm_config=vllm_config, role=role, kv_cache_config=kv_cache_config ) @@ -310,8 +312,21 @@ def request_finished( Optional KVTransferParams to be included in the request outputs returned by the engine. """ + raise ValueError("YIFAN: should not be called") return self._lmcache_engine.request_finished(request, block_ids) + def request_finished_all_groups( + self, + request: "Request", + block_ids: tuple[list[int], ...], + ) -> tuple[bool, dict[str, Any] | None]: + return self._lmcache_engine.request_finished(request, block_ids) + # print( + # f"YIFAN: request_finished_all_groups called with request {request.request_id} and blocks {block_ids}" + # ) + # raise NotImplementedError("YIFAN: request_finished_all_groups not implemented") + # return False, None + def take_events(self) -> Iterable["KVCacheEvent"]: """ Take the KV cache events from the connector. 
diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 32f3c317ffab..878edb6e9b4d 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -70,14 +70,14 @@ def __init__( for i, kv_cache_group in enumerate(self.kv_cache_config.kv_cache_groups) ) - def get_num_blocks_to_allocate( + def get_num_blocks_to_allocate_per_group( self, request_id: str, num_tokens: int, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], num_encoder_tokens: int, total_computed_tokens: int, - ) -> int: + ) -> tuple[list[int], list[int]]: """ Get the number of blocks needed to be allocated for the request. @@ -94,22 +94,40 @@ def get_num_blocks_to_allocate( Returns: The number of blocks. """ - num_blocks_to_allocate = 0 + num_new_blocks_to_allocate_per_group = [] + num_evictable_blocks_to_allocate_per_group = [] for i, manager in enumerate(self.single_type_managers): if isinstance(manager, CrossAttentionManager): # For cross-attention, we issue a single static allocation # of blocks based on the number of encoder input tokens. 
- num_blocks_to_allocate += manager.get_num_blocks_to_allocate( + ( + num_new_blocks_to_allocate, + num_evictable_blocks_to_allocate, + ) = manager.get_num_blocks_to_allocate( request_id, num_encoder_tokens, [], 0 ) else: - num_blocks_to_allocate += manager.get_num_blocks_to_allocate( - request_id, - num_tokens, - new_computed_blocks[i], - total_computed_tokens, + num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( + manager.get_num_blocks_to_allocate( + request_id, + num_tokens, + new_computed_blocks[i], + total_computed_tokens, + ) ) - return num_blocks_to_allocate + num_new_blocks_to_allocate_per_group.append(num_new_blocks_to_allocate) + num_evictable_blocks_to_allocate_per_group.append( + num_evictable_blocks_to_allocate + ) + print( + f"YIFAN: get_num_blocks_to_allocate for group {i}, " + f"num_new_blocks_to_allocate: {num_new_blocks_to_allocate_per_group[-1]}" + f", num_evictable_blocks_to_allocate: {num_evictable_blocks_to_allocate_per_group[-1]}" + ) + return ( + num_new_blocks_to_allocate_per_group, + num_evictable_blocks_to_allocate_per_group, + ) def save_new_computed_blocks( self, request_id: str, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...] @@ -126,14 +144,21 @@ def save_new_computed_blocks( manager.save_new_computed_blocks(request_id, new_computed_blocks[i]) def allocate_new_blocks( - self, request_id: str, num_tokens: int, num_encoder_tokens: int = 0 + self, + request_id: str, + num_blocks_to_allocate_per_group: list[int], + num_tokens: int, + num_encoder_tokens: int = 0, ) -> tuple[list[KVCacheBlock], ...]: """ Allocate new blocks for the request to give it at least `num_tokens` - token slots. + token slots. If `num_blocks_to_allocate` is smaller than the number of + blocks needed (in the case of sliding window attention), the leading + blocks will be padded with null blocks. Args: request_id: The request ID. + num_blocks_to_allocate: The number of blocks to allocate. 
num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). num_encoder_tokens: The number of encoder tokens for allocating @@ -145,23 +170,14 @@ def allocate_new_blocks( return tuple( manager.allocate_new_blocks( request_id, + num_blocks_to_allocate_per_group[i], num_encoder_tokens if isinstance(manager, CrossAttentionManager) else num_tokens, ) - for manager in self.single_type_managers + for i, manager in enumerate(self.single_type_managers) ) - def allocate_new_blocks_for_connector( - self, request_id: str, total_computed_tokens: int - ) -> None: - """ - Allocate new blocks for the request to give it at least - `total_computed_tokens` token slots. - """ - for manager in self.single_type_managers: - manager.allocate_new_blocks_for_connector(request_id, total_computed_tokens) - def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: """ Cache the blocks for the request. diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 8d902d7e2daa..e4d99634dd90 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -222,7 +222,7 @@ def allocate_slots( num_new_computed_tokens: The number of new computed tokens just hitting the prefix caching, excluding external tokens. new_computed_blocks: The cached blocks for the above new computed - tokens. + tokens, groups as a tuple by kv cache groups. num_external_computed_tokens: The number of tokens that their KV caches are not cached by vLLM but cached by the connector. num_lookahead_tokens: The number of speculative tokens to allocate. @@ -290,8 +290,15 @@ def allocate_slots( Returns: A list of new allocated blocks. 
""" - if num_new_tokens == 0: - raise ValueError("num_new_tokens must be greater than 0") + if ( + num_new_tokens == 0 + and num_lookahead_tokens == 0 + and num_external_computed_tokens == 0 + ): + raise ValueError( + "At least one of num_new_tokens, num_lookahead_tokens, or " + "num_external_computed_tokens must be greater than 0" + ) if new_computed_blocks is not None: new_computed_block_list = new_computed_blocks.blocks @@ -310,21 +317,34 @@ def allocate_slots( # The number of computed tokens is the number of computed tokens plus # the new prefix caching hits - num_computed_tokens = request.num_computed_tokens + num_new_computed_tokens + num_local_computed_tokens = ( + request.num_computed_tokens + num_new_computed_tokens + ) num_tokens_need_slot = min( - num_computed_tokens + num_local_computed_tokens + + num_external_computed_tokens + num_new_tokens - + num_lookahead_tokens - + num_external_computed_tokens, + + num_lookahead_tokens, self.max_model_len, ) - num_blocks_to_allocate = self.coordinator.get_num_blocks_to_allocate( + ( + num_new_blocks_to_allocate_per_group, + num_evictable_blocks_to_allocate_per_group, + ) = self.coordinator.get_num_blocks_to_allocate_per_group( request_id=request.request_id, num_tokens=num_tokens_need_slot, new_computed_blocks=new_computed_block_list, num_encoder_tokens=num_encoder_tokens, - total_computed_tokens=num_computed_tokens + num_external_computed_tokens, + total_computed_tokens=num_local_computed_tokens + + num_external_computed_tokens, + ) + num_blocks_to_allocate = sum(num_new_blocks_to_allocate_per_group) + sum( + num_evictable_blocks_to_allocate_per_group + ) + + print( + f"YIFAN: request {request.request_id} needs total {num_tokens_need_slot} tokens, num_local_computed_tokens: {num_local_computed_tokens}, num_external_computed_tokens: {num_external_computed_tokens}, num_new_tokens: {num_new_tokens}, num_lookahead_tokens: {num_lookahead_tokens}" ) if num_blocks_to_allocate > self.block_pool.get_num_free_blocks(): @@ 
-346,14 +366,11 @@ def allocate_slots( request.request_id, new_computed_block_list ) - if num_external_computed_tokens > 0: - self.coordinator.allocate_new_blocks_for_connector( - request.request_id, num_computed_tokens + num_external_computed_tokens - ) - # TODO: merge the new blocks for connector with new_blocks below - new_blocks = self.coordinator.allocate_new_blocks( - request.request_id, num_tokens_need_slot, num_encoder_tokens + request.request_id, + num_new_blocks_to_allocate_per_group, + num_tokens_need_slot, + num_encoder_tokens, ) # P/D: delay caching blocks if we have to recv from @@ -361,12 +378,14 @@ def allocate_slots( if not self.enable_caching or delay_cache_blocks: return self.create_kv_cache_blocks(new_blocks) - # NOTE(woosuk): We want to commit (cache) up to num_computed_tokens + - # num_new_tokens, but must exclude "non-committable" tokens (e.g., - # draft tokens that could be rejected). Therefore, we cap the number - # at `request.num_tokens`, ensuring only "finalized" tokens are cached. + # NOTE(woosuk): We want to commit (cache) up to num_local_computed_tokens + # + num_external_computed_tokens + num_new_tokens, but must exclude + # "non-committable" tokens (e.g., draft tokens that could be rejected). + # Therefore, we cap the number at `request.num_tokens`, ensuring only + # "finalized" tokens are cached. 
num_tokens_to_cache = min( - num_computed_tokens + num_new_tokens, request.num_tokens + num_local_computed_tokens + num_external_computed_tokens + num_new_tokens, + request.num_tokens, ) self.coordinator.cache_blocks(request, num_tokens_to_cache) diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index b6daa7fba309..6dba59671854 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -590,6 +590,9 @@ def schedule(self) -> SchedulerOutput: delay_cache_blocks=load_kv_async, num_encoder_tokens=num_encoder_tokens, ) + print(f"YIFAN: schedule request {request.request_id} new_blocks") + for i, group in enumerate(new_blocks.blocks): + print(f"group {i} len(group): {len(group)}") if new_blocks is None: # The request cannot be scheduled. @@ -1561,7 +1564,7 @@ def _connector_finished( # Hybrid memory allocator should be already turned off for this # code path, but let's double-check here. assert len(self.kv_cache_config.kv_cache_groups) == 1 - return self.connector.request_finished(request, block_ids) + return self.connector.request_finished(request, block_ids[0]) return self.connector.request_finished_all_groups(request, block_ids) diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 4aeb17a156bb..1b35a6978027 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -5,6 +5,7 @@ from collections import defaultdict from collections.abc import Sequence +from vllm.logger import init_logger from vllm.utils.math_utils import cdiv from vllm.v1.core.block_pool import BlockPool from vllm.v1.core.kv_cache_utils import BlockHashList, KVCacheBlock @@ -19,6 +20,8 @@ ) from vllm.v1.request import Request +logger = init_logger(__name__) + class SingleTypeKVCacheManager(ABC): """ @@ -68,7 +71,8 @@ def get_num_blocks_to_allocate( request_id: str, num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], - ) -> int: + 
total_computed_tokens: int, + ) -> tuple[int, int]: """ Get the number of blocks needed to be allocated for the request. @@ -78,25 +82,79 @@ def get_num_blocks_to_allocate( tokens that are already allocated). new_computed_blocks: The new computed blocks just hitting the prefix caching. + total_computed_tokens: Include both local and external computed + tokens. Returns: The number of blocks. """ num_required_blocks = cdiv(num_tokens, self.block_size) - num_new_blocks = ( - num_required_blocks - - len(new_computed_blocks) - - len(self.req_to_blocks[request_id]) + + # How many *tokens* are outside the attention window for this manager. + # For attention types that do not skip tokens (e.g. full attention), + # this will always be 0. + num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) + + # Fast-path: nothing is skipped. This should match the original + # behavior before total_computed_tokens was introduced so that + # existing tests (and non-sliding-window attention types) behave + # identically. + if num_skipped_tokens <= 0: + num_new_blocks = ( + num_required_blocks + - len(new_computed_blocks) + - len(self.req_to_blocks[request_id]) + ) + num_evictable_computed_blocks = sum( + blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks + ) + # Scheduler relies on evictable blocks being counted in the free + # capacity check, but allocate_new_blocks will clamp to actual new + # blocks to avoid double allocation. + print( + f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {num_evictable_computed_blocks} evictable computed blocks" + ) + return num_new_blocks, num_evictable_computed_blocks + + # General case: some prefix tokens are skipped by the attention window. 
+ num_skipped_blocks = num_skipped_tokens // self.block_size + num_local_computed_blocks = len(new_computed_blocks) + len( + self.req_to_blocks[request_id] ) - # If a computed block of a request is an eviction candidate (in the - # free queue and ref_cnt == 0), it will be changed from a free block - # to a computed block when the request is allocated, so we also count - # it as needed to be allocated. - num_evictable_computed_blocks = sum( - blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks + + if num_skipped_blocks >= num_local_computed_blocks: + # All local-computed blocks (both existing and newly computed) are + # outside the current window. In this case we only need blocks for + # the non-skipped suffix. + num_new_blocks = max(num_required_blocks - num_skipped_blocks, 0) + # All new computed blocks are skipped. This happens when the entire + # sliding window hits external KV cache via a KV connector. + num_evictable_computed_blocks = 0 + else: + # Some local-computed blocks remain inside the window. + num_new_blocks = max(num_required_blocks - num_local_computed_blocks, 0) + + # Among the new_computed_blocks, the first + # `num_skipped_new_computed_blocks` correspond to skipped tokens and + # therefore do not need to be "touched" / re-allocated. + num_skipped_new_computed_blocks = max( + 0, num_skipped_blocks - len(self.req_to_blocks[request_id]) + ) + + # If a computed block of a request is an eviction candidate (in the + # free queue and ref_cnt == 0), it will be changed from a free block + # to a computed block when the request is allocated, so we also count + # it in the free-capacity check. 
+ num_evictable_computed_blocks = sum( + blk.ref_cnt == 0 and not blk.is_null + for blk in new_computed_blocks[num_skipped_new_computed_blocks:] + ) + + print( + f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {num_evictable_computed_blocks} evictable computed blocks" ) - return num_new_blocks + num_evictable_computed_blocks + return num_new_blocks, num_evictable_computed_blocks def save_new_computed_blocks( self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock] @@ -114,17 +172,21 @@ def save_new_computed_blocks( req_blocks = self.req_to_blocks[request_id] assert len(req_blocks) == 0 req_blocks.extend(new_computed_blocks) - self.num_cached_block[request_id] = len(new_computed_blocks) + self.num_cached_block[request_id] = len( + new_computed_blocks + ) ## YIFAN: why set to len(new_computed_blocks) rather than len(req_blocks)? else: # A running request. Should not have new computed blocks. assert len(new_computed_blocks) == 0 def allocate_new_blocks( - self, request_id: str, num_tokens: int + self, request_id: str, num_blocks_to_allocate: int, num_tokens: int ) -> list[KVCacheBlock]: """ Allocate new blocks for the request to give it at least `num_tokens` - token slots. + token slots. If `num_blocks_to_allocate` is smaller than the number of + blocks needed (in the case of sliding window attention), the leading + blocks will be padded with null blocks. Args: request_id: The request ID. @@ -137,10 +199,18 @@ def allocate_new_blocks( req_blocks = self.req_to_blocks[request_id] num_required_blocks = cdiv(num_tokens, self.block_size) num_new_blocks = num_required_blocks - len(req_blocks) + # Only allocate real new blocks; cached hits should already be present + # in req_blocks via save_new_computed_blocks. 
+ num_blocks_to_padding = num_new_blocks - num_blocks_to_allocate + assert num_blocks_to_padding >= 0, ( + f"Invalid padding: need {num_new_blocks}, allocate {num_blocks_to_allocate}" + ) + if num_new_blocks <= 0: return [] else: - new_blocks = self.block_pool.get_new_blocks(num_new_blocks) + allocated_blocks = self.block_pool.get_new_blocks(num_blocks_to_allocate) + new_blocks = [self._null_block] * num_blocks_to_padding + allocated_blocks req_blocks.extend(new_blocks) return new_blocks @@ -711,9 +781,16 @@ def get_num_blocks_to_allocate( request_id: str, num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], - ) -> int: - # Allocate extra `num_speculative_blocks` blocks for - # speculative decoding (MTP/EAGLE) with linear attention. + total_computed_tokens: int, + ) -> tuple[int, int]: + # TODO(Kuntai): handle the case where `total_computed_tokens > 0` + if total_computed_tokens > 0: + logger.warning_once( + "Currently Mamba GPU memory allocator may cause" + " memory waste when total_computed_tokens" + " is greater than 0." + ) + assert isinstance(self.kv_cache_spec, MambaSpec) if self.kv_cache_spec.num_speculative_blocks > 0: num_tokens += ( @@ -721,11 +798,11 @@ def get_num_blocks_to_allocate( * self.kv_cache_spec.num_speculative_blocks ) return super().get_num_blocks_to_allocate( - request_id, num_tokens, new_computed_blocks + request_id, num_tokens, new_computed_blocks, total_computed_tokens ) def allocate_new_blocks( - self, request_id: str, num_tokens: int + self, request_id: str, num_blocks_to_allocate: int, num_tokens: int ) -> list[KVCacheBlock]: # Allocate extra `num_speculative_blocks` blocks for # speculative decoding (MTP/EAGLE) with linear attention. 
@@ -735,7 +812,9 @@ def allocate_new_blocks( self.kv_cache_spec.block_size * self.kv_cache_spec.num_speculative_blocks ) - return super().allocate_new_blocks(request_id, num_tokens) + return super().allocate_new_blocks( + request_id, num_blocks_to_allocate, num_tokens + ) class CrossAttentionManager(SingleTypeKVCacheManager): From ee81aa1f3460613b77c4ec90fbd048c7988c5dab Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Wed, 3 Dec 2025 16:28:51 -0800 Subject: [PATCH 03/73] fix: skip outside sliding window tokens when touch and save cached blocks Signed-off-by: Yifan Qiao --- .../core/test_single_type_kv_cache_manager.py | 34 +++++++ vllm/v1/core/block_pool.py | 17 ++-- vllm/v1/core/kv_cache_coordinator.py | 45 +++++---- vllm/v1/core/kv_cache_manager.py | 18 +++- vllm/v1/core/sched/scheduler.py | 3 - vllm/v1/core/single_type_kv_cache_manager.py | 99 +++++++++++++------ 6 files changed, 157 insertions(+), 59 deletions(-) diff --git a/tests/v1/core/test_single_type_kv_cache_manager.py b/tests/v1/core/test_single_type_kv_cache_manager.py index 9a8af5dcac8e..2347c62a6bd6 100644 --- a/tests/v1/core/test_single_type_kv_cache_manager.py +++ b/tests/v1/core/test_single_type_kv_cache_manager.py @@ -341,6 +341,40 @@ def test_get_num_blocks_to_allocate(): ) +def test_evictable_cached_blocks_not_double_allocated(): + block_size = 2 + sliding_window_spec = SlidingWindowSpec( + block_size=block_size, + num_kv_heads=1, + head_size=1, + dtype=torch.float32, + sliding_window=16, + ) + + block_pool = BlockPool( + num_gpu_blocks=10, enable_caching=True, hash_block_size=block_size + ) + manager = get_sliding_window_manager(sliding_window_spec, block_pool) + + request_id = "req" + evictable_block = block_pool.blocks[1] # ref_cnt == 0, eviction candidate + + num_blocks = manager.get_num_blocks_to_allocate( + request_id=request_id, + num_tokens=4, # requires 2 blocks + new_computed_blocks=[evictable_block], # one cached block hit + total_computed_tokens=0, + ) + # Free capacity check should 
count evictable cached blocks (so return 2), + # but allocation should only allocate the truly new block. + assert num_blocks == 2 + + manager.save_new_computed_blocks(request_id, [evictable_block]) + new_blocks = manager.allocate_new_blocks(request_id, num_blocks, num_tokens=4) + assert len(new_blocks) == 1 + assert len(manager.req_to_blocks[request_id]) == 2 + + def test_chunked_local_attention_get_num_blocks_to_allocate(): block_size = 2 attention_spec = ChunkedLocalAttentionSpec( diff --git a/vllm/v1/core/block_pool.py b/vllm/v1/core/block_pool.py index 41d99c4f721f..51ae216fc2da 100644 --- a/vllm/v1/core/block_pool.py +++ b/vllm/v1/core/block_pool.py @@ -210,7 +210,7 @@ def cache_full_blocks( self, request: Request, blocks: list[KVCacheBlock], - num_cached_blocks: int, + num_cached_or_skipped_blocks: int, num_full_blocks: int, block_size: int, kv_cache_group_id: int, @@ -226,15 +226,15 @@ def cache_full_blocks( Args: request: The request to cache the blocks. blocks: All blocks in the request. - num_cached_blocks: The number of blocks that are already cached. + num_cached_or_skipped_blocks: The number of blocks that are already cached. num_full_blocks: The number of blocks that are full and should be cached after this function. block_size: Number of tokens in each block. kv_cache_group_id: The id of the KV cache group. """ - if num_cached_blocks >= num_full_blocks: + if num_cached_or_skipped_blocks >= num_full_blocks: return - new_full_blocks = blocks[num_cached_blocks:num_full_blocks] + new_full_blocks = blocks[num_cached_or_skipped_blocks:num_full_blocks] assert len(request.block_hashes) >= num_full_blocks if block_size == self.hash_block_size: # Common case. 
@@ -249,7 +249,7 @@ def cache_full_blocks( request.block_hashes, self.hash_block_size, block_size ) - new_block_hashes = block_hashes[num_cached_blocks:] + new_block_hashes = block_hashes[num_cached_or_skipped_blocks:] new_hashes: list[ExternalBlockHash] | None = ( [] if self.enable_kv_cache_events else None ) @@ -271,10 +271,10 @@ def cache_full_blocks( new_hashes.append(maybe_convert_block_hash(block_hash)) if self.enable_kv_cache_events: - if num_cached_blocks == 0: + if num_cached_or_skipped_blocks == 0: parent_block_hash: ExternalBlockHash | None = None else: - parent_block = blocks[num_cached_blocks - 1] + parent_block = blocks[num_cached_or_skipped_blocks - 1] assert parent_block.block_hash is not None parent_block_hash = maybe_convert_block_hash( get_block_hash(parent_block.block_hash) @@ -285,7 +285,8 @@ def cache_full_blocks( block_hashes=new_hashes, parent_block_hash=parent_block_hash, token_ids=request.all_token_ids[ - num_cached_blocks * block_size : num_full_blocks * block_size + num_cached_or_skipped_blocks * block_size : num_full_blocks + * block_size ], block_size=block_size, lora_id=request.lora_request.adapter_id diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 878edb6e9b4d..5f14494d7840 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -77,7 +77,7 @@ def get_num_blocks_to_allocate_per_group( new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], num_encoder_tokens: int, total_computed_tokens: int, - ) -> tuple[list[int], list[int]]: + ) -> tuple[list[int], list[int], tuple[Sequence[KVCacheBlock], ...]]: """ Get the number of blocks needed to be allocated for the request. @@ -92,33 +92,36 @@ def get_num_blocks_to_allocate_per_group( total_computed_tokens: Include both local and external tokens. Returns: - The number of blocks. + 1. The number of new blocks to allocate for each kv cache group. + 2. 
The number of evictable blocks to touch for each kv cache group. + 3. The blocks to touch for each kv cache group. """ num_new_blocks_to_allocate_per_group = [] num_evictable_blocks_to_allocate_per_group = [] + evictable_blocks_to_touch_per_group: list[list[KVCacheBlock]] = [] for i, manager in enumerate(self.single_type_managers): if isinstance(manager, CrossAttentionManager): # For cross-attention, we issue a single static allocation # of blocks based on the number of encoder input tokens. ( num_new_blocks_to_allocate, - num_evictable_blocks_to_allocate, + blocks_to_touch, ) = manager.get_num_blocks_to_allocate( request_id, num_encoder_tokens, [], 0 ) else: - num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( - manager.get_num_blocks_to_allocate( - request_id, - num_tokens, - new_computed_blocks[i], - total_computed_tokens, - ) + ( + num_new_blocks_to_allocate, + blocks_to_touch, + ) = manager.get_num_blocks_to_allocate( + request_id, + num_tokens, + new_computed_blocks[i], + total_computed_tokens, ) num_new_blocks_to_allocate_per_group.append(num_new_blocks_to_allocate) - num_evictable_blocks_to_allocate_per_group.append( - num_evictable_blocks_to_allocate - ) + num_evictable_blocks_to_allocate_per_group.append(len(blocks_to_touch)) + evictable_blocks_to_touch_per_group.append(blocks_to_touch) print( f"YIFAN: get_num_blocks_to_allocate for group {i}, " f"num_new_blocks_to_allocate: {num_new_blocks_to_allocate_per_group[-1]}" @@ -127,10 +130,14 @@ def get_num_blocks_to_allocate_per_group( return ( num_new_blocks_to_allocate_per_group, num_evictable_blocks_to_allocate_per_group, + tuple(evictable_blocks_to_touch_per_group), ) def save_new_computed_blocks( - self, request_id: str, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...] + self, + request_id: str, + new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], + total_computed_tokens: int, ) -> None: """ Add the new computed blocks to the request. 
@@ -141,7 +148,9 @@ def save_new_computed_blocks( prefix cache. """ for i, manager in enumerate(self.single_type_managers): - manager.save_new_computed_blocks(request_id, new_computed_blocks[i]) + manager.save_new_computed_blocks( + request_id, new_computed_blocks[i], total_computed_tokens + ) def allocate_new_blocks( self, @@ -178,7 +187,9 @@ def allocate_new_blocks( for i, manager in enumerate(self.single_type_managers) ) - def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: + def cache_blocks( + self, request: Request, num_computed_tokens: int, total_computed_tokens: int + ) -> None: """ Cache the blocks for the request. @@ -189,7 +200,7 @@ def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: (including tokens that are already cached). """ for manager in self.single_type_managers: - manager.cache_blocks(request, num_computed_tokens) + manager.cache_blocks(request, num_computed_tokens, total_computed_tokens) def free(self, request_id: str) -> None: """ diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index e4d99634dd90..c190abd28bb4 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -331,6 +331,7 @@ def allocate_slots( ( num_new_blocks_to_allocate_per_group, num_evictable_blocks_to_allocate_per_group, + blocks_to_touch_per_group, ) = self.coordinator.get_num_blocks_to_allocate_per_group( request_id=request.request_id, num_tokens=num_tokens_need_slot, @@ -353,7 +354,7 @@ def allocate_slots( # Touch the computed blocks to make sure they won't be evicted. 
if self.enable_caching: - self.block_pool.touch(new_computed_block_list) + self.block_pool.touch(blocks_to_touch_per_group) else: assert not any(new_computed_block_list), ( "Computed blocks should be empty when prefix caching is disabled" @@ -362,8 +363,14 @@ def allocate_slots( if new_computed_block_list is not self.empty_kv_cache_blocks.blocks: # Append the new computed blocks to the request blocks until now to # avoid the case where the new blocks cannot be allocated. + print( + f"YIFAN: saving new computed blocks for request {request.request_id}, lens: {[len(b) for b in new_computed_block_list]}, total_computed_tokens: {num_local_computed_tokens + num_external_computed_tokens}" + ) self.coordinator.save_new_computed_blocks( - request.request_id, new_computed_block_list + request_id=request.request_id, + new_computed_blocks=new_computed_block_list, + total_computed_tokens=num_local_computed_tokens + + num_external_computed_tokens, ) new_blocks = self.coordinator.allocate_new_blocks( @@ -387,7 +394,12 @@ def allocate_slots( num_local_computed_tokens + num_external_computed_tokens + num_new_tokens, request.num_tokens, ) - self.coordinator.cache_blocks(request, num_tokens_to_cache) + self.coordinator.cache_blocks( + request, + num_tokens_to_cache, + total_computed_tokens=num_local_computed_tokens + + num_external_computed_tokens, + ) return self.create_kv_cache_blocks(new_blocks) diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 6dba59671854..88221dcfeae8 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -590,9 +590,6 @@ def schedule(self) -> SchedulerOutput: delay_cache_blocks=load_kv_async, num_encoder_tokens=num_encoder_tokens, ) - print(f"YIFAN: schedule request {request.request_id} new_blocks") - for i, group in enumerate(new_blocks.blocks): - print(f"group {i} len(group): {len(group)}") if new_blocks is None: # The request cannot be scheduled. 
diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 1b35a6978027..5737d4b31d55 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -72,7 +72,7 @@ def get_num_blocks_to_allocate( num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], total_computed_tokens: int, - ) -> tuple[int, int]: + ) -> tuple[int, Sequence[KVCacheBlock]]: """ Get the number of blocks needed to be allocated for the request. @@ -86,7 +86,8 @@ def get_num_blocks_to_allocate( tokens. Returns: - The number of blocks. + 1. The number of blocks. + 2. The list of evictable blocks (i.e., ref_cnt == 0) that should be touched. """ num_required_blocks = cdiv(num_tokens, self.block_size) @@ -106,16 +107,18 @@ def get_num_blocks_to_allocate( - len(new_computed_blocks) - len(self.req_to_blocks[request_id]) ) - num_evictable_computed_blocks = sum( - blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks - ) + evictable_computed_blocks = [ + blk + for blk in new_computed_blocks + if blk.ref_cnt == 0 and not blk.is_null + ] # Scheduler relies on evictable blocks being counted in the free # capacity check, but allocate_new_blocks will clamp to actual new # blocks to avoid double allocation. print( - f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {num_evictable_computed_blocks} evictable computed blocks" + f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {len(evictable_computed_blocks)} evictable computed blocks" ) - return num_new_blocks, num_evictable_computed_blocks + return num_new_blocks, evictable_computed_blocks # General case: some prefix tokens are skipped by the attention window. num_skipped_blocks = num_skipped_tokens // self.block_size @@ -130,7 +133,7 @@ def get_num_blocks_to_allocate( num_new_blocks = max(num_required_blocks - num_skipped_blocks, 0) # All new computed blocks are skipped. 
This happens when the entire # sliding window hits external KV cache via a KV connector. - num_evictable_computed_blocks = 0 + evictable_computed_blocks = [] else: # Some local-computed blocks remain inside the window. num_new_blocks = max(num_required_blocks - num_local_computed_blocks, 0) @@ -146,18 +149,22 @@ def get_num_blocks_to_allocate( # free queue and ref_cnt == 0), it will be changed from a free block # to a computed block when the request is allocated, so we also count # it in the free-capacity check. - num_evictable_computed_blocks = sum( - blk.ref_cnt == 0 and not blk.is_null + evictable_computed_blocks = [ + blk for blk in new_computed_blocks[num_skipped_new_computed_blocks:] - ) + if blk.ref_cnt == 0 and not blk.is_null + ] print( - f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {num_evictable_computed_blocks} evictable computed blocks" + f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {len(evictable_computed_blocks)} evictable computed blocks" ) - return num_new_blocks, num_evictable_computed_blocks + return num_new_blocks, evictable_computed_blocks def save_new_computed_blocks( - self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock] + self, + request_id: str, + new_computed_blocks: Sequence[KVCacheBlock], + total_computed_tokens: int, ) -> None: """ Add the new computed blocks to the request. @@ -167,17 +174,45 @@ def save_new_computed_blocks( new_computed_blocks: The new computed blocks just hitting the prefix cache. """ - if request_id not in self.num_cached_block: - # A new request. - req_blocks = self.req_to_blocks[request_id] - assert len(req_blocks) == 0 - req_blocks.extend(new_computed_blocks) - self.num_cached_block[request_id] = len( - new_computed_blocks - ) ## YIFAN: why set to len(new_computed_blocks) rather than len(req_blocks)? - else: - # A running request. Should not have new computed blocks. 
- assert len(new_computed_blocks) == 0 + + # How many *tokens* are outside the attention window for this manager. + # For attention types that do not skip tokens (e.g. full attention), + # this will always be 0. + num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) + + if num_skipped_tokens <= 0: + if request_id not in self.num_cached_block: + # A new request. + req_blocks = self.req_to_blocks[request_id] + assert len(req_blocks) == 0 + req_blocks.extend(new_computed_blocks) + self.num_cached_block[request_id] = len( + new_computed_blocks + ) ## YIFAN: why set to len(new_computed_blocks) rather than len(req_blocks)? + else: + # A running request. Should not have new computed blocks. + assert len(new_computed_blocks) == 0 + return + + num_skipped_blocks = num_skipped_tokens // self.block_size + req_blocks = self.req_to_blocks[request_id] + num_local_computed_blocks = len(new_computed_blocks) + len(req_blocks) + num_blocks_to_save = max(num_local_computed_blocks - num_skipped_blocks, 0) + num_null_blocks_to_pad = min( + max(num_skipped_blocks - len(req_blocks), 0), num_local_computed_blocks + ) + assert num_blocks_to_save + num_skipped_blocks == num_local_computed_blocks, ( + "Inconsistent number of blocks to save" + ) + # Add null blocks for the skipped blocks. + if num_null_blocks_to_pad > 0: + req_blocks.extend([self._null_block] * num_null_blocks_to_pad) + # Add the remaining computed blocks. + req_blocks.extend(new_computed_blocks[num_null_blocks_to_pad:]) + # All cached hits (including skipped nulls) are already cached; mark + # them so cache_blocks() will not try to re-cache blocks that already + # have a block_hash set. 
+ self.num_cached_block[request_id] = len(req_blocks) def allocate_new_blocks( self, request_id: str, num_blocks_to_allocate: int, num_tokens: int @@ -214,7 +249,9 @@ def allocate_new_blocks( req_blocks.extend(new_blocks) return new_blocks - def cache_blocks(self, request: Request, num_tokens: int) -> None: + def cache_blocks( + self, request: Request, num_tokens: int, prev_computed_tokens: int + ) -> None: """ Cache the blocks for the request. @@ -224,6 +261,9 @@ def cache_blocks(self, request: Request, num_tokens: int) -> None: (including tokens that are already cached). """ num_cached_blocks = self.num_cached_block.get(request.request_id, 0) + num_skipped_blocks = ( + self.get_num_skipped_tokens(prev_computed_tokens) // self.block_size + ) num_full_blocks = num_tokens // self.block_size if num_cached_blocks >= num_full_blocks: @@ -232,7 +272,7 @@ def cache_blocks(self, request: Request, num_tokens: int) -> None: self.block_pool.cache_full_blocks( request=request, blocks=self.req_to_blocks[request.request_id], - num_cached_blocks=num_cached_blocks, + num_cached_or_skipped_blocks=max(num_cached_blocks, num_skipped_blocks), num_full_blocks=num_full_blocks, block_size=self.block_size, kv_cache_group_id=self.kv_cache_group_id, @@ -821,7 +861,10 @@ class CrossAttentionManager(SingleTypeKVCacheManager): """Manager for cross-attention KV cache in encoder-decoder models.""" def save_new_computed_blocks( - self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock] + self, + request_id: str, + new_computed_blocks: Sequence[KVCacheBlock], + local_computed_tokens: int, ) -> None: # We do not cache blocks for cross-attention to be shared between # requests, so `new_computed_blocks` should always be empty. 
From 7acbe0824dd6cc58257b8e7d5728568b07f25fc3 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Wed, 3 Dec 2025 16:48:04 -0800 Subject: [PATCH 04/73] fix: make interfaces consistent and remove debug prints Signed-off-by: Yifan Qiao --- test_connector_w_hybrid_kv_allocator.py | 11 +++-- vllm/config/vllm.py | 26 +++++----- .../kv_connector/v1/lmcache_connector.py | 11 ++--- vllm/v1/core/kv_cache_coordinator.py | 33 ++++++------- vllm/v1/core/kv_cache_manager.py | 49 +++++++++---------- vllm/v1/core/sched/async_scheduler.py | 4 +- vllm/v1/core/sched/scheduler.py | 16 ++++-- vllm/v1/core/single_type_kv_cache_manager.py | 17 +++---- 8 files changed, 80 insertions(+), 87 deletions(-) diff --git a/test_connector_w_hybrid_kv_allocator.py b/test_connector_w_hybrid_kv_allocator.py index 7104c39765b6..5e220e14430d 100644 --- a/test_connector_w_hybrid_kv_allocator.py +++ b/test_connector_w_hybrid_kv_allocator.py @@ -23,6 +23,7 @@ # Initialize LLM with LMCache configuration # Adjust gpu_memory_utilization based on your GPU memory +# Parameters below are for 80GB GPUs llm = LLM( model="google/gemma-3-4b-it", kv_transfer_config=ktc, @@ -38,27 +39,29 @@ sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10) # Run inference +print("Generate request 1. This will store long prefix in LMCache.") outputs = llm.generate("hi" * 70000 + "\nhow are you?", sampling_params) generated_text = outputs[0].outputs[0].text print(f"Generated text: {generated_text!r}") -# This requires loading KV cache and will success +# This requires loading KV cache and will succeed +print("Generate request 2. This will load prefix from LMCache and succeed.") outputs = llm.generate("hi" * 10000 + "\nTell me a story.", sampling_params) generated_text = outputs[0].outputs[0].text print(f"Generated text: {generated_text!r}") # flush out prefix cache in GPU +print("Generate request 3. 
This will evict prefix cache in GPU.") outputs = llm.generate("1" + "hi" * 70000 + "\nhow are you?", sampling_params) generated_text = outputs[0].outputs[0].text print(f"Generated text: {generated_text!r}") -print("YIFAN: finish request 2") - # This requires loading KV cache # but this request cannot be executed as vLLM cannot allocate for long prefix # stored by LMCache +print("Generate request 4. This will attempt to load long prefix from LMCache.") outputs = llm.generate("hi" * 70000 + "\nTell me a story.", sampling_params) generated_text = outputs[0].outputs[0].text print(f"Generated text: {generated_text!r}") -print("YIFAN: finished") +print("All requests finished.") diff --git a/vllm/config/vllm.py b/vllm/config/vllm.py index 72a28cec53b3..83e96f1e4d47 100644 --- a/vllm/config/vllm.py +++ b/vllm/config/vllm.py @@ -895,21 +895,17 @@ def has_blocked_weights(): # Hybrid KV cache manager is not supported on non-GPU platforms. self.scheduler_config.disable_hybrid_kv_cache_manager = True if self.kv_transfer_config is not None: - # NOTE(Kuntai): turn HMA off for connector for now. - # # TODO(Kuntai): have a more elegent solution to check and - # # turn off HMA for connector that does not support HMA. - # logger.warning( - # "Turning off hybrid kv cache manager because " - # "`--kv-transfer-config` is set. This will reduce the " - # "performance of vLLM on LLMs with sliding window attention " - # "or Mamba attention. If you are a developer of kv connector" - # ", please consider supporting hybrid kv cache manager for " - # "your connector by making sure your connector is a subclass" - # " of `SupportsHMA` defined in kv_connector/v1/base.py." - # ) - # self.scheduler_config.disable_hybrid_kv_cache_manager = True - pass - if self.kv_events_config is not None: + # NOTE(Yifan): warning when both kv connector and hybrid kv cache + # manager are enabled. + # TODO(Kuntai): have a more elegent solution to check and + # turn off HMA for connector that does not support HMA. 
+ logger.warning( + "Warning: both kv connector and hybrid kv cache manager are " + "enabled. However, not all kv connectors support HMA. Please " + "check if the kv connector you are using supports HMA, or " + "disable HMA by setting `--disable-hybrid-kv-cache-manager`." + ) + if self.kv_events_config is not None: ## YIFAN: what is this? # Hybrid KV cache manager is not compatible with KV events. self.scheduler_config.disable_hybrid_kv_cache_manager = True if ( diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py index 458dce2abeb4..14859b67ef99 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py @@ -312,7 +312,8 @@ def request_finished( Optional KVTransferParams to be included in the request outputs returned by the engine. """ - raise ValueError("YIFAN: should not be called") + # NOTE: LMCache overloads request_finished so `block_ids` here can be + # either list[int] or tuple[list[int], ...]. return self._lmcache_engine.request_finished(request, block_ids) def request_finished_all_groups( @@ -320,12 +321,10 @@ def request_finished_all_groups( request: "Request", block_ids: tuple[list[int], ...], ) -> tuple[bool, dict[str, Any] | None]: + # NOTE: LMCache overloads request_finished so `block_ids` here can be + # either list[int] or tuple[list[int], ...]. This could be changed in + # the future to separate these two methods. 
return self._lmcache_engine.request_finished(request, block_ids) - # print( - # f"YIFAN: request_finished_all_groups called with request {request.request_id} and blocks {block_ids}" - # ) - # raise NotImplementedError("YIFAN: request_finished_all_groups not implemented") - # return False, None def take_events(self) -> Iterable["KVCacheEvent"]: """ diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 5f14494d7840..552874204802 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -70,7 +70,7 @@ def __init__( for i, kv_cache_group in enumerate(self.kv_cache_config.kv_cache_groups) ) - def get_num_blocks_to_allocate_per_group( + def get_num_blocks_to_allocate( self, request_id: str, num_tokens: int, @@ -96,41 +96,36 @@ def get_num_blocks_to_allocate_per_group( 2. The number of evictable blocks to touch for each kv cache group. 3. The blocks to touch for each kv cache group. """ - num_new_blocks_to_allocate_per_group = [] - num_evictable_blocks_to_allocate_per_group = [] - evictable_blocks_to_touch_per_group: list[list[KVCacheBlock]] = [] + num_new_blocks_to_allocate = [] + num_evictable_blocks_to_allocate = [] + evictable_blocks_to_touch: list[Sequence[KVCacheBlock]] = [] for i, manager in enumerate(self.single_type_managers): if isinstance(manager, CrossAttentionManager): # For cross-attention, we issue a single static allocation # of blocks based on the number of encoder input tokens. 
( - num_new_blocks_to_allocate, - blocks_to_touch, + num_new_blocks_to_allocate_single_group, + blocks_to_touch_single_group, ) = manager.get_num_blocks_to_allocate( request_id, num_encoder_tokens, [], 0 ) else: ( - num_new_blocks_to_allocate, - blocks_to_touch, + num_new_blocks_to_allocate_single_group, + blocks_to_touch_single_group, ) = manager.get_num_blocks_to_allocate( request_id, num_tokens, new_computed_blocks[i], total_computed_tokens, ) - num_new_blocks_to_allocate_per_group.append(num_new_blocks_to_allocate) - num_evictable_blocks_to_allocate_per_group.append(len(blocks_to_touch)) - evictable_blocks_to_touch_per_group.append(blocks_to_touch) - print( - f"YIFAN: get_num_blocks_to_allocate for group {i}, " - f"num_new_blocks_to_allocate: {num_new_blocks_to_allocate_per_group[-1]}" - f", num_evictable_blocks_to_allocate: {num_evictable_blocks_to_allocate_per_group[-1]}" - ) + num_new_blocks_to_allocate.append(num_new_blocks_to_allocate_single_group) + num_evictable_blocks_to_allocate.append(len(blocks_to_touch_single_group)) + evictable_blocks_to_touch.append(blocks_to_touch_single_group) return ( - num_new_blocks_to_allocate_per_group, - num_evictable_blocks_to_allocate_per_group, - tuple(evictable_blocks_to_touch_per_group), + num_new_blocks_to_allocate, + num_evictable_blocks_to_allocate, + tuple(evictable_blocks_to_touch), ) def save_new_computed_blocks( diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index c190abd28bb4..60732577e6da 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -207,10 +207,10 @@ def allocate_slots( self, request: Request, num_new_tokens: int, + num_lookahead_tokens: int = 0, num_new_computed_tokens: int = 0, - new_computed_blocks: KVCacheBlocks | None = None, num_external_computed_tokens: int = 0, - num_lookahead_tokens: int = 0, + new_computed_blocks: KVCacheBlocks | None = None, delay_cache_blocks: bool = False, num_encoder_tokens: int = 0, ) -> KVCacheBlocks 
| None: @@ -221,13 +221,13 @@ def allocate_slots( num_new_tokens: The number of tokens to be computed. num_new_computed_tokens: The number of new computed tokens just hitting the prefix caching, excluding external tokens. - new_computed_blocks: The cached blocks for the above new computed - tokens, groups as a tuple by kv cache groups. num_external_computed_tokens: The number of tokens that their KV caches are not cached by vLLM but cached by the connector. num_lookahead_tokens: The number of speculative tokens to allocate. This is used by spec decode proposers with kv-cache such as eagle. + new_computed_blocks: The cached blocks for the above new computed + tokens, groups as a tuple by kv cache groups. delay_cache_blocks: Whether to skip caching the blocks. This is used by P/D when allocating blocks used in a KV transfer which will complete in a future step. @@ -238,7 +238,7 @@ def allocate_slots( Blocks layout: ``` --------------------------------------------------------------------- - | < comp > | < new_comp > | < connector > | < new > | < lookahead > | + | < comp > | < new_comp > | < ext_comp > | < new > | < lookahead > | --------------------------------------------------------------------- | < to be computed > | --------------------------------------------------------------------- @@ -272,7 +272,7 @@ def allocate_slots( comp = request.num_computed_tokens new_comp = num_new_computed_tokens = len(new_computed_blocks) * block_size - connector = num_external_computed_tokens + ext_comp = num_external_computed_tokens, cached by the connector new = num_new_tokens lookahead = num_lookahead_tokens ``` @@ -281,9 +281,9 @@ def allocate_slots( The allocation has three stages: - Free unnecessary blocks in `comp` and check if we have sufficient free blocks (return None if not). - - Handle prefix tokens (`comp + new_comp + connector`): + - Handle prefix tokens (`comp + new_comp + ext_comp`): - Free unnecessary blocks (e.g. 
outside sliding window) - - Allocate new blocks for `connector` tokens inside + - Allocate new blocks for `ext_comp` tokens inside sliding window - Allocate new blocks for tokens to be computed (`new + lookahead`) @@ -329,10 +329,10 @@ def allocate_slots( ) ( - num_new_blocks_to_allocate_per_group, - num_evictable_blocks_to_allocate_per_group, - blocks_to_touch_per_group, - ) = self.coordinator.get_num_blocks_to_allocate_per_group( + num_new_blocks_to_allocate, + num_evictable_blocks_to_allocate, + blocks_to_touch, + ) = self.coordinator.get_num_blocks_to_allocate( request_id=request.request_id, num_tokens=num_tokens_need_slot, new_computed_blocks=new_computed_block_list, @@ -340,21 +340,17 @@ def allocate_slots( total_computed_tokens=num_local_computed_tokens + num_external_computed_tokens, ) - num_blocks_to_allocate = sum(num_new_blocks_to_allocate_per_group) + sum( - num_evictable_blocks_to_allocate_per_group - ) - - print( - f"YIFAN: request {request.request_id} needs total {num_tokens_need_slot} tokens, num_local_computed_tokens: {num_local_computed_tokens}, num_external_computed_tokens: {num_external_computed_tokens}, num_new_tokens: {num_new_tokens}, num_lookahead_tokens: {num_lookahead_tokens}" + tot_num_blocks_to_allocate = sum(num_new_blocks_to_allocate) + sum( + num_evictable_blocks_to_allocate ) - if num_blocks_to_allocate > self.block_pool.get_num_free_blocks(): + if tot_num_blocks_to_allocate > self.block_pool.get_num_free_blocks(): # Cannot allocate new blocks return None # Touch the computed blocks to make sure they won't be evicted. 
if self.enable_caching: - self.block_pool.touch(blocks_to_touch_per_group) + self.block_pool.touch(blocks_to_touch) else: assert not any(new_computed_block_list), ( "Computed blocks should be empty when prefix caching is disabled" @@ -363,9 +359,6 @@ def allocate_slots( if new_computed_block_list is not self.empty_kv_cache_blocks.blocks: # Append the new computed blocks to the request blocks until now to # avoid the case where the new blocks cannot be allocated. - print( - f"YIFAN: saving new computed blocks for request {request.request_id}, lens: {[len(b) for b in new_computed_block_list]}, total_computed_tokens: {num_local_computed_tokens + num_external_computed_tokens}" - ) self.coordinator.save_new_computed_blocks( request_id=request.request_id, new_computed_blocks=new_computed_block_list, @@ -375,7 +368,7 @@ def allocate_slots( new_blocks = self.coordinator.allocate_new_blocks( request.request_id, - num_new_blocks_to_allocate_per_group, + num_new_blocks_to_allocate, num_tokens_need_slot, num_encoder_tokens, ) @@ -487,10 +480,14 @@ def get_block_ids(self, request_id: str) -> tuple[list[int], ...]: """Get the block ids of a request.""" return self.get_blocks(request_id).get_block_ids() - def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: + def cache_blocks( + self, request: Request, num_computed_tokens: int, total_computed_tokens: int + ) -> None: """Cache the blocks for the request, if enabled.""" if self.enable_caching: - self.coordinator.cache_blocks(request, num_computed_tokens) + self.coordinator.cache_blocks( + request, num_computed_tokens, total_computed_tokens + ) def create_kv_cache_blocks( self, blocks: tuple[list[KVCacheBlock], ...] diff --git a/vllm/v1/core/sched/async_scheduler.py b/vllm/v1/core/sched/async_scheduler.py index df61eebb395e..a92d82e3cb02 100644 --- a/vllm/v1/core/sched/async_scheduler.py +++ b/vllm/v1/core/sched/async_scheduler.py @@ -63,6 +63,8 @@ def _update_request_with_output( # Cache the new tokens. 
Preempted requests should be skipped. if status_before_update == RequestStatus.RUNNING: self.kv_cache_manager.cache_blocks( - request, request.num_computed_tokens - request.num_output_placeholders + request, + request.num_computed_tokens - request.num_output_placeholders, + total_computed_tokens=request.num_computed_tokens, ) return new_token_ids, stopped diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 88221dcfeae8..28975b0a745a 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -583,10 +583,10 @@ def schedule(self) -> SchedulerOutput: new_blocks = self.kv_cache_manager.allocate_slots( request, num_new_tokens, - num_new_local_computed_tokens, - new_computed_blocks, - num_external_computed_tokens, + num_new_computed_tokens=num_new_local_computed_tokens, + num_external_computed_tokens=num_external_computed_tokens, num_lookahead_tokens=effective_lookahead_tokens, + new_computed_blocks=new_computed_blocks, delay_cache_blocks=load_kv_async, num_encoder_tokens=num_encoder_tokens, ) @@ -1586,7 +1586,11 @@ def _update_waiting_for_remote_kv(self, request: Request) -> bool: # updated in _update_requests_with_invalid_blocks if request.num_computed_tokens: # Cache any valid computed tokens. - self.kv_cache_manager.cache_blocks(request, request.num_computed_tokens) + self.kv_cache_manager.cache_blocks( + request, + request.num_computed_tokens, + total_computed_tokens=request.num_computed_tokens, + ) else: # No valid computed tokens, release allocated blocks. # There may be a local cache hit on retry. @@ -1602,7 +1606,9 @@ def _update_waiting_for_remote_kv(self, request: Request) -> bool: if num_computed_tokens == request.num_tokens: num_computed_tokens -= 1 # This will cache the blocks iff caching is enabled. 
- self.kv_cache_manager.cache_blocks(request, num_computed_tokens) + self.kv_cache_manager.cache_blocks( + request, num_computed_tokens, total_computed_tokens=num_computed_tokens + ) # Update the request state for scheduling. request.num_computed_tokens = num_computed_tokens diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 5737d4b31d55..0aa0615e5faf 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -115,9 +115,6 @@ def get_num_blocks_to_allocate( # Scheduler relies on evictable blocks being counted in the free # capacity check, but allocate_new_blocks will clamp to actual new # blocks to avoid double allocation. - print( - f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {len(evictable_computed_blocks)} evictable computed blocks" - ) return num_new_blocks, evictable_computed_blocks # General case: some prefix tokens are skipped by the attention window. @@ -155,9 +152,6 @@ def get_num_blocks_to_allocate( if blk.ref_cnt == 0 and not blk.is_null ] - print( - f"YIFAN: request {request_id} needs {num_new_blocks} new blocks, {len(evictable_computed_blocks)} evictable computed blocks" - ) return num_new_blocks, evictable_computed_blocks def save_new_computed_blocks( @@ -186,9 +180,8 @@ def save_new_computed_blocks( req_blocks = self.req_to_blocks[request_id] assert len(req_blocks) == 0 req_blocks.extend(new_computed_blocks) - self.num_cached_block[request_id] = len( - new_computed_blocks - ) ## YIFAN: why set to len(new_computed_blocks) rather than len(req_blocks)? + ## YIFAN: why len(new_computed_blocks) rather than len(req_blocks)? + self.num_cached_block[request_id] = len(new_computed_blocks) else: # A running request. Should not have new computed blocks. 
assert len(new_computed_blocks) == 0 @@ -822,7 +815,7 @@ def get_num_blocks_to_allocate( num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], total_computed_tokens: int, - ) -> tuple[int, int]: + ) -> tuple[int, Sequence[KVCacheBlock]]: # TODO(Kuntai): handle the case where `total_computed_tokens > 0` if total_computed_tokens > 0: logger.warning_once( @@ -870,7 +863,9 @@ def save_new_computed_blocks( # requests, so `new_computed_blocks` should always be empty. assert len(new_computed_blocks) == 0 - def cache_blocks(self, request: Request, num_tokens: int) -> None: + def cache_blocks( + self, request: Request, num_tokens: int, prev_computed_tokens: int + ) -> None: # We do not cache blocks for cross-attention to be shared between # requests, so this method is not relevant. raise ValueError("Should not be called as prefix caching is disabled.") From 0b2218c463a9204a946953bfa3f0d6719530949f Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Wed, 3 Dec 2025 18:07:08 -0800 Subject: [PATCH 05/73] nits: remove test scripts Signed-off-by: Yifan Qiao --- test_connector_w_hybrid_kv_allocator.py | 67 ------------------------- 1 file changed, 67 deletions(-) delete mode 100644 test_connector_w_hybrid_kv_allocator.py diff --git a/test_connector_w_hybrid_kv_allocator.py b/test_connector_w_hybrid_kv_allocator.py deleted file mode 100644 index 5e220e14430d..000000000000 --- a/test_connector_w_hybrid_kv_allocator.py +++ /dev/null @@ -1,67 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# SPDX-FileCopyrightText: Copyright contributors to the vLLM project -import os - -# Set token chunk size to 256 -os.environ["LMCACHE_CHUNK_SIZE"] = "256" -# Enable CPU memory backend -os.environ["LMCACHE_LOCAL_CPU"] = "True" -# Set CPU memory limit to 5GB -os.environ["LMCACHE_MAX_LOCAL_CPU_SIZE"] = "20.0" -os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" -os.environ["LMCACHE_USE_LAYERWISE"] = "True" - - -from vllm import LLM, SamplingParams -from vllm.config import KVTransferConfig - -# 
Configure KV cache transfer to use LMCache -ktc = KVTransferConfig( - kv_connector="LMCacheConnectorV1", - kv_role="kv_both", -) - -# Initialize LLM with LMCache configuration -# Adjust gpu_memory_utilization based on your GPU memory -# Parameters below are for 80GB GPUs -llm = LLM( - model="google/gemma-3-4b-it", - kv_transfer_config=ktc, - max_model_len=75000, - gpu_memory_utilization=0.28, - # gpu_memory_utilization=0.4, - # gpu_memory_utilization=0.8, - max_num_seqs=16, - enforce_eager=True, -) - -# Define sampling parameters -sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10) - -# Run inference -print("Generate request 1. This will store long prefix in LMCache.") -outputs = llm.generate("hi" * 70000 + "\nhow are you?", sampling_params) -generated_text = outputs[0].outputs[0].text -print(f"Generated text: {generated_text!r}") - -# This requires loading KV cache and will succeed -print("Generate request 2. This will load prefix from LMCache and succeed.") -outputs = llm.generate("hi" * 10000 + "\nTell me a story.", sampling_params) -generated_text = outputs[0].outputs[0].text -print(f"Generated text: {generated_text!r}") - -# flush out prefix cache in GPU -print("Generate request 3. This will evict prefix cache in GPU.") -outputs = llm.generate("1" + "hi" * 70000 + "\nhow are you?", sampling_params) -generated_text = outputs[0].outputs[0].text -print(f"Generated text: {generated_text!r}") - -# This requires loading KV cache -# but this request cannot be executed as vLLM cannot allocate for long prefix -# stored by LMCache -print("Generate request 4. 
This will attempt to load long prefix from LMCache.") -outputs = llm.generate("hi" * 70000 + "\nTell me a story.", sampling_params) -generated_text = outputs[0].outputs[0].text -print(f"Generated text: {generated_text!r}") - -print("All requests finished.") From 15ef476ed35f7e73caf4b292bdb74544fd863b83 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Thu, 4 Dec 2025 11:49:37 -0800 Subject: [PATCH 06/73] fix: revert `cache_block()` changes as we have already handled the null blocks inside the single_type_block_manager Signed-off-by: Yifan Qiao --- vllm/v1/core/block_pool.py | 34 ++++++++++++-------- vllm/v1/core/kv_cache_coordinator.py | 6 ++-- vllm/v1/core/kv_cache_manager.py | 23 ++++++------- vllm/v1/core/sched/async_scheduler.py | 1 - vllm/v1/core/sched/scheduler.py | 10 ++---- vllm/v1/core/single_type_kv_cache_manager.py | 13 ++------ 6 files changed, 38 insertions(+), 49 deletions(-) diff --git a/vllm/v1/core/block_pool.py b/vllm/v1/core/block_pool.py index 51ae216fc2da..818bc945e165 100644 --- a/vllm/v1/core/block_pool.py +++ b/vllm/v1/core/block_pool.py @@ -210,7 +210,7 @@ def cache_full_blocks( self, request: Request, blocks: list[KVCacheBlock], - num_cached_or_skipped_blocks: int, + num_cached_blocks: int, num_full_blocks: int, block_size: int, kv_cache_group_id: int, @@ -226,15 +226,15 @@ def cache_full_blocks( Args: request: The request to cache the blocks. blocks: All blocks in the request. - num_cached_or_skipped_blocks: The number of blocks that are already cached. + num_cached_blocks: The number of blocks that are already cached. num_full_blocks: The number of blocks that are full and should be cached after this function. block_size: Number of tokens in each block. kv_cache_group_id: The id of the KV cache group. 
""" - if num_cached_or_skipped_blocks >= num_full_blocks: + if num_cached_blocks >= num_full_blocks: return - new_full_blocks = blocks[num_cached_or_skipped_blocks:num_full_blocks] + new_full_blocks = blocks[num_cached_blocks:num_full_blocks] assert len(request.block_hashes) >= num_full_blocks if block_size == self.hash_block_size: # Common case. @@ -249,15 +249,22 @@ def cache_full_blocks( request.block_hashes, self.hash_block_size, block_size ) - new_block_hashes = block_hashes[num_cached_or_skipped_blocks:] + new_block_hashes = block_hashes[num_cached_blocks:] new_hashes: list[ExternalBlockHash] | None = ( [] if self.enable_kv_cache_events else None ) + + # Some blocks may be null blocks when enabling sparse attention or sliding + # window attention. For now, we only have sliding window attention, and + # null blocks must be at the beginning. + first_non_null_blk_idx = 0 for i, blk in enumerate(new_full_blocks): - if blk.is_null: - # May happen when both sparse attention (e.g., sliding - # window) and connector are enabled. 
- continue + if not blk.is_null: + first_non_null_blk_idx = i + break + + for i, blk in enumerate(new_full_blocks[first_non_null_blk_idx:]): + assert not blk.is_null assert blk.block_hash is None block_hash = new_block_hashes[i] @@ -271,10 +278,10 @@ def cache_full_blocks( new_hashes.append(maybe_convert_block_hash(block_hash)) if self.enable_kv_cache_events: - if num_cached_or_skipped_blocks == 0: + if num_cached_blocks == 0: parent_block_hash: ExternalBlockHash | None = None else: - parent_block = blocks[num_cached_or_skipped_blocks - 1] + parent_block = blocks[num_cached_blocks - 1] assert parent_block.block_hash is not None parent_block_hash = maybe_convert_block_hash( get_block_hash(parent_block.block_hash) @@ -284,9 +291,10 @@ def cache_full_blocks( BlockStored( block_hashes=new_hashes, parent_block_hash=parent_block_hash, + ## TODO(Yifan): here token_ids may be over-estimated when + ## using sliding window token_ids=request.all_token_ids[ - num_cached_or_skipped_blocks * block_size : num_full_blocks - * block_size + num_cached_blocks * block_size : num_full_blocks * block_size ], block_size=block_size, lora_id=request.lora_request.adapter_id diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 552874204802..3f560674d5ca 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -182,9 +182,7 @@ def allocate_new_blocks( for i, manager in enumerate(self.single_type_managers) ) - def cache_blocks( - self, request: Request, num_computed_tokens: int, total_computed_tokens: int - ) -> None: + def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: """ Cache the blocks for the request. @@ -195,7 +193,7 @@ def cache_blocks( (including tokens that are already cached). 
""" for manager in self.single_type_managers: - manager.cache_blocks(request, num_computed_tokens, total_computed_tokens) + manager.cache_blocks(request, num_computed_tokens) def free(self, request_id: str) -> None: """ diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 60732577e6da..ca0f4b4daa09 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -387,12 +387,7 @@ def allocate_slots( num_local_computed_tokens + num_external_computed_tokens + num_new_tokens, request.num_tokens, ) - self.coordinator.cache_blocks( - request, - num_tokens_to_cache, - total_computed_tokens=num_local_computed_tokens - + num_external_computed_tokens, - ) + self.coordinator.cache_blocks(request, num_tokens_to_cache) return self.create_kv_cache_blocks(new_blocks) @@ -480,14 +475,16 @@ def get_block_ids(self, request_id: str) -> tuple[list[int], ...]: """Get the block ids of a request.""" return self.get_blocks(request_id).get_block_ids() - def cache_blocks( - self, request: Request, num_computed_tokens: int, total_computed_tokens: int - ) -> None: - """Cache the blocks for the request, if enabled.""" + def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: + """Cache the blocks for the request, if enabled. + + Args: + request: The request to cache the blocks. + num_computed_tokens: The number of computed tokens, including tokens + that are already cached and tokens to be cached. + """ if self.enable_caching: - self.coordinator.cache_blocks( - request, num_computed_tokens, total_computed_tokens - ) + self.coordinator.cache_blocks(request, num_computed_tokens) def create_kv_cache_blocks( self, blocks: tuple[list[KVCacheBlock], ...] 
diff --git a/vllm/v1/core/sched/async_scheduler.py b/vllm/v1/core/sched/async_scheduler.py index a92d82e3cb02..6c4b2d85fbdb 100644 --- a/vllm/v1/core/sched/async_scheduler.py +++ b/vllm/v1/core/sched/async_scheduler.py @@ -65,6 +65,5 @@ def _update_request_with_output( self.kv_cache_manager.cache_blocks( request, request.num_computed_tokens - request.num_output_placeholders, - total_computed_tokens=request.num_computed_tokens, ) return new_token_ids, stopped diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 28975b0a745a..16da5b9f5253 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -1586,11 +1586,7 @@ def _update_waiting_for_remote_kv(self, request: Request) -> bool: # updated in _update_requests_with_invalid_blocks if request.num_computed_tokens: # Cache any valid computed tokens. - self.kv_cache_manager.cache_blocks( - request, - request.num_computed_tokens, - total_computed_tokens=request.num_computed_tokens, - ) + self.kv_cache_manager.cache_blocks(request, request.num_computed_tokens) else: # No valid computed tokens, release allocated blocks. # There may be a local cache hit on retry. @@ -1606,9 +1602,7 @@ def _update_waiting_for_remote_kv(self, request: Request) -> bool: if num_computed_tokens == request.num_tokens: num_computed_tokens -= 1 # This will cache the blocks iff caching is enabled. - self.kv_cache_manager.cache_blocks( - request, num_computed_tokens, total_computed_tokens=num_computed_tokens - ) + self.kv_cache_manager.cache_blocks(request, num_computed_tokens) # Update the request state for scheduling. 
request.num_computed_tokens = num_computed_tokens diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 0aa0615e5faf..18fc00ed9ae4 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -242,9 +242,7 @@ def allocate_new_blocks( req_blocks.extend(new_blocks) return new_blocks - def cache_blocks( - self, request: Request, num_tokens: int, prev_computed_tokens: int - ) -> None: + def cache_blocks(self, request: Request, num_tokens: int) -> None: """ Cache the blocks for the request. @@ -254,9 +252,6 @@ def cache_blocks( (including tokens that are already cached). """ num_cached_blocks = self.num_cached_block.get(request.request_id, 0) - num_skipped_blocks = ( - self.get_num_skipped_tokens(prev_computed_tokens) // self.block_size - ) num_full_blocks = num_tokens // self.block_size if num_cached_blocks >= num_full_blocks: @@ -265,7 +260,7 @@ def cache_blocks( self.block_pool.cache_full_blocks( request=request, blocks=self.req_to_blocks[request.request_id], - num_cached_or_skipped_blocks=max(num_cached_blocks, num_skipped_blocks), + num_cached_blocks=num_cached_blocks, num_full_blocks=num_full_blocks, block_size=self.block_size, kv_cache_group_id=self.kv_cache_group_id, @@ -863,9 +858,7 @@ def save_new_computed_blocks( # requests, so `new_computed_blocks` should always be empty. assert len(new_computed_blocks) == 0 - def cache_blocks( - self, request: Request, num_tokens: int, prev_computed_tokens: int - ) -> None: + def cache_blocks(self, request: Request, num_tokens: int) -> None: # We do not cache blocks for cross-attention to be shared between # requests, so this method is not relevant. 
raise ValueError("Should not be called as prefix caching is disabled.") From eb7bfcf34c64b1a486d9ceacd32f1ba35c6c7c4e Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Thu, 4 Dec 2025 17:18:48 -0800 Subject: [PATCH 07/73] fix: revert KVCacheManager.allocate_slots() interface changes; revising that in a follow-up PR Signed-off-by: Yifan Qiao --- .../core/test_single_type_kv_cache_manager.py | 50 ++++++++++++------- vllm/v1/core/kv_cache_manager.py | 23 ++++++--- vllm/v1/core/sched/scheduler.py | 4 +- 3 files changed, 51 insertions(+), 26 deletions(-) diff --git a/tests/v1/core/test_single_type_kv_cache_manager.py b/tests/v1/core/test_single_type_kv_cache_manager.py index 2347c62a6bd6..5cce2fd677fe 100644 --- a/tests/v1/core/test_single_type_kv_cache_manager.py +++ b/tests/v1/core/test_single_type_kv_cache_manager.py @@ -331,14 +331,19 @@ def test_get_num_blocks_to_allocate(): KVCacheBlock(i + 1) for i in range(5) ] - assert ( - manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1, 0) - == 20 + num_blocks, evictable = manager.get_num_blocks_to_allocate( + "1", 20 * block_size, cached_blocks_1, 0 ) - assert ( - manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2, 0) - == 15 + assert num_blocks == 10 + assert evictable == cached_blocks_1 + assert num_blocks + len(evictable) == 20 + + num_blocks, evictable = manager.get_num_blocks_to_allocate( + "2", 20 * block_size, cached_blocks_2, 0 ) + assert num_blocks == 10 + assert evictable == cached_blocks_2[5:] + assert num_blocks + len(evictable) == 20 def test_evictable_cached_blocks_not_double_allocated(): @@ -359,17 +364,21 @@ def test_evictable_cached_blocks_not_double_allocated(): request_id = "req" evictable_block = block_pool.blocks[1] # ref_cnt == 0, eviction candidate - num_blocks = manager.get_num_blocks_to_allocate( + num_blocks, evictable_blocks = manager.get_num_blocks_to_allocate( request_id=request_id, num_tokens=4, # requires 2 blocks 
new_computed_blocks=[evictable_block], # one cached block hit total_computed_tokens=0, ) - # Free capacity check should count evictable cached blocks (so return 2), - # but allocation should only allocate the truly new block. - assert num_blocks == 2 - - manager.save_new_computed_blocks(request_id, [evictable_block]) + # Free capacity check should count evictable cached blocks, but allocation + # should only allocate the truly new block. + assert num_blocks == 1 + assert evictable_blocks == [evictable_block] + assert num_blocks + len(evictable_blocks) == 2 + + manager.save_new_computed_blocks( + request_id, [evictable_block], total_computed_tokens=block_size + ) new_blocks = manager.allocate_new_blocks(request_id, num_blocks, num_tokens=4) assert len(new_blocks) == 1 assert len(manager.req_to_blocks[request_id]) == 2 @@ -394,11 +403,16 @@ def test_chunked_local_attention_get_num_blocks_to_allocate(): KVCacheBlock(i + 1) for i in range(5) ] - assert ( - manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1, 0) - == 20 + num_blocks, evictable = manager.get_num_blocks_to_allocate( + "1", 20 * block_size, cached_blocks_1, 0 ) - assert ( - manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2, 0) - == 15 + assert num_blocks == 10 + assert evictable == cached_blocks_1 + assert num_blocks + len(evictable) == 20 + + num_blocks, evictable = manager.get_num_blocks_to_allocate( + "2", 20 * block_size, cached_blocks_2, 0 ) + assert num_blocks == 10 + assert evictable == cached_blocks_2[5:] + assert num_blocks + len(evictable) == 15 diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index ca0f4b4daa09..e6ef35ba55b4 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -207,10 +207,10 @@ def allocate_slots( self, request: Request, num_new_tokens: int, - num_lookahead_tokens: int = 0, num_new_computed_tokens: int = 0, - num_external_computed_tokens: int = 0, new_computed_blocks: 
KVCacheBlocks | None = None, + num_lookahead_tokens: int = 0, + num_external_computed_tokens: int = 0, delay_cache_blocks: bool = False, num_encoder_tokens: int = 0, ) -> KVCacheBlocks | None: @@ -221,13 +221,13 @@ def allocate_slots( num_new_tokens: The number of tokens to be computed. num_new_computed_tokens: The number of new computed tokens just hitting the prefix caching, excluding external tokens. - num_external_computed_tokens: The number of tokens that their - KV caches are not cached by vLLM but cached by the connector. + new_computed_blocks: The cached blocks for the above new computed + tokens, groups as a tuple by kv cache groups. num_lookahead_tokens: The number of speculative tokens to allocate. This is used by spec decode proposers with kv-cache such as eagle. - new_computed_blocks: The cached blocks for the above new computed - tokens, groups as a tuple by kv cache groups. + num_external_computed_tokens: The number of tokens that their + KV caches are not cached by vLLM but cached by the connector. delay_cache_blocks: Whether to skip caching the blocks. This is used by P/D when allocating blocks used in a KV transfer which will complete in a future step. @@ -349,13 +349,24 @@ def allocate_slots( return None # Touch the computed blocks to make sure they won't be evicted. 
+ touched_block_ids: set[int] = set() if self.enable_caching: self.block_pool.touch(blocks_to_touch) + touched_block_ids = { + id(block) for blocks in blocks_to_touch for block in blocks + } else: assert not any(new_computed_block_list), ( "Computed blocks should be empty when prefix caching is disabled" ) + if new_computed_block_list is not self.empty_kv_cache_blocks.blocks: + for blocks in new_computed_block_list: + for block in blocks: + if block.is_null or id(block) in touched_block_ids: + continue + block.ref_cnt += 1 + if new_computed_block_list is not self.empty_kv_cache_blocks.blocks: # Append the new computed blocks to the request blocks until now to # avoid the case where the new blocks cannot be allocated. diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 16da5b9f5253..5b9560bf4534 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -584,9 +584,9 @@ def schedule(self) -> SchedulerOutput: request, num_new_tokens, num_new_computed_tokens=num_new_local_computed_tokens, - num_external_computed_tokens=num_external_computed_tokens, - num_lookahead_tokens=effective_lookahead_tokens, new_computed_blocks=new_computed_blocks, + num_lookahead_tokens=effective_lookahead_tokens, + num_external_computed_tokens=num_external_computed_tokens, delay_cache_blocks=load_kv_async, num_encoder_tokens=num_encoder_tokens, ) From 327e472de4e89341799d6566f314caeb2dc4d4cf Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Thu, 4 Dec 2025 23:25:09 -0800 Subject: [PATCH 08/73] revert unrelated changes Signed-off-by: Yifan Qiao --- .gitignore | 3 --- vllm/v1/core/block_pool.py | 4 ++-- vllm/v1/core/kv_cache_coordinator.py | 20 +++++++++--------- vllm/v1/core/sched/async_scheduler.py | 3 +-- vllm/v1/core/single_type_kv_cache_manager.py | 22 ++++++-------------- 5 files changed, 19 insertions(+), 33 deletions(-) diff --git a/.gitignore b/.gitignore index 04e9d90e2ff3..7cda86478664 100644 --- a/.gitignore +++ b/.gitignore 
@@ -227,6 +227,3 @@ ep_kernels_workspace/ # Allow tracked library source folders under submodules (e.g., benchmarks/lib) !vllm/benchmarks/lib/ - -csrc/* -vllm/third_party/triton_kernels/* diff --git a/vllm/v1/core/block_pool.py b/vllm/v1/core/block_pool.py index 818bc945e165..f83f6a447d01 100644 --- a/vllm/v1/core/block_pool.py +++ b/vllm/v1/core/block_pool.py @@ -291,8 +291,8 @@ def cache_full_blocks( BlockStored( block_hashes=new_hashes, parent_block_hash=parent_block_hash, - ## TODO(Yifan): here token_ids may be over-estimated when - ## using sliding window + ## TODO(Yifan): here token_ids may be over-estimated for + ## sliding window layers token_ids=request.all_token_ids[ num_cached_blocks * block_size : num_full_blocks * block_size ], diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 3f560674d5ca..5f6eafb76a63 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -4,7 +4,6 @@ from collections.abc import Sequence from math import lcm -from vllm.logger import init_logger from vllm.v1.core.block_pool import BlockPool from vllm.v1.core.kv_cache_metrics import KVCacheMetricsCollector from vllm.v1.core.kv_cache_utils import ( @@ -25,8 +24,6 @@ ) from vllm.v1.request import Request -logger = init_logger(__name__) - class KVCacheCoordinator(ABC): """ @@ -92,9 +89,9 @@ def get_num_blocks_to_allocate( total_computed_tokens: Include both local and external tokens. Returns: - 1. The number of new blocks to allocate for each kv cache group. - 2. The number of evictable blocks to touch for each kv cache group. - 3. The blocks to touch for each kv cache group. + The number of new blocks to allocate for each kv cache group. + The number of evictable blocks to touch for each kv cache group. + The blocks to touch for each kv cache group. """ num_new_blocks_to_allocate = [] num_evictable_blocks_to_allocate = [] @@ -141,6 +138,8 @@ def save_new_computed_blocks( request_id: The request ID. 
new_computed_blocks: The new computed blocks just hitting the prefix cache. + total_computed_tokens: The total number of computed tokens, including + both local and external tokens. """ for i, manager in enumerate(self.single_type_managers): manager.save_new_computed_blocks( @@ -156,13 +155,14 @@ def allocate_new_blocks( ) -> tuple[list[KVCacheBlock], ...]: """ Allocate new blocks for the request to give it at least `num_tokens` - token slots. If `num_blocks_to_allocate` is smaller than the number of - blocks needed (in the case of sliding window attention), the leading - blocks will be padded with null blocks. + token slots. If `num_blocks_to_allocate_per_group[i]` is smaller than + the number of blocks needed (in the case of sliding window attention), + the leading blocks will be padded with null blocks. Args: request_id: The request ID. - num_blocks_to_allocate: The number of blocks to allocate. + num_blocks_to_allocate_per_group: The number of blocks to allocate + for each kv cache group. num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). num_encoder_tokens: The number of encoder tokens for allocating diff --git a/vllm/v1/core/sched/async_scheduler.py b/vllm/v1/core/sched/async_scheduler.py index 6c4b2d85fbdb..df61eebb395e 100644 --- a/vllm/v1/core/sched/async_scheduler.py +++ b/vllm/v1/core/sched/async_scheduler.py @@ -63,7 +63,6 @@ def _update_request_with_output( # Cache the new tokens. Preempted requests should be skipped. 
if status_before_update == RequestStatus.RUNNING: self.kv_cache_manager.cache_blocks( - request, - request.num_computed_tokens - request.num_output_placeholders, + request, request.num_computed_tokens - request.num_output_placeholders ) return new_token_ids, stopped diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 18fc00ed9ae4..fdd1cade5221 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -91,16 +91,9 @@ def get_num_blocks_to_allocate( """ num_required_blocks = cdiv(num_tokens, self.block_size) - - # How many *tokens* are outside the attention window for this manager. - # For attention types that do not skip tokens (e.g. full attention), - # this will always be 0. num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) - # Fast-path: nothing is skipped. This should match the original - # behavior before total_computed_tokens was introduced so that - # existing tests (and non-sliding-window attention types) behave - # identically. + # Fast-path: nothing is skipped. if num_skipped_tokens <= 0: num_new_blocks = ( num_required_blocks @@ -112,9 +105,6 @@ def get_num_blocks_to_allocate( for blk in new_computed_blocks if blk.ref_cnt == 0 and not blk.is_null ] - # Scheduler relies on evictable blocks being counted in the free - # capacity check, but allocate_new_blocks will clamp to actual new - # blocks to avoid double allocation. return num_new_blocks, evictable_computed_blocks # General case: some prefix tokens are skipped by the attention window. @@ -168,25 +158,24 @@ def save_new_computed_blocks( new_computed_blocks: The new computed blocks just hitting the prefix cache. """ - - # How many *tokens* are outside the attention window for this manager. - # For attention types that do not skip tokens (e.g. full attention), - # this will always be 0. 
num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) + # Fast-path: nothing is skipped. if num_skipped_tokens <= 0: if request_id not in self.num_cached_block: # A new request. req_blocks = self.req_to_blocks[request_id] assert len(req_blocks) == 0 req_blocks.extend(new_computed_blocks) - ## YIFAN: why len(new_computed_blocks) rather than len(req_blocks)? + # REMOVE BEFORE MERGE (YIFAN): why len(new_computed_blocks) + # rather than len(req_blocks)? self.num_cached_block[request_id] = len(new_computed_blocks) else: # A running request. Should not have new computed blocks. assert len(new_computed_blocks) == 0 return + # General case: some prefix tokens are skipped by the attention window. num_skipped_blocks = num_skipped_tokens // self.block_size req_blocks = self.req_to_blocks[request_id] num_local_computed_blocks = len(new_computed_blocks) + len(req_blocks) @@ -218,6 +207,7 @@ def allocate_new_blocks( Args: request_id: The request ID. + num_blocks_to_allocate: The number of new blocks to allocate. num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). 
From 580efd422a9b955eab55606b69b58d9edffeddcf Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Fri, 5 Dec 2025 00:38:19 -0800 Subject: [PATCH 09/73] revert `blocks_to_touch` changes Signed-off-by: Yifan Qiao --- vllm/v1/core/kv_cache_coordinator.py | 19 +++++------- vllm/v1/core/kv_cache_manager.py | 15 +-------- vllm/v1/core/single_type_kv_cache_manager.py | 32 +++++++++----------- 3 files changed, 22 insertions(+), 44 deletions(-) diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 5f6eafb76a63..177f62bb5a96 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -74,7 +74,7 @@ def get_num_blocks_to_allocate( new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], num_encoder_tokens: int, total_computed_tokens: int, - ) -> tuple[list[int], list[int], tuple[Sequence[KVCacheBlock], ...]]: + ) -> tuple[list[int], list[int]]: """ Get the number of blocks needed to be allocated for the request. @@ -91,25 +91,23 @@ def get_num_blocks_to_allocate( Returns: The number of new blocks to allocate for each kv cache group. The number of evictable blocks to touch for each kv cache group. - The blocks to touch for each kv cache group. """ num_new_blocks_to_allocate = [] num_evictable_blocks_to_allocate = [] - evictable_blocks_to_touch: list[Sequence[KVCacheBlock]] = [] for i, manager in enumerate(self.single_type_managers): if isinstance(manager, CrossAttentionManager): # For cross-attention, we issue a single static allocation # of blocks based on the number of encoder input tokens. 
( num_new_blocks_to_allocate_single_group, - blocks_to_touch_single_group, + num_evictable_blocks_to_allocate_single_group, ) = manager.get_num_blocks_to_allocate( request_id, num_encoder_tokens, [], 0 ) else: ( num_new_blocks_to_allocate_single_group, - blocks_to_touch_single_group, + num_evictable_blocks_to_allocate_single_group, ) = manager.get_num_blocks_to_allocate( request_id, num_tokens, @@ -117,13 +115,10 @@ def get_num_blocks_to_allocate( total_computed_tokens, ) num_new_blocks_to_allocate.append(num_new_blocks_to_allocate_single_group) - num_evictable_blocks_to_allocate.append(len(blocks_to_touch_single_group)) - evictable_blocks_to_touch.append(blocks_to_touch_single_group) - return ( - num_new_blocks_to_allocate, - num_evictable_blocks_to_allocate, - tuple(evictable_blocks_to_touch), - ) + num_evictable_blocks_to_allocate.append( + num_evictable_blocks_to_allocate_single_group + ) + return num_new_blocks_to_allocate, num_evictable_blocks_to_allocate def save_new_computed_blocks( self, diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index e6ef35ba55b4..6a907e9fff63 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -331,7 +331,6 @@ def allocate_slots( ( num_new_blocks_to_allocate, num_evictable_blocks_to_allocate, - blocks_to_touch, ) = self.coordinator.get_num_blocks_to_allocate( request_id=request.request_id, num_tokens=num_tokens_need_slot, @@ -348,25 +347,13 @@ def allocate_slots( # Cannot allocate new blocks return None - # Touch the computed blocks to make sure they won't be evicted. 
- touched_block_ids: set[int] = set() if self.enable_caching: - self.block_pool.touch(blocks_to_touch) - touched_block_ids = { - id(block) for blocks in blocks_to_touch for block in blocks - } + self.block_pool.touch(new_computed_block_list) else: assert not any(new_computed_block_list), ( "Computed blocks should be empty when prefix caching is disabled" ) - if new_computed_block_list is not self.empty_kv_cache_blocks.blocks: - for blocks in new_computed_block_list: - for block in blocks: - if block.is_null or id(block) in touched_block_ids: - continue - block.ref_cnt += 1 - if new_computed_block_list is not self.empty_kv_cache_blocks.blocks: # Append the new computed blocks to the request blocks until now to # avoid the case where the new blocks cannot be allocated. diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index fdd1cade5221..043d1ac17292 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -72,7 +72,7 @@ def get_num_blocks_to_allocate( num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], total_computed_tokens: int, - ) -> tuple[int, Sequence[KVCacheBlock]]: + ) -> tuple[int, int]: """ Get the number of blocks needed to be allocated for the request. @@ -86,8 +86,8 @@ def get_num_blocks_to_allocate( tokens. Returns: - 1. The number of blocks. - 2. The list of evictable blocks (i.e., ref_cnt == 0) that should be touched. + The number of blocks. + The number of evictable blocks (i.e., ref_cnt == 0). 
""" num_required_blocks = cdiv(num_tokens, self.block_size) @@ -100,12 +100,10 @@ def get_num_blocks_to_allocate( - len(new_computed_blocks) - len(self.req_to_blocks[request_id]) ) - evictable_computed_blocks = [ - blk - for blk in new_computed_blocks - if blk.ref_cnt == 0 and not blk.is_null - ] - return num_new_blocks, evictable_computed_blocks + num_evictable_blocks = sum( + blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks + ) + return num_new_blocks, num_evictable_blocks # General case: some prefix tokens are skipped by the attention window. num_skipped_blocks = num_skipped_tokens // self.block_size @@ -120,7 +118,7 @@ def get_num_blocks_to_allocate( num_new_blocks = max(num_required_blocks - num_skipped_blocks, 0) # All new computed blocks are skipped. This happens when the entire # sliding window hits external KV cache via a KV connector. - evictable_computed_blocks = [] + num_evictable_blocks = 0 else: # Some local-computed blocks remain inside the window. num_new_blocks = max(num_required_blocks - num_local_computed_blocks, 0) @@ -136,13 +134,11 @@ def get_num_blocks_to_allocate( # free queue and ref_cnt == 0), it will be changed from a free block # to a computed block when the request is allocated, so we also count # it in the free-capacity check. 
- evictable_computed_blocks = [ - blk + num_evictable_blocks = sum( + blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks[num_skipped_new_computed_blocks:] - if blk.ref_cnt == 0 and not blk.is_null - ] - - return num_new_blocks, evictable_computed_blocks + ) + return num_new_blocks, num_evictable_blocks def save_new_computed_blocks( self, @@ -800,7 +796,7 @@ def get_num_blocks_to_allocate( num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], total_computed_tokens: int, - ) -> tuple[int, Sequence[KVCacheBlock]]: + ) -> tuple[int, int]: # TODO(Kuntai): handle the case where `total_computed_tokens > 0` if total_computed_tokens > 0: logger.warning_once( @@ -842,7 +838,7 @@ def save_new_computed_blocks( self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock], - local_computed_tokens: int, + total_computed_tokens: int, ) -> None: # We do not cache blocks for cross-attention to be shared between # requests, so `new_computed_blocks` should always be empty. From 37d7c3b33cf1ab9dcfe1fe31dbe2c868ce6550c9 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Fri, 5 Dec 2025 16:35:16 -0800 Subject: [PATCH 10/73] fix: update test cases Signed-off-by: Yifan Qiao --- .../core/test_single_type_kv_cache_manager.py | 104 ++++++++++++------ vllm/config/vllm.py | 4 +- .../kv_transfer/kv_connector/factory.py | 9 +- .../kv_connector/v1/lmcache_connector.py | 4 + vllm/v1/core/kv_cache_coordinator.py | 2 +- vllm/v1/core/kv_cache_manager.py | 1 + vllm/v1/core/single_type_kv_cache_manager.py | 2 +- 7 files changed, 84 insertions(+), 42 deletions(-) diff --git a/tests/v1/core/test_single_type_kv_cache_manager.py b/tests/v1/core/test_single_type_kv_cache_manager.py index 5cce2fd677fe..1e836d215d92 100644 --- a/tests/v1/core/test_single_type_kv_cache_manager.py +++ b/tests/v1/core/test_single_type_kv_cache_manager.py @@ -314,12 +314,13 @@ def assert_block_id(block_table: list[KVCacheBlock], ids: list[int]): def test_get_num_blocks_to_allocate(): block_size = 2 + 
sliding_window_length = 2 * block_size sliding_window_spec = SlidingWindowSpec( block_size=block_size, num_kv_heads=1, head_size=1, dtype=torch.float32, - sliding_window=4, # Placeholder value, not related to test result + sliding_window=sliding_window_length, ) block_pool = BlockPool( @@ -331,67 +332,83 @@ def test_get_num_blocks_to_allocate(): KVCacheBlock(i + 1) for i in range(5) ] - num_blocks, evictable = manager.get_num_blocks_to_allocate( - "1", 20 * block_size, cached_blocks_1, 0 + num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( + manager.get_num_blocks_to_allocate( + "1", + 20 * block_size, + cached_blocks_1, + total_computed_tokens=len(cached_blocks_1) * block_size, + ) + ) + assert ( + num_new_blocks_to_allocate == 10 + and num_evictable_blocks_to_allocate == sliding_window_length // block_size ) - assert num_blocks == 10 - assert evictable == cached_blocks_1 - assert num_blocks + len(evictable) == 20 - num_blocks, evictable = manager.get_num_blocks_to_allocate( - "2", 20 * block_size, cached_blocks_2, 0 + num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( + manager.get_num_blocks_to_allocate( + "2", + 20 * block_size, + cached_blocks_2, + total_computed_tokens=len(cached_blocks_2) * block_size, + ) + ) + assert ( + num_new_blocks_to_allocate == 10 + and num_evictable_blocks_to_allocate == sliding_window_length // block_size ) - assert num_blocks == 10 - assert evictable == cached_blocks_2[5:] - assert num_blocks + len(evictable) == 20 def test_evictable_cached_blocks_not_double_allocated(): block_size = 2 + sliding_window_length = 2 * block_size sliding_window_spec = SlidingWindowSpec( block_size=block_size, num_kv_heads=1, head_size=1, dtype=torch.float32, - sliding_window=16, + sliding_window=sliding_window_length, ) block_pool = BlockPool( - num_gpu_blocks=10, enable_caching=True, hash_block_size=block_size + num_gpu_blocks=100, enable_caching=True, hash_block_size=block_size ) manager = 
get_sliding_window_manager(sliding_window_spec, block_pool) request_id = "req" evictable_block = block_pool.blocks[1] # ref_cnt == 0, eviction candidate - num_blocks, evictable_blocks = manager.get_num_blocks_to_allocate( - request_id=request_id, - num_tokens=4, # requires 2 blocks - new_computed_blocks=[evictable_block], # one cached block hit - total_computed_tokens=0, + num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( + manager.get_num_blocks_to_allocate( + request_id=request_id, + num_tokens=2 * block_size, + new_computed_blocks=[evictable_block], + total_computed_tokens=block_size, + ) ) # Free capacity check should count evictable cached blocks, but allocation # should only allocate the truly new block. - assert num_blocks == 1 - assert evictable_blocks == [evictable_block] - assert num_blocks + len(evictable_blocks) == 2 + assert num_new_blocks_to_allocate == 1 and num_evictable_blocks_to_allocate == 1 manager.save_new_computed_blocks( request_id, [evictable_block], total_computed_tokens=block_size ) - new_blocks = manager.allocate_new_blocks(request_id, num_blocks, num_tokens=4) + new_blocks = manager.allocate_new_blocks( + request_id, num_new_blocks_to_allocate, num_tokens=4 + ) assert len(new_blocks) == 1 assert len(manager.req_to_blocks[request_id]) == 2 def test_chunked_local_attention_get_num_blocks_to_allocate(): block_size = 2 + attention_chunk_size = 2 * block_size attention_spec = ChunkedLocalAttentionSpec( block_size=block_size, num_kv_heads=1, head_size=1, dtype=torch.float32, - attention_chunk_size=4, # Placeholder value, not related to test result + attention_chunk_size=attention_chunk_size, ) block_pool = BlockPool( @@ -402,17 +419,34 @@ def test_chunked_local_attention_get_num_blocks_to_allocate(): cached_blocks_2 = [block_pool.null_block for _ in range(5)] + [ KVCacheBlock(i + 1) for i in range(5) ] - - num_blocks, evictable = manager.get_num_blocks_to_allocate( - "1", 20 * block_size, cached_blocks_1, 0 + cached_blocks_3 = 
[KVCacheBlock(i + 1) for i in range(5)] + + num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( + manager.get_num_blocks_to_allocate( + "1", + 20 * block_size, + cached_blocks_1, + total_computed_tokens=len(cached_blocks_1) * block_size, + ) ) - assert num_blocks == 10 - assert evictable == cached_blocks_1 - assert num_blocks + len(evictable) == 20 - - num_blocks, evictable = manager.get_num_blocks_to_allocate( - "2", 20 * block_size, cached_blocks_2, 0 + assert num_new_blocks_to_allocate == 10 and num_evictable_blocks_to_allocate == 0 + + num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( + manager.get_num_blocks_to_allocate( + "2", + 20 * block_size, + cached_blocks_2, + total_computed_tokens=len(cached_blocks_2) * block_size, + ) + ) + assert num_new_blocks_to_allocate == 10 and num_evictable_blocks_to_allocate == 0 + + num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( + manager.get_num_blocks_to_allocate( + "3", + 20 * block_size, + cached_blocks_3, + total_computed_tokens=len(cached_blocks_3) * block_size, + ) ) - assert num_blocks == 10 - assert evictable == cached_blocks_2[5:] - assert num_blocks + len(evictable) == 15 + assert num_new_blocks_to_allocate == 15 and num_evictable_blocks_to_allocate == 1 diff --git a/vllm/config/vllm.py b/vllm/config/vllm.py index 83e96f1e4d47..097660d6ab75 100644 --- a/vllm/config/vllm.py +++ b/vllm/config/vllm.py @@ -896,7 +896,7 @@ def has_blocked_weights(): self.scheduler_config.disable_hybrid_kv_cache_manager = True if self.kv_transfer_config is not None: # NOTE(Yifan): warning when both kv connector and hybrid kv cache - # manager are enabled. + # manager are enabled but don't disable hybrid kv cache manager here. # TODO(Kuntai): have a more elegent solution to check and # turn off HMA for connector that does not support HMA. 
logger.warning( @@ -905,7 +905,7 @@ def has_blocked_weights(): "check if the kv connector you are using supports HMA, or " "disable HMA by setting `--disable-hybrid-kv-cache-manager`." ) - if self.kv_events_config is not None: ## YIFAN: what is this? + if self.kv_events_config is not None: # Hybrid KV cache manager is not compatible with KV events. self.scheduler_config.disable_hybrid_kv_cache_manager = True if ( diff --git a/vllm/distributed/kv_transfer/kv_connector/factory.py b/vllm/distributed/kv_transfer/kv_connector/factory.py index 02d9a1ec9599..d24a58a4effc 100644 --- a/vllm/distributed/kv_transfer/kv_connector/factory.py +++ b/vllm/distributed/kv_transfer/kv_connector/factory.py @@ -56,9 +56,12 @@ def create_connector( # check if the connector supports HMA hma_enabled = not config.scheduler_config.disable_hybrid_kv_cache_manager if hma_enabled and not supports_hma(connector_cls): - raise ValueError( - f"Connector {connector_cls.__name__} does not support HMA but " - f"HMA is enabled. Please set `--disable-hybrid-kv-cache-manager`." + ## REMOVE BEFORE MERGE (YIFAN): Revert this warning back to raising + # an ValueError. + logger.warning( + "Connector %s does not support HMA but HMA is enabled. Please set " + "--disable-hybrid-kv-cache-manager to disable HMA.", + connector_cls.__name__, ) logger.info( diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py index 14859b67ef99..3df087d741ae 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py @@ -77,6 +77,8 @@ def __init__( role: KVConnectorRole, kv_cache_config: "KVCacheConfig", ): + ## REMOVE BEFORE MERGE (YIFAN): this is temporary workaround to work with + # LMCache. Remove this once having LMCache-side support for new interfaces. 
vllm_config.kv_cache_config = kv_cache_config super().__init__( vllm_config=vllm_config, role=role, kv_cache_config=kv_cache_config @@ -316,6 +318,8 @@ def request_finished( # either list[int] or tuple[list[int], ...]. return self._lmcache_engine.request_finished(request, block_ids) + ## REMOVE BEFORE MERGE (YIFAN): this is temporary workaround to work with + # LMCache. Remove this once having LMCache-side support for new interfaces. def request_finished_all_groups( self, request: "Request", diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 177f62bb5a96..7fc3eff1e3dc 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -90,7 +90,7 @@ def get_num_blocks_to_allocate( Returns: The number of new blocks to allocate for each kv cache group. - The number of evictable blocks to touch for each kv cache group. + The number of evictable blocks to allocate for each kv cache group. """ num_new_blocks_to_allocate = [] num_evictable_blocks_to_allocate = [] diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 6a907e9fff63..2aba516ba9e9 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -347,6 +347,7 @@ def allocate_slots( # Cannot allocate new blocks return None + # Touch the computed blocks to make sure they won't be evicted. if self.enable_caching: self.block_pool.touch(new_computed_block_list) else: diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 043d1ac17292..07c96b3daf5d 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -86,7 +86,7 @@ def get_num_blocks_to_allocate( tokens. Returns: - The number of blocks. + The number of new blocks to allocate. The number of evictable blocks (i.e., ref_cnt == 0). 
""" From 6169524bfa3fb459d9a4e9419d9629897e70a8d4 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Fri, 5 Dec 2025 21:47:21 -0800 Subject: [PATCH 11/73] doc string nits Signed-off-by: Yifan Qiao --- vllm/v1/core/single_type_kv_cache_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 07c96b3daf5d..400b08dd5a39 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -87,7 +87,7 @@ def get_num_blocks_to_allocate( Returns: The number of new blocks to allocate. - The number of evictable blocks (i.e., ref_cnt == 0). + The number of evictable blocks (i.e., ref_cnt == 0) to allocate. """ num_required_blocks = cdiv(num_tokens, self.block_size) From ccfc6763dd1361f089273be76d2b2aaba0e4b26a Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Fri, 5 Dec 2025 22:00:42 -0800 Subject: [PATCH 12/73] ignore mypy errors Signed-off-by: Yifan Qiao --- .../kv_transfer/kv_connector/v1/lmcache_connector.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py index 3df087d741ae..344dbd01bd75 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py @@ -79,12 +79,12 @@ def __init__( ): ## REMOVE BEFORE MERGE (YIFAN): this is temporary workaround to work with # LMCache. Remove this once having LMCache-side support for new interfaces. 
- vllm_config.kv_cache_config = kv_cache_config + vllm_config.kv_cache_config = kv_cache_config # type: ignore[attr-defined] super().__init__( vllm_config=vllm_config, role=role, kv_cache_config=kv_cache_config ) - assert vllm_config.kv_transfer_config is not None - use_native = vllm_config.kv_transfer_config.get_from_extra_config( + assert vllm_config.kv_transfer_config is not None # type: ignore[attr-defined] + use_native = vllm_config.kv_transfer_config.get_from_extra_config( # type: ignore[attr-defined] "use_native", False ) if use_native: From ad761e685efcf488b8bd5266c97052d400fa1307 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Fri, 12 Dec 2025 16:58:22 -0800 Subject: [PATCH 13/73] fix: resolve comments; mainly merge local_computed_tokens and external_computed_tokens allocation to the same function Signed-off-by: Yifan Qiao --- vllm/v1/core/block_pool.py | 33 +++---- vllm/v1/core/kv_cache_coordinator.py | 17 ++-- vllm/v1/core/kv_cache_manager.py | 76 ++++++---------- vllm/v1/core/single_type_kv_cache_manager.py | 95 ++++++++++---------- 4 files changed, 100 insertions(+), 121 deletions(-) diff --git a/vllm/v1/core/block_pool.py b/vllm/v1/core/block_pool.py index f83f6a447d01..e0e01f988e10 100644 --- a/vllm/v1/core/block_pool.py +++ b/vllm/v1/core/block_pool.py @@ -254,17 +254,11 @@ def cache_full_blocks( [] if self.enable_kv_cache_events else None ) - # Some blocks may be null blocks when enabling sparse attention or sliding - # window attention. For now, we only have sliding window attention, and - # null blocks must be at the beginning. - first_non_null_blk_idx = 0 for i, blk in enumerate(new_full_blocks): - if not blk.is_null: - first_non_null_blk_idx = i - break - - for i, blk in enumerate(new_full_blocks[first_non_null_blk_idx:]): - assert not blk.is_null + # Some blocks may be null blocks when enabling sparse attention or sliding + # window attention. We skip null blocks here. 
+ if blk.is_null: + continue assert blk.block_hash is None block_hash = new_block_hashes[i] @@ -376,7 +370,7 @@ def _maybe_evict_cached_block(self, block: KVCacheBlock) -> bool: ) return True - def touch(self, blocks: tuple[Sequence[KVCacheBlock], ...]) -> None: + def touch(self, blocks: Sequence[KVCacheBlock]) -> None: """Touch a block increases its reference count by 1, and may remove the block from the free queue. This is used when a block is hit by another request with the same prefix. @@ -384,15 +378,14 @@ def touch(self, blocks: tuple[Sequence[KVCacheBlock], ...]) -> None: Args: blocks: A list of blocks to touch. """ - for blocks_per_group in blocks: - for block in blocks_per_group: - # ref_cnt=0 means this block is in the free list (i.e. eviction - # candidate), so remove it. - if block.ref_cnt == 0 and not block.is_null: - self.free_block_queue.remove(block) - block.ref_cnt += 1 - if self.metrics_collector: - self.metrics_collector.on_block_accessed(block) + for block in blocks: + # ref_cnt=0 means this block is in the free list (i.e. eviction + # candidate), so remove it. + if block.ref_cnt == 0 and not block.is_null: + self.free_block_queue.remove(block) + block.ref_cnt += 1 + if self.metrics_collector: + self.metrics_collector.on_block_accessed(block) def free_blocks(self, ordered_blocks: Iterable[KVCacheBlock]) -> None: """Free a list of blocks. 
The blocks should be ordered by their diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 7fc3eff1e3dc..f578a6a9ac2a 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -60,6 +60,7 @@ def __init__( get_manager_for_kv_cache_spec( kv_cache_spec=kv_cache_group.kv_cache_spec, block_pool=self.block_pool, + enable_caching=enable_caching, kv_cache_group_id=i, dcp_world_size=dcp_world_size, pcp_world_size=pcp_world_size, @@ -124,7 +125,8 @@ def save_new_computed_blocks( self, request_id: str, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], - total_computed_tokens: int, + num_local_computed_tokens: int, + num_external_computed_tokens: int, ) -> None: """ Add the new computed blocks to the request. @@ -133,18 +135,20 @@ def save_new_computed_blocks( request_id: The request ID. new_computed_blocks: The new computed blocks just hitting the prefix cache. - total_computed_tokens: The total number of computed tokens, including - both local and external tokens. + num_local_computed_tokens: The number of local computed tokens. + num_external_computed_tokens: The number of external computed tokens. 
""" for i, manager in enumerate(self.single_type_managers): manager.save_new_computed_blocks( - request_id, new_computed_blocks[i], total_computed_tokens + request_id, + new_computed_blocks[i], + num_local_computed_tokens, + num_external_computed_tokens, ) def allocate_new_blocks( self, request_id: str, - num_blocks_to_allocate_per_group: list[int], num_tokens: int, num_encoder_tokens: int = 0, ) -> tuple[list[KVCacheBlock], ...]: @@ -169,12 +173,11 @@ def allocate_new_blocks( return tuple( manager.allocate_new_blocks( request_id, - num_blocks_to_allocate_per_group[i], num_encoder_tokens if isinstance(manager, CrossAttentionManager) else num_tokens, ) - for i, manager in enumerate(self.single_type_managers) + for manager in self.single_type_managers ) def cache_blocks(self, request: Request, num_computed_tokens: int) -> None: diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 2aba516ba9e9..96839b951585 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -237,33 +237,25 @@ def allocate_slots( Blocks layout: ``` - --------------------------------------------------------------------- - | < comp > | < new_comp > | < ext_comp > | < new > | < lookahead > | - --------------------------------------------------------------------- - | < to be computed > | - --------------------------------------------------------------------- - | < to be allocated > | - --------------------------------------------------------------------- - | < to be cached > | - --------------------------------------------------------------------- - | Prefix-cached tokens from both vLLM | - | and connector. 
Can be safely removed if | + ---------------------------------------------------------------------- + | < comp > | < new_comp > | < ext_comp > | < new > | < lookahead > | + ---------------------------------------------------------------------- + | < to be computed > | + ---------------------------------------------------------------------- + | < to be allocated > | + ---------------------------------------------------------------------- + | < to be cached (roughly, | + | details below)> | + ---------------------------------------------------------------------- + | Prefix-cached tokens from either vLLM | + | or connector. Can be safely removed if | | they are outside sliding window. | - --------------------------------------------------------------------- - | not cached by | + ---------------------------------------------------------------------- + | < cached by vLLM > | not cached by | | vLLM, but | - | cached by | - | connector | - --------------------------------------------------------------------- - | < cached by vLLM > | - --------------------------------------------------------------------- - | ref_cnt | - | increased| - --------------------------------------------------------------------- - | ref_cnt not | - | increased yet| - --------------------------------------------------------------------- - + | ref_cnt | ref_cnt not | cached by | + | increased| increased yet| connector | + ---------------------------------------------------------------------- ``` Abbrivations: @@ -273,10 +265,13 @@ def allocate_slots( new_comp = num_new_computed_tokens = len(new_computed_blocks) * block_size ext_comp = num_external_computed_tokens, cached by the connector - new = num_new_tokens + new = num_new_tokens, including unverified draft tokens lookahead = num_lookahead_tokens ``` + NOTE: for new tokens which include both verified and unverified draft + tokens, we only cache the verified tokens (by capping the number at + `request.num_tokens`). 
The allocation has three stages: - Free unnecessary blocks in `comp` and check @@ -290,15 +285,8 @@ def allocate_slots( Returns: A list of new allocated blocks. """ - if ( - num_new_tokens == 0 - and num_lookahead_tokens == 0 - and num_external_computed_tokens == 0 - ): - raise ValueError( - "At least one of num_new_tokens, num_lookahead_tokens, or " - "num_external_computed_tokens must be greater than 0" - ) + if num_new_tokens == 0: + raise ValueError("num_new_tokens must be greater than 0") if new_computed_blocks is not None: new_computed_block_list = new_computed_blocks.blocks @@ -347,27 +335,21 @@ def allocate_slots( # Cannot allocate new blocks return None - # Touch the computed blocks to make sure they won't be evicted. - if self.enable_caching: - self.block_pool.touch(new_computed_block_list) - else: - assert not any(new_computed_block_list), ( - "Computed blocks should be empty when prefix caching is disabled" - ) - - if new_computed_block_list is not self.empty_kv_cache_blocks.blocks: + if ( + new_computed_block_list is not self.empty_kv_cache_blocks.blocks + or num_external_computed_tokens > 0 + ): # Append the new computed blocks to the request blocks until now to # avoid the case where the new blocks cannot be allocated. 
self.coordinator.save_new_computed_blocks( request_id=request.request_id, new_computed_blocks=new_computed_block_list, - total_computed_tokens=num_local_computed_tokens - + num_external_computed_tokens, + num_local_computed_tokens=num_local_computed_tokens, + num_external_computed_tokens=num_external_computed_tokens, ) new_blocks = self.coordinator.allocate_new_blocks( request.request_id, - num_new_blocks_to_allocate, num_tokens_need_slot, num_encoder_tokens, ) diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 400b08dd5a39..712ebbee3596 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -33,6 +33,7 @@ def __init__( self, kv_cache_spec: KVCacheSpec, block_pool: BlockPool, + enable_caching: bool, kv_cache_group_id: int, dcp_world_size: int = 1, pcp_world_size: int = 1, @@ -51,6 +52,7 @@ def __init__( self.block_size *= dcp_world_size * pcp_world_size self.kv_cache_spec = kv_cache_spec self.block_pool = block_pool + self.enable_caching = enable_caching # Mapping from request ID to blocks to track the blocks allocated # for each request, so that we can free the blocks when the request @@ -144,7 +146,8 @@ def save_new_computed_blocks( self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock], - total_computed_tokens: int, + num_local_computed_tokens: int, + num_external_computed_tokens: int, ) -> None: """ Add the new computed blocks to the request. @@ -154,56 +157,63 @@ def save_new_computed_blocks( new_computed_blocks: The new computed blocks just hitting the prefix cache. """ - num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) - # Fast-path: nothing is skipped. - if num_skipped_tokens <= 0: - if request_id not in self.num_cached_block: - # A new request. 
- req_blocks = self.req_to_blocks[request_id] - assert len(req_blocks) == 0 - req_blocks.extend(new_computed_blocks) - # REMOVE BEFORE MERGE (YIFAN): why len(new_computed_blocks) - # rather than len(req_blocks)? - self.num_cached_block[request_id] = len(new_computed_blocks) - else: - # A running request. Should not have new computed blocks. - assert len(new_computed_blocks) == 0 + if request_id in self.num_cached_block: + # Fast-path: a running request. Should not have any new computed blocks. + assert len(new_computed_blocks) == 0 return - # General case: some prefix tokens are skipped by the attention window. - num_skipped_blocks = num_skipped_tokens // self.block_size + # A new request. req_blocks = self.req_to_blocks[request_id] - num_local_computed_blocks = len(new_computed_blocks) + len(req_blocks) - num_blocks_to_save = max(num_local_computed_blocks - num_skipped_blocks, 0) - num_null_blocks_to_pad = min( - max(num_skipped_blocks - len(req_blocks), 0), num_local_computed_blocks - ) - assert num_blocks_to_save + num_skipped_blocks == num_local_computed_blocks, ( - "Inconsistent number of blocks to save" + assert len(req_blocks) == 0 + num_total_computed_tokens = ( + num_local_computed_tokens + num_external_computed_tokens ) - # Add null blocks for the skipped blocks. - if num_null_blocks_to_pad > 0: - req_blocks.extend([self._null_block] * num_null_blocks_to_pad) + num_skipped_tokens = self.get_num_skipped_tokens(num_total_computed_tokens) + num_skipped_blocks = num_skipped_tokens // self.block_size + if num_skipped_blocks > 0: + # It is possible that all new computed blocks are skipped when + # num_skipped_blocks > len(new_computed_blocks). + new_computed_blocks = new_computed_blocks[num_skipped_blocks:] + # Some external computed tokens may be skipped too. + num_external_computed_tokens = min( + num_total_computed_tokens - num_skipped_tokens, + num_external_computed_tokens, + ) + + # Touch the computed blocks to make sure they won't be evicted. 
+ if self.enable_caching: + self.block_pool.touch(new_computed_blocks) + else: + assert not any(new_computed_blocks), ( + "Computed blocks should be empty when prefix caching is disabled" + ) + + # Skip blocks are padded with null blocks. + req_blocks.extend([self._null_block] * num_skipped_blocks) # Add the remaining computed blocks. - req_blocks.extend(new_computed_blocks[num_null_blocks_to_pad:]) + req_blocks.extend(new_computed_blocks) # All cached hits (including skipped nulls) are already cached; mark # them so cache_blocks() will not try to re-cache blocks that already # have a block_hash set. self.num_cached_block[request_id] = len(req_blocks) + if num_external_computed_tokens > 0: + # Allocate new blocks for external computed tokens. + allocated_blocks = self.block_pool.get_new_blocks( + cdiv(num_total_computed_tokens, self.block_size) - len(req_blocks) + ) + req_blocks.extend(allocated_blocks) + def allocate_new_blocks( - self, request_id: str, num_blocks_to_allocate: int, num_tokens: int + self, request_id: str, num_tokens: int ) -> list[KVCacheBlock]: """ Allocate new blocks for the request to give it at least `num_tokens` - token slots. If `num_blocks_to_allocate` is smaller than the number of - blocks needed (in the case of sliding window attention), the leading - blocks will be padded with null blocks. + token slots. Args: request_id: The request ID. - num_blocks_to_allocate: The number of new blocks to allocate. num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). @@ -213,18 +223,10 @@ def allocate_new_blocks( req_blocks = self.req_to_blocks[request_id] num_required_blocks = cdiv(num_tokens, self.block_size) num_new_blocks = num_required_blocks - len(req_blocks) - # Only allocate real new blocks; cached hits should already be present - # in req_blocks via save_new_computed_blocks. 
- num_blocks_to_padding = num_new_blocks - num_blocks_to_allocate - assert num_blocks_to_padding >= 0, ( - f"Invalid padding: need {num_new_blocks}, allocate {num_blocks_to_allocate}" - ) - if num_new_blocks <= 0: return [] else: - allocated_blocks = self.block_pool.get_new_blocks(num_blocks_to_allocate) - new_blocks = [self._null_block] * num_blocks_to_padding + allocated_blocks + new_blocks = self.block_pool.get_new_blocks(num_new_blocks) req_blocks.extend(new_blocks) return new_blocks @@ -816,7 +818,7 @@ def get_num_blocks_to_allocate( ) def allocate_new_blocks( - self, request_id: str, num_blocks_to_allocate: int, num_tokens: int + self, request_id: str, num_tokens: int ) -> list[KVCacheBlock]: # Allocate extra `num_speculative_blocks` blocks for # speculative decoding (MTP/EAGLE) with linear attention. @@ -826,9 +828,7 @@ def allocate_new_blocks( self.kv_cache_spec.block_size * self.kv_cache_spec.num_speculative_blocks ) - return super().allocate_new_blocks( - request_id, num_blocks_to_allocate, num_tokens - ) + return super().allocate_new_blocks(request_id, num_tokens) class CrossAttentionManager(SingleTypeKVCacheManager): @@ -838,7 +838,8 @@ def save_new_computed_blocks( self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock], - total_computed_tokens: int, + num_local_computed_tokens: int, + num_external_computed_tokens: int, ) -> None: # We do not cache blocks for cross-attention to be shared between # requests, so `new_computed_blocks` should always be empty. 
From 89af30cf5d6f256fea54ed6b72ade7ff45f497f7 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Sat, 13 Dec 2025 01:03:01 -0800 Subject: [PATCH 14/73] fix: simplify return values of get_num_blocks_to_allocate Signed-off-by: Yifan Qiao --- vllm/v1/core/kv_cache_coordinator.py | 24 +++++--------------- vllm/v1/core/kv_cache_manager.py | 10 ++------ vllm/v1/core/single_type_kv_cache_manager.py | 11 ++++----- 3 files changed, 13 insertions(+), 32 deletions(-) diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index f578a6a9ac2a..195646d44f37 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -75,7 +75,7 @@ def get_num_blocks_to_allocate( new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], num_encoder_tokens: int, total_computed_tokens: int, - ) -> tuple[list[int], list[int]]: + ) -> int: """ Get the number of blocks needed to be allocated for the request. @@ -90,36 +90,24 @@ def get_num_blocks_to_allocate( total_computed_tokens: Include both local and external tokens. Returns: - The number of new blocks to allocate for each kv cache group. - The number of evictable blocks to allocate for each kv cache group. + The number of blocks to allocate. """ - num_new_blocks_to_allocate = [] - num_evictable_blocks_to_allocate = [] + num_blocks_to_allocate = 0 for i, manager in enumerate(self.single_type_managers): if isinstance(manager, CrossAttentionManager): # For cross-attention, we issue a single static allocation # of blocks based on the number of encoder input tokens. 
- ( - num_new_blocks_to_allocate_single_group, - num_evictable_blocks_to_allocate_single_group, - ) = manager.get_num_blocks_to_allocate( + num_blocks_to_allocate += manager.get_num_blocks_to_allocate( request_id, num_encoder_tokens, [], 0 ) else: - ( - num_new_blocks_to_allocate_single_group, - num_evictable_blocks_to_allocate_single_group, - ) = manager.get_num_blocks_to_allocate( + num_blocks_to_allocate += manager.get_num_blocks_to_allocate( request_id, num_tokens, new_computed_blocks[i], total_computed_tokens, ) - num_new_blocks_to_allocate.append(num_new_blocks_to_allocate_single_group) - num_evictable_blocks_to_allocate.append( - num_evictable_blocks_to_allocate_single_group - ) - return num_new_blocks_to_allocate, num_evictable_blocks_to_allocate + return num_blocks_to_allocate def save_new_computed_blocks( self, diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 96839b951585..695bf82c3421 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -316,10 +316,7 @@ def allocate_slots( self.max_model_len, ) - ( - num_new_blocks_to_allocate, - num_evictable_blocks_to_allocate, - ) = self.coordinator.get_num_blocks_to_allocate( + num_blocks_to_allocate = self.coordinator.get_num_blocks_to_allocate( request_id=request.request_id, num_tokens=num_tokens_need_slot, new_computed_blocks=new_computed_block_list, @@ -327,11 +324,8 @@ def allocate_slots( total_computed_tokens=num_local_computed_tokens + num_external_computed_tokens, ) - tot_num_blocks_to_allocate = sum(num_new_blocks_to_allocate) + sum( - num_evictable_blocks_to_allocate - ) - if tot_num_blocks_to_allocate > self.block_pool.get_num_free_blocks(): + if num_blocks_to_allocate > self.block_pool.get_num_free_blocks(): # Cannot allocate new blocks return None diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 712ebbee3596..f15e12fd0125 100644 --- 
a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -74,7 +74,7 @@ def get_num_blocks_to_allocate( num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], total_computed_tokens: int, - ) -> tuple[int, int]: + ) -> int: """ Get the number of blocks needed to be allocated for the request. @@ -88,8 +88,7 @@ def get_num_blocks_to_allocate( tokens. Returns: - The number of new blocks to allocate. - The number of evictable blocks (i.e., ref_cnt == 0) to allocate. + The number of blocks to allocate. """ num_required_blocks = cdiv(num_tokens, self.block_size) @@ -105,7 +104,7 @@ def get_num_blocks_to_allocate( num_evictable_blocks = sum( blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks ) - return num_new_blocks, num_evictable_blocks + return num_new_blocks + num_evictable_blocks # General case: some prefix tokens are skipped by the attention window. num_skipped_blocks = num_skipped_tokens // self.block_size @@ -140,7 +139,7 @@ def get_num_blocks_to_allocate( blk.ref_cnt == 0 and not blk.is_null for blk in new_computed_blocks[num_skipped_new_computed_blocks:] ) - return num_new_blocks, num_evictable_blocks + return num_new_blocks + num_evictable_blocks def save_new_computed_blocks( self, @@ -798,7 +797,7 @@ def get_num_blocks_to_allocate( num_tokens: int, new_computed_blocks: Sequence[KVCacheBlock], total_computed_tokens: int, - ) -> tuple[int, int]: + ) -> int: # TODO(Kuntai): handle the case where `total_computed_tokens > 0` if total_computed_tokens > 0: logger.warning_once( From cf666cd6130015fb8b0ebd42d2ae2d3ca59a1c7f Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Sat, 13 Dec 2025 01:11:52 -0800 Subject: [PATCH 15/73] test: update test cases Signed-off-by: Yifan Qiao --- .../core/test_single_type_kv_cache_manager.py | 108 ++++++------------ 1 file changed, 38 insertions(+), 70 deletions(-) diff --git a/tests/v1/core/test_single_type_kv_cache_manager.py 
b/tests/v1/core/test_single_type_kv_cache_manager.py index 1e836d215d92..11e981187874 100644 --- a/tests/v1/core/test_single_type_kv_cache_manager.py +++ b/tests/v1/core/test_single_type_kv_cache_manager.py @@ -21,13 +21,23 @@ pytestmark = pytest.mark.cpu_test -def get_sliding_window_manager(sliding_window_spec, block_pool): - return SlidingWindowManager(sliding_window_spec, block_pool, kv_cache_group_id=0) +def get_sliding_window_manager(sliding_window_spec, block_pool, enable_caching=True): + return SlidingWindowManager( + sliding_window_spec, + block_pool, + enable_caching=enable_caching, + kv_cache_group_id=0, + ) -def get_chunked_local_attention_manager(chunked_local_attention_spec, block_pool): +def get_chunked_local_attention_manager( + chunked_local_attention_spec, block_pool, enable_caching=True +): return ChunkedLocalAttentionManager( - chunked_local_attention_spec, block_pool, kv_cache_group_id=0 + chunked_local_attention_spec, + block_pool, + enable_caching=enable_caching, + kv_cache_group_id=0, ) @@ -314,13 +324,12 @@ def assert_block_id(block_table: list[KVCacheBlock], ids: list[int]): def test_get_num_blocks_to_allocate(): block_size = 2 - sliding_window_length = 2 * block_size sliding_window_spec = SlidingWindowSpec( block_size=block_size, num_kv_heads=1, head_size=1, dtype=torch.float32, - sliding_window=sliding_window_length, + sliding_window=4, # Placeholder value, not related to test result ) block_pool = BlockPool( @@ -332,30 +341,13 @@ def test_get_num_blocks_to_allocate(): KVCacheBlock(i + 1) for i in range(5) ] - num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( - manager.get_num_blocks_to_allocate( - "1", - 20 * block_size, - cached_blocks_1, - total_computed_tokens=len(cached_blocks_1) * block_size, - ) - ) assert ( - num_new_blocks_to_allocate == 10 - and num_evictable_blocks_to_allocate == sliding_window_length // block_size - ) - - num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( - 
manager.get_num_blocks_to_allocate( - "2", - 20 * block_size, - cached_blocks_2, - total_computed_tokens=len(cached_blocks_2) * block_size, - ) + manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1, 0) + == 20 ) assert ( - num_new_blocks_to_allocate == 10 - and num_evictable_blocks_to_allocate == sliding_window_length // block_size + manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2, 0) + == 15 ) @@ -378,37 +370,35 @@ def test_evictable_cached_blocks_not_double_allocated(): request_id = "req" evictable_block = block_pool.blocks[1] # ref_cnt == 0, eviction candidate - num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( - manager.get_num_blocks_to_allocate( - request_id=request_id, - num_tokens=2 * block_size, - new_computed_blocks=[evictable_block], - total_computed_tokens=block_size, - ) + num_blocks_to_allocate = manager.get_num_blocks_to_allocate( + request_id=request_id, + num_tokens=2 * block_size, + new_computed_blocks=[evictable_block], + total_computed_tokens=block_size, ) # Free capacity check should count evictable cached blocks, but allocation # should only allocate the truly new block. 
- assert num_new_blocks_to_allocate == 1 and num_evictable_blocks_to_allocate == 1 + assert num_blocks_to_allocate == 2 manager.save_new_computed_blocks( - request_id, [evictable_block], total_computed_tokens=block_size - ) - new_blocks = manager.allocate_new_blocks( - request_id, num_new_blocks_to_allocate, num_tokens=4 + request_id, + [evictable_block], + num_local_computed_tokens=block_size, + num_external_computed_tokens=0, ) + new_blocks = manager.allocate_new_blocks(request_id, num_tokens=4) assert len(new_blocks) == 1 assert len(manager.req_to_blocks[request_id]) == 2 def test_chunked_local_attention_get_num_blocks_to_allocate(): block_size = 2 - attention_chunk_size = 2 * block_size attention_spec = ChunkedLocalAttentionSpec( block_size=block_size, num_kv_heads=1, head_size=1, dtype=torch.float32, - attention_chunk_size=attention_chunk_size, + attention_chunk_size=4, # Placeholder value, not related to test result ) block_pool = BlockPool( @@ -419,34 +409,12 @@ def test_chunked_local_attention_get_num_blocks_to_allocate(): cached_blocks_2 = [block_pool.null_block for _ in range(5)] + [ KVCacheBlock(i + 1) for i in range(5) ] - cached_blocks_3 = [KVCacheBlock(i + 1) for i in range(5)] - - num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( - manager.get_num_blocks_to_allocate( - "1", - 20 * block_size, - cached_blocks_1, - total_computed_tokens=len(cached_blocks_1) * block_size, - ) - ) - assert num_new_blocks_to_allocate == 10 and num_evictable_blocks_to_allocate == 0 - - num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( - manager.get_num_blocks_to_allocate( - "2", - 20 * block_size, - cached_blocks_2, - total_computed_tokens=len(cached_blocks_2) * block_size, - ) + + assert ( + manager.get_num_blocks_to_allocate("1", 20 * block_size, cached_blocks_1, 0) + == 20 ) - assert num_new_blocks_to_allocate == 10 and num_evictable_blocks_to_allocate == 0 - - num_new_blocks_to_allocate, num_evictable_blocks_to_allocate = ( - 
manager.get_num_blocks_to_allocate( - "3", - 20 * block_size, - cached_blocks_3, - total_computed_tokens=len(cached_blocks_3) * block_size, - ) + assert ( + manager.get_num_blocks_to_allocate("2", 20 * block_size, cached_blocks_2, 0) + == 15 ) - assert num_new_blocks_to_allocate == 15 and num_evictable_blocks_to_allocate == 1 From 75593ea5c8a95ecc4f1e252b3ba6e051b251b0d0 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Sat, 13 Dec 2025 01:57:32 -0800 Subject: [PATCH 16/73] fix: num_new_tokens can be 0 when load_kv_async is enabled Signed-off-by: Yifan Qiao --- vllm/v1/core/kv_cache_manager.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 695bf82c3421..0a7290e03a99 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -285,8 +285,13 @@ def allocate_slots( Returns: A list of new allocated blocks. """ - if num_new_tokens == 0: - raise ValueError("num_new_tokens must be greater than 0") + # When loading KV data asynchronously, we may have zero new tokens to + # compute while still allocating slots for externally computed tokens. 
+ if num_new_tokens == 0 and num_external_computed_tokens == 0: + raise ValueError( + "num_new_tokens must be greater than 0 when there are no " + "external computed tokens" + ) if new_computed_blocks is not None: new_computed_block_list = new_computed_blocks.blocks From fd34b516661bc9fc442a1444ed54316e6fab5bc1 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Sat, 13 Dec 2025 22:20:46 -0800 Subject: [PATCH 17/73] fix: revert changes to factory.py Signed-off-by: Yifan Qiao --- vllm/distributed/kv_transfer/kv_connector/factory.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/factory.py b/vllm/distributed/kv_transfer/kv_connector/factory.py index d24a58a4effc..02d9a1ec9599 100644 --- a/vllm/distributed/kv_transfer/kv_connector/factory.py +++ b/vllm/distributed/kv_transfer/kv_connector/factory.py @@ -56,12 +56,9 @@ def create_connector( # check if the connector supports HMA hma_enabled = not config.scheduler_config.disable_hybrid_kv_cache_manager if hma_enabled and not supports_hma(connector_cls): - ## REMOVE BEFORE MERGE (YIFAN): Revert this warning back to raising - # an ValueError. - logger.warning( - "Connector %s does not support HMA but HMA is enabled. Please set " - "--disable-hybrid-kv-cache-manager to disable HMA.", - connector_cls.__name__, + raise ValueError( + f"Connector {connector_cls.__name__} does not support HMA but " + f"HMA is enabled. Please set `--disable-hybrid-kv-cache-manager`." 
) logger.info( From 75c27e3fc2a1b348e30acf0d801332e3da12de94 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Sun, 14 Dec 2025 17:04:32 -0800 Subject: [PATCH 18/73] nits Signed-off-by: Yifan Qiao --- vllm/v1/core/kv_cache_coordinator.py | 6 +----- vllm/v1/core/kv_cache_manager.py | 4 +--- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 195646d44f37..197992242391 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -142,14 +142,10 @@ def allocate_new_blocks( ) -> tuple[list[KVCacheBlock], ...]: """ Allocate new blocks for the request to give it at least `num_tokens` - token slots. If `num_blocks_to_allocate_per_group[i]` is smaller than - the number of blocks needed (in the case of sliding window attention), - the leading blocks will be padded with null blocks. + token slots. Args: request_id: The request ID. - num_blocks_to_allocate_per_group: The number of blocks to allocate - for each kv cache group. num_tokens: The total number of tokens that need a slot (including tokens that are already allocated). 
num_encoder_tokens: The number of encoder tokens for allocating diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 0a7290e03a99..f46d343cfb72 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -348,9 +348,7 @@ def allocate_slots( ) new_blocks = self.coordinator.allocate_new_blocks( - request.request_id, - num_tokens_need_slot, - num_encoder_tokens, + request.request_id, num_tokens_need_slot, num_encoder_tokens ) # P/D: delay caching blocks if we have to recv from From 76855bc8c27d6998d9d70798ced71ee94a4977bf Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Sun, 14 Dec 2025 17:26:32 -0800 Subject: [PATCH 19/73] workaround lmcache new interfaces Signed-off-by: Yifan Qiao --- .../kv_transfer/kv_connector/v1/lmcache_connector.py | 3 +++ vllm/v1/core/single_type_kv_cache_manager.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py index 344dbd01bd75..a6e05da64532 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py @@ -209,6 +209,9 @@ def get_kv_connector_kv_cache_events(self) -> LMCacheKVEvents | None: """ Get the KV connector kv cache events collected during the last interval. """ + ## REMOVE BEFORE MERGE (YIFAN): this is temporary workaround to work with + # old versions of LMCache for testing purposes. 
+ return None events = self._lmcache_engine.get_kv_events() # type: ignore [attr-defined] if not events: diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index f15e12fd0125..d6d7b2b3ccf7 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -94,8 +94,8 @@ def get_num_blocks_to_allocate( num_required_blocks = cdiv(num_tokens, self.block_size) num_skipped_tokens = self.get_num_skipped_tokens(total_computed_tokens) - # Fast-path: nothing is skipped. if num_skipped_tokens <= 0: + # Nothing is skipped. num_new_blocks = ( num_required_blocks - len(new_computed_blocks) @@ -571,7 +571,7 @@ def get_num_skipped_tokens(self, num_computed_tokens: int) -> int: Returns: The number of tokens that will be skipped for attention computation. """ - return num_computed_tokens - self.sliding_window + 1 + return max(0, num_computed_tokens - self.sliding_window + 1) def get_num_common_prefix_blocks(self, running_request_id: str) -> int: """ From 188f66191a20909c3724fed8d58b4391179d0bba Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Tue, 16 Dec 2025 08:15:02 +0000 Subject: [PATCH 20/73] fix: avoid memory leak in remove_skipped_blocks; workaround gemma3 prefix_lm issue Signed-off-by: Yifan Qiao --- vllm/config/model.py | 3 +++ vllm/v1/core/block_pool.py | 1 - vllm/v1/core/kv_cache_coordinator.py | 7 ++++--- vllm/v1/core/kv_cache_manager.py | 18 ++++++++---------- vllm/v1/core/single_type_kv_cache_manager.py | 13 +++++++++---- 5 files changed, 24 insertions(+), 18 deletions(-) diff --git a/vllm/config/model.py b/vllm/config/model.py index 59e9689567bd..fe98bbca2429 100644 --- a/vllm/config/model.py +++ b/vllm/config/model.py @@ -1106,6 +1106,9 @@ def is_deepseek_mla(self) -> bool: @cached_property def is_mm_prefix_lm(self) -> bool: """Whether to use bidirectional attention for mm positions.""" + ## REMOVE THIS BEFORE MERGING (YIFAN): temporarily disable mm prefix lm + # for 
correctness test. + return False MM_PREFIX_LM_MODELS = ( "gemma3", # TODO(Isotr0py): Disable paligemma for now before diff --git a/vllm/v1/core/block_pool.py b/vllm/v1/core/block_pool.py index e0e01f988e10..dab07c333266 100644 --- a/vllm/v1/core/block_pool.py +++ b/vllm/v1/core/block_pool.py @@ -253,7 +253,6 @@ def cache_full_blocks( new_hashes: list[ExternalBlockHash] | None = ( [] if self.enable_kv_cache_events else None ) - for i, blk in enumerate(new_full_blocks): # Some blocks may be null blocks when enabling sparse attention or sliding # window attention. We skip null blocks here. diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index 197992242391..a6324dc4397e 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -204,17 +204,18 @@ def get_num_common_prefix_blocks(self, running_request_id: str) -> list[int]: for manager in self.single_type_managers ] - def remove_skipped_blocks(self, request_id: str, num_computed_tokens: int) -> None: + def remove_skipped_blocks(self, request_id: str, num_tokens_need_slot: int) -> None: """ Remove the blocks that are no longer needed from `blocks` and replace the removed blocks with null_block. Args: request_id: The request ID. - num_computed_tokens: The number of tokens that have been computed. + num_tokens_need_slot: The number of tokens that need a slot, including + tokens already computed and tokens to be computed. 
""" for manager in self.single_type_managers: - manager.remove_skipped_blocks(request_id, num_computed_tokens) + manager.remove_skipped_blocks(request_id, num_tokens_need_slot) def get_blocks(self, request_id: str) -> tuple[list[KVCacheBlock], ...]: """ diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index f46d343cfb72..4aa748e11772 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -298,16 +298,6 @@ def allocate_slots( else: new_computed_block_list = self.empty_kv_cache_blocks.blocks - # Free the blocks that are skipped during the attention computation - # (e.g., tokens outside the sliding window). - # We can do this even if we cannot schedule this request due to - # insufficient free blocks. - # Should call this function before allocating new blocks to reduce - # the number of evicted blocks. - self.coordinator.remove_skipped_blocks( - request.request_id, request.num_computed_tokens - ) - # The number of computed tokens is the number of computed tokens plus # the new prefix caching hits num_local_computed_tokens = ( @@ -321,6 +311,14 @@ def allocate_slots( self.max_model_len, ) + # Free the blocks that are skipped during the attention computation + # (e.g., tokens outside the sliding window). + # We can do this even if we cannot schedule this request due to + # insufficient free blocks. + # Should call this function before allocating new blocks to reduce + # the number of evicted blocks. 
+ self.coordinator.remove_skipped_blocks(request.request_id, num_tokens_need_slot) + num_blocks_to_allocate = self.coordinator.get_num_blocks_to_allocate( request_id=request.request_id, num_tokens=num_tokens_need_slot, diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index d6d7b2b3ccf7..f8aa90d01b90 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -337,7 +337,7 @@ def find_longest_cache_hit( raise NotImplementedError - def remove_skipped_blocks(self, request_id: str, num_computed_tokens: int) -> None: + def remove_skipped_blocks(self, request_id: str, num_tokens_need_slot: int) -> None: """ Remove and free the blocks that are no longer needed for attention computation. The removed blocks should be replaced by null_block. @@ -347,18 +347,23 @@ def remove_skipped_blocks(self, request_id: str, num_computed_tokens: int) -> No Args: request_id: The request ID. - num_computed_tokens: The number of tokens that have been computed. + num_tokens_need_slot: The number of tokens that need a slot, including + already computed tokens and to be computed tokens. """ # Remove the blocks that will be skipped during attention computation. - num_skipped_tokens = self.get_num_skipped_tokens(num_computed_tokens) + num_skipped_tokens = self.get_num_skipped_tokens(num_tokens_need_slot) if num_skipped_tokens <= 0: # This indicates that ALL tokens are inside attention window. # Thus we do not need to free any blocks outside attention window. # A typical case is full attention that we never free any token # before the request is finished. 
return - num_skipped_blocks = num_skipped_tokens // self.block_size blocks = self.req_to_blocks[request_id] + num_skipped_blocks = num_skipped_tokens // self.block_size + # `num_skipped_tokens` may include tokens that haven't been allocated yet + # (e.g., the attention window moves into the pending-token range), so we + # must cap to the number of blocks that currently exist for this request. + num_skipped_blocks = min(num_skipped_blocks, len(blocks)) removed_blocks: list[KVCacheBlock] = [] # Because the block starts from index 0, the num_skipped_block-th block # corresponds to index num_skipped_blocks - 1. From 0bed603d5ea1746fc9c1db95b92930cf406f88a8 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Tue, 16 Dec 2025 08:55:01 +0000 Subject: [PATCH 21/73] Squashed merge main Signed-off-by: Yifan Qiao --- .../qwen3_next_mtp_async_eplb.sh | 74 ++ .buildkite/test-amd.yaml | 21 +- .buildkite/test-pipeline.yaml | 17 +- .buildkite/test_areas/misc.yaml | 4 +- .buildkite/test_areas/tool_use.yaml | 12 +- benchmarks/auto_tune/auto_tune.sh | 13 +- cmake/external_projects/flashmla.cmake | 16 +- docker/Dockerfile | 272 ++++---- docker/Dockerfile.xpu | 3 + .../dockerfile-stages-dependency.png | Bin 177867 -> 209492 bytes docs/configuration/optimization.md | 2 +- .../integrations/production-stack.md | 2 +- docs/design/cuda_graphs.md | 4 +- docs/design/optimization_levels.md | 2 +- docs/design/paged_attention.md | 6 +- docs/features/tool_calling.md | 4 +- .../installation/cpu.arm.inc.md | 32 +- docs/models/supported_models.md | 4 +- docs/serving/parallelism_scaling.md | 2 +- docs/usage/security.md | 4 +- examples/offline_inference/audio_language.py | 117 ++-- examples/offline_inference/vision_language.py | 27 + .../structured_outputs/structured_outputs.py | 2 +- tests/benchmarks/test_param_sweep.py | 8 - tests/compile/distributed/test_fusions_e2e.py | 78 +++ .../test_dynamic_shapes_compilation.py | 10 +- tests/entrypoints/openai/test_chat_error.py | 3 +- 
.../test_response_api_parsable_context.py | 6 + .../openai/test_response_api_simple.py | 45 ++ tests/entrypoints/openai/test_serving_chat.py | 28 +- .../openai/test_serving_responses.py | 6 +- .../openai/test_sparse_tensor_validation.py | 342 ++++++++++ .../test_gigachat3_tool_parser.py | 2 +- .../tool_parsers/test_hermes_tool_parser.py | 2 +- .../test_hunyuan_a13b_tool_parser.py | 2 +- .../test_llama3_json_tool_parser.py | 2 +- .../test_llama4_pythonic_tool_parser.py | 2 +- .../tool_parsers/test_olmo3_tool_parser.py | 2 +- .../tool_parsers/test_pythonic_tool_parser.py | 2 +- .../entrypoints/openai/tool_parsers/utils.py | 2 +- tests/kernels/core/test_apply_rotary_emb.py | 203 ++++++ .../moe/modular_kernel_tools/common.py | 3 +- tests/kernels/moe/test_flashinfer.py | 14 + tests/kernels/quantization/test_awq.py | 6 +- .../expected_results_batched.json | 1 + .../expected_results_single.json | 1 + .../language/generation/test_mistral.py | 6 +- .../pooling/test_token_classification.py | 31 + .../generation/test_audioflamingo3.py | 142 ++++ .../test_vit_backend_functionality.py | 434 ++++++++++++ .../processing/test_audioflamingo3.py | 125 ++++ tests/models/registry.py | 5 + .../test_sparse_tensor_validation_unit.py | 134 ++++ tests/standalone_tests/lazy_imports.py | 31 +- tests/test_inputs.py | 7 + tests/tool_parsers/__init__.py | 0 .../test_deepseekv31_tool_parser.py | 4 +- .../test_ernie45_moe_tool_parser.py | 2 +- .../test_glm4_moe_tool_parser.py | 6 +- .../test_jamba_tool_parser.py | 4 +- .../test_kimi_k2_tool_parser.py | 4 +- .../test_minimax_tool_parser.py | 4 +- .../test_mistral_tool_parser.py | 2 +- .../test_openai_tool_parser.py | 2 +- .../test_qwen3coder_tool_parser.py | 10 +- .../test_seed_oss_tool_parser.py | 4 +- .../test_xlam_tool_parser.py | 4 +- tests/tool_use/test_tool_choice_required.py | 2 +- .../v1/attention/test_attention_splitting.py | 1 + tests/v1/entrypoints/conftest.py | 5 + .../kv_connector/unit/test_nixl_connector.py | 12 +- 
tests/v1/kv_offload/test_cpu_gpu.py | 22 +- tests/v1/structured_output/test_utils.py | 4 +- vllm/_custom_ops.py | 8 +- vllm/attention/layer.py | 78 +-- vllm/attention/layers/mm_encoder_attention.py | 284 ++++++++ vllm/attention/ops/vit_attn_wrappers.py | 11 +- vllm/attention/selector.py | 59 +- vllm/benchmarks/serve.py | 4 +- vllm/benchmarks/startup.py | 326 +++++++++ vllm/compilation/backends.py | 11 +- vllm/compilation/decorators.py | 8 - vllm/compilation/fusion.py | 100 +-- vllm/compilation/matcher_utils.py | 20 +- vllm/compilation/piecewise_backend.py | 7 +- vllm/config/compilation.py | 18 +- vllm/config/model.py | 40 +- vllm/config/parallel.py | 10 + vllm/config/scheduler.py | 4 +- vllm/config/vllm.py | 112 +-- vllm/distributed/eplb/rebalance_execute.py | 3 - .../kv_connector/v1/nixl_connector.py | 95 +-- vllm/engine/arg_utils.py | 8 +- vllm/entrypoints/chat_utils.py | 26 +- vllm/entrypoints/cli/__init__.py | 2 + vllm/entrypoints/cli/benchmark/startup.py | 21 + vllm/entrypoints/context.py | 78 ++- vllm/entrypoints/llm.py | 72 +- vllm/entrypoints/openai/api_server.py | 2 +- vllm/entrypoints/openai/cli_args.py | 2 +- .../openai/parser/responses_parser.py | 40 +- vllm/entrypoints/openai/protocol.py | 9 + vllm/entrypoints/openai/serving_chat.py | 119 ++-- vllm/entrypoints/openai/serving_engine.py | 201 ++---- vllm/entrypoints/openai/serving_responses.py | 39 +- .../openai/tool_parsers/__init__.py | 163 +---- vllm/entrypoints/pooling/classify/serving.py | 6 +- vllm/entrypoints/pooling/embed/serving.py | 59 +- vllm/entrypoints/pooling/pooling/serving.py | 7 +- vllm/entrypoints/pooling/score/protocol.py | 1 + vllm/entrypoints/pooling/score/serving.py | 4 +- vllm/entrypoints/renderer.py | 63 +- vllm/entrypoints/responses_utils.py | 33 - vllm/entrypoints/serve/disagg/serving.py | 6 +- vllm/entrypoints/serve/tokenize/serving.py | 13 +- vllm/inputs/parse.py | 25 +- vllm/model_executor/custom_op.py | 9 +- vllm/model_executor/layers/batch_invariant.py | 39 +- 
.../layers/fused_moe/cutlass_moe.py | 2 - .../layers/fused_moe/deep_gemm_moe.py | 2 +- .../fused_moe/fused_moe_modular_method.py | 7 +- .../layers/fused_moe/modular_kernel.py | 21 +- .../layers/fused_moe/shared_fused_moe.py | 4 +- .../compressed_tensors_moe.py | 3 - .../model_executor/layers/quantization/fp8.py | 243 ++++--- .../kernels/scaled_mm/__init__.py | 2 +- .../layers/quantization/modelopt.py | 19 +- .../layers/quantization/moe_wna16.py | 5 + .../quantization/utils/flashinfer_utils.py | 7 +- .../layers/rotary_embedding/base.py | 20 +- .../layers/rotary_embedding/common.py | 224 ++++-- .../rotary_embedding/ernie45_vl_rope.py | 13 +- .../layers/rotary_embedding/mrope.py | 25 +- .../layers/rotary_embedding/xdrope.py | 66 +- vllm/model_executor/models/adapters.py | 12 + vllm/model_executor/models/audioflamingo3.py | 639 ++++++++++++++++++ vllm/model_executor/models/bagel.py | 584 ++++++++++++++++ vllm/model_executor/models/dots_ocr.py | 167 ++--- vllm/model_executor/models/ernie45_vl.py | 167 ++--- vllm/model_executor/models/glm4_1v.py | 148 ++-- vllm/model_executor/models/keye.py | 129 +--- vllm/model_executor/models/opencua.py | 8 +- vllm/model_executor/models/ovis2_5.py | 22 +- vllm/model_executor/models/paddleocr_vl.py | 160 ++--- vllm/model_executor/models/qwen.py | 3 + vllm/model_executor/models/qwen2.py | 32 + .../models/qwen2_5_omni_thinker.py | 1 + vllm/model_executor/models/qwen2_5_vl.py | 140 ++-- vllm/model_executor/models/qwen2_vl.py | 175 ++--- .../models/qwen3_omni_moe_thinker.py | 20 +- vllm/model_executor/models/qwen3_vl.py | 482 ++++++++++++- vllm/model_executor/models/qwen3_vl_moe.py | 6 +- vllm/model_executor/models/registry.py | 5 + vllm/model_executor/models/siglip2navit.py | 183 ++--- vllm/model_executor/models/vision.py | 15 +- vllm/multimodal/audio.py | 12 +- vllm/multimodal/image.py | 12 +- vllm/multimodal/parse.py | 2 +- vllm/platforms/cpu.py | 15 +- vllm/platforms/cuda.py | 128 ++-- vllm/platforms/interface.py | 57 +- 
vllm/platforms/rocm.py | 79 ++- vllm/platforms/tpu.py | 41 +- vllm/platforms/xpu.py | 51 +- .../{deepseekv32.py => deepseek_v32.py} | 0 vllm/tokenizers/registry.py | 2 +- vllm/tool_parsers/__init__.py | 150 ++++ .../tool_parsers/abstract_tool_parser.py | 4 +- .../tool_parsers/deepseekv31_tool_parser.py | 4 +- .../tool_parsers/deepseekv32_tool_parser.py | 6 +- .../tool_parsers/deepseekv3_tool_parser.py | 6 +- .../tool_parsers/ernie45_tool_parser.py | 6 +- .../tool_parsers/gigachat3_tool_parser.py | 2 +- .../tool_parsers/glm4_moe_tool_parser.py | 6 +- .../granite_20b_fc_tool_parser.py | 8 +- .../tool_parsers/granite_tool_parser.py | 8 +- .../tool_parsers/hermes_tool_parser.py | 6 +- .../tool_parsers/hunyuan_a13b_tool_parser.py | 8 +- .../tool_parsers/internlm2_tool_parser.py | 8 +- .../tool_parsers/jamba_tool_parser.py | 4 +- .../tool_parsers/kimi_k2_tool_parser.py | 6 +- .../llama4_pythonic_tool_parser.py | 4 +- .../tool_parsers/llama_tool_parser.py | 6 +- .../tool_parsers/longcat_tool_parser.py | 2 +- .../tool_parsers/minimax_m2_tool_parser.py | 6 +- .../tool_parsers/minimax_tool_parser.py | 8 +- .../tool_parsers/mistral_tool_parser.py | 6 +- .../tool_parsers/olmo3_tool_parser.py | 4 +- .../tool_parsers/openai_tool_parser.py | 4 +- .../tool_parsers/phi4mini_tool_parser.py | 4 +- .../tool_parsers/pythonic_tool_parser.py | 4 +- .../tool_parsers/qwen3coder_tool_parser.py | 6 +- .../tool_parsers/qwen3xml_tool_parser.py | 6 +- .../tool_parsers/seed_oss_tool_parser.py | 6 +- .../tool_parsers/step3_tool_parser.py | 6 +- .../openai => }/tool_parsers/utils.py | 0 .../tool_parsers/xlam_tool_parser.py | 2 +- vllm/transformers_utils/config.py | 23 + vllm/transformers_utils/configs/__init__.py | 2 + vllm/transformers_utils/configs/bagel.py | 53 ++ .../transformers_utils/processors/__init__.py | 2 + vllm/transformers_utils/processors/bagel.py | 73 ++ vllm/utils/deep_gemm.py | 17 - vllm/utils/torch_utils.py | 25 +- vllm/v1/attention/backends/gdn_attn.py | 2 +- 
vllm/v1/attention/backends/utils.py | 14 +- vllm/v1/kv_offload/cpu.py | 14 +- vllm/v1/kv_offload/worker/cpu_gpu.py | 261 ++++--- vllm/v1/structured_output/backend_xgrammar.py | 10 +- vllm/v1/worker/dp_utils.py | 8 +- vllm/v1/worker/gpu_model_runner.py | 33 +- vllm/v1/worker/gpu_ubatch_wrapper.py | 35 +- vllm/v1/worker/gpu_worker.py | 3 +- vllm/v1/worker/ubatch_utils.py | 71 +- vllm/v1/worker/ubatching.py | 21 +- 215 files changed, 7010 insertions(+), 2785 deletions(-) create mode 100644 .buildkite/scripts/scheduled_integration_test/qwen3_next_mtp_async_eplb.sh create mode 100644 tests/entrypoints/openai/test_sparse_tensor_validation.py create mode 100644 tests/kernels/core/test_apply_rotary_emb.py create mode 100644 tests/models/fixtures/audioflamingo3/expected_results_batched.json create mode 100644 tests/models/fixtures/audioflamingo3/expected_results_single.json create mode 100644 tests/models/multimodal/generation/test_audioflamingo3.py create mode 100644 tests/models/multimodal/generation/test_vit_backend_functionality.py create mode 100644 tests/models/multimodal/processing/test_audioflamingo3.py create mode 100644 tests/multimodal/test_sparse_tensor_validation_unit.py create mode 100644 tests/tool_parsers/__init__.py rename tests/{tool_use => tool_parsers}/test_deepseekv31_tool_parser.py (96%) rename tests/{tool_use => tool_parsers}/test_ernie45_moe_tool_parser.py (99%) rename tests/{tool_use => tool_parsers}/test_glm4_moe_tool_parser.py (99%) rename tests/{tool_use => tool_parsers}/test_jamba_tool_parser.py (98%) rename tests/{tool_use => tool_parsers}/test_kimi_k2_tool_parser.py (99%) rename tests/{tool_use => tool_parsers}/test_minimax_tool_parser.py (99%) rename tests/{tool_use => tool_parsers}/test_mistral_tool_parser.py (99%) rename tests/{tool_use => tool_parsers}/test_openai_tool_parser.py (99%) rename tests/{tool_use => tool_parsers}/test_qwen3coder_tool_parser.py (99%) rename tests/{tool_use => tool_parsers}/test_seed_oss_tool_parser.py (99%) 
rename tests/{tool_use => tool_parsers}/test_xlam_tool_parser.py (99%) create mode 100644 vllm/attention/layers/mm_encoder_attention.py create mode 100644 vllm/benchmarks/startup.py create mode 100644 vllm/entrypoints/cli/benchmark/startup.py create mode 100644 vllm/model_executor/models/audioflamingo3.py create mode 100644 vllm/model_executor/models/bagel.py rename vllm/tokenizers/{deepseekv32.py => deepseek_v32.py} (100%) create mode 100644 vllm/tool_parsers/__init__.py rename vllm/{entrypoints/openai => }/tool_parsers/abstract_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/deepseekv31_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/deepseekv32_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/deepseekv3_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/ernie45_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/gigachat3_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/glm4_moe_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/granite_20b_fc_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/granite_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/hermes_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/hunyuan_a13b_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/internlm2_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/jamba_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/kimi_k2_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/llama4_pythonic_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/llama_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/longcat_tool_parser.py (93%) rename vllm/{entrypoints/openai => }/tool_parsers/minimax_m2_tool_parser.py (99%) rename vllm/{entrypoints/openai => 
}/tool_parsers/minimax_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/mistral_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/olmo3_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/openai_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/phi4mini_tool_parser.py (98%) rename vllm/{entrypoints/openai => }/tool_parsers/pythonic_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/qwen3coder_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/qwen3xml_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/seed_oss_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/step3_tool_parser.py (99%) rename vllm/{entrypoints/openai => }/tool_parsers/utils.py (100%) rename vllm/{entrypoints/openai => }/tool_parsers/xlam_tool_parser.py (99%) create mode 100644 vllm/transformers_utils/configs/bagel.py create mode 100644 vllm/transformers_utils/processors/bagel.py diff --git a/.buildkite/scripts/scheduled_integration_test/qwen3_next_mtp_async_eplb.sh b/.buildkite/scripts/scheduled_integration_test/qwen3_next_mtp_async_eplb.sh new file mode 100644 index 000000000000..937a43d1a322 --- /dev/null +++ b/.buildkite/scripts/scheduled_integration_test/qwen3_next_mtp_async_eplb.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +set -euxo pipefail + +# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT] +THRESHOLD=${1:-0.25} +NUM_Q=${2:-1319} +PORT=${3:-8040} +OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled} +mkdir -p "${OUT_DIR}" + +wait_for_server() { + local port=$1 + timeout 600 bash -c ' + until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do + sleep 1 + done' +} + +MODEL="Qwen/Qwen3-Next-80B-A3B-Instruct" + +# Set BACKENDS based on platform +if command -v rocm-smi &> /dev/null || [[ -d /opt/rocm ]] || [[ -n "${ROCM_PATH:-}" ]]; then + # ROCm platform + BACKENDS=("allgather_reducescatter") + # Disable MOE padding for ROCm since it is 
causing eplb to fail + export VLLM_ROCM_MOE_PADDING=0 +else + # Non-ROCm platform (CUDA/other) + BACKENDS=("deepep_high_throughput" "deepep_low_latency") +fi + +cleanup() { + if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then + kill "${SERVER_PID}" 2>/dev/null || true + for _ in {1..20}; do + kill -0 "${SERVER_PID}" 2>/dev/null || break + sleep 0.5 + done + kill -9 "${SERVER_PID}" 2>/dev/null || true + fi +} +trap cleanup EXIT + +for BACK in "${BACKENDS[@]}"; do + VLLM_DEEP_GEMM_WARMUP=skip \ + VLLM_ALL2ALL_BACKEND=$BACK \ + vllm serve "$MODEL" \ + --enforce-eager \ + --tensor-parallel-size 4 \ + --enable-expert-parallel \ + --enable-eplb \ + --eplb-config '{"window_size":200,"step_interval":600,"use_async":true}' \ + --speculative-config '{"method":"qwen3_next_mtp","num_speculative_tokens":1}' \ + --trust-remote-code \ + --max-model-len 2048 \ + --gpu-memory-utilization 0.9 \ + --port $PORT & + SERVER_PID=$! + wait_for_server $PORT + + TAG=$(echo "$MODEL" | tr '/: \\n' '_____') + OUT="${OUT_DIR}/${TAG}_${BACK}.json" + python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT} + python3 - <= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}" +PY + + cleanup + SERVER_PID= + sleep 1 + PORT=$((PORT+1)) +done diff --git a/.buildkite/test-amd.yaml b/.buildkite/test-amd.yaml index c7d460be6e2b..3c9b8cbedcf0 100644 --- a/.buildkite/test-amd.yaml +++ b/.buildkite/test-amd.yaml @@ -61,8 +61,8 @@ steps: - pytest -v -s -m 'not cpu_test' multimodal - pytest -v -s utils_ -- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 15min - timeout_in_minutes: 20 +- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 20min + timeout_in_minutes: 30 mirror_hardwares: [amdexperimental, amdproduction, amdtentative] agent_pool: mi325_1 grade: Blocking @@ -73,6 +73,7 @@ steps: - tests/multimodal - tests/standalone_tests/lazy_imports.py - tests/tokenizers_ + - tests/tool_parsers - 
tests/transformers_utils - tests/config no_gpu: true @@ -82,6 +83,7 @@ steps: - pytest -v -s test_outputs.py - pytest -v -s -m 'cpu_test' multimodal - pytest -v -s tokenizers_ + - pytest -v -s tool_parsers - pytest -v -s transformers_utils - pytest -v -s config @@ -759,19 +761,7 @@ steps: - vllm/ - tests/tool_use commands: - - pytest -v -s -m 'not cpu_test' tool_use - -- label: OpenAI-Compatible Tool Use (CPU) # 5 mins - mirror_hardwares: [amdexperimental, amdproduction] - agent_pool: mi325_1 - # grade: Blocking - timeout_in_minutes: 10 - source_file_dependencies: - - vllm/ - - tests/tool_use - no_gpu: true - commands: - - pytest -v -s -m 'cpu_test' tool_use + - pytest -v -s tool_use ##### models test ##### @@ -1629,7 +1619,6 @@ steps: mirror_hardwares: [amdexperimental] agent_pool: mi325_4 # grade: Blocking - gpu: h100 optional: true num_gpus: 4 working_dir: "/vllm-workspace" diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 5fcf945f3e5a..2dcca5711b3d 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -57,8 +57,8 @@ steps: - pytest -v -s -m 'not cpu_test' multimodal - pytest -v -s utils_ -- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 15min - timeout_in_minutes: 20 +- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 20min + timeout_in_minutes: 30 source_file_dependencies: - vllm/ - tests/test_inputs.py @@ -66,6 +66,7 @@ steps: - tests/multimodal - tests/standalone_tests/lazy_imports.py - tests/tokenizers_ + - tests/tool_parsers - tests/transformers_utils - tests/config no_gpu: true @@ -75,6 +76,7 @@ steps: - pytest -v -s test_outputs.py - pytest -v -s -m 'cpu_test' multimodal - pytest -v -s tokenizers_ + - pytest -v -s tool_parsers - pytest -v -s transformers_utils - pytest -v -s config @@ -672,16 +674,7 @@ steps: - vllm/ - tests/tool_use commands: - - pytest -v -s -m 'not cpu_test' tool_use - -- label: OpenAI-Compatible Tool Use (CPU) # 5 mins - timeout_in_minutes: 10 
- source_file_dependencies: - - vllm/ - - tests/tool_use - no_gpu: true - commands: - - pytest -v -s -m 'cpu_test' tool_use + - pytest -v -s tool_use ##### models test ##### diff --git a/.buildkite/test_areas/misc.yaml b/.buildkite/test_areas/misc.yaml index 072bccadb726..252af1e56a10 100644 --- a/.buildkite/test_areas/misc.yaml +++ b/.buildkite/test_areas/misc.yaml @@ -115,7 +115,7 @@ steps: - label: Async Engine, Inputs, Utils, Worker, Config (CPU) depends_on: ~ - timeout_in_minutes: 20 + timeout_in_minutes: 30 source_file_dependencies: - vllm/ - tests/test_inputs.py @@ -123,6 +123,7 @@ steps: - tests/multimodal - tests/standalone_tests/lazy_imports.py - tests/tokenizers_ + - tests/tool_parsers - tests/transformers_utils - tests/config no_gpu: true @@ -132,6 +133,7 @@ steps: - pytest -v -s test_outputs.py - pytest -v -s -m 'cpu_test' multimodal - pytest -v -s tokenizers_ + - pytest -v -s tool_parsers - pytest -v -s transformers_utils - pytest -v -s config diff --git a/.buildkite/test_areas/tool_use.yaml b/.buildkite/test_areas/tool_use.yaml index 7040cd1d253b..69527a121422 100644 --- a/.buildkite/test_areas/tool_use.yaml +++ b/.buildkite/test_areas/tool_use.yaml @@ -10,14 +10,4 @@ steps: - vllm/ - tests/tool_use commands: - - pytest -v -s -m 'not cpu_test' tool_use - -- label: OpenAI-Compatible Tool Use (CPU) - depends_on: ~ - timeout_in_minutes: 10 - source_file_dependencies: - - vllm/ - - tests/tool_use - no_gpu: true - commands: - - pytest -v -s -m 'cpu_test' tool_use + - pytest -v -s tool_use diff --git a/benchmarks/auto_tune/auto_tune.sh b/benchmarks/auto_tune/auto_tune.sh index 25baa9cbda39..a245e2022e60 100644 --- a/benchmarks/auto_tune/auto_tune.sh +++ b/benchmarks/auto_tune/auto_tune.sh @@ -18,6 +18,11 @@ MIN_CACHE_HIT_PCT=${MIN_CACHE_HIT_PCT:-0} MAX_LATENCY_ALLOWED_MS=${MAX_LATENCY_ALLOWED_MS:-100000000000} NUM_SEQS_LIST=${NUM_SEQS_LIST:-"128 256"} NUM_BATCHED_TOKENS_LIST=${NUM_BATCHED_TOKENS_LIST:-"512 1024 2048 4096"} +HOSTNAME=$(hostname) +if [[ -z 
"$HOSTNAME" ]]; then + echo "Error: Failed to determine hostname." >&2 + exit 1 +fi LOG_FOLDER="$BASE/auto-benchmark/$TAG" RESULT="$LOG_FOLDER/result.txt" @@ -82,6 +87,7 @@ start_server() { "$MODEL" "--disable-log-requests" "--port" "8004" + "--host" "$HOSTNAME" "--gpu-memory-utilization" "$gpu_memory_utilization" "--max-num-seqs" "$max_num_seqs" "--max-num-batched-tokens" "$max_num_batched_tokens" @@ -113,7 +119,7 @@ start_server() { # since that we should always have permission to send signal to the server process. kill -0 $server_pid 2> /dev/null || break - RESPONSE=$(curl -s -X GET "http://0.0.0.0:8004/health" -w "%{http_code}" -o /dev/stdout) + RESPONSE=$(curl -s -X GET "http://${HOSTNAME}:8004/health" -w "%{http_code}" -o /dev/stdout) STATUS_CODE=$(echo "$RESPONSE" | tail -n 1) if [[ "$STATUS_CODE" -eq 200 ]]; then server_started=1 @@ -173,6 +179,7 @@ run_benchmark() { --goodput e2el:$MAX_LATENCY_ALLOWED_MS \ --num-prompts 1000 \ --random-prefix-len $prefix_len \ + --host "$HOSTNAME" \ --port 8004 &> "$bm_log" throughput=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g') e2el=$(grep "P99 E2EL (ms):" "$bm_log" | awk '{print $NF}') @@ -188,7 +195,7 @@ run_benchmark() { request_rate=$((${throughput%.*} + 1)) while ((request_rate > 0)); do # clear prefix cache - curl -X POST http://0.0.0.0:8004/reset_prefix_cache + curl -X POST http://${HOSTNAME}:8004/reset_prefix_cache sleep 5 bm_log="$LOG_FOLDER/bm_log_${max_num_seqs}_${max_num_batched_tokens}_requestrate_${request_rate}.txt" vllm bench serve \ @@ -204,6 +211,7 @@ run_benchmark() { --goodput e2el:$MAX_LATENCY_ALLOWED_MS \ --num-prompts 100 \ --random-prefix-len $prefix_len \ + --host "$HOSTNAME" \ --port 8004 &> "$bm_log" throughput=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g') e2el=$(grep "P99 E2EL (ms):" "$bm_log" | awk '{print $NF}') @@ -304,6 +312,7 @@ if (( $(echo "$best_throughput > 0" | bc -l) )); then --goodput e2el:$MAX_LATENCY_ALLOWED_MS \ --num-prompts 100 \ 
--random-prefix-len $prefix_len \ + --host "$HOSTNAME" \ --port 8004 \ --profile &> "$bm_log" else diff --git a/cmake/external_projects/flashmla.cmake b/cmake/external_projects/flashmla.cmake index 2cf3c1a755d3..0d4f9b7aa07c 100644 --- a/cmake/external_projects/flashmla.cmake +++ b/cmake/external_projects/flashmla.cmake @@ -35,16 +35,21 @@ message(STATUS "FlashMLA is available at ${flashmla_SOURCE_DIR}") # sm90a set(SUPPORT_ARCHS) -if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.3) - list(APPEND SUPPORT_ARCHS 9.0a) +if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.3) + list(APPEND SUPPORT_ARCHS "9.0a") endif() -if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.8) - list(APPEND SUPPORT_ARCHS 10.0a) +if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.9) + # CUDA 12.9 has introduced "Family-Specific Architecture Features" + # this supports all compute_10x family + list(APPEND SUPPORT_ARCHS "10.0f") +elseif(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) + list(APPEND SUPPORT_ARCHS "10.0a") endif() cuda_archs_loose_intersection(FLASH_MLA_ARCHS "${SUPPORT_ARCHS}" "${CUDA_ARCHS}") if(FLASH_MLA_ARCHS) + message(STATUS "FlashMLA CUDA architectures: ${FLASH_MLA_ARCHS}") set(VLLM_FLASHMLA_GPU_FLAGS ${VLLM_GPU_FLAGS}) list(APPEND VLLM_FLASHMLA_GPU_FLAGS "--expt-relaxed-constexpr" "--expt-extended-lambda" "--use_fast_math") @@ -126,7 +131,8 @@ if(FLASH_MLA_ARCHS) $<$:-UPy_LIMITED_API> $<$:-UPy_LIMITED_API>) else() - # Create empty targets for setup.py when not targeting sm90a systems + message(STATUS "FlashMLA will not compile: unsupported CUDA architecture ${CUDA_ARCHS}") + # Create empty targets for setup.py on unsupported systems add_custom_target(_flashmla_C) add_custom_target(_flashmla_extension_C) endif() diff --git a/docker/Dockerfile b/docker/Dockerfile index 0d50d97e54c6..ae2624ace67b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -32,7 +32,7 @@ ARG DEADSNAKES_GPGKEY_URL # The PyPA get-pip.py script is a self contained 
script+zip file, that provides # both the installer script and the pip base85-encoded zip archive. This allows -# bootstrapping pip in environment where a dsitribution package does not exist. +# bootstrapping pip in environment where a distribution package does not exist. # # By parameterizing the URL for get-pip.py installation script, we allow # third-party to use their own copy of the script stored in a private mirror. @@ -73,15 +73,13 @@ ARG INSTALL_KV_CONNECTORS=false #################### BASE BUILD IMAGE #################### # prepare basic build environment FROM ${BUILD_BASE_IMAGE} AS base + ARG CUDA_VERSION ARG PYTHON_VERSION -ARG TARGETPLATFORM -ARG INSTALL_KV_CONNECTORS=false -ENV DEBIAN_FRONTEND=noninteractive -ARG GET_PIP_URL +ENV DEBIAN_FRONTEND=noninteractive -# Install system dependencies and uv, then create Python virtual environment +# Install system dependencies including build tools RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \ && apt-get update -y \ @@ -107,32 +105,30 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && ln -s /opt/venv/bin/pip /usr/bin/pip \ && python3 --version && python3 -m pip --version -ARG PIP_INDEX_URL UV_INDEX_URL -ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL -ARG PYTORCH_CUDA_INDEX_BASE_URL -ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER - # Activate virtual environment and add uv to PATH ENV PATH="/opt/venv/bin:/root/.local/bin:$PATH" ENV VIRTUAL_ENV="/opt/venv" -# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out -# Reference: https://github.com/astral-sh/uv/pull/1694 +# Environment for uv ENV UV_HTTP_TIMEOUT=500 ENV UV_INDEX_STRATEGY="unsafe-best-match" -# Use copy mode to avoid hardlink failures with Docker cache mounts ENV UV_LINK_MODE=copy -RUN <> /etc/environment -# Install Python and other dependencies +# Install Python and 
system dependencies RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \ && apt-get update -y \ @@ -408,62 +421,103 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \ && python3 --version && python3 -m pip --version -# Install CUDA development tools and build essentials for runtime JIT compilation +# Install CUDA development tools for runtime JIT compilation # (FlashInfer, DeepGEMM, EP kernels all require compilation at runtime) RUN CUDA_VERSION_DASH=$(echo $CUDA_VERSION | cut -d. -f1,2 | tr '.' '-') && \ apt-get update -y && \ apt-get install -y --no-install-recommends \ - cuda-nvcc-${CUDA_VERSION_DASH} \ - cuda-cudart-${CUDA_VERSION_DASH} \ - cuda-nvrtc-${CUDA_VERSION_DASH} \ - cuda-cuobjdump-${CUDA_VERSION_DASH} \ - # https://github.com/vllm-project/vllm/issues/29590 - libcurand-dev-${CUDA_VERSION_DASH} \ - libcublas-${CUDA_VERSION_DASH} \ - # Fixes nccl_allocator requiring nccl.h at runtime - # https://github.com/vllm-project/vllm/blob/1336a1ea244fa8bfd7e72751cabbdb5b68a0c11a/vllm/distributed/device_communicators/pynccl_allocator.py#L22 - libnccl-dev && \ + cuda-nvcc-${CUDA_VERSION_DASH} \ + cuda-cudart-${CUDA_VERSION_DASH} \ + cuda-nvrtc-${CUDA_VERSION_DASH} \ + cuda-cuobjdump-${CUDA_VERSION_DASH} \ + libcurand-dev-${CUDA_VERSION_DASH} \ + libcublas-${CUDA_VERSION_DASH} \ + # Fixes nccl_allocator requiring nccl.h at runtime + # https://github.com/vllm-project/vllm/blob/1336a1ea244fa8bfd7e72751cabbdb5b68a0c11a/vllm/distributed/device_communicators/pynccl_allocator.py#L22 + libnccl-dev && \ rm -rf /var/lib/apt/lists/* -ARG PIP_INDEX_URL UV_INDEX_URL -ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL -ARG PYTORCH_CUDA_INDEX_BASE_URL -ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER - # Install uv for faster pip installs -RUN --mount=type=cache,target=/root/.cache/uv \ - python3 -m pip install 
uv +RUN python3 -m pip install uv -# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out -# Reference: https://github.com/astral-sh/uv/pull/1694 +# Environment for uv ENV UV_HTTP_TIMEOUT=500 ENV UV_INDEX_STRATEGY="unsafe-best-match" -# Use copy mode to avoid hardlink failures with Docker cache mounts ENV UV_LINK_MODE=copy -# Workaround for https://github.com/openai/triton/issues/2507 and -# https://github.com/pytorch/pytorch/issues/107960 -- hopefully -# this won't be needed for future versions of this docker image -# or future versions of triton. +# Workaround for triton/pytorch issues RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/ -# Install vllm wheel first, so that torch etc will be installed. -RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \ - --mount=type=cache,target=/root/.cache/uv \ - uv pip install --system dist/*.whl --verbose \ - --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') +# ============================================================ +# SLOW-CHANGING DEPENDENCIES BELOW +# These are the expensive layers that we want to cache +# ============================================================ + +# Install PyTorch and core CUDA dependencies +# This is ~2GB and rarely changes +ARG PYTORCH_CUDA_INDEX_BASE_URL +COPY requirements/common.txt /tmp/common.txt +COPY requirements/cuda.txt /tmp/requirements-cuda.txt +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -r /tmp/requirements-cuda.txt \ + --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. 
-f1,2 | tr -d '.') && \ + rm /tmp/requirements-cuda.txt /tmp/common.txt # Install FlashInfer pre-compiled kernel cache and binaries +# This is ~1.1GB and only changes when FlashInfer version bumps # https://docs.flashinfer.ai/installation.html +ARG FLASHINFER_VERSION=0.5.3 RUN --mount=type=cache,target=/root/.cache/uv \ - uv pip install --system flashinfer-cubin==0.5.3 \ - && uv pip install --system flashinfer-jit-cache==0.5.3 \ + uv pip install --system flashinfer-cubin==${FLASHINFER_VERSION} \ + && uv pip install --system flashinfer-jit-cache==${FLASHINFER_VERSION} \ --extra-index-url https://flashinfer.ai/whl/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \ && flashinfer show-config -COPY examples examples -COPY benchmarks benchmarks -COPY ./vllm/collect_env.py . +# ============================================================ +# OPENAI API SERVER DEPENDENCIES +# Pre-install these to avoid reinstalling on every vLLM wheel rebuild +# ============================================================ + +# Install gdrcopy (saves ~6s per build) +# TODO (huydhn): There is no prebuilt gdrcopy package on 12.9 at the moment +ARG GDRCOPY_CUDA_VERSION=12.8 +ARG GDRCOPY_OS_VERSION=Ubuntu22_04 +ARG TARGETPLATFORM +COPY tools/install_gdrcopy.sh /tmp/install_gdrcopy.sh +RUN set -eux; \ + case "${TARGETPLATFORM}" in \ + linux/arm64) UUARCH="aarch64" ;; \ + linux/amd64) UUARCH="x64" ;; \ + *) echo "Unsupported TARGETPLATFORM: ${TARGETPLATFORM}" >&2; exit 1 ;; \ + esac; \ + /tmp/install_gdrcopy.sh "${GDRCOPY_OS_VERSION}" "${GDRCOPY_CUDA_VERSION}" "${UUARCH}" && \ + rm /tmp/install_gdrcopy.sh + +# Install vllm-openai dependencies (saves ~2.6s per build) +# These are stable packages that don't depend on vLLM itself +RUN --mount=type=cache,target=/root/.cache/uv \ + if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + BITSANDBYTES_VERSION="0.42.0"; \ + else \ + BITSANDBYTES_VERSION="0.46.1"; \ + fi; \ + uv pip install --system accelerate hf_transfer modelscope \ + 
"bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm>=1.0.17' 'runai-model-streamer[s3,gcs]>=0.15.3' + +# ============================================================ +# VLLM INSTALLATION (depends on build stage) +# ============================================================ + +ARG PIP_INDEX_URL UV_INDEX_URL +ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL +ARG PYTORCH_CUDA_INDEX_BASE_URL +ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER + +# Install vllm wheel first, so that torch etc will be installed. +RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system dist/*.whl --verbose \ + --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') RUN --mount=type=cache,target=/root/.cache/uv \ . /etc/environment && \ @@ -478,7 +532,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \ echo "No DeepGEMM wheels to install; skipping."; \ fi' -# Pytorch now installs NVSHMEM, setting LD_LIBRARY_PATH (https://github.com/pytorch/pytorch/blob/d38164a545b4a4e4e0cf73ce67173f70574890b6/.ci/manywheel/build_cuda.sh#L141C14-L141C36) +# Pytorch now installs NVSHMEM, setting LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH # Install EP kernels wheels (pplx-kernels and DeepEP) that have been built in the `build` stage @@ -487,23 +541,17 @@ RUN --mount=type=bind,from=build,src=/tmp/ep_kernels_workspace/dist,target=/vllm uv pip install --system ep_kernels/dist/*.whl --verbose \ --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. 
-f1,2 | tr -d '.') -RUN --mount=type=bind,source=tools/install_gdrcopy.sh,target=/tmp/install_gdrcopy.sh,ro \ - set -eux; \ - case "${TARGETPLATFORM}" in \ - linux/arm64) UUARCH="aarch64" ;; \ - linux/amd64) UUARCH="x64" ;; \ - *) echo "Unsupported TARGETPLATFORM: ${TARGETPLATFORM}" >&2; exit 1 ;; \ - esac; \ - /tmp/install_gdrcopy.sh "${GDRCOPY_OS_VERSION}" "${GDRCOPY_CUDA_VERSION}" "${UUARCH}" - # CUDA image changed from /usr/local/nvidia to /usr/local/cuda in 12.8 but will # return to /usr/local/nvidia in 13.0 to allow container providers to mount drivers # consistently from the host (see https://github.com/vllm-project/vllm/issues/18859). # Until then, add /usr/local/nvidia/lib64 before the image cuda path to allow override. ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib64:${LD_LIBRARY_PATH} +# Copy examples and benchmarks at the end to minimize cache invalidation +COPY examples examples +COPY benchmarks benchmarks +COPY ./vllm/collect_env.py . #################### vLLM installation IMAGE #################### - #################### TEST IMAGE #################### # image to run unit testing suite # note that this uses vllm installed by `pip` @@ -569,18 +617,12 @@ ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL # Reference: https://github.com/astral-sh/uv/pull/1694 ENV UV_HTTP_TIMEOUT=500 -# install additional dependencies for openai api server +# install kv_connectors if requested RUN --mount=type=cache,target=/root/.cache/uv \ --mount=type=bind,source=requirements/kv_connectors.txt,target=/tmp/kv_connectors.txt,ro \ if [ "$INSTALL_KV_CONNECTORS" = "true" ]; then \ uv pip install --system -r /tmp/kv_connectors.txt; \ - fi; \ - if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ - BITSANDBYTES_VERSION="0.42.0"; \ - else \ - BITSANDBYTES_VERSION="0.46.1"; \ - fi; \ - uv pip install --system accelerate hf_transfer modelscope "bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm>=1.0.17' 'runai-model-streamer[s3,gcs]>=0.15.3' + fi ENV VLLM_USAGE_SOURCE production-docker-image diff 
--git a/docker/Dockerfile.xpu b/docker/Dockerfile.xpu index adac43c6accb..72d2053102c2 100644 --- a/docker/Dockerfile.xpu +++ b/docker/Dockerfile.xpu @@ -76,6 +76,9 @@ RUN python3 -m pip install -e tests/vllm_test_utils ENV NIXL_VERSION=0.7.0 RUN python3 /workspace/vllm/tools/install_nixl_from_source_ubuntu.py +# PyJWT-2.7.0 will influence some wheel behaviors, remove its dist-info to avoid conflicts +RUN rm /usr/lib/python3/dist-packages/PyJWT-2.7.0.dist-info/ -rf + # remove torch bundled oneccl to avoid conflicts RUN --mount=type=cache,target=/root/.cache/pip \ pip uninstall oneccl oneccl-devel -y diff --git a/docs/assets/contributing/dockerfile-stages-dependency.png b/docs/assets/contributing/dockerfile-stages-dependency.png index 7420ca4d89441e6dd320657092aaf3e1c0491e9c..c8839eb93de95fa5ffd6b3338b38ce270ea0e1c7 100644 GIT binary patch literal 209492 zcmaHU30Tef`}S8RhAcCNj5W(an-G=uFyV+wD(z*oPzY_(b}+__#&%Rf8%n8EN)m0x zkak*?RA`}8l(g4-Kc~~-|G(b%J-^>|jZ)v`vpmoJ-1q%_{H?3KY4XI`6B&k?%-;N) z9>e@(!!SSW`EeY+lW1C#ga3^CU2D^C%rN~gEIslf!z^Ifzpc|h67=!4o1a0$HMJqT zaX*Fcj{LQ&YQX~0kLE9a`)zT6P3%j}1kJ@q%HrkjIOu=bc*9I&_Ums#;a%0~&Dv`1JlKw(?gzrCD zZstZ7&j0(noCljF#(w`}_;Z$uI9}#|egelwO!}YSVVKS^zcK&odsg%PWdGN)j8qK=w`lBh=d zi+lTD#irC8t+YBZIq}_DW!wJN6rFcZ&-k7d9=%k8zND1@mWb;cc;C=XDF{|pIH%IF zdEC_bL7T+9)?8e=O%e~Icf&ZzwsLP|@Zq{+<8SVax7b@6wf)6~(W@{4qBA8&hS4fx zsdi9cOWZ@53QHd^wZ8W^+zavW+a9>KUYRmaDcm5&aPN!jPu@%XZJ8WI!tK=P8g zt1<3q)LPY%w<6TswQ`B7$JcyA)BXOTgCFgLBYud=R2v@XOzU~;>;CD{_|E4WrZ)Jl z$cS=H#~UfW`Qw-3OrK#I-Y~z>yD(iL{7H;{*i~*?f*p2lxxAb%W`g!r;*De({&(1HryxLB?EV=x)X_H%KzjUAnmk87f8nXQ+|`9^7dyn>*?8;&vzc3@KCE)d z;^^ME^siO2x z$?4M^*)CanvDdFJ8Xl}24z?WbtGKM>V$b#QuIsMxXfJr}RvYY6ZX8+n2a}%{9TRs2 zcWS7#3*DA|cHTu+t#|+azOSuUPfSq~FNxGyv3B9;#WengP$2&*w$7Rv(BGYXwjl0d+m&-_ed6rmIP-@ppa1*i!^=Cn?>zeUh+DsV zM7xgqoDY>rb_NSKJaldCz|NJ#EG<)VY4)3Gab%(SL)UE`MTMw(T~=Ae!SBuU%hxBDzUPN#)UsOz*CFh`${DXdM^H^VQ~k zrO|qM)$WZ`)CWG{xmoN&mCmgvCox)=w<)|{v(xO5CO2GPGD0U*t+zQQexmlbIBCrf 
z;=1PsGl#o}J06V}W3lgHeFtIn3ojE2`_ZfaYe7bT+f`O?<5kDQ#q4D>f_@x*0IQtu zS9@4vdPlBQrDtcs4rAlXH@982{qp{WvBT};(`SV>)@AfH$2hlDEB>-{>t?RXm**QO z);zj3dMT#)?sqSyJ2rm4 z(ZNNdPry-X`M2ZxFT5{xJ28U!(eFA7m30=5Wef_{hXfvse z5SuaF^0O2Um|c-#Ot)HF8>x}+l={Ei*ZN4yG%rl|+ zNPPNN$(0irm)9X4ZN@GGue+y?{wOo_{S#;D?8KxZ`z@hW5My||qpQL)a{G;qn%P2O zQ%tc`XaR=f8<$t^T5oc1pRqG*^h%ru_1~svb&Xf|Yi#$0?Ba9}H>`ANU4#lQ%c(Ix z2eDJ{;OuX|wrM2VYpAX|`sID}y92Ku%n1B}v*Zb4tv)^L&!a!e46#Rl^fT7F;k@|F zp+4cIW31SpUw8YqXPguK;3u8Y0r-2W`anYS{T-3of*+i!))ST`cx=Yy;z-uz9R2<6 ze}5ZfOT{LvKgMQ^^rRntOx#-@Ys_tKUo5!tz_*_+4rjBun1nTsHEDHg7-hk*H#?|QV;(#t>CtwwRV$=zob9j@` z61D& zhy_d+e^40Cl6+&kVE@*XS~cvAZABrfomXoIZoc0vxU238IcX*SSH3s-8Rf+)!QY3~ zNUa$Cd%-s~%)|fRkXd8i%HRL{E9l7Ezj@%ac#Bj77PIkz)P~|)J0d@()Oycad(cV^ zU_iKS_3o$tAoDD*^k`2{LVkWP$yRs$+i7_;(P)Vvib9iWKoEHYxsV%k1Hs1(aeuzQ^HyMI=9^w$onbyqv-Oi}&v96;ME zq_Zl;QN3s`0q&e-p+TkxQkNh`i{z(%t#W7r@+iD^YW|g_+pb6-9QwDXt;O|grCpQX z4(*%^o916uMs^urG1u$Mi&TIC8nJfs%#VqA1fJdFyzrw*K*~Ptp79pxvd-_Hw+CAS zy)_xze||F4G8_x0GcB8NYg35FfsEdZ^+8UBIT$!oIg3EcpSPh5AQXJyVe37yAh%}= zPt~9E8Q5Kjo0@rke!}KHJu}A+`(Gq&{FBZh!Dz0u6aXnEz)(l!^KbXCN}|{QNi%U` z{M14`rL4!-kJ#Y~v(!9~cn`K%?!4{Z_aQRr)q#f%C#I@J-K#*Rx*DSDVT)BGv0_CL z)f_V|Jy&7dZ_(7$T)H*zJfY0Yo+m4~ET7^2TBm;}EUQP2WmaGP_|(**q5gLD+Vy|- zVx8(FHUyY9;;**a*w~mP+xw%+Sc_rPwk-_0j5Xbe?a4zq23el~iXu=%h)4_f+(GD| zBv_3&t=Jg2y8~s69V)`h$XK*|zBHG`*w=&(eTn=CP;*(;1EJxTgKd7`1p+`-4p`Ft zO}$N_MSc=HWNmW&2>WfCd+0YR;DEZLvhE5tq^>eQAZU!K@$PS>D>s{dz_Q6s8|rIO z?5R6ZlHt|A#=cZMZ+VK@1Gn}N<+l5aD{!BwZS+$H{=#O?FEmpduQ((E<3v&^_dIMJ z)u+HOivfD~Xan$yRXZS;qbQ2*qrWaoM$&Cecsxbx@t>2OF#1ge3bpzF%vvoAEIn!N z+LdSKD1m_$Y7 z`pgjZasW3!s#M)--0i4H+L!kE>E4<&cbbCzDX-;G=vZnix89BJMZU2rRO|oP-s+Sq z5viYUfd?{;@|ZeHHr&1d5a)hYZ%l0BmSpuyZLc4$7&tNXv%eJC`B^kOaxhTH1U4u0M53oM(95x;~;y@ZWy zP5WszBGcveD{5YS)_BCwreq|@m5ynjHhhX>8X#R$ zO6FjzXH$3EYvu8C4{A_2e)97-$Raw2UpFKKPM#&pLSYd%TMRM6c-P4pqW9ZSCtYpo zEmG@`tvZ4_NBw@0Y@YN*#I*wpx!$#ZO!(z6;)b2?@_T_zUD$)(?bXPr@+yx--0WzAkLW+%V9Ptbz)|xIM%9x z75S)Bm6VpIa$?-%E!eNoh3Q~iK9t4mqoCU6JNY^SfE;R}>sGo_FsD36cC+edzN! 
z&B6>;kA#nfZ^uyqj>_U{@ByCm#vM1e-jcU)t+a92*M3I6N__FAvnX6uF489VH!!^A znyxQtQ~&G&1^PsUQQMaZyL>&*Z9Fr3dGP@1uW%&#s?;lXPeCOfHjU3ROqxrF5?JT%blGu$oIZQ^(Qui^Z3!gz! z7;Fly081l!wDrq}5(lLbx|E(}`WC@}JH)5EQuQM@gqS9J>({z$d!!uVk^Y%2CrQD& zte0{xpiOj{hvVHB8_S-yr*~W2u5sK>xIjd;8+=c^d|Op=i%JvVkii_lUyoxR6}F1m z*D(TpC`R!F*V94-qmaNF<#T*9^n zKwo@C`p8n9t#BYnMCN{2Rchw&V7ti7p-okXvggH|u|%LhY4HmA*U#TS0>q2lJSlbu z!tefTjoSbYO(Zr@umPxbFWgUX?=$kf=zSk^$>^8ZElK&ZdHQE+@k;p^J_L-9E#G3G z)jM_7wT1cKz)+sP)S}Bj(}i)v&M`MPxGwFw58Y|eMTMJ zj##w^P*t+UKX=5h$nU>`)Er~4;?i7-sPy}>7UYs&_>V;+TdFKrTCaeFl3r|_V{pdt z{+mBhyS^D;ksqYEngTx8y1{>EA>h+sx*}kQO$frhaLw@Jsp_%vHDI|#7j>OWp<)8) z%HSP-L++gF*Ox*S=GXuR*@JyuG5-5z%$R}NqHby-O0m>0#ab~>C(mB<46~q6+dEI$ zbt~|7%v0>#fk%T>*J7cb+?sYI$hnw}X^fdoX<5X~>B)?prKwGVMZpObEJ|C7(SAyHT>6?WV@1W3bzfz zL09VNOWo|znX2+h19A7tnmy004*e$(O*l_B^7r0!E6~0RG><F(YQJF#ab&0 zP!?Xo0+vUSR)FH|5|PbFxomE<-L2(d<+g!ZCDt;l3ufOMmy~VZ{m;)EggA`Jqk>p+G^|S8d?aV*dd;$Js@7?{dHX<$UXj8|>$|M@!hQvIX zJW$Oun==qT3eG6h1cJfwYOg}v3Dyo)T3)J#A>Buldh*Iu7L|dHKK-wV0J#+#JMbtq zv#%`v@Vhe#wy10^-w!r5Af^{$!O0gn5F;@~>3ta0h%Erd7;AtP40#67)AWbTKDLL&%r^0V1&O>jhM@1^t^R1Ozm%b z2prolt?wS;etf6U2UR{O8vT`1ut&>VwV{$JiH_5{p2bE6rT} z`y1y9uQKz^sXQXs=06q7pw~E1toS_)w|| z0ID1}o|_-MB6C2NU5sq-5#vXXlJw!`Rhi<9P4isz@6{Y_p<#CiHPrDTG86%L5fb|| zO2NC&DlRqMpKP#SMehu;0^m~ukUf_vymsC}vGN}=b+v|u1}ZC(-uyYomgFH+L_40< zE)xu3)P5Uq#QM%7iC^@CHHZ<0VW&I1#&C zpUm$VM@PXA8{RfCn5Pr8_6|r!X$c6WvdEmQvf53f7iM~v@l#OOe~_+BTn3O?MmSH3 zDF83wef>IK*|-#xtUH>Ok*S@oy|k`*gw+?;*qQQ;)u+oPXvEbvXr;n`!yPmw0Zyv$&y zxq(Y{yk(})cPrz6ELD4LJY+ejOwXP-r`TM(5cj22=RrD&lB$7X=lr2~36ejM^fszw zi7x(5BXW6hvO#lX7lheTl*RAcmh~W&u2?ylaVeN(7j)tD>C>zn&X}>&zZGr+oJ4FE zNhKbq$H>K?GDY!ZLE(ah+~JXkEj=@6BImj)NQ+9KEj>y9Vo2O+_&$-^FYb)KInx~d zP1q@Lz{tzouR9+78Ona>-@hyq4g8UFz2J(R6vz(ep|VPEyeTBz(T@=`x|H?7!6hQp zv)Ckj2OZ9pptZG`f>0+w=rT6ZWQ0QS?a|*RVQ9EdaxU-d=3o+?sl@#6?_VymQmPMY zA>Eln7ZOeRdGSVTQj9Rt6e$U1;|FDwL`ScrVeYoe{l&=2h*AafS0IKw5dE2$H7YZ+ zeq*E$U=imL9|AbU$^r3yQLq}-i5EV>M6%JXri8FS}Gr)`xmNEWn 
zHHcNRa|kaJ#A2fkz9GWsG)Z`=AOlNK1F~{Tbku|AxxQRTAQ`H0)C82KIlzD1hiwm} z*hJu>Mp8wIxPPLyVDl$f^EZEj9~IEDVCZA-79&O=$1Q*Tz$H&)MVjtGS;qS`aDXyZ zSCm&-w2c3pGVclzlHaS!GW->(b&YCaLR<94yvP+m&jz++8yF49_eT1-pocu_{xq-z zY%VyRRf{+N^K6ArZyrW;bnv7rN5?LLwmEOyv9B+(lI)5KX`7IQ0YU!&UGi5;&(E3! z)5cC!SxFK(cVEJzakCT;Tgz8kUp!v&?W(<_SAD{hrJ=ZzqJXu@Wfeglmajhen0V;R zicYqVCX5NZgym9S|OTbK=(VqtC1B;|Fhz-iw92 zZ;02R=neoCP=Di`TC`FHvU?D?Mau3aU_##hJLWv=va;KOHXvdWmE94lV+!NbI&w)0 zA&yw++e4Qsj~Z^ma*^Pjp3btX2Y_K?UzYU((>Fa^d@i443H*6x;7?W`zCb{1B1DJ( zTHgNW8hjt8<{sWGlR3~KjU6t$bR>Y`Nd>ymCodueN#7+9R6A%_58a(SL`6i@5^H`t zxm_2jJSUNtw+U9V;g_x)+#w5LvIu+=aB~^U0TX1Q;yj?CSWW1KtKw}?kx@{EJJ)L zu|=NXm20n>f-oadjcDVu%56p(CZqS{M4aYZdPIr*B-)nf4@wl8*<*~Fv9lJApMS)P z*il01_~z)Z8kz97nAMAa<|)flVMIy{Z3&cQE-*ng2g~oTvPNN{1ERD9YmpWjYDKAU zQ8BA{v~E%_Ad*v|)^jzK-Dmt);mJQnUF9g7I$mrNvjJO5*wXjxSeNynj`6e|P(&89 zi?3}s<=j%f9QLZVY1^%z11H@*@LCBJWFfhq3WJsHkSCXayD!7X9${_$2x+qWUM5ug z3=g^kW{_zBz9I=4=oc@-w10*APl=&@#=o(M5sIh;OCGwl1D(Z&IggG7BmIcKS2WDO zAeY#Ib)=ZH`f&ZT{9xsj<|-%=p@=={SUg_3Wod!{A76^(qfDqI}wjrJJU^i4CXaSNu`d#}+tv^vIHR3|~00#b0 z*H_jK8W_PF;tCrUPgjIpXyR4@M9G@AezbD6dHT;R_b)Ftt;TSue99V2<~yH*-hc(2 z*w!{=XWBYpSB^Z0+=0*MeDcACUID9O_z;URCFtP46Dl!=v}KP^6C_aGYyltn0X$be z3RTj<$p2H@AJ#LKw(wc?p&pa1Kl`AeTFds24 ze7ML&?HBXz*+sS&tkMNKs$Kekv!d?j=DAg`f)1sFYHT^{kBG;p1u09IdG|K-W)Amf zMt44J&+ur2NF?+vjOuP0eby`T-w!ohJ(#Ai%UUa7h$05Qx3b9FaKnUSfH5+_VfkeK z;<`zJTK%TJJV)tXcz#4 zoej#mm=|8hst$n;gI-~S5lRKApM=uT2Joy9%MUp7^jSeF%F-DXdqji%B5;=WWyi7Xm9RpEV#%*g;Mb8&Ew7(%RxyHW4w)qtvEgCn*~BF zCbnMcDDS1~mZd_|AboV8-iwT0|IAla0+k$fOh4=G8Yq+1&L!JY-=3E0K&IaL@E-ou zh{-Ji@pcJC@pQitQxdMl5qq`@}!fteto%{NA!@pN@RrMq4*!J?zIf9W4^4k$q=bIk01)N=9# z_-2B|psQ}3wAOfm@;ZsEjH#%%iRIIO=n5jF>HLN%65;}M$%u^Fi@i@1Url@j_V(u+ z=t|TKCftK0Y<4Wa-fFOuKbYJhQ+Mxi*&N8Izfn+stUL$zE#@2uTm1y}vki<&v&UV%+heJXYSGS8h|*Y!X~rs6?GO?O(hdiCO@Za2YhD?J#|jL-GO zyw|W|Q!wKZrburWSx|}#K@|lxXVST-1UpuncHxK1mst^GrY5C2Rfcr?KCrhp}I| zVdSg2@!&I4h?9;D^RtU z7u#BWkVjrFy|rW6j2Vpyf@(l|(b%*NBf&A_F@E0(+u5ept9$uW>z4-~PbL$kXOqUC 
ztXm)m4}e>TKjv;)IT9J^MB`)`k&lpLe3B4InxGj=40j8%C+B3$|9EtwAxPdh4=OS` z?JL-BStK=gIHEtD=uX8Bh1(1q1!C#t#phHGUrQ0nqbjT3PQBll__z9)V=G{!J3@Ye zSXgtOJQYluwCcED`MP;+9hpsmD}zuRt!3+>uq7>9aj55=Pq8;@z#L?^tNrbn>JfT- zcij}q3qzfJ)2=AQ9k{NuJ+mza76l5@l(Z>;9y=+R3@zc&39U6SFlfL-Vn@zp*Fd(;A&IU zjkCE_Ir|%$+zJ!K9?lZ(wGjsSJc}eocozc6J;2rj(;WoUh$0L?i2oDg#`f&8EeZ)H zY7r)TZ=wKu;~>hez@YgMsSNkT-S01antx&GSJ5#st06RgX{lHNRf5e$wY&&?aV@D5 zqGsbil|We8O|XWNpT2>Ce+86OxX!uY%&{zI2#X59QR$JfX?YWA=s7m!G?G{xG$jqk zCc$B{8|t*;*jbz5?dUj|lF7X-a4BdhKcf5rW%>W(0NTKG)ZsJpTu5xfxFW7>mAorT# zuLYwO~QF z7_Rzy2pAIaE+1d6!eViPUu3)0tgN@Y7YKH0*3_nXJ9!Tv{Ywop6~f##mGjqNJqGCo zm44GCYr*3&b#uyO`0hH0R<@_$P`R~wCwOKGWOQ@&S(dQ~b=d!Xg%}BuwrS&50+x>X6XC?6k@uwbHwve}K zmEVV*I82xWAVfG|jC3VA3-$qxu(_x!)yc)>4f9|)V&HerBkep^^0W-^`ka&dk%B6* zxe$WwE8>EYm6S0zH;zqWME1bu((n!pht#s5gX_uZK#Q4}^FzA?Pv@&~>j6IRM^d|y z5it)p&MHZ+w$0znjZMszOof`CgbXffHZH+AP{EPsktERc-(Np>n#Vv97!3(L`s2I3 zx@R=|n;f%YwBoUu)Tr}>ZjQ84=yn?+^~0+aM`~Os z1f#9kjO%wkUR&HtKiwY z1YA19Xk3=hQN%!5gdW~jg@jC8Dh*OF&uq(_q%MXIM0G?=iOXmkd7;Q?`GK0{**&ky z7f8m$a#1RY2LHfF4+^GqS_7y7cFKX*@=RXiy#68HHsV4V~ZSTL&v;P$KNPUvp3aGsVWyZht} z!_Aju%!}~@P#I-l@ex;=z!-J#vq~2C&1tE)XREU3UPL*(lr8_#>I5t>^D6h;I5Sa( zA8t02jS(WJI1z4YNS=5U67i@#bUkr-jiP?mN`hK(mttOFvj)DxyUQjXn z&YVL~?kWZw0L!2ha+5{AKNOw+_%CRG4?Ex{EH+e}x!;Ckh@dX;*+ro|{tDnyqZ5gU zGO6`k?El>v4yX*CLBS5eUb%^31MOolG5QCiqhNJCsOd!3v*(ivEQy$up!Plr#E4pQ z6ky12cwBM}XGtJ`50sh#jc8$JWl^!S*dR94lMS!wa=iLI1h~AYV`InL zo|r7ZnW#g!FtP{Sc_Vb1)+DVSMbNuGsv+9?17_V%D@TU}X@*1((pU?%@Sf2qfXte(P)H4{B|&ne@Z~dD;bVguVa5F^ zoZ&AZPa3G@4OMbnhY79;5Pkk5L=a>OM(eLJ2bS=5gvOawh)Hu@U+($&|cCq*W?a-$n*U;h4-Bg{=q13#&_+43mw8!KMcnGP))HfH^U~XGbmygXjWO zYdRsYSn}2pyzA%%^_^fKSS#Ir7plqKGF zDz<{*;scNu$zUutwhIc<3Tv2tr0!4Rgf`RapgJ6q!-zao4LO5k35p^13;5_ryqtX%Ls1iWXK$m&IG}aV9{O&2DN<_K2%ZX9LuvtK zcY@D3JH^Vqz&VjObEet2tba)cx~X+ZhAebkp4}HOZ(pa(d}&XFK5QraFBDHfq3IoKn`8>fDxcRZhNN(^>fN5Hs1hlEvK!6+OqsNbS zfhGz`FX%Ipr9Tq3a0cmjkCqp?jdIxa3wMu``1I5yeT-wE+YitfY>hv|b{O`R>4 z!w>3YnShP71Zfmdvf9Gsvs{Ts3UQWXVW<*eO=?2>9pJrvnIvk7HCFSjB)&rMwT2l# 
z4-S-zEQ$lbIVJ~ZFuEHitsD`0IZqnleJ@%!eja>iH(6|Qc~+w`u7qJkka&8wpgcMZ zO$#Lz#32==9%C36MB;?3wv)3au|!*$s4F&?tb?fMB<@dSqQ-nTw!6UXkXI40l$v@# zL>BUgjtP5uA6pxLLBW#5O%$-wi^<|iEn>WpM)$@lQg9v_V9>1iD9pDM?|t~?u2Vds z3*eX=zUkb2_l}%pN#LAkn(-;KPySR(0*yCx5}*O|E0gde=gEHTnLPHgV88m9?lh|R zc$0$bznC=QN42^@Mr{PQQ&i2dQH33bPoohGs5WA!NE|;W_Y8L-#6px~b9rtP_&iQe z77F``k|Y^CSUH45ki%x0{lHAW{eAJRML29J$#Fb+mO=tHV<%V<+WMR|+`b>q!jf zcL5~iL=+Jd4qECC>IlS-eA*4DX|%$rCk_It(YVR8{dmS0Xp5_%9^wM@G1`oHdo^qL zJZu^x=K@d*GH+mCBCXk&A%%53tR0kL6YDlG)SbEF{*R3KXpt+u2K7mA=x}f7M-bVU z)x3^!>){R81dV?YN;-;BM!f*#bO6j;OW4TIm#73qd-Rt6udPlX+i+1u;hDVclCU5S zzIYgLK7zdftV457(ByFn31*>lyO!$Ur8cIpNRBSYe4mN2Qmj;5xI;c%Wj)0P%#cx zsRLAO1lAitVy-m`s#I)PT35~}jA@wjpYb+ZR3(yjamQzDD}Ad8jNJCts$?hN6aOOE zpgM#cc=M1h!Yflaq3=m&MC=I#Pr8(CXzA5ejpAW1?BS@;6ME7R(NaJCE8>KfPFuv5 zPV-lI59;^231li<#zTLf_JiR`-j|rOl%3JnUk#sVa@``v{yHvcdq%GO8I>{UIk83= zb_?xTgy-L`4!iz-q$mj3N{zxqGP7#&U?l zl&#IBoSjEqY3Mab0=5;`#^RwOkp#0;h)O6D9$G^J+lB%0)#mN|x;c}?)Y3fD!2eD+ z64KaKfne70IWyCnVe~Js$)m&nm(Zvj!v~ zvyl6O4aB#C3ImXNj_BSaiov8$=~gNuF#=o`@~g!Cahxj$pzah+QT_;}9IQYMbU50k zPjVhw^X9e3g0@n*sbet7v+P84S=*uNrw)58j**Xg4Y?T94q$waErBm=Xt3)owy|Ww z4~$VYKZw^U4-U5b6rlqx2&Ds>>H(N3dl85D?Lr0E8)jL5(*7 z_iE|PlW(&0t^v2Oi{VfqzM^h66Yv9_XV7~DRkd&c!}wn1msj~sBxn+%q`=0@Xyi5^ z2Lxg)I7UDaQZ&)0v6M|DCvmiNa0B^wNYuhI#fP)xRi@fj2XR$kU#d5<|CD925=X1# zTykVl*E^W#oxHc_VM!1rkhf5sGkF0>F7?Ah?Zbybx%-V@T!36fZcyqKTYm55A_V0E zaNZ&^JeI(ygKVa39uHs!kKT2Q^rN>B?78)*16SN1#~20i=XwFVgOu#6`|stDy2PT+ zQmXd`8Wl3BLJu$}6BEE$Cjw7Bih*Y+M)flYd4Ks>xq8mNgmqL8ptu(`;&Ge_*0eZPC>^tHserBvH>xSp@R_Qak4AQB51x(N54YGe>SmW2-fjA20$A!EKaB>y|2Rb|RJdcpp+q zux_(_qH%N!O$%9Pct<Fy#%7f^iJnBROHvi;BNE%39nc4-YxB_#RIbcJ?&v7Z@D3gUmX{?F8&h?5yT8-s9tHS<~WOUyOVqt;WmBbbbk%@Yr0^ z1LzEwDd$$Eg&}rksXKnL-8tlVCf;rD1y@V zMpr-(MSAoRKb>0R)c55B&u)QYnU2aKhKu@EXBZkSq@Si{A~MdHHF$=Hq{THSd)~|E4jbKtR4_ zbbU?_ek^-{dI}&-4BrSLbB1$!&7f&~EvjR(qvR<#_Q+FbDz!aR8`l$oCSmJr-R;gJ zbLd+Lg77kqTDb*yi~I0ta^_PoK}uBCG871?-4L0W$VK`LXpqnc-w#-uQBX#G2!tST 
zY)2#(45@82mPANV6$dk7N?R{sBLEUAEn($2oWCl&9v()--W{^=WB`9IOGR=h0jtN# z!R9YPmmTq;z6~DZ7>;HRe?vx;0tZ|qJrrP=hiG}lGU{tHDwa!LVOD5B6ZL+!%x$RlO{`#r#FRtIu3+% z8tl7BFx6x?rs9%1g_9jBv^&b$$9bbq+Q;D?(M>!;fqHJKZ-TNt87DzZ*r0ue*X~P3 z5fCy1Zr+*DUuAs6NJr0WNwzQRD{t?H9NIVJbAf?tl3(^GOjkm|PZ|rsR5ll?#2PZd zAn(&g3;2P0dC)AdT`d$#c&H~^olI)Ej!-4wLXr|LADQdgdZN~_8g{7B@*?}3XUjXN zXID@q2Nc2oIsp4ZpTjv1uyh1${EK|LVl2JvvO#BE#d;VHgo7k%wCFQBO>`oObub+n zTC_k2pi#pXo^CXbk!F*xlbV*@0YiD|tDP%HygZyAgq%w5?9L|o2S7@=VgzHd4+Fp3 zU{^MDq6}Cbq4z;Kl)M$;*WbpR3&g&&1L(H&$OrG&s2qwLxwRP+feG4Qea#$(u~!+5 za?@y!5mAOLuEMJqDl&)U01O6d6pR_h^CFN2b&rzWkjB9pl(V>{2O5cZMFaKm(F6mE zCuZp}On@8(7SdgT;L#r0YfJqKwA2$54}43;8>5OCM_2(e0FjJjYP-nlYA(woEn&XuQ3{6Alc_-9*(`xCoY39UNTjd&aBmSS z2dd~rGI#I}wxTFO?cSvQ9`_x?L`e|aPc|gN-PFv3|Jq`gDFPeQ=~2Q}DZpNqrkRkN zoB-gZHyvU4IzC|e+BHkYIVI&TV5;|h`J zuKUHdk7cYrG%mBCz{FWnjfbRWsWenWY%a=~`gWgT4_6dc;OjUM+kg~GP&iOiFd5aU za6v=QVm9x{F?6%!F1cjJW6@Co(X#xsn<>cg#Ho%0Ey}qJt?I^t^F`4zMJDC zKY&*16panD?8~CN&a_}ftGgGqC$3bp`a+~1M99wD`|msv6-jHS%Okg_K~T^JBUu7k zhEC~1gffJDnkrla$Ci;S0Hke3CkT$=@#-l6xt+o4n1Ig7HfVN!M%F*P`v@9=6+x2N zTdbe}l*g(h+9JyhB&Q=+?d8)yS<^nE;Zq0gn$#)3w72=pH1esTP~3rlnXo1XSH%k8 z@oCh>MS=ynBk0g5A~A?>2>y}DI`;+E8+oE#JsogpK(5eKn<&f7A%!>`9p!cGt2w|_ z3HbOm`(7T*P|m!EcI;zcw{gMg+dxBZ=%lp-&ey!o3&(}@Cnp2jN`K0r-NcjV%PA#5 zrCjkmgKz~oCMa*?z@Cn&8INVK0scW@qQ&#YE>ZHFQEmgp_2MLsS)_%JSzm0BZj#0N|dySAmMP z<4g;saXvlKfTp!)Nc)Pl%qi%dlDtC=fsU+S9Ju- zkTIMjDr4kj$S6#J0UaMfAd)anz){@oFapNv+A4+DkG^F0(dky`DI!coeH_D1>rTm% z`Gu0Q=zYh%WGTf%QdcI5cxlkzy-3@g0B<9wpR^-Us?@qeI6#BJ%pUC&hUY#|~7Om=)_gkWT`Pc%t8~#9y7r;p;CZ1%VBGNH5`f+xcy`^bC z%zZQW6tU>^GSo_Uvify{FgX!8c2Q)1CdoCB1qu7|wI;oFCDDpyvJ&UqXzl!!&Nc(Rgbb%9YLUHO@J#1LH)OJFo2%C$E zoeMJ?Z!(*Q9}MDK+{m*+q| zlA=e%iRdo|+#y|w&iwHeT`tPq`IIX-j41&;EWX* z+&%!3!?}+Ts1a6r z*>gMt8M?>uDv44JlzCSnLvW$q9yr$h8benMV=1{qs0%mg>1+p*2Sn0VNXx)fUqD0& zQNd{UOUr=WV)^q7O3+y0qIs`5W0^tY!^EM8e=uurNjmXkXY%2Jbg)$(=WGI zfexlcZ8`9EX6-}DaMbzOTMo^>|AaDwOGKOVy|%UO)T|3sXzKm=KN1cWZum1C94w8r zv>B-seckP$gx#sLicvM#<8z-ONFYBGr>YcW9xS 
z@b2iOQ-YeX?R2;>rx_fdJb>dRLfg}Tglj8t z6*=?KY4S_~3$l!QwLox@41)fEInRKB{K9h|j{hRQrd6YkqP^gL!xi&jsKFugqKhy*R7-SljPR^o^&miXm z>3|$M{*qdlg$sTFi4={~3L4&_LD{FZk0fTyA^uM}GXux>v^ti+Lfp{{9-{X|M&s~6 z(QrM2zjXWO{Qwev-^-aG1ePaK*10$VP%3ke&JQGY^A`{J`}7r`Foh4x$v3(QWJ2qbxqSbMF{pRg}~tcX(w zN#5`dthe)s{{Xezb~Q0{1W`o^eCg9tLC3q$1o4h2al(PIK6bS*tOtXM{9+nkbb0kM zdiT60T0bPTIC>5E4CN1nj-8j$(l3m0ee%$iS7O+Qfw&~G06W-{>)AEt7;=dM$vZ?K z`eNO?bO3?ag5e?IIJX5=uQ#3JLvfCTabl>2q{eU}3aGK3j=+NZC>Qct018Pum`g%Q zq~`)cZ48wW)X&gK&U%cLJCMW7BjS%~;z<|u>3J+htskU%fQUBPT*fe;T%eKI_4Yu* z$))yeUI2!dhP?6}&red9GSK;8Rp(R~SAw9UH(XVvlZVJMh2)%4k6t+(%HTwzI>;T8 z(0Cow2oeNuQ_-$Wj-d9B{U{(VBfL?)LR(oDbq~&{SVpG=EKjNC?!Q;LlnMlHIF#^N za16M$RRS82#i*gPc6g|p>Jn=ACAot#Dwj?@q^#s!m#GPacz;V-tz0vYljb}*fc4i~ z7Vqtki!EUTgZfh@MdztCZ`3QO8ykDI0gPX0`vx7@6bZrP%PA{2p|E5X3xSILRviMDP+=Do(@3wythG&5!DCJJ&AYAr=ERBhd&LuA}jIxRp=&77S z2D1g-!$!eVjILg!(zhPn6bI2y#Ma1MiYUSjag(74hLSq<31%AvS{6a9Fm z2}p2VC%TVo=Cq7wq%V#RnFLOKUpS%IN2`3)a;{>1Z+Oa1*%~PZ>>hy zq^>u2ussmXqE-<{PGw)`Z3BJ%dTd)Q4E@AtA&yO6W}9;jU>2P+rP;(J{Lxdsrt$)l5TW&ua=&K{D590v<}`upijz)w^hq0Hj4aMX+B_X>x2 z7Ku(MtK@i&8rl|EsF}*j#6@eR!FPC1#cvlm0il=EIgXKlq(Eh*bHOnjB7+av!Fc|u z3BN3z4kb7*03$#qpFHrDr?qjO_UHEaS#);8_bQrEKKv}7slh7LXULr!uNJiu<`_2( z-1eRI^rxd<0487}^_t_H53(=WqD@;$X}s0n&xsGfQAEeOc^!rbqJWs0M&kWrvWh=i zW`<(E?GaLUz~(L^6n66}zU+P3?+KknZ~F-1%u1|BMl1kovzY%Z0g;tpQ>U*9! 
zt>jb@%Y*|6B3?rH5vX8!*`SY%p;3OTdwxHNEntm`gZ!;r7OJC7L$F=gQO`RxFOX7b zb|TsV1g;%UilE|(#2xC>#a-X4Fh(pooig_R+tVm{%z@DcmVh<&B&wVadU(Hv@jeb( zvL2bXi1bOQ6U1GnOlXH00*gzd;4Uyiz`5bU4fmK&`-i;#ItRV+)9@X)+Be(%|Lz}d z|5779;g2iN*D2|V`iV~4GR|=N4}1TOj$M=>blQC9p<|lcuf)CA+c_a@fi;I=Gh=@K zX=Q2FmSui_{J}cEzG`5ou4O=8?7EP|vWCG2ac21{O^QWMQzh5t1oi7n^ZOMUMq`y} z>fs3Rn^F1sTiL}B=5B$We^yyp$!lcqDFZ%=Mncq7Q&TIy161Y!i@JUF{X5`-Vy-GU z&JO$u?t}`kM>ckLrlGH@s-h8Cqs>wikKJOJskx6HtpiC|2KXHgIY>fH%>s;DIhOjF zv*Q?zTVGHkn~FKUeo%%ZN;ruN6cj2A@;U%#2u&6`WK0-n{>)bO5He6y|XO>Eu>@Jr1&BB}SR`;jAiP+t)34l}AX^{<^S zhl8;G?c3|;&;KMWEPQK=zvRYE#5!}PfrDE;Pu<(h)zuaMUc>AqE)1vku0w^4DtJL| zTlM)R($e{KR!7(m+fw8NN5J^)#Qsi&U2X4j>tB^!V#4|8$K9Un+WqF?4ZH_^bn8W> zjlv)anW~2=fmKA6UUYOF^?>G{g+8nA2G|5dt23byqkHjJIb4%*_J zbd9}!!NIqoVz%Ij)x*xt`etTxg(De8U%G_fONzl>B=6L?H5~z`ya^9fhJ83`D>Nm} z^@D?bE_9?zAELfXZ%2b?c8J$kR|}XN<4LJHp=teRQUuoOE+Q*$gcgF$=VG3t^51$*I9d! z+ptG3(TPc%3;cr~@W+0?%a?C~N!$aXmdlzsb0)H?HXM$DBTG#bm4*&@$N-$KZ{BhW z+-pNNLM|PN(1MC@jS{9P;wfy$yS;+^{K8L)%xs1WXS=iWT}lfxg45H}1Eh@?jUPXr zU5py)2Jp{pOsmx&sCD$PkNsE%xAZgt=;ZOV%*rcgPHurz$NSv3L$O*8gcygVzL{HH zUHt`nPeNS$*~qxWz^WUakg$@?y?F5=c->eO)6w8u$KlWz70PlaPn~jj^Y2NZbncVi zFFbtsFwQJ>k(-;_4V+VMbsuX9YhfFmH3=QXFCbv4rKRPQCr@&aV%{IYc(w@^s+@i4 zbM!ORH#OPeXdn%?;*n2b&|6}8nG}OVrP7HV2p%W4MUMEP@B(eyj~;z6Zqlz|&z@~& zb0Z@o2^WyFpPrA_7M2giCIH**h`=t@PoHjFxNu<~)Rvodbyl?V0;e$tGf^0ZQvp#M zh_RGEP#pz-&n#?iPO-$2C7Sfx$b|6myWe<+=!Lfu9zR}6Ny&u!1wMLv?9s!nuJSnD zC+xm?`cVU5NrLHc#%G_afYs#8WSEGyj1?T*RcC>a5#uvFM5D&SArlgd7Oe-Tw1#&+ zQ0@sxqI)o=Cif3s1Lt_5-3am-X<4cuoDMrVtrlO%)OpWU^E84h!jFZ4+YCd(3-GLH z2H$^T3Hp za?h4A-=6Xuo-)r`*0}wTx4;lcc`*PTBLnO1gur&&=+1g9s&Z&Yo%1@eD zt<6kxlk*<5^}F?uNa+*y>;BpdbI?FEudKa*G$s$6Z_S-McM=9cTvFjc z^?3wU7$KIhVcsc&GpQ_U`(nBQN6%WR%X8!^%%?fU#l_QS&%P25i$}on=@cQuURG9B z0f+1b3{W~)#{r?SaMPwuY`p^q;__{iXi4L8EpMOrj*G;yK=ERJ|KK`sX6pWdfqR^% zGa8D;Fuf8;D4+J{3@O7XER-!;TB~V!iJLueW}~C)ujKQOva>edMQSC2U7gDGqYB2JT+BJUTGSV_7SjJbOcrE!-rbjMT-`}>8A{QMA%avz(k4$Y?C~<;#~TNr8)rfXzY@Xt&N8 
zs4ff@*Xh6hI)ComIhF+Q3++Y(ADY?wmZH9zh}wGS*>`Ekox#?_&iVrGG0Fypz(|CQ zh#CB&BEV^?>*?uDpEm6}{3P4C`pNbNv}TYDXeJou;3-5zpwwG|fxqHN-Aep>YPtui z)7*DpGz}nzmw}+bFTAFI_o8lo<*Qf!hpsn)%Q^l2|8I;jgJGDmj4c$EeOFn+pgUVy zRFtjkM1_!)ea4zw)*?&Mo-C1U!=!~Qk))EfBo*3J)bDw2%zVC&|Ks1|yUdK-%k{pl zb6)55dYyAEz}9nc(8|;LRw3GXcQ#IPa&kI-=1do790scN-{2ejm@FJ{OI3bQ8z4Gn+CVU=|0)hkE2H7LYBHX6TJ5Fy7zMQ^t{%P$#9 z5<5x3Xx6;D-M!Bh#hk*y;|}zWW4@i2OHP_maAOjuVKf<(@@6beU4f;wrzwo{RD1WH zfP0+KcyNjLvE#>el+n@A!USowY^g`!U;i5fddi)fGox&_eZ5MD&opWbP|3vPPue6q z=hgIZT!Bn1&q{u_Sg@Ii1U}y0TFTQ=Q4^OeIR$@q?8p%<^|!7^eD6GEO5wD63f&^HIbeAON**)v8q;7D<~SN@E%{@BWZItR4!rLY_YREC|ix#}hpL zSqV$8JfKdJpCi{}onLNRw|;&2#)fOwtl6?-hx(|I&Ff!FP97&f(a+DXgi3Yf=pC&m z{_)2T8)1!ooFxcs$zagQvd^EKe`#a*(6CvnR-u2duNMlAIXKuo8l2Q{>`w~Kw2I=~ zQmCE~$a{fy9BJx1z`NDhu?^+5o?Urt17iC&?~ARkd$n2b&p-uLp+@gNMve^rmWO%2 zY>?Bdygb1e33u+?VJ}YKPl(s+)@>Z0{ag6;8OCsk^#}p|JRrdsH0@-`}IqWO~pKOgOlEM2$<$3~3-<{{o|YJu>rAmN+ph%f7I% z&}GRI-Lk}M=DvZ|vtRUNt_nm#a1-T?y?p-|c?)Y~IV;uKe zyLORGKa;9f6+kf4Gc-I1_N8JO$PrIL-8jbwTJ2VN=G-|e9H8_9{;+Y=rlgxUZ(j3} zwKMy;qqV^?&d&k_c6mmEp}4^5TAj(T5t|yeoay1TU_loK-o#wI2#Qw1KDfBJsAyFj zGJ#07bu07=~&5Rd8EHS+q@8L(m!7 zjE>l)uM(Q@Aoi6krbcpSx6RMG2)2G3c<-JWz2WyH{vBEU{oVxXY8M#1(s$_=@p7Y| zlrZ=2-_NWo*-7&Wfb|#*a9(yz^?6e3$2JSnPY1$O)!p8r*YNJ$yZ@;31Hb2`()|7R zkgtF>E2|&K=rTh_K9-aSfO@tjFd!g^J(zUoPWN43{0<#@{o+LjzPy2f!S{#d?0g?8 zIGC&;w1%mvskw{GX`&|OT5{9G9~H0Hojc0HH0e>`qyFH*S%z&}vPBJ!h zgpW2+&W9bVdmKeXxh{bL)c<}HmmH;4n9HCnam zA{pvFWL!U{4?iuE1y`)$29@MxJou*Rp}cG*VtXo&wrM(s6Fzm=35#>zoa9F`P052O z`(Ya!VnULct`~&bR;*C)B-0>JLW^_nE&#`qV0vyNc7u95E9XBrv3>cw$44pAzQ3h4 z{MHype`=|>eCBUSE2+uA$DL$4F04PwL>ZqU;{ZkHy6g2u`UN zoE^T^Ci2}Cif!H$XD~N!_|1`5mQ&$tHfwf*Z`HiwiTW3AKq0kN|C7QvhO7z0=)HPl zL&Aj`Xn*|^g>l`zyLQ1$tqMv=Ytw7y2HsPy4&=wj;oq>YJAi27*!#d)x1y;^Rb*r& zfeRR5@%00Jx;oRrVfzmC3+i<0+&P|UAZG1_P5>zjX+ZgTA|WB6!<39$x5mtvF+=PW zcJ}sL7pj#R<2;AOYUnI4pj&_5ym`_?y1Xz`PZ`2BBJx%cq2J$%f7KxWPy;N0OZa`@ zk|Xclz1#NncEGp}0|r>roHP-YrUY##AuCJpqgh0oK`YFjKYw1oapN?Q#H=WrEvt7T 
zs?UwvqFMi^pL9Q-Q$K^io!bKkKDGa9`BfPIY&v0rTn2C!N<(SBN1q`sZNgdKR;?sv z7_a5D62ePYBEyz4Y342Nj9tFIzH1ZZdmMQq7QlnOXKm3AChk<0U#$s8wiI3=X{VoG z>n|YG$(L81dlX?Gy0v+aW&`i0rCC#2Z`r;*6-Pn4xjjH#+uIGD%A~=g6jt?r!$!=O zUq1GKs9udxl<#~Dpif4|lqOA@e0{KiHsC?laIr@jht;y|c}w**s=nwj~9 zeSC)?U8-S8Qc6RQ%CkrpM^R68`}BE0L1MOCr&Fi-?C{?WTmtm1Bf^jo{vc>w4)s0? zn{?1{SqM)k82)@d4|x?P_O^2;3>`Xj`g=_ozL5%A^`ck||aJh+tvl_4e(2PNJ?6pXK=b_e%~W&n|czHG(wk zmh8NI`6SwVs5?Wj@LjrA?kCBJ$WRp8wdcTrHpK1S{{GXBPF0_RBV!Ma|2C=;&?XR= zBK#rfPFqQ`e{y=(Rzm*bucyAe3#=9v(cry3rbTSqwrzF%ljudEsB}X5XRiHKw;o8Y zgsosNMHF9n-MV#WA!mh05hQoyF|4Ju%CAEC5!QCun|puD1`c_hlQW;9tG93RHw%IY+uZh&7hM<~qk6&{W{8PP}79IKj|v&V}W*^$DvaR zxLTBXVIstH&{twtV-H^c1)S=Gedg>~18cuJo;Y)E&fT_v;p9z= zlxoZ{!KnPh`uFeu6~8K4((tNP=OI)1h8I-O zxK`toeqE3$EIB28^ZBA@5Nf|qB1A&!Xk;{nB1zPE?W`Ct2jGAR6JPHi*6^hjWgdMK zlVg{M11L3q{q+bAo2ydQ8vYLH>E{;)-4Fz(6?O$1!St`!ih2#hBXNSX#*@n@Vd}q0 zX~t44D|%|ro0*fpUKbSDJ628U+Km2RYl_f>0fo<>9|a7IfY1mhOorUY&s?>{7*yc+_x6q7b~TudV&ZjvYJVRPk_#NHz_cbv?C%^FJQ2 zZ^qNyI%|TwjB)xb2fd*2-=|NS?t&~7@XBv_Ya>=9EbJeOeDdva$=^V#(r({=7``_Z zWfvkAQ&+-ca~>o9v4`sa@(cXFIejr>x@HT}g>oR}7=~LS$fT$}#_E?yI{Tk?pqpA> z-_eF+#_9ZCN%Ko)mxLSzg#;5!DG9yYT@WAG*h1lQHo1;GcEOIVTes5oskb{Z0Q_E2 zRn^Mdl15#TDuhrIW0Ln*#Myi8znoLxzPOq9(P>JlrN46Pvy*=x@~b-h$fwia0BqBR zis6x4^G#9%$24D7m?_wA5AS#X1Dh@;=E<2kJIP0qP^kVi2S)NS-)6<0CqE#;baZno zZ#sVRDME1dAT@9HxygTkV?4Z$nEo-u&Aj|&#CI=1r@Zm)Z$N^ko?kpooqv6P;TIDV z6IB5ZA6gQVa*WnMWRgNY7yeG&Og(6O)a9Rl{?TpU{&|FR=gys}n4b14%*p9AdUX71 zhqY_hZcI|Ix3O)%rEknQ=UM#<-@FMzObLb@ou7T>GPLRu(ObQLH6G~$ThE9kSzvaE zsZy!8WGXm^$RWWS2ypA>%@Z4Ts}r`F_KA#Ehtx05|EVv-LMAt82{t&py23@av+;o@ z&6}TO9FOkc!S=BB(ap59J9oY{GwKcKid4T(o5Dx8M~|Y2C`<&?;Qu_IHEh|kWeBnC z=+txe*Jo4X#G7xUao?0luC&jTj^H6GrQmMYtg7waatBG zTnN9QC%A9e2A&z$IYzMmn(5aw-Hb3{!I}aFJ2dYqKt#7xF43B-%{9uXZEq@i3TW~9%-%J-QCF;gvVb}=nSP=?p82r)U3mnsL#rx?Z4S1dI+NX~NeM{z)R!&bZ1(%d84Gav_Gcd4a9Rv+g z<^cqK0G?m~&G}c64F)5=Y@GZOlbPy3AQr9>xd zYRuoxfo~S#^EZpmR3UE|z(Iosk*oFDD$p5(pm9P5xVi0P&G@6RyMw8)VJ@aj$r%2c zJE0aoeMo@b1DKUy#$ra*qj)9}Z4h|F 
z;?1L7*ASBZN_pX5fBp63g7b0Y3SBih#Mc-?AmeR$MQ2~qRj$yJ%UmY^^L|S9 zv`~x$@-m!k;YjwLJUQp*&g!#t-apSC5X7*2E#cFr=s^R;3U$}U&+1u61@;xzHWo2dc>({@Szpi@d#cSCR z3l#6F0FH^K{(9|SS`Rd%r@SKhWpHqCQgU+gfZ0(tWQ(p!vWWiDP1mcZn!2r-R=;D% zjw8@sRz53c%&E+oL$y9-W}ttOCdnx^9uWN&j*bBxI(AIGw9-|?Qb`UaL-T7SVTw9I zGEI}1iIc6v6v3ayaqq7NHsoDbky}ggWsgxo@`ly@1yKU>x(W_y@u9_0HcOfRL*;CG zYhRCjOLM<0A~^S5^LYijc!1-yhF<19ID%lgZ*V4J>gCDLE?v83EVPVc?_lUepRXw@ zX>hACGZv3g?_u74q$6-n#3yxNVk0#0*gTiWY?yh)V+xNGCL#rwLfEvLszG{sN)X2* z98vYpr7^555GjBS2r3(HzuyCHQ51rY@88QN1S7oux(N}RS=j;6Q4K!P)!4 z^+-GoN)4BsXH-9jLaFNAr_Y{UyJobx1-U+d{`@QEfBN`nuvdi)#Lj)QPLVIP>$GXv z!)LXFv8%=|syTl`nupby&G-6-jgq92s1c4{D@5 z7ow)nYcOv-7yig)*)mCbLa*iIoFF{jx^pMO%)qNgv`R=BlK&Lo)?N;l04PFO{$tq_ zJMr}d@`M6UoS5uCl=j0_H1F5na~y>81g77^KlkO!9;{Y$e_nht}3vU@GK5(+qRXCEW-NKpm$!oNKGe!J)Ai+HuGzz zAXZH02;KgV%7qM>2t!d=k@@c3Lb}=4qOJi&g{U+(S3eKay8mYL7K^bB{N88wHo~xaCaExDGZ2Lp z`g^Apt;u=y$}(sf#X|rOe$e|0(RW0v2x^FuN=E4R@OR^t0+YyqMQdw}I7Jb64iBCZ z)G#V43i`r_gm?^W5e!KQ*QxjL;KpU|pAGZCxLLnJgIm_ew3GmoRN`_89R(=W)rZq$ zaOh$5rUlPtx9C%9U_j#3{KSjG`EgvV8EKtfm!R#O>2!P;gD`}FNNEu9_ftF+Url`3Z zPMMAWK6qSl&4!WF+eM$J99O1bF)2molLFoABUq@_#nlz3sehKDbD_A4S&Y-A{*NzT zE`x*B(b1V#{kei$#O#iiEqEBTi;ctF=rd<5UMKCb3N+QskAx`x_~FCEK+`?-SKA?g zvA~*fW9q%H2Uog>%^47&Eu20E5Yw?^yS`7G)~?yk3PP2kAXe<6FE^H@ zJa}M*h(K8{9Jctpaa@+L=}a#c=e~p(Aja(P;Q32lUS87e+m?LjEvpwxH-GBcs2p)mbz?zo&E9kt$r@AOq3}&ruwqx^`_?2DVu_6po3DP2pD>cc1N$2 zTvR2=wjfl9v8B*@wR5@Z)RLsD0-uv0un<{XSE`8^-O>};Ro%W`e?&(8lQtqZW+k75 z+ruiN_NLTZTJdp~2OY2%SCxh^t--O|bQq=!x&0tnCcSAbkCuQPuG3Iq+GFwJ#Vg(2 zL+Q2Bxwv@v@ay&t@N1yS`%uSg%IySuR93#l3EE?Crr0RBTek6KhBgWP53?7D2gp># z5eR+!__6!sIh?LX=9?iILMR60cSMX~fToFXnGhioc4%4C%v-?6YbUO*x2*Cbm~y20 zDFOM%XHpTQfG7Dn96qX+6Q9|fM>a6@Eva%~r%or#7Mxp{o>KWq_(z{wx-u~98;K!` zXr=4p8j$8Of=rjbeJust2q*q65F|DGKTM_9mN|gax7k;(63@FSB`nZ%FsATQZS)PH zec*{avva9mLpjgFHKIWklo*kqoj{r zB{T{lw;;j*8Tuiu3-djOh{=*GU`(F=CTNkzcW%h}GiM$!Gw6sY%p}r{Q`>1~Fph+& zD!+kqTC^eL9=Sdb^$Fr+4oq1d(9iAioEU4YUjQXDsu4J$2tHPGXLChdH^fo6WFavL 
zyo58m?-))l{jGf9U;WSBnd+5ZvE)C}$(BjpB14xpx*!{OQkYG9sun3O&6W~7mp#8; zSJV}@d}4V%fMpqlU+kRO9Ub=9vd2Cvkt%b9* zv)lG6OcWHbzjJxT(fZwFi?nb+dYF9L@h@#Ly-N3GoygQWRHvg>eXlwWytVafC1M_p<5h`F~wVLTG(DjUx}#%1Ky zWj(fj`m{ngcRs0G8???;av1uwYB5yDO9{o=vMNW99=&>C1}dx=e~?V~V2x!ykSP(7 zWRhB^dA^-&Md@X=md%A6dYE%5ZH@DgM=K$^&vI7#Eq?lWZJB(5>Dw?EPi`0g&p#R= zK1bQ{A_7r|ECpiAiPr5Kb1_A}agnRrnlXkivMZ^?t;@aL_F_5m!Uc>Pw~hw5`iObf0#`ZhGQ)m{sjtL8 zJo(D%80~o|gCZ@7^m>_Le~r(IVCAJM_Z-u*e(a;jq9zFLl;oy=!N=gTO8N@&2naA7 z|Iwp7gEddTF3QjU`nQau7z*h#eE4vm{rh#5Z|@)9OjCmqd~ZuXS_O=nKQeVITvdGp z`7LZ`7poA*ZuXzDr`4%l+mbj!dDG0WkK)MOfd&S_@C0&F*T$n(FZ=Lfoj+H?z*-k z?UT$b7u3@AV@o3_u<#2&WSF=h*w9Zw+Kal)oSP9HUh6Af2ztD*>(!^nzHJ)CmcK;@ z2MD|cov5U#c^)~C!e{{c_K{<7E%zm|l)RaUbnq79&CvP91|+O=#1_uL+qG$TD2>OC z-B!C!9SSQ&DxFA1w+Bp2U`j2?dUfloOf_dCp4`rrbAsMlJ6Y#+sKZ4QSU z9uN>vueKhSjxCdH(io2%Uoo6wsbE z)dCB@t4L;wzhT4AS*~)KI3p>>cwUIU zeEBi}ISeomOm`LE@&LVH?K!Clj~+cTygM@rBfF_6w=5b1ngg@#`T#Q+(s&RF#i@-9_^)Z^=`vBivM~UQYMTTs*B3yJ(9yXweHoc zS8vdF9wLvPTV62P9;@56m7r+E61ksSwAs#`PkswJiZwU>hYZ*CCoqHoeWo;M2^AL% z%_R)t$U?|6V#^*5?@~aDXz=95@}&`%FMnw_wQmE(wRX1r@wp#nT=S|`Ug95N|w@ z)ebDCK+-jzKa9T2NxO*JwD_<;3KG@b6#t^^tJ#$Tsjr-DD4Bx;%J_VT%QTfzusnQy zYaj2R+s*d$A7~3^Fz$u()LH!NlEl|*-1Z?gbNzFo5ol-WtstW6UIH=cQeMwKV@prE z+4ALO_O5=14hh2!8TD4~#sbg%VxkVhio$){<7P&+X!DJaGOpNl@SvQzH?Xa$0C0!I z{gA1bFNzxV+6wa>i9$SfKpgq@6bWW1e-C9bF#e&g?!G+O<279-=LtRPXaDfxvIVHs zr%WD=8$W(8Wj+A{>1eJ4)f)InW>YOKduNN!!A2Fp4#g8xe5K3?3n;^G zkD!Cv#G@ORK+@&YW$GOp>%Qw!caD#uaUmy_ieXpjO3wPjD2LHP8pA}G5e+CTTqZ1l zm={y8Du9ER$2RK=%O*um4QCz2nkQW5F?<1^qmb+u zuvV%7Iv^~it!-EMAy71j+YZfJrHwtf8;P8@j4KPANJQsgVbFMVp#Z_TjnB>jmdTos zscqxMvqp!zF=uA=k$w{m5iiUp3)^t>)$7-Z4<1< zvMOGuxBnZ7M|MO@mL>)@0WD64=*8JiUyM-^)e%6N8XiTWib!`9@y!P-uj0bu+`InP zEdL&8%f*>^m?=5-?c90Sej2=yTbttI;@yW1*{^kXoHa}Env|#NE)-FvrB2QFBg8|r z>BICxi&ZBxnmClllk8_;A(@ykAO4c3EHpcolb6H88^5&2lTW>pN`*XadZHL^Od~nh z^N^yR#JWW`YCZ71Z{1(6LUklsr!bPR9(<-PMVJ%~qeg9_S1XA7?f2g6qDVR{CLD@? 
zabkcQefI2G_P-n>7cM@#{BjwYYBkT9T*1`BPpVIE8XzsZFMb8tzeNS^zP@{H3 z!bC~MA1o%ErcIj`9xNwN;yqjWqxKJqBTJ^Pe67hb4!xKEI3pvJ4m)WHELHyl%MN?e zl?2Ey+?pIcokk{nP;X!`cWKKbsH}M$TYMoTq|Cat7UP zdSc$e0~S*k^*vaadire1s(!bEZ?{e|edw2*Zt0ig@>93mTJ?Ud_d`pK$5)4bj=9=; z)q_3x}4RyL@ zc_AXz7VN# zKN!Z^6cr7&tiEU;N%B8byRvk zRoq^vLFdsRi@U$!?&G$i5m19UuH>N<*#3ve5Mu{7KmAl3ip-(+R0WVm^5{-6i<%kN zRft8e+f6k!1HXwuvV)_YvO;KxKN*wF=VUU_gjpkoYt7m6*IyIO%)BX4j{?W?sT~rd zL(Pt^1Ad0X0Ech{AfuCW0@Q19t@i&ci|XlZfnyA&!XUo98RC!QJ}kbaMcLDYRH5U; zVo^T$4xuv$7%qKpCCmxKz1YQ%KGEGPC-$f;!$G6Y}J)TKY|%4d&5zok3S!T9#cxzL6; zypc2rI1!x>ueko!hOdw?li@_D$FU;f7o?vVO}BXb;E}Jytw;~3jnA*A?yRj-PZTym z>=}%BDPHr<=rGeOs93Y*ABnd+DL4+>Ygk=;I zZy+OcUK(Yv7!L@7ujq>fH3i0)&VfM{pv2C7{KX^J*=4}dfSc$Jbh_MUfI_^xM6infT(5gye<&eYa@n9qTZ1OVElBFNk}mX!sb^lU}Z4oT)d!7x@6G&fX1B zK;B64$rYb?JH}5gkk{$i-$px4>pfgLdo0e~Z z%eFpvvK0m`zql$AF2q*lZmh3)+R%LRe=F`FZJE5XeM*7y6L9&!RyCqC>a^^5PJ& zo05>ubH(wimP~DsS@FVGL0_7)uzL||Qlk`FXptOLnqOQ8^@nGZ7n@@lWHt=3 z3GUmH4V8M8P{YN~12P+eue=uiQPWy+dJ~}O zfjVQ|HZ|{zFvH}Df4M5WkIQ$5ig~oIZUgxu4=p7ZBdP^efx32=s3uW;t3~@z zJ94b#UpmSIIZHYjH&Ma}){_|fBAp5F^K%xPC=%+3Z=OOynx4djMTk)?%FWLYrFCI0 zLbJZ@X~+I2fAGre*tP3efVa$X6vD_cL!-;2vGRCDLCDKHu)z9SCe6~&RWFuQ4c}Nl z*ko4+WVcJXMZDFm*=J_i+bC9!x3u&Tx}I7QTiG=D31u{N(`#ualqP4KR=09|6yENB zMsDM!kA)VP@KdKwIc9wN^eKo4#K#Gcb9!p}-wVh8tcb~mQ_^hGGR-G{U)E-RY^md26TiN4XT z=4?H!(ECH{DHgnbUu`f_dadMV;B3wqWXExc4jpCWrH@+}`!k=Yun||bbUxse>gVt3 z;v(2e%sDK;c+SR|p6wNbY{>Iu9C-VN-Rv(~ViDfLaJ zP|!S@#pg|9;gzIakK_@=8Cm(!?KL+ig|gTO%2hVS@6p$Z@^azrP|-X{^DHB4U< zP7D>NKT*1~Z}RLJrixRO(2ymniBP#~*RGsmuG|r^0yiIiJcl*bwaEcHcgkb$%ZJ24 zI*b^QlV-ZFdpOxP9~;IwzKBB6yOo5^mH+otMl)0a;wQ;&UKu`?dvfplt6#=7KFxQT z%S14FZWVrf8$QrU@djZfINP(0!dNA{Nop_*RYQL|;6Lw*E7b;zHaoR4oU&Pv#*49%7c=!<4Db>jXyUiDI_dHPMireI9U;vCny z^)>E^+Y;6&DLy{_I#rQUGKiGF(u=fwP=DFM7}9p zotX<$!ICauVVa$+ZrG3}J|M`YVJK7EoAvuXKrh0gK))>eYI!B$FbsjPMFZKsBpJWV$ z?|{-uBtOuqB~?SW@%0oYV}N>Mmiq3AfJYdth+D&01LH@cdSPk|#~{7`E~sOo z4E}%`nm2=yMjfS-Lc$>cs=t4$zH_~0xB`mL1N3sX$wxPoS6l#WQ_w*2X{ 
zKiNy-4(6cv1!Tmvr&%-tSM}BeLf+V2+F?q^nkzN2-m({ElF~Cq$ z-!g$m*p4Fiaq!KKgBV$OU*+x&^J5c#S>EGH32P?m#m}%-k+hSQDScf>)6QGyNNIKVf3OY z=oZsdP7^dR@t!Yb(9cBg9~CaU#2-#q47^}pe7pRq+HLD)j};v)iPhKn^E~ zQF-2a+cc~gRCNoTdn*zKVShD)r(KFiL8+p_p0F2!!+r{Exwnjmc)%v3`>0!9*_Nez zL@QXxZj_b>a>3E0i9I{o4ehQ3(%l2Ly~c3CiyDt&M8-pYYd zXz7&Pt)*3zc=Lb0+~Zj48NBPqu!ShiVKYkcg7sdj+t>gZ0>tIiEK3(XN{S7%^A!13 zRU(86`+!!j(gqBgV%VUMHZ|-QKHf7_9V*-yB%+Q=eTx&@kgkqXu}$WCEx9Vqu+%7T zA$wH;GO6_W-IZ^Kn18j}oLnmOgi=aj{_2P(BLNpu2!Wy1@gaa0n@`TrpLlFT z9NR##WI$yu8EBTp{{Wim2{aW+L4-qJY0evnIiu6H-1cB@VM#zwo%70)FS4+&NA*`E zjF5;)UY6vq3ZQW5qN{5{$#)(G(a{c1h_J=dTX@mqOdW&~~{#wgTGZ$2%6khg2rBhy4gFo}_)-K|mKf??By_Y%3ufCek+k zalcW~b1LyY+5n32AYThA5cxn37}`RBng{F3ow{VVZ@*4*nh(V$)Vy{cvV&w}iW1pv zEnN{{kjtK2dVZYp4j#j3z-onYTMKlpMGPeBy2bt3wOYK{I7novt`%N_kK1ab-yeGN z+f`~SRq_>k8}vRi5SXYIU(BZn6>j#di^rl&@)v*+!w@usr))mHIzHuJOPd zJ^KPd=&3VR)z{EHf=d3Dm~w7l%ia&*3C^wfFjJ|b1eCx_G`|1BQsEV5d1-~SKnd!Q zUD_;~6~qChw^vDpG~ZB;n5sceZ?tgX=$ku*QwU4xk3__#)W{PzR*$f6(`aAEM!;tlT$O+LsRQ0aRZw3^(gNOJ0f{qLvDsSqBWweCT%Xn6lDq+@sD9YDpp*R;>Ibk zg5p3-yv;h*_Hu8z=_qa&S9b&=4N_q}J9q%O``kq2Ju#eZ{jJuu%PdeSZGQVRsF+`G zYRpUVaV(fd9HA{Uyrq|09)iNVT}ep3$c##f!*D=zSj105r9h%=(N;3yS{j4o6DIsjRfbi_>X3o!D3$a#GSTS?K}Vrj z*<9Au1yOg%j_*9ewK^TJLSO;EPwLcBVtAB^9v3DK-HD*TRt=FV(a9d3nKOZPQn+m4 zqy-_Vd=21?hnsk1b%pGX^VUNn*)-MFRg0A=d_u{vTP@^o$Q--7jNLBbk`EzIPcix^ zb2-f(;@g04g?8rm_eqx_d)c_r2Rs39pjUq{(Y$f>8|l zE%ZIz1g@gNj5a{cpO-)Y%qP`1>s;F_Lk8DTcq`4xcQMboh~uQVvLXAmQMAUe^rZgn*$fyWc-+UW|!N`>S#}L&wu4^m2{1^sci>+nA`4|qH zBH?%PpCUMMTXjrw&xRfQjQAdK4Io~m@%0O1kHK*5*4XgnEdFY$({=o&Fc%D{f(WsB z%DKkqN}8OmeiFM8cVQ>qkSCGQL`txXnmH0}V1?Jl4I6@>dSq)tD~Zf(K!X}l&ilqc zKpxs#M8jo>6=PdEfM(TyNi51#E#BHzkyGtyv}b)4ul@kv3FxE?OxP(@o#Ki%Jnv4& zs92W#<56}uK57t+z#3g?a9ADP116YjK?NJB@o>L|2qspi;q{s9?vf2SW}mU9P1Lsbpw`WO1p);u5`6C8nQtRHDh>LWG3$ z7tjECsxYvj_7Nur-anD5p~&DD-z#u}NDBe!%JRh6mJ&Fi#tFqI_jI*2XZjzc(OxKk>C3b^vCV$_ts~DE8Z#6+g3aFbr8F=e@M4{`1{WTe^dEZT z`)-`dJk?FM-hU%T(TaCO>qeayGz&@tk~Y@;b*)HV^8mYt)Pc8>8oJx~I?sj)N6f=F 
zQPBr`>zAJuUX}!`2_UY$YUA_Q-Sqwl2K@8#kh89EBDDJ9@tmI$@RxR;*tPHj#QnwR z`uHQ&hgZo3|Nn|AuR(Ixr_2P>PK=8i32FbZ&)q?iDff|reRuNE7RZKh+I4n`MM`-S zMZ$O(yguz*+3`z$NQ+Xq)aC&y^7fQ`S*Pj8H|;Hqg7KNG8b$U6SujE zCwcY*IDu3Ia~NAARQh|X?I63=TVZQJQFCSXXoZ%MGp}yaLw3dwc0jrm8C;O#pf2DY z8>zoT(%k?kL;?<4tZ5^0P1dIYd;&d zx5O}-eBP{6Eyb&51ik_Cb*|~VML00iz$(UN*478?%sTSL6usO1f4|SZ47yu|*rpxh zD~YPEqAgEd=f#raLBd~Onunl|2B=-7UN^TjH3N-|Lg9#;aZlN+AC>WgFYKs0eUH&q zPtbuhy{X@JJrwAHosrxD#JY3lq`uO+-22J@oVl%nS~zzWAYAQWE@Tr+a$GPkwf+Xw zxxQtPcicSiTil^mM-KeZkZ-o8W(5;eaA^4bsPJw@lT;F$uqlrZuWb2F+C<`%{QgGD zfr|a=<9B=N+SOGlaZv+0l$ZxW7lOt)ql!NP(ogt?#372_z7PQeu;9Qn*2=UFgeK z^MsX{(E^yy6+Nd7$DjBFJNb$p8VT`?1W1ftNh5o5kx^);7>0^G7_?^W*s**4s9YV# zvQUoa8M(n*m{Lqlf`eNI&UMU~6MCUQG9&!4#VN;jC#Mv!GRf7f=BvXpna2S)x9A$;pEt%vXFq&jnf*VbSWU3V& zK8*Z$4ch)4m>GB9g;^d$%v@rRDggFHCXRe8Ev0gO|7-;^uj83?{zsTe){v0=^YRQ# z_04pKIk}eW|H8kA$?%T0KiQ3_qwsc?<_ikP*~`8QNXFeq{2Q5?_f(fykCgDv0s0@T}3OCsUY+Y-Coxa ztY8UT)=^@GMnbS)W{mXsbUFTw{!zFL{FiMtc#Pn11e3gP26$6NO}0^Tk@_bgozDqO z?1uL!bM+lsbi`pMZf?f&6j$&)rxH=qcsgN!ezlMB-Rl0U(!MeiHV?BlV+(bazYkt@ zmPv<{zgoo-AjNhK%IN|B1dkkoAR^5_GC-7qFR5q4P>0zUVRYsB;rkH|uYU9HVYTbl z526Wz;}^)Z@JQypj5SE(Lr9Sn#NsFU54{Q@EK)iK3Oj6_W7DE!=mu6?>fHk|_I7r1 zgylI&%L)wQ>4NNql(4JdJ4>0>8S=+pO{D`5>aDH&yIxY0!&SMT@as%lpK#(*A&G!U zh=!L|n>TraC-QAJ&3FTIbRP$vj53p^CQ`OYrxZzIuohhj8~y>+;K8&uiu5MkbHe3d zRG3OWNBl6zV^pBRar3ld>=70skt!>!QcN zLRm5tR9Z7+a9<_gFKox0U}D4q{OD2|)YsGx5CtFqne@vDQyBWPmSTEmv;^XxwQQd& z7y0H=TyWl!GBT0#E+z%((i$v>jw4$}Zn7fIYd$!CWLeG zsKAWkzr@ePwU^e|qLY1!H*DI(A59c*Gx~NgXJNlXZLWp5JGg*G=u#9wnNz1v3pBM{ zBp^Q+`!Kk4%BJ2h`=cQX=Ob-YoXO`e3uDeGHoA<&HBwYQCAn$3L1S0)l#pp)6lTK@ zz(p5ApBJNW=080=;=3m*!I^C5pn)di?P$_lrpGPQepOq5ns!Sybays^CcaM}(v8_? 
z4h+rL`w~op9tGp#sL z&Tsk!p;Pz5$wJm`Ro*(6>T=1OZ2f)!@=*l3)=3Z(3tmD|>$R0BZhD387|uE}-F+^0 zuaMjp$aN!PCLA6#Pwr7`^tz1EaWZfo7r>pP4kzOwzuw)M)nZc=-I&L1qGkrq4#79| zfvWq&icFqtr`T!S(WY8dBMojVWGqDN8Ad?m_p!?uGS z|G9npxU&7U4VpM-Lz3#mPLSGa(X?RmK*tOtmjeHTuOezvFoeyLYc-PdAF)ZerzP`L zzAqL=gQbn0t$ANv>zNa0?9!ZtH?p&{FEBc8OO@TF75Cpfy<9ce<1_BClPia?BSxq` zK$pzQIlp#nf6waAeIw^!I@$m3O7QkQ1PR`HRs7-BG2>ldO*JL~}4*66P5DsKDOj$=e)=blHO!?4t~1gPmnGoz_LyzPv(MdZ=``jf); z&+jxk_bB~Mt;j}wa_+u@6xL56}%t1{ql>hCueciWte=j)k@}D$S5DF zuSM8KQSOI`FS52u1m(dT>L*OYzj#-Z7sL{Ii&Ra1Fk^-=2TYVpxvy?^>^J*)aNB$$ zlsL-O)T1;$3AqDk=qBPmeDxZ9q#3Pv?fMO_O=#e0Db<5Q=z7$4^ogzM=VkRgqBP!q_xMI_b4HVPVpm44Fn-c zP7_l@Z^u88qz7Q_!u7SF5|uiYwyHB(6c0!%Ck5sE8M^-g3xWnC=<#+g03Z-OlLKIn zrVikZrT0+5DAfr7;HaM66wih=XZ}oKR;-L7%F$m)pSMg>h6mPNytqiCd|ZS*ZEc8X z55FSiIk(CcnLD|_cDQiUG8?4L;8hP>pQ+5QsqRd+R+=M|$lx28$9b?l0!xWfC2U=$ znNAk%gxGEc&4aq#7jNcC1~MJ$mOWyUo$mku5TRlBj27ZO68{wh^Bcet-l}^5xROZE zFJqX^1L=3qv!-eq>(Vhf)%wi*utA+VjRM!CNkNx&!FdE5QG6JU+71eWmjiv<%8W}M zkB(C27?Y|avGy8N_EuM6dt=vpxk!T4C-vh~9>p?=mr}TE^q>6&+#Yyb+&Yvru(IJY z$A$}aM~SbI%`e_Zs6O9nsUE3}ZtI zV8gMj!n`;4%zmlLu9-w06_yp}v&?oAThMAhnNkI%tP1d~t{BbHSh~XL0pT(bHGyU? 
zQE#Y5m2?v7V|*Xppz(NbE%Sq5QUqUJ{_5m0$|;v zEj5iWHur3@;r4kl$ zhHaQ0P4Dg~VrfChSbuCzhpW#sC>vOGN#^)*g@^4gT7R6MBAh5du?4 z{P@X#ufbUo94-Ed$T)acQzP?`H=;7WOF0?F{^ zRPo9_Z0U}7OUwJoV3{T@TI`=$1x8JC@|Vl}QeQ^eL|u>g!i%TemR|JO{9i_AdKCEY zEs5CF4^w4qnO3uBTp|XYP)+4v(m*D>;Qd`7~Qy0qv%%q+G2SbaiDjH(8%Q7 zJ2Fm`8O>k^E$_0Gj`7^!wgWTYn_o#XHM&$J-ei;BGFN1N?6wkv2N{u<-JgzPYi*|rZAnE~es=p`e?|Gud0ANK zbcx>eQGCJe_}n|We+E82U0C$)o!<+;?b{>*ul$U)p(Xvo>6Sp>t1;Wnj#U7V%&WCqeEj03`)w>7>Ar+@JX3 zzC9iw)M?~KwPjkruKmTc>G()0=k18e96VG;BI@n1qRtq)3C2FXjDXdjRPOuYZn{7o zd`rFsV(pvX?8b$`{g<5d;0c%?dLUz$&xEr_J_rcT|D@VRf;suPzrKDFsB3%sM1kne z^q*^F%zh1&xlmqH&PBt^>I7+XcioDZ})l zi*RcRrQtN;U|%IbXxXRLqOg z?ngb{L8AgA46sajdCRAG2!K3n z)ufL$X$S~4iDb@~tB3x~TeSjQr za+=`VKl$hvY0jX#Xkto%!7m2Mm>m1E(y&`ki_fu;8OeuexREi4GV3a~VyY5;A!T*z zcI}FmOnW*}nfsBuo+i?Vq>NfbH6!DF1M)wSxYJ1QdtRhvarF+Ij$j3q_&1Ud4>pP` z0~9QG-2?G(f)9+>)gZkLHc}efrf5{kpyyHMUk53!7oYg$-tK0F=A|YbDe9j3ge*!-}e{~EH?#zPo(CN~xgZ@)V`>VyJ;Gum$ zKKBcA3e_2`@e=Jku7c^awH9I~cG!Wdub|97_PFNbCtCeZxRzyP495)i+Z`t(N}tr1 z7ojj=N_Ny8x0ysVAL1vc3^ULJzk8FJd?vsqrwo924(ICTyX@gHv5o&O+FR1kCjbI` za9p%<@uLa({d|1h24vjMDJt@RQ7_Yto&;M-`{(P>XUUk0xWq75GCsiRPNow+Y`G87 zvOE~CT~@un34*y}3=i}$Kq8G8$LxfQomH><3#B`wMb7e+Ru9da{`#x;o?Z9}q^C@} z$%)GS{fn7gK=ca)>SvdK*lje4*$mP^3WuCo{$a);#N3SSb&XtZ+#+}<`?hy^k=$J+ zgGw~6GG=S(-TjXcx4qwuYv9KHLyuRlW?#}){M6H=o@3m(7&Kb37Uaa>$XHqY0UE)g ztGAz(!)wOl^lBH&gQr@VaPBfm!rqr^s`v!1A!yC@FrtEbU8~?sguyykC7=vubP}22 zA&((!+y3^z95bG(#phKiLryNOT4oad%pQW#^1_lCu2rHs*LW+n3vy@0*|i;gGlaee zxsJxwoi){qJ~Y(xiv9C@IQJi49+KFy?_9HxxK4%9B#zj18&Kj+oNMhiB4-B(y2y~C z4dU=d?%2CF8-1$%`;7KlxTNaOS(c`4r6kn%h{~0L~RVd6ZPfYKF=` zwYcZ*--Hiv`ORgBI)^ISuba_A3OO!@4sy(37{TsJrW}_tw$nr7k4O_f-9k_+Ie3`_o&%YNh{pW^iqDHoY<^ z`))W{>0(9L?9?F?cK~Vr2vB?03`4DzU@uXh^lI8%FmaiDAV?YcV6WzjrP4mFd$M5$WJr{V!!Vq6(5uJ5m%h`wzyXTS4`7#>7E_dih#@_jbXz^(k%bVUbYbzKbY*bm1FJ!r z2Acu>z>0Z0Qqhl;Qm2S7_r(x60}%ni*c9(O-)(t&3dEJ=di?J!sP)C?voP;lW?17n zb*iE!x2-;4MG@Y)Ti?BVA&nE95( zNK0I=zyqO~5Zxx?mZQU1CL|G* zrXqGR2iyDQ^P(G+-kL32&U1pInFe*ys9*|Sj5E3;U>9(|yWdTkou~&sJowuzYQTBt 
zJ$NXN83I|+A0CRropt!lDlAaak3)^^viwfv&Q>@I3nu+=Dt)0`H#wq;b-(LWRp@`? zVZbCSdlf@{Ezi!KDAb5~QsmL4RqO8&vP@OtpW|DN0ZqTniY@oZTCuxjVfE0xDqI7Z zPVS&C+a{eyU#KeIc6{h!c)B}V29l8)3|#dK!_A~OEuZmaSl1)!H)?cecTp9y)O}a3 z_gxzMc-?W2eb834@azS|Ck+lADszU?YuC-|Mdu@NE4|PF4iIRTndW>RS2Z``JT3kV z)RMV#vp-$H7Md8b4wX#%a@mswEr&#Cb5ipU@A=!`y)dR~>aHq?;}q!4g2S8^tN7hKB8|Rhwv8 zfk(<1yl+a4C%sdnSj~R%)796r!XLZ7$$7o)?Sqp$fjfaTaL0AMet`*63~ujbANr+z zt_)eSuazZ}Ca9q%M128!=_timj85bKx*{c%130glUyOU)Z3!!>Wzz7PG2SrxkX6`~ z{%+3B;%2;^?EqtD8f`ym+nzZkuDc0O!knsU*m?L*NkMn_epn^>^T2UDaB>o`kLf67 zycdQF3%{Z>%u|!eUIeDQQ6tk+-nyPRzf(WG+@eqH&L`LjTyJ-VF+(|%*?rOhDP@wy z-8}~b92$wWcSehHjjTW6Q`woxi;x+cvk*^y!X>GPm;0+Y`f0_xn&HFv9m;nL7oD>>nYB zQD=L088T<{wl&%2l~>D+TE`?lE8Ox#O})?sm_nR;H^dAY_@uO!HPtbdy9N$|3s|f+ zNqd{frCoG3#n^M9Sl=$|vY!_eEV51?LN$e+*QmhvXCbnUxt^$8=SRf!_Kl1zs&Ob9a^rYN+4-H>yW9SxebhQnSLfG|rNb?LvI-s^wB@G+ z|KlbTch+sB|7Yt~Tg>YQC?{(7t-JQ#uyw;~>6jLE+wiN2e{S8zRXx{g@9vp6KB3&z z-tP9ncP@7WZclcP{F1S}n>CzYl>v#RmL4Est-tTAJ_x;>Yzg~B&TtosuF1L=DBCFKqO}f4O zb4ke<$GxjvSA3j$7pvyuz&rr!I5={VNUHK73J0K&$1qEv|_Y|{Ew0Ei$Thph% zT$?U4h60zYfGkVy{?z%Im5FEd$u^}ylZ^j zzk6-95;!rsGCQ%dr`jQEvuZ_KYxdU+wK$!4pdiR|*7>Fk5nsA=sch`wm#1ZV#_qTn1cp95t}Zk3Bt% z%9s2cthJpwM!cod`mbC%_^KxwfVXS2-{q|Abbl+wxeTcVNa?FGD!KY+_J70WW!krKHSD8OlS=kJ$?x$2(jLc@0?Y^=)VifXc zosSC&=FH1c4n5|bj-_R?M`DyQct{iO z_FD9KF%)KY&wihCX!~x4x8Ax#PbT~$bJH5kG)H2>PMP{eM%k%T)!vzOzp4hNKd2~d z4vrQWktI{dsC8N=i7KD3Y(J&pYE~cWFPfIHy=jofkA$fsPBIm@q9`Mb+SCSgnJn#} zSf|hFlv6yj%AK29b2eQ2`Jo#&;E&svjs4x$#l_{gU3t#^5yr+Zq7EL*yi2ZSoN2g> zY)gMZxXI@=^t}&gJ{scv5n21B40k|4I>x&<2wBD&0lVm&qGKIgFGTvifB&BIZ&Os~ z)r0S^2*`VHWenJ{YxU2;r5WWWBQt~ga>z%K;rmd-Y*~5{OU;I@pJa#{aj!!}kdM!} z<5wH`7PFnq`re9-8#eTwcLkw{qsicnIatp6buEgxGdJOU(JLw}oq1R0m#?c{z508K z^hZqZ=%RoS8)x`4;c~@`!K<{k2u}IAm;_H9lynD%Wa8=N_C$k%+C|+Dz@q zAa7Ta=I9aWRdS^dP*}G^y(Vnsgp*(R##Dk9P#lYQmr(+U125h7NkHfM!`51Znb~sg zgscC`>M& z3UX%qaEtzy>^E~WgL9&tJPnp+^yiqw@9AdF=(CLX(}_*w))rj}Ndlrt;S1LyLVlf1r!;-aQoUn0Ve70) z?sW^_v%hZqnpIzNqKm4Jqm&4>u-p7QZQ83JsE4BT+n)su5?G_*+Ql}e>vGS47|i?b 
zl(a{?!>GuCCKQ%*Ftdchralf35)bG}Nv2IGX7P z;~6Iv`dNTz*hATc%6U)g-n41cW6Wn{uV0y`GwKV-PEqf`8E+H!m0t76D$VGMlrU#N zba1;}znmV|VoI+cvz4-k3Lr@aR9WhVg06)4E4;>&0Az3^Zx>?AaD0hG`~$*w?vZM~3Ov84s?B zn=%PobZwJKnh5l=&#GVY*0VVH>v<^wOJ_}GjBB{6vQ9Xc%AZ^6xV~JZ3|~vohHxE5 zHF2+|Gf@nhs1~~c1_R+f#ew1&L{!G)T|eo$CIMP923;Qeu{(Ok<7z|s_e!Rp*9xgQ zeX~!Gd3W^p=U6@EBw9Ore%PA5eClgXs?>G-kR)LoBvO!p;7Y%8*5H~Wrd(SSja2p+ z6VGgtT^;w80$ot=qANQ`JIqhKRsA1t3b*bu>CCWat6Dj%nIFCb2EioC;*`wW2F%9v3}XXI^-U>?AH3;!+@9o)%}OhY+$`&AMnA zCQ>{cM?OC)8~MBK%rYI$y83X7QuFs=-LfVmy`U;2q z*s8*gheY=u3B8>64p@6R$nb7}U(2G~ZHUrQc5^HEe913&cPM*DQhb7v$7=;X&DA8N3pbCKLn!D4#V@efe=EVu>M5PCSeP2G#!Ofoj(a;D z;(H1DDCP!&5l8ATiJ+&VOVv*)NetXl=U-jz)OXxlF$E}m04y*{%pm8i%&y|FfxDc3 z9xdR`kTM1b0|l#S-fQN|lghnJ89Fov0>sxTAf)Vix6`vOs!)fQz!wX&LtEDSpwG5# zGR4-;r3~cjTu9uyZv|8LHJuwGUSf$J@#LwDl^4$y90l^1ap#$9OD65he$^%c7 z{M$C1I|!c66%V3qrW!O&K2Yz<-=Gkjzoe4-BL4~NSE)k)Y&ykxSct7-!@YVw1;O;B zVsY>J4VXmq$fn6WT=7E^0st(qU$?nu-o#(~L2JM01>*q^oX^%bDT?^=(*C!+=s_=} zaP5D=GR!z)P@b!M^;%TnUNYk7fuwBC*EJ&@SCQ@)_y1N>B15J_9n5j- zLz!q?h758=%e|Kj7wGm#$Xy^Nz>H(&)eF3}XPN38W|mx7@zC*={zX0bTX`$rC2p*p zbtXse;OvI~d$(ECQ@q z6SU@Rj_fQsl!JOzh{H8TyjL)|3j==*1#j}kGREs0;BB(g#cKfat;3CsS`OF0hsZ>Y z2r1IZ04MlMu+gPoU!sg?XRr}0Gu z1(nxZOSN>dJlitken;1&q66mpryuOU%JVK#KoiGw0?vwg@tUBN7i`kYzRoXNT}PpK zu%IY+1sik{-DiOrOa@qKcd0fHtm$T;JaM{oHc`F{by!Qrw_ zj}_c_u}8*hVxm#Q0P;N5noJCdEghHsh^WMf2x2ZZYG$pLXxMmgD54>^+U9qi@Tp zu1NGRW2G;sF6Sv8kAC#{ojR@Q!?-_du@)i~vFVu(@Uic6*2Xwz6gcO)?wXyn$J}>@ zqX$ERGVOZo04usO*Oeun`Iv54ICj9m{KBNeF=BsW<(>X2yL;LUWA6_A7aiF~+mm*l zZYmTyF;(xvUVc(wlJu9Gxn;g&j8htBwmkh&JekTxn^Z+6zJHUu%Kf#FvND1DO5(-GnSFn_ zFzDR-1&~cDSL~x@KSqK?L8T$X!*s3j<7-?R2*u1vEmegQ~! 
z-Yy26K*n(+aN8|830vV0uJpVNb3PfW!0W=Xj9a^2maB}q&#CuhbKpal!suMH=@*cB z$7Np`?qSxnNt3ygH-|Q9*3A0JZ@$4t%cW%Ky-H{`tv7H#L`!GW^CHq&P(5XG(CXHH zM}4ncY@`vqb~2uS53OCQoGFg?>+B;NcaF6|SSZSY=nNVGL0q-`|9>Dd%_j0pgnd{J zUR0K0U&Yb4$dN(AWBj4?mP*ht8{M z%U=_h1!|k1$KKD>IPFGyGmYa`HENapHo@=i@$V%x4If)AJvdInJ@WiDx7Kyhu_4qdo?G^i1 z{4r(eNRVOk#UeB#0R--^uQg()`tU_#)ixs@q>QvVvywC+TQ@7mqs6m7q-O46cyt2w(UOq3!XTy0pBqnH+$BV6+LEj zt`&I`-v0~>wT{K$Y{kwHqZB9EUqYWhe?=dKB1?@1^JhV1lG%(n+bCQ5B;$03l5tIU z*mi&5=i5#DwzMy#_(}Olxqa)5;g|UW+zw|E^+H!lESoc5IkyY0JgcIJKOiTGQ7$Os z)Jx+9>i!$f{0R&cpF*vHc&BoVJHGniDVwm^FdWY7-S?x(4tJDDi+c9aqh=F1F^zboHzE=kFY~6(u()+DlTWEj1NbzK=sr zd>=j!r!9uL{AnmMjmse6bl+VauczghrV}vYT$|kBK5`(&oXlCPrz_L{Oo6?+^kfd5#G<-=_PiDH0KZjE zCH`Jog`3|orNY0@w>^%b0@{pmz?F@D{UoO?9-AXec6bD1o%SVk%Q(>|#DEJbF#njx zeg$*koBFQo`yMi9oBJT<_e4jEhk58WB1l_b@;Kq#_e1qyv3nJIl z)A}{4U0Z8iX-6As@xTFVUACn+s8=uY%GNJ2s;fe;a<}`|7$TZqFtGl27Q-RP+*_Q! z2V!+L3OeMirIb)8&h72L>Wcu3#)e?%=vFjmT<+6y?1FP(I$~5d+G#IgKHYN~5c99t zw+E37?EA)l4+XP@-3!F%G4ekYqk>2r$1V&O7>%cu&Lmgm5V%x`H7XKX#`)9$;TJhk=?CB+6DR^U4qf zKnHe^R`=jXAWG0ttD6*@i z?~Gk2lVnhbQRN`R&Zki^2Fvgm5(e=n5-TtC&`4}7S%W?5RLkyF>4N8RxWzJze1Aer zdLG{oSBp(SEep z#24Y=ysTW34^*v-`sESb2T(~ES_d)b2m~ONEQr7&*f8>#kH0Ggrz}I!=*mze+xo`C zs)Cz!l2^!8l*6{W`_!ZOyqR%&b}i4n>TZ7?Y!*eTK+o_3`lGvfP!6~B%Nw#>^rXBqaB;m_ofndC%~ zbnwT<5K*5X&kAE!0Cl9S9TLHT%zt7(q{(oYXkmnATmCU=GS*l zRK9mB-Fx$YfXxL*Uk3{Ny1tAjd2R)o@D!)DYuAQ>m8?D$zLwfNRrG^408r!y{=dIhEcWsj?H#Fy!|l zTFR8?@6Z$$xKwZvmPA>Gr4r3wME#1n-Z=rk)b@!*B1kKN>Wav~es1W%C-d*w9BQX% zYlO>h{TJz zX_43V?UCOYth+O>(7Ol0Z|USi#=a%AI2NaMOMYj@R4`#xR_oekVxAqbJbC(c*q>Cy zztbos_r2*&6T15Q(oQ?-^GTAez$8=oyR8q6;RHq%rbXr+xsZo0+}}EkQedOm&F=(2`cHm*Vq80`ecl5s4o+uriWB9r1uSBpi}?&Ph=w-|98%@?!C2Kb>o|*g zsmGpQ7h8rgnHA_*SA_4&&|Y917X=47?%4ohQ-S3=c;2`H$*tOrJO@GY=}2Zi%GoI> zp%(s9O*3r4ejgL3E4U1&Qq%-R$6U#N$$`|lGmjClc9(ppWa2;I*!*r!F&)YIGbkM2 z{Se@|{5K-jn`cVnl9GC*d|v93yC7|KsCvx61*H!MUw*c_C??=tw2^5lpwtwq$z5-r z5x8yS#q(uep6SmA?AQCbPWS}h5}|9-+UPcVVjERaI_=(%+=||xetcf{&Of{Ix}hf% 
z+dNS?6AFBTsORk1aPw|+)pJMqk?olEk-INJ4TlU<gBqJ^)_g!}We3Usv ze;88a7lVQQ1X4z2aiF`4Q&&ffLE%$HoFVI5Af6u4c%VmBI(G9iM5zcQBvFdzk0_o( zCjC6k1`{Hp9S2R8WeD+)lDaFOh#o@htI0e9M~H;xi9^r`uA{hQ2|9)mj!am|pbn8i zEcx%zmCN)pfk7mOgtesPLuHbx$j2g{z>erJT5%ag6MI!zp8}vK5BA=^e&MguyEC4= z0-rwcBs<^)nNr6UuRTg|`DqD-PtncFW+Aq3GQs<0p_kmE)a377@OoEN9I}p`0 z5pxFaQ!?|%`U=@?`3_#>;@D`aEpT zR)a3&Tsg4Zv=b!lu3B1JBM$YFDF&h4Byfo*zQm9&nwqjmManeE*AR2E%mO2aD{0FX z=iFYkN8kUB%gXtP)Z~}j7WPC85Nmt!{nKEaZ8U>h?g=3~rePp>x}`HU)*oMhF~X}c zNF}hl-N>z_hy*H@zC>IM#KtO;nBq`-z(B>GM63~j?4*lBRFvplO%dPz`!Bwu zefOV}2fesf{%sl!_p^mORZAIp(N>y$t^S&IYqEBw9jW|eSR=imC73{}@Y*0dp`N1J zUgDtn=QscT-yCD9*FWnadX-6H4;#1r+y8NFajP~RcaX&|&OfGb?r18o5>lz0oZIdH z{`CKT73&z1YAYXT2~Eh`rcV_5Ep`67W4diw&V0P=ZzA*sXfW$-L(Y_ibNU!n4|;)! zBTY#$sEMl&Uu}oZW1q@0CU%Wy(#~mMJ;OM!9FhxmKR{B)FeumW#n8?56G9ipcJTS< zbH)xONsJeJXK^JpqX0NR=T^-h-@otKUq5;Rx38lq%7pz3t1F&=BoO%q0l4nnUwzOL zr;F`mc;8R_%6!Gep5ldum3=zC*$~-CVnSr(H zDHC4wXD33p*0Q!!Y4aaTB{ehxD8v_XvwbxvnMGB`L*!?6IDO^LAcoHY@iPKvugj%68!JDFm3$I`j~Iqth^AUf zd`cr(TE^$cc1w19nQ={o9WgcINpm)P`%SEtHq`iTZqL0+f88D98;A)CqH_~p8cs1{ zL?iAuv3)-r^ZT|P!Cpa7<=cI5_yTU^&C+bb9vgo$Ne%pm?TgDCbluYK0TX5n}P)P0+0&_QVM7};n0D*)_lzVg*G889F4uSdq$Xsq&Cz!C)C^DJ zWFvzorYnC&OfRmYFsaA$RVeOSt?_;t2Mj6RAbdqJg&|hxoAFY|{`*p$&81_LrV6)P z4I4&cem=YYd!B=4mUoVP3zK?&ZdI&namW>U!w0q)v?2@t-2TVg)l;0kD?I64`5lGo z#?O_;WCjENHBX*>f}-;@(p71%DpxH^XFXI*|nhdp|1HMNVvHFk zjHOH?J7K4&aPFfu03XV`w>5`YEf`{a4cR6mMurIama_4 z(UD>iYifqhrG)KH@g(*f>)3LPlDzH?{QdoyhbcnFSNvQ&#*U^aAp7YzaC=#pv2GD{ z_`knd`I7d0EX+JYx(-9;mBS@1@RZ;%=-r$(4Hajf{r5oyssBa#(O77NU{C~B8wR;O zW^B#=Q%DU+8@roI^*}_be0OJADO1cr2nfAfTK~rs> z_)gKCbTq};C5U&HwO)J)*daUFH>ap5939Hu{j~=z!8}e^oR55^E~ColKPq;#K`lLj z8pt;YKc16-hP;R!>5$1&c+!3vi_^{t0@qt#3I9P^W+MVe(`3vLMd~bHL6PR0aLEnZ*!I6xrc+-L8za8Y*`I-nhkFUv}0PEJc3r_3u{adO-bhk5(_F+sS^HkkTr7|P?fHTgOYQk+qqil>JZB?}fRK#|j=qo$}zx1CI4P|W+8ZuK~ zDmjzEHFLTM<%c@n^IPksSLe(n`{A0qZ>c6J#sEd+MWw?XN(@bLJCr~wlmIXGCv&0qu=Pc}y;DQLHV&Asese|aB2MN+ zGClWl5;p3%yLDTdaaEqzP}X`NBn_fn6N(syM526JghJ7&3>GMbA!$>>nE!I33glb7 
z1y%7R<`#vR%%LvTG3tMmOx>$pjid>kd-$4l~kRzdK|W%wU#~l{;OK&5w-q` z#T+VoFqS!YiGKuCxfTr%XQ8)b2e{X3kK0mOe~xSb{Y058OHo^`Y$=N;y2dNi(m%7S z0j#^@tLSNW06e5L^TQ49!E!WuVB{|sioRq6t1H@2v}BL915%TzAiPeN^0M9bN}qt( zOywi+qxk$Q)cmxG&KkM#|GVdkS9@Q+LT*FwAL(m;ecf6?k#OWo2F9sfsJ@C} zKbg6Zn7PLWF7PLKY)4ZvTQZkrp@TZ+<)|6(b;FmLc5a$*TM}31OKViQ)&UiT&@ZD& zIF0&ZdJQo@%2(Xc&@IYgcu?$GQR*s$kVXjVd@)|B3``ESG12~(+K#QMWl}1H`H_C%RDlH9DPJ$roqp^sNw}sTTpz4*CRVW(h ztHqd$qv6(!V|OnW_dBb ztXbAzF`BF(1k+Ghhe)Y~_U`iPq$8BRW^x%PGXyAztFIub;w~(%3tSzwZafjO^)W7^ zAq&EySTU%D6-g@I^MA)>$7wMSq4D2cxk;O_JD#$Y;zBPKyjI`}(_}!15-8aZG>DaP z&mBk%IC>KLBwr1TbTjs>sY>$f7Jl_;zU-AfOuqnw$0bq8FWQnb5? zaQOUk*h`%KfQr>G#nvB&Q0i8IGTz%>bpo;Ykunb3(Uivwvbj%owQ;T#y+<)lA#0!j z0nl^o>WU<$^fZGMo!DYghWmV;%&8ORG)yrNc|@^KpF77vc6UT`u0+%?yn(Nn77A*? zY0{;udR{~oA+s2iHSJ9$StI)EN@M1@k?)xZi$isD7+%Z77gYH$tsDF75_2SEMz`cB z#jowXY|ZcWPllfOC*&OF+l0WsU7i++Gj>8V>(~y`paQC(8V%Mk74=X^_4*2DDdB8^ zzffjkk3rWH$YFBWFH=g)bBckjh7A`;yp5<)6B3Zzv}dU`U{F7fD&}`oZb#Lsxv8Gu zDag9`3C&jW&|FX;(DhhX{^(no<(00s-T=W1g!q?06l+beS6i7-@Sl^FFU}L-NvDC* z8JoKLX#-l=#S|Q0Wn1F)UthM1L9gjgAWw0sVVn;Mgd)oXW)>>=B7l#EDc*>12B#5j zF5tVz?_nq85Xb`EP7h?$p{(QqPngGVxpfe`Xz(#9Y&5!J?j^Mr-09NFzXu#FE4O!n zBWu`DU&$lj?aL$Z2F5zVM64y#LB2sqcVtm+WueM%D%1$D*c^9@``3njkc5lofR1Bj zv`Rz%kKmR1&38CSHWD8JFtUKkJs)yzxc1b03j0lPWG8u(LOy7iGQ=(qSTtaKKxaKU zn>@KCY~(TKCM-W@7LIuX zglngfFUyDjq$L7pk{e;v#8=QLz^}bG=kimfHRBFB_nG(h`@hQ#jPt;kGk3f3sW!}>P-VlZ<6d1U)POKtdZ<>|=dh#*M?%j0-@DH`PoyxuV^ zF57zy$%0f_{NU|PCHTv{o_L7x2|E$po@Wb025c?98c51ad}ZMUCs4!(@(1`HRO^Et zE=c{~%XH46VjRjXi_l0&Nttv%pjU3sjfh9kct31t-B|1~cbZJ@OR9B_L$%D<2~?^UB32t^=@)pv+j&>1cBt zI)YRS-AFh(!CO;Kx-NdLlnIcDb(+eyvMZ}kTmYQPj;3-C;XMNy@77+gjP?f~H#8d~}wD^JcWbt3E@WcJPN-+yUV4aJ@w^ado1Cmds^pT#!WKkl|& zJ(_4YnVZcMi&dqw2@@9@%mMB=7Xr*YLhK+@R>!vXRK)g|nLs@8Xh2Q^k9~Cib6~D1S64a8L+)PznJY<-x2qYIIRp8#3?mgg*__)1A8do^u9!Y1q2{ zgvS>yaOR%IiU+b4S&V1-K`*Sj#upXVfr-Od=XQ$G_Kb7OZb1sIRAgG7xie(>g==mwtwtZqwhO zA2c3FJDJEd^Ti)7kX|yC2Hd=NS7XJwLqas*7%~|H^Te-6KE&-&lpgX?JC59?#i!{1 
zQve#)eTy%-7;tcSJ;Uk5#V(Oqt?`7jF>LZ=NA_*-f(HR!cmGYvvBz=&z$DXQ`Ow8w z{*QoQOvsWf;#bMaIXWgWp}}kLQc*q??nJsBB$Q+1%0s7#3j{*A(I)j2`V;9Z8R0tv z{wS-pWn;B?=y2>aFNSh!g$NYlj4R$LsD|7AIYcmtGqTV_$29v4Av=LiR=`5Sch1L; z>@qy0x+>tl7+a!CALA$sBgJc2J_nv*MXL)wdFh%iB-i`N&EG!_+Oq{<~cf5 z#%5#q`A(3%19!AgIBX_)*io;(z+bd&pO}j{FUx`@RCW@FK;Gj?v^?d?Pbce!j0cL5 zg8U$~Xspx-F5B(o))2N1;?4}Q z0_iGekqjnEV}$G;Amc%S9*dZF@kH@xdJIc9iIoKarGvbe!LG>hCyDYwPCS56i5?5< zBI0o|+0hi|vSkXjyL!y^%AQhjmjXEKXV_4YV?dE5N$CZrGyzb~oRu7r9bWxQI5{u(Y&URb<<ZysBud&4_V{4Qk}f7Wq1m zir0k{+qGGB?hr|2Rk*Ae!+4c5i_~UM+3r!S%cd^dRtDcNdG6dd)vmvT4@OMnThDj> zB3pFCBHB)rrLp62E0d)%vi=3rgf}boYB0*@2Mfs{z6|(L0C`=hU2RZ{JqAvvq}~k03Pc~k(C~8u29)C4injcLw|}dDbm&=+&srxm z5lJ!!l?hKVd=lM0ZL|JN!lG=g3Jg2~O$AdB|M-!IE+eF8F0Yp=H-rjN z77Ww(uFQPZ5$JCuqJk8uN^LjGrU_mxHQ?i+i%(E|%J+gg)tK2d$O=2c%vu+ zo&=Ao@QEq&-u7X7dd+nW7Ax|`L@4Ig7;|)Pvl>=|2QRNVUS(9XMfwS*6O#5_)$gSJ zZOWHZ9xkD##a%RZdnV5}A9klnmDam8o^PyC@2FaGdi~6HQy+Go)2HsOMfXakZSPpH zHmQ8NQh9dJuW@fwK;9!OGW96MajaJ|CZj2G5Hv#ig)qY275{+w_ zn3@hYGiw60YY*s@cs()c2|2j9Agw8F)m;j=;$6G8Zhb!vk@Cjv+v}NY`m5!yUbU*d zreu&6>=cD#nnt#IR}Sb;=D75)e%OIlG>Kfk#2(7QcTy2pu@ZVff_OZQ7vLPA4< zpE@*d-MXIQ)3^I|Bo^J$-7fI&hsTqfgGSt-!%IarE-Kwx3Xb)Z+9q25h@D ztJ}qYWYF>BwqYoZwnCFK z#fn3-r}w;VGoN)-f7P`|kKwkqw*G?~?A^Q9(hgolJT&w5W%;ZYX-sv%nkhy3*Sdhm#=S=#LxV&bEu9!0V5|E z1kKXd*Jr4=77ww8;(L*-3~Z56xDQzm;}PaMLZwx!o9XGT%|e=iop1&H6s8_LxWL_1m^W}647Y2gqB4g@ zYLCu>hV|>$FTUJYNhzh9>b*g7xaC)aXMFv~EW*H0pp8Nc2bkM&aW~`RO|qu-T3K_v=wqhd3qCJ-lIF;3joOpSv_| z?tmVw6u6pNZYo8{2xMHJeO!N4qu1O*CVaG$pYf!Uvw`#L@S!c2Yx7*|H)s$6t`TqD z`#|ZL@WRaNK72DZUESs(At5FsM<&?jGHjT6__8k5IB1o>bVq~UDEC>|m}b3-LXHc2$Qg#Ox86@l zcK~cG(e;^DDL&U$sZM@g&vm@&Eo=b@b0NPqfLgqIWXIO6Ga(s!(xb0W zPEIzMb^q~WCE%ai^iBCe6;HM|_VV)joRiRW&)&T$C#87(L|3IowsL&%{JAnX`vB1# zOtG~5pxL`l0X5~CBD6un48(vGMZvR#EcaX`k711Qw9|q{hzXn8PS3#L4M8KxXz9X* zTj7swYHEG|{yp~Atz$15yh$q5G6YHQ;(zb5=a{EmPMT|imJbRsSMS@mg^bWy=4NHN zanwzM&FZ%?*i{rPXLI>Nj^h;4?169{c#B^!>1U&&!oEiFN(X0+uSsO8{AKI>)K-^1 zRH;py23bi53}9x~6MpOM_dL1mUP8iPQ0x3(#!;9@v>y2D(@?LaZ95!2e){Uw={kF4 
zL9;WOrq3_z!jFJ2AUV@vok@+cCc}o6q)9w%+H~RxJ0{+$fsjZ2?k6R>$*57cctqZm zv~!6hfsSO*JFrqPXkEJ!JJ$tg;6+bdXmjn?mr;OO3%yH=GTZUxhf~Je`X7)((duGy zvf1(vPwR*$d3qqtc@HsQ$y*%rGYe&S%78|W%a#phEr71*6x^bte`J0$>(Z=t-MUp3 z60=>RLp)AKn2sB_WyldXvyj^qr{npEVR28NHbaB4!$hNYg9gLt7pj|#VU0V{n78*3(|| zXj~5sjp|$}r{wfUj~atc+~hvz2MtuxU57e9z22;P92c~v)u?eM!3oYd{jo7mag5LO zo_9IR`Jojc6Q9}XcRl%21eSKC#!ys)3vJV@mMYxERq-MiOE-KdMk!tSXZWt@aN#cWV=Wj4q?bZDYG9BxA#*^iiO zMz~h2qe!y}`&wMQX>_NJ=9==G-w*EN8@+EHl46ULuP#}`py*>>o(El3mN?X{T1v+J zv|G8(0XO+X^6UMiP|>RHxWVP3Cwnufrn=D{e54Q5pLaGg@)q(>E#^QEly*9L{8ntN z0($foBNE#X<(a2@AvkJBlYUcR5mhnr-m;|@)UOtMi|T_Q9g6VSdh+0ok-=v#TsYJB z)8PxAo}QnW*)m%BeTt1uSLR1H@O82%_*gmO`!1K(Gr+P#j~-)mcD<(Ada62*Q!&cu zH$U=sn4`ujDpNa_)@rxLjl%Wjy?Y;me>RrrR3(VRt8>!X{oW>O}krTiGbnI4Y))K%WSduQ-*)UaV* zTTY%kch2m6<3ER%|LtW(k%-Io=8?f0Of+ut>a~&D9HPUV(=RWtL&f4|8Mdx4TZ{u@KITX)NMhWCnNy1c zC64y?u`r&q^giWE!pCg)n_2lVbtM{^M<{#~Cv!HlUcauwV4-5rs#W@-VPVX%C(d|H))G8oC;8L6qM&o3?OAYMO_2MGjSdc6Km zh;3{(apGe{3ec`q73fj7)4JAaz3rq21F|D}XCu^`-ZTSs_<9C;BZd{P zqz7D@l@7K$`*VR%Y{wJ8QTU0f< zF|D_e#za4*rK2+x;KHloBHN6&czHFDzf)T1h5OBWy{<||Nl#kWee0ml;F%lQ)>e1( z=FRU@!&*}Zz@jyv#ryNv>&RdScYuHN*b1C_!y+mL_x?qh*Tj$`r7~_T|4*y%;W`;c zinc~=`zsWlZ46sbIb?gO55tAmqJzG^3iEw{d9@I-EKi=S(d2a|+Y)dqdobhT){`^4 z^yqO1?BOvC(VoD-)JsWHy)o0nZ9B@hL;Mx~?F8FC8~Qa>nlNUbwY3_(gG2PM=qnRb zpK=7fNO5gBK(5pF9X#mG0I68Ia&ejh40uuz+`^(eE}zlfKiL8wlG*V)+6SV`%)Cox z_{@-|?ZL?P>#Lwq^kH7V#kX(RKL%8W!>QcC+DzNoB()#9dC-yZgSxlKKfiP+3ruQv z>(=dLimfy{oQ*Tf!fC@7jN9{QQ~%zuMF3wz}S{iN~-#eHXXF z{`BE93%a;-yZgMF5FwXQemzwq{ekpbvLjJJ*C{VAZ%T}dEIgjr%VL+8_HiKJbY+O zH_2auU_Nxk#`DC9&O{|{2<@6$6OT`D87T|9h+UB68dukf7jd0w4L1L$yUWn*)$1-; z#%Dfd6$KfP+U!YAuge{hqbMil(mrvvVp&6U`oP5t7mR7qslw?=K7nRm|I!Nj5EW8= z;kCBM6Vb;`ovOmmxw*KwICo|jI^?D-9&0G+%w-+;H5Nq1GvYMVZOxF^9yZr(A8=m@ zc-86Bo{*5Pio?X1xN0f|A3sJ=5%VLbXomMJMjG<9oP`vh(Qss;TzqsPMpZi8KS;( zHu_f4S4Xw-7*=(siTUEjrQ5%km32^8uS)9>p5myce)PDh`47#C50$B2(tV!kym=j& 
z`Pl@qt#I48u^LE(h^V9$X~>Ac;6;nN=Q!vS2Wt6`ob>kUkt6MKG}ua>#+-Yk;3zak-wy@hR+eFv!D|vCYR}OBlCxMRGj58dbTyXY+)C$YuCah zTAkEwx3wL(uD<(_D$&!u;dt!I4WB)E(v-+`he-q5n3*Js#?70%Lu#g84(MraVR17f zqpeT7ftx)%>VP!sw(QVKSviXl-dj-av+eA<*_+Afw|zkH#@xh<)d5HGevDfS3%^M) z8F->Cd9BXib*Z4&s-?hqu0DtT^s-(ky!Q8B)kJ+~ZESS7r^zJSQ`Knc^_IVH%$Y`6 z_lffp=JaC4j!t}!CV?Y^hm0IqmkYvg*^~UjoEkQ4Si64xO_Y#>2E~qw8JCxGx+R29 z`9-I>5z6>7+(p)KZ+!|OYvQQ?=73--*tx*53Wbi24!tJ%U>dkVgMA}|Z?f5lSG68J zJ89YC^ye>MhD@2Fj1=%=Zo>(zl`~{iuENYtj3flR@bvPsqxm=)fdYPzv2o2MFE6bp zGO2awP>q?4Vf+`_@@V;S^pu%18*+N+y<^kTRDj)B(4zyDvQY+m)-6dn)f}OgAYZo@ z7!M1YMuCOBVikhn7RJh@dx^hy?tn3kD|E({7NEe*x-{2q0oZx={P|`EHF+v26US}QesIvJHOk|VN#RJ!UXv>^Vh{FC z)?EXQWHNT_W)qDeW5+gJv}lp-(-C17w_dzxg@B8BQ6tf`X9o>44>%YTqHSQ%igDr; z%iQDFetTdy`4v}FbpG?EyL8;n`_=dnKko= zP65UAT-12ILi3(SbU@>=o~Lx(j85U`_5XCTr(B8V8xR#7K6kDORmAg{)%~d4+`WfC zogIAqI7h3h;fjJm78Vv!U#Wd$U7i+_&htw%e=jXViK$BAl~>R1_FQfBCqAIw&-D5! zDa8bOde-NtUtP`;fTpT#+DIj+XJ~i}P=Jb?V;OpD*RWx+1OtczS2Z1-i8>2k=P5Qn zcre;}{(RQM<+>a7*3ntURBQa(FS!j>9i5zp+uLgqUr|V(-rucz_hFz7^i-z{wu+*R zB^#gJ?OL~LCCjY#Fg($%zg_tc+)yIem!WVwaG(uSejC^XvBhxUgfKLAjk##drqt#? 
z-)u9P5NR?r%z_YS#T-z|ivCX2+(eC;A+u;`KM`F;W_8&!?eHY7eOz2kYT$+G7kY4D z>|W+mWlI~ckg)fhsgE;ro9tBZTSvxsQ46?FO061YIU_6%+>%p3pS&Z%TGe?d$=>M= z&HeN1t^=FZz2mrLAs_!QV#Y$tz|u~-Fz-KFHk>9$7GUC5l}XgA_5K5pmGHrSZJ?q# z%pp{qWz1{zp#d}GvL;c80hl-Z#c=%KmUstSHA^kbml>|65Z+;+B;~RJG)X?-F zng4?-L!Cn>Zeq*UsjIsXW;0%Pi_u40e-;nE@n5Oc{^;?y zB%RwNHQSV1FJG!crlfdZw0TmKJ$Y`UJFR~EEP5QQbW?yX%cQjQbb?+R<^Il}x|OB` zFm}wiYgNeGSo-qUIXN23HEgTjyLayql9T;Lt6L~O39Lq7+yYS+n;@mLiNC);CDDGiMi<)ZsZbg;Ya;Qca--l)#{oZlSDh zwE}TaxKUuvCPo+}m<+qQy7;{kJW{=2GY^)Epx~H%T3btN5S@Uop&2N5ogHA`TU$xH zLtRJ5!w0m;Uwxe{I)@=hUMMih1|Y77so|laoA&K%4evGM?WN%0j&d&m@$wh-AQgPs z{mGV9?tgxr>W4wgHViou2WAOcn zEb~fbT~0?yeWGhHe8$r%$cSkT6Ri7T%&=j)&&ZkLVEPrXqOX9Bpm1EUrZg~@Tq_1h z%nS|f&MkWb=s#(R?Gj{3VUAbXh-X<#*Q!+u@bT7-8&$|6Kwv3r($dl< z{Wxg2q-mYbpab%6*C^_u4~&y zwF1vHq6t3>mI{hkr;QJWJUTh%(_>tkD;g^+*LELrqfJYX-^Qko1)> z+l{S8YW@4G0@7@R!x@`OWz~&J$^7keuIO1zG`8NEdmwc1uwg$ZOqsH^th6YbS8Gqi zM0;SD5>DexdOY>mYVo2)TC-!_EI)oT8P@9?`c;7x1oyT= z4NTfh1D8$LW50a)FzV#TxMk?+>mLD2VDqWlpNfN$j4?}ZnV`&{vtYp%Dukh({MwVQ zjEyg+`@NyHz^#h@HR|1S(&1;~#bKo52Gh2jY%k(Yu(ZwR&h=Kr;xus!QVrYg8hYC5il;5s2Zq*PN3^WaJcE2#H6m zcKp-fIj>rk*CP{|un=rphpCU=*S7%}Z}#D?>guQJ{kM9mE?^5OTU}dQN%1InR7?2Y z8!uj1!dR5}VRGo;(3?R6p#v&|%x4s=1)H)93PQiurnZq@#HHxNTJjmXbm=l^@Zc)6_)=je zCh;^rm6VV-ri7*Pbb3*i+x~K<$FN?o;F;#ezT;+P-+Sv&@G`n7putHCD?7V-6#aWg zZ{{i7B*^FLA7n4+PQ;G zY9v5ojj6f?Hulf2ou33ySi`)9!j1Ttg>|C+vSmto%_o%m?cTi^{D+@Ai_eqt@NWi< z<&Qk37B)!I!G2R!&t5?&-*o8EtOMapfi`&ZENd*Cv`kKxW%!!j3D`x!i@( zvx|DSeMzzNR4Fg19lY8h5nsy&O-r5XL%O%P#Q~tN4?M)q485(PZE0UNa-Vtc{67H? 
zVH7E;ly>jlty{Q{wE+wQz9x$S@eH3Z;c>T8u{M$pztsYT8_pMVV8Ij$J3Bk6;8Rlt zd9k;*m$Hk7bt7EtMAgRR_+R?vgWMlO9=ieq+Sqw+x+ zW&eeDQyi@xg?D&>u170h7&8uu$Mx#~3bs{=)LIK3swU3Wc=Tn>*S&Y0gF_ zVlM42`oDRL)NK`9MtjCN2n;Zfw&msZ6_@bSTXC(e)@%i+ckzBH3qJV%+k9d1u&m_6 zq%bFMZ*OIT(TF(L3u9yhlQ&n27MUk^iurNHxi$f_Xyl$ zL$-d9)IFOTQ@x?M`-E*aP{bl25TO5bTYzA#zI```g)I*I`Sj_N?LWyN^h&`c z*S$^{c1y7CjpY00%a_xO^bhr4wE;;;%$LH*%Dm|{oDLVXv3{`G)VG(wbLKzX@4+tT z*x1-Cf0qG_;wU;c@gedx9IgEK?W;$il1}~Z-C6{gaEAfCdNl&(i{nq|&Puux z^0RBi6i@Pzfa6Xo2e!6STCv>zA1#2g|44TV7ns&TJp;1Qs#xZ8oVB6oNedFT?%f-i zB&4=UORiw$L1|X^_MJO619Zx|0m6a}@(1B0-mxb=O^;(w&;bG_3dgHx)iOCRMLh>)H3u)MA_ucoU-Odj157 zdbW*C9dZjwh2j%JhoZiLA&Eo5~Z)-67!W%h^I8jSi zH)W`cx^4WQR*eA9-qHzDb;)OQhldwD-K&+mP`i~4z|7S9zsi=7ovCW<21GLI=~XFe z_;8@3vFj}9=qi5&t>&u_$Ef|CdIYGHqgpjx8wxj^5Zs_9sleGlV;&X<+(u+j9=NVQ zyj67*Q^JD1GA$tS6B=PdXa(|1EA*K1euodQLEM73=wtYRTuq3ZuG7D&0j_|xC+{kXHKKlZ8sr1L04N}$YSz13zI`H_^qH` zMtv)t(3@EmKU=O$1`Ip30KARQenRE;j=;cEQ4c}s7^&SrqNd-cM5>J1Cx17c^xJ8> z?mx}gQ3D`4Bfj}G$DzqWJTkp8XQ)yg4RjhDOSzx|hkv{Z7YfJX1@3sB;U2e5ym6$w z=9;Rgc65~Agb=!Zy}EmQT&-bfX=^m`2(r)zs}}`RSnh)d56%}p__2IS)la!0D z^VG_n4Ym&M88A6ouVU)os^Sm4%9ZLms1iX`chRI!ClZDC^}Lai(p+pz#%|W`PhV5d zE;SUauHN$0$cTLgPMQqZgjUu?Yz1hL_{p8TeT1(2YMsEXAIv#KBJ=>>L{?{QZ4GMV z;4p-K23Q4+DyN}D652@{D2$twZ`=r_mMyoCv%;K^mo!s-*L_mRs61QOcWWY1s;llpYH%Fsk9EwLi<+ zhnUypp9Ks4)Oxxz@$E5Qv!*|P=D+kBDvQ)#s|yg*$~!q)bNhcnxhn_G*jv?uYFFVp zHhsXdH`VE*Ym7BEFFh9-89_tIAyMnvHRQ!AuA1ZycXv9Z@h1#@Du1dxT%>yP%|tu( z540LV**;AD_1LYI5vAk-y0WuOYe=hoPpc-D<7;@Fpm8;qehFV7(i!RC-vjXm_FLL> zxvKinmM-5vwHLG;OoK-B2(~DeUXWw5syA(KI?8CbAlFl9@VLZhVcF$0pH=0%>3%-7 zaVHqykAoXH3-A5|4HxIS{?2th>+p7DC88#DDqJ5OTgm|v4@Hvvm;(m1g$tVWP_Q`S z2FWJd`-c1w=HbX`QQ)9m>d?M@6|{UQwvtY%-jPO8EySW8bqkU|IxHxWy7w&2!lI0; z(f5i@u@dSt{i<|CP@_$?kjm-yMZ2VjLkI7Ze~79lqRs%)1-7|D2i(ZY$|`1Fe^hJG zrVY3#Lgv#zKS_VVmk&P`6jWU_rS%i9Ip%t&8Zf@V2NQq01*(Ru*At<7FWAuHN`TdQINXBaZBO(7A(d8O-?ntp+{GS8xAw!6FURdJ8 zQ4B7};?>sKUQ_+m$Wfz)T3c&iLbi<%XPb+Uo)#_3=!5-Ln22ejCdx*o$C+0ga~lfE 
z#xW-aYZ|^EfKlrP05INv@yGO*HEY(ad=Mv(9<7Qa0u=jgpuxv1e>u{DFQ98l^1s2J z0paeB1O*LqeEINULujsBV951($5om(p?+Y{WDKA`BrpCre~uZQCwtx+gqVZ2?J)S^ z$s}8$Yy+pX`dB_q(g8o zbO3@qK>_E_d3*-vwLP2|`*)}I?{7;*QC7XDwL;Z5C9%=A9%f@**xlK+QoKIiNi78;+!K?oSd)GJ~A)4ohYK3IE- zhsMpC>Da^=Fgj0W6GfIeltj34HDGiK7RkZfxsjU9%=15p^z0UpAaZhYqz6}AF#%=I zdte!U>-85+gb;F8tvXL~Q9VA`yw*zDA_~1v3eM)Nf@;Qz1Aaa&L!x%={tBcWOu4xu zpbix_nQax5&M1;W>aLSJNhs-@>&DPyQws!}JxY*wW5S*pWJ(n(!_f0gLERX`xmqk3 z$!zh7J1OhK;jS^}-MTgE(Ba_7t}zLtkLYrK7P<|kGN5tL?$P6<(_h0G${Sl$mX8u? z`21f=imDzl3lK1!`=O!0kxK<($AqO5Bg&K$kn1>gFdF4~wQSvbuTKudDwt0Gq;t9- z*J;z*+p5!fo0rv6BEF$`1q>*JvtH2GQTM(QH8nBrTgdpku&CYbfoTd{V=oLAM z{2HG25UL+FpH~EzZmQ+WX;~l=C2kgMbW^5VB1kJ*lcjz@;@DWPW3DljG7x#qHmX6|PE&j4;w{F>`E-R9) z4T2uhQ&gas28C&2YP!D1V*~w^=(lhGfJIY|i$GpE&~a7%=o?iUyn`!iX>6CY4aRV{ zZrzx_6vbq9>DVz1!BdBYU;dzu&w4!{s^guAGVb{`zX<5*`)4zT5#WeolyMeJEp^2e zJpR8x(O)>h8UclAy?=FK2>{sH?bA-T`lDs!=2;(dtxAxka)T_{W|s5`+c(Q8Q(Bfy z#dHP=g2dx83DbiX%0vB4eM`*hmD0@@=M^R)0Xs(+=>KfRB?Y3fNN0HOWee1IkP#7a zs|M5NZO^%gTRM9h2pE#;=WXj}qN{CwE_f!rr$R3?p;_^3} z8IVIb&?Iqn5{h*C4`DQVEPo?P-hLb`P+A$uLob1NmjYn>m4O??iS?nOl0*zwP+3o;;>!eOmr=2e42?sCO+ntHz4mRFVC0V^(Y z6-~jLPG3Ke4u@5OEzG!1%Yhbir6OYaRU>2zkrx|7a-8{TDj;v-D4!xfBT5FYoBcbGuG)3QyQeIwlg{3&Lp>Y5S_%H~d9{{Ja9 z=aSF^IG|B+apjeR6bVuYYVI+eE&}8rfqS69UEqG|-D1NylLMc>s+mk1Pm- zd0^ID-a5r76b%CdDGp__yQpkz51)Qd9qu~x0#GN~aHq!1)$dXUSn0zs z=3{}FR1`%N6;^5Me~$tJ8t(M|*>KW%#d4)6t3QWt;IJQO{L1|bT*IW!q4n=He$Gw6 zlLc@CJ%Fz7j$T4?5`HE%)a;|sN(wgp11^KU{4cXd`5Vv();@ZEV7|6IQ2Dt#oiJ+y z0sQ=VZJ*=eT6G5&;u9`|4rn*V&6~$&FI#|@J!haCJxiS)bK3N&p{{rqh$Ws-buA)5 zeahL688c@8MZzN=NXFd%Qv4nIj5{??AJUDqq0&LJKEOF&a?pVn1s#IW%jAMXs?!Nf z_e#r6nS^9&1X|eb`H4r@ip4*R|0Di+J*yWdbL($)OPNfAdD6`>ub?b4Poi{Zk!|Xw z;A0b{--ClGM6pmAJXU zc!~m_(w(u&mfpVRQh{FP7>s{g7-Y&m<*Gm>(x8#w#B-qp63T+nQ{O*5D`^`QgQ#fq zQ|?QH%aB*7)4v%?*Y3DIptgql0h{EAIv&RD9P^)EqbNfZS@arS?P@_l=z>p0qv z@GbdrC_*BRne7M<=Vkk27oW19M_l^k&YM;A=u*deAG7NMha5H)`iA~cCgDPrMd8Y+ z22CQW|2;0VJGWu$))g?cuIU8OKWxE-IyFAd^yI>??U)t>(sc6#j%Lqi=A^BoBS;Z0 
zc&9!9SwISqXikgLPo07iGOmrhfmP7IdJ5>D>M#1NV}}l@u;4;?Diat0+)wnZPE*ek zx3ffQ5&Oo3%SKo>OGoUD(jx?yj7sRF^*c092__d&B}vf=1iaE z9@N{nTI)M{)%)=G8MlnK0?)@AdKu-hq6o);vPvc;Pbc2JkC(MbWvDXHS{gZ+IQ}>J zS2HxsDAkpuIM9~CegOI%E;rO6`*)vPD{!^bZrrHKNeAE)x|*Ck!#!wLYKrEDqW}0t z-g{;}k*Q!=0{)X>axNPVL4o)TqPs`>m_z5xn zxO2Pv#lq15zI%!%(I{P#pX_M4K-nzw6P#$AdO&$lAVA9WB*q>P)yfa9nC0En|H=av zw4x~x=$v$0MS<&ZfmkCfGyjct?!e^J87%WJ3B#PyKGTblApE%?QDiZi(~RbNLR+bdHSAf3v2cH8%-TcPZ)+wnJ^eG}?h2dELZhXClCd8Aq~+n^ z_2K%+_?_~5X&w(Nn7Y>0-Cc`G9fXUKd*He^c&4LbkYWw?w!eb;IV z3{l6{2!WaW?*A8ZjjnJS3A#Fr?xewjrt#XO$B}JYwz#r|2vguLSU9!i0n}v9fV@o@ zh{A3WVgl3XvdeH2x!8EGMgdGPAq#ShbZLU&t^6h`KuM)uU?L-21==E+Ot`seFjA5~ zGj^J*E`Y}Lj1dI*|9RZFJ|Kt@9@Bz(FPXLnz`JVGrVYfMTUbX3`UJ6KIq(1G zEgnOhzCPr=k$Mf(F@k>({JJYpI75Zs+(&6@xc=s~%v@0jGLGx<9=oY$yrRtO>=|oJ z0OT_vr1S^EZPZNtOlCt_PX%+9A`_NDT;^+_-~j_*Ok7})t6!tWGX5Gp_3$V-c-aOZ zA&7ZmtV1K&4_z?YTIoB{l{Ry=P_3r)AR}ALh}dB;f+b7vY9V!ac@;nEr+0!$W)KP2 zo#vPaU}a9f!NJ)fV>f(|<+Z;xW5V|cR9eGjYrSfH!l-FMFg0`-1F0iI_{07yl z53bLqc`Z?AaAUaVfn48kft|=iHwDB57@q+0aC>G;1)uS8LrCp}E%E8n86w4q1+dyh zYEXgUd*YgP-V}3}rL;2GRj%UQM!{n688*n$;b+SHPr4`zkmOnbW>QJ=yRn{=TTb)g z4;Lbw=2_paa$4j+>P3eWyfS2n#uU6t2V89!zQdMw*ujl{%C`^<6CbM;B_)ftE~Y2r zxVzDbOI?V}r$recq}so;&yqEa!cqZKI8acf2dsE5QKfC#)2B~UjVBO5R#_TrC^=@3 z-f~)lYqI91le8Mdf6tS#vCSpw%7x=`)Dn1Hzrr(4nn*&4wLxGQTTcsF5tlCM2su-` znQz+K7)NfCzYVM;5H6*Z7f>9xF7DN5i}0;E@a*Z{{JWzYuO@?@R#=H&SO%%3Duiz)}!TKRPwyJD!a>etzcke!Z@j{!vMy9!-8=*5n%r@*HiabYLe-0eR6cJ)h40U%1M(pV@Plc~X zjPP8@&`>mCtfHvKXI58W#*LYkx^fi0VXHUs`f66zm*YdF!!>iAJh>d)7!BTNW;>#f zG(_l+s;nQ_wCyFm8Z|Q>BziV~I^*2mT!89S1&v)TcmS%PQU97Ngup~TPyIU`NKNU> zfK%VlFa#?ez3^44!%W$jx*Kus=MftYEvDWU{FMB?3=OAJLGDo8QJT67=oTnSrqf~? 
zEWuifJwAemQ=PF{&PWc5q@kwT6v99vYIY)f7_?*>!0bJD$MqG#<*OR?21vDzzfgy| zO`CyEo(}z2ZB+k6<_g%1{fb7*6LB2?ZP7hx?uk_81MQ++7ZvN^Q|Hiyc0rD7 z%G!DE{>r(f>VN$>gcr@yvG%SOCOAs>X35eDX@zKGln(6G zE8#1Hx^m^4Qc(cIiseR(H}^ITSk9kdT7C=DtlsBofI6^bjmAk!;Q|SsDdRJ^*_wOM z@I;!dviipYBi}`6RZKQ0auo#-=sZzRXPA|vkV++e6M?_Pv}63;wz& zM%BqorTijSpCbOA2Lk`LwF$3%1C%`E&$*Nuw~vChJ?FnHdLj*HjN5<0poqAL{<$1~ z#Sbcy;E7;y5rNB>pJN)x?@dF3y7IHER;$z9rzsf^8cWN*7IK?ZLz&L&P|z`JLmk(o zS+kh)GeE`z(M)iCMEnX3<5Kss(YIr5Ds+kG(Dx_u%HO1wTBEe^5Q`W z(4wTG`Nr$?f;?xAp%|*DD$pPcouUMJyP`nN5zaWFuy(h)`ue^ql;pyNXS0Nsg7I`^ zuHUO`QzSe!{ZEGP3v-geHX%+=5nAcMrny#LNq4D8gg661vG0O@mrt^YL6c*T$nb1d z*2uC6zr$EcCLZ2XYBph$@(c(zHua}Yn6Ol^Odw^rM9;?AN=OWjlSPHKX|Dso`Y$Ec zt5e5SR2^W_BnWLA>s`k5^TGy)09JvbI^sFaT4kSC*Bf-}(c?LKJjIWsmv--7ZRT$^ z7A+%PP)oZM zhUJt596C9rR2+Vz+H5LtvbSH#(G#K#-RcyGLBFfA!(~;OMNTSn>TkGHg;@1=Ju9QTct{6XPL!yI^*EpL&5hMWL01qh#f+C;GR0rL7_=4TQk7Y zVJ{~gzL%+J`i>nFVXlTk0u=;?s#ZXxxWnRb z!&IAP)rxRU`S2suracZ;2mj$D*m)Br+TPF3&ZUb})H4nb6+e*~<(5|AO z#rh0GwBWEO z++IS_D}2QKJA2mNwBwd23Lh+8N-af0o zfRNng4UHphLbq>U37qkaelDGx=ur%qbr8WBig&%xHNRT7YE^+td=VIxDU}Y4WQwg1 zi*zW?|AY!6MX|aj1M#Ett=SEt+2xg+H`nGgv-q>Tqobp+Vi+vvk0N$9ZS!v(rt)T{ zNvXUHAX@QDN-~3lpv|8L{jd^S6Ee7fi!fE5oiQqnSE(Y|IV!(Tw`#+yC#!G@_3Z)$ zk0ubl2Y)#W00b!8YDRcakS=xo&12JC1R4h2*s2a!mxuGF!S7ZIlZ8isHU;Q zpma=_h51gPT3>MF(4n(r(VIsm5BI-qH|RGv0lLAZM2O1gKw9PCSoQ^X5g9BxGcAS8 z6F~ATFRpkm{)Em3l&7Wa=E=%?l9)e?($7+J!atceGCLX`05?y{R%JJ4n*vO;j*Jc@ zZ#{2?%4!0S7*$Xu#+wt1nZvk&i%22aI3Xcr`61++%!KRH%^08D~!JF(~nYr zpr@K%W`{HX7PQ+;8%~WaXdO~dyVW0GUaKe7)TlFydvUUwc{5Q=1ug$TH2?T#tfSai z3gV|&y&*^hLK6_>=}pv+-2_RS#6asJX-Y$(6*$kZPiGik&q4^bNlB6$m8U4qFYp-u zFJJtHcvr!K(@>Cxl}B~Ppe8jwe*|F%cCOSZ@=V~ru3Rn3PtKA!E$xRNr3rzW^GL8~pl%t`qFI+<&xYC!sS19MTyuH2UGOxF#&Cvqc0SoKLie(nf2^e;12fkb z<^wd=iXbbu=WuxB)Ziy9{-)GgysgZzQEh@mf8y@0p+LSpk4}oRm)AJ*?|w4gQ;yb>SE?KW~e~RYRd^6u~&J+Q-YZHa>610PrM!#jdu7*DBig<%E3nW_BvI?BQF&-?k#*VrGInWNkig_4}*Y7R9lJTMvSN~gJc-9H(6e$ zQ9WxIREc@TMXg{%2d=Ro*)=9!U=9<^dqBLP!BA7J@aA+n>ht!~-~vGYS@;DZ&aPA* 
zQ1C$I-xAqjBR*Ua6g9~dWQuGqmgEi`k$O2!R&~Im>c}GysPGh9BJS7W8t~zz)xF7g zECUrC>1BG!7=Rvm26I7j*(UR&R4dRsHz!sT%Zj`$GI{*grb2_2ylo3; z%yl4FXs8ivloGYNaV;I2fsjzC-2UXnh)r&*E-jfUP#y24dKnDu=mfM&6sN1ePl-37ZPUZb8AK1r zY|zRMVh2LueSL+T$u{)D!lEHpqJUWq-jYicwMge&Rs-%8&Hw_i8E~$*_6AEq6y1P^ z$clnl$7a+*cADrz01WdSVQUKk%iu;2vf*l({4$@`RN(4qE?l_KpCoDL_z8e{0-cEB zCn6CLeN8!=#@+wL!Xme^2ULrEqpQU2-P;6PDbgXi{O5DJ>ux?T%FDQl>VLvdb8~a? z8Q_FdCM4GtY$dq&KoH(CZx|1TCUtqdF}2)SyIQ49+qTc%zFj2*ZkTPhBS&tUIf36? z?7*wwE>JrqNj;TeWxQVA^b_B&4WL4Iilos1mqh#OFe*FgThd%(qrTLN9ZnmR~dzAIa~IaGrBbI+GBZwh1%?0vqFmqPAuvH=?YEqT5-HEG~k#rx_!&rpVV%snwI`yfdWWm9YSw?WtqsWgJ4dmd+gf>PmN0 zewQNwdVj#CO||fItVqpBcWc*nU0Ip+jQa)Vad^lsl-5Y0Ru3 z{JrNc|NJAznppvH##q+K0FEityr0ElVYzILrNjQ3wQ5b^@4UUT&b6yog_krwx#dBI zZ(>z2?`u>yE-bDbsi;x7FnQZt2-%#Am-j!-?1hYYd<<#RP=g4N5n8D zDx15Fw6oKv*OP{k<`IT@1SfMOZu z9)x0hYQbll*cI*!W^L$^Rc_r|SSt+pa z%q=5nP}@$Ojy$Rh);IRAp#d1PwY*nO8&ohar}L%%ecJ!N07ti8!M!iguh4dDag?dq zjLpm(VP%+Q0|ic}ZH#cYt-!c}gRYG;&$F3S;6_Z=@*z}WDga%-;!Z_Qtg5*be)JcN z*X5(~=d2-R$3K6V=&41W2-FfqqIxt@B7sq}&Yc~`FzS{@Uh0Y|Y-T4p*ago2^0uxl z3`Dpv=;ae}h>!wP%qIzOIzvVs>ACPrCBE~9#bV0AevPwSXlbM^9r92DM(*CNC^u=p z{qKPBIPOVEs6rco%a9CQqz}6`lPKz?e|BX@3Sf#n(bY0#g0R+5={tDv-ba5IlyP1R zcGBP`J)>$QqE=|IJUH&+;Nioqfrf~{B5RO$$+Y4E_4}=aaXliQ%Q?H%V<$pHaw>x< zV`%mKBM{&=vrA4|)xni!H>oXMN-c*w%qf9KHJAMQ-r`rur{M*k$Kon!VcjIsP?&Jy zWP#InQMRoDOX4gbleCJ$UQJzQHXJV;hN8_MI~T7dNFS13->yPV%4O17px+>=!$flr z0-``M-k|ozcpmprn+(Q+YC?Z6Y`!HnaP#MWf{*W6C`osF% zVAUSrB&?X4a0?;-LQ=JWH2|YHWHrl0`cm78hD?-Lzuct0DlhzVj7buMM~=Qw5#q^B zblKug0ZhWUp1NtXyYwn5Zy;!4&eH;ri7WDcRqpN{G10ujfQ~bzJ44tLDSoAs5NkDRA)dHze1bA1bfBkEmnh_>%IkN!}U=!;#+R7p{ z(4+7xFRKEFWo4wa48Y@>s_Nj>d+mS%onwc)xES#nrLBVrup2b?#uWY%{+b|w0h>OS zi3bm+3Qc_i#@8k)m3RZdq=JkzEOs2Pk(pK+eWkQk%&Z3NUdABM1^9{KK~>ZNvQ`Pl zko5)P|IP7X^xw?ZLekB);l}I57l{VZ0(L5uq)=jC&!dG9z6WuQaXWF(>3GO}NpTgL z;mJfQkQxBUU$Au_16Z?c4ge**k|@QsG^>m&Zc#Y<-_C4^ailfX6L?Jp@XfgR_zE%< zj>{Pw4k(`@V8YEJmZ)+mE8PD0qmqIJsU~H86Lm|pq5Ah)9%4EoUfb|-J-0Je 
z5obg&9C^8juRn%JbRoXDfoP#4+cRV=UAeY(u$kIjv7y>PL0{=eTu8jpW1ocINN*bsTDkWIlOI#HO%$wJZCT2R?ZMo&WdZpa` zK%*#UyiCBpmS;&W?!QLr&tQ}E-wl5L>(}xuLBNS9y~KNg)ft2hI~{|S7g$%(axuC$ z%I&eS2M~+Oq+dEI;CFw6k?py*ry>BS*Vkv`R2A56emi$IB4*h+|UW^EBO#6;_{Q{(==i6gf;bZ)Cvci3{#wL%!IfrD-b(As)%6`U2sTo<;Z3*(F`MT?b|4&LsTV$J zV^U|y-=2Acrd&Kb*kdG53R@Cf$zs;ryL;DCo&cm3t{*_!j3bpfR+$3B@lYr^T`FuoUao&Nc9 zbCL$|kv@=iA=nG?LzE4XUc#NhQJsxJ*49EjdMY2aX^SwI&}W<_ft(31(cW}0^50n+ zFly^?ymI&nLl&oy#w6{(2i5UB31BFmP^-0*)^V9R?}wPbm3*p&Jy~^%w)3Q?Zkm^Or)4u`eA3-Vn@aBX$!o6*3ZCI;k4b7Z;G(c}Dg@#7{qgvOIEHuhMF1uziQ=Fl zbuuT(NDyB5H50QLi%BmyWy+NDp`s6(W9Q^#&D4fSjQX~1#B+yj6fucV9?ticJo@8D z?Ww!uPQH8Slxqs>B`7M&U8i|dRb6yiD>-4C?jM6}fiG{)b#0VTl3Qcy7l45SsG}nx_gfl+1|Sw{I1dyF2B)XosN}RNbBb;yp-} zh`PC%{!H?TV{iBirOc+Bmow^&b3C*5m^FgX;hd|BOiVl~xU`Tgd?3*fi)Z5?s7DW= zsR;cTPRON8mt6blMtZgRO|;-sM7}we@i!MB5cnpe2!^PPrD!f{wjB5QE()@(k(FM* zehn&?j#gKkPoFL2BV`(wKJp9tF&3h*d&#G|PR*XQ7CUEEoxn7kl7do@kwuInBnT0s z;NqUWHd=i&&92f?9KsUE)3giFnb^_Ew7o@CMlDR=#1jZnS6{olC&GZiL{~l9T|1kj z1oLV1iOUD+V-UIZlsZdhMwU(c(8c{u2omhj$qj6}!9M-o{;{~TU!gCdfiFDxN|Co^xw!p~>K=V*A?@}@#>NIgT@i^ikH{i@eLcCl$9;7A-l{y0Y;~o6VPRoa zWUd9zWrTC<)**xH2960r3&7O_Cs#2l7q@KIi5*MZ_3CkMC>t*Y`)OU$ePErW&#%hy zYuX+=YJX#(jFW|nM>r8`grj3-#v*g>C|Ffr8M;&H3CR;)Zz?sKh|Kr7oMGrxRL#7h z;|9#6#o$#vkb&V`AFfdVL_e9<=WZAc`pI>~i zSL{Eaj`hpyyMTVb!7G+TD08XQWgj=Q@&FGRfp$bC2}u~o(4xzqwr=14oC>1m799Hd z=`AB}s3uAc->icet^5`%@~Ry&9g7_;z})i!AIht@G;^k;lVX`z)C6?}>^~tGK+G2@ z+pL}{R~mP|XM4ubS_-E})^J4po~oLxC8UU|Pn3dCB63dquH-yEqdJFww7cxl^xlej z1h$^w7Sa;)4IiDHTNTOtU3biM+$K(xQci9w#Q?8)le?F^G?PI%D_nif=@BwzXi(fs zI6JWN&SJUjN58W5H2Fp@gv`cSqT0yT!cAy6V!}?t*X+(|bkvI&$ZN*PUkA`g+1cpR zs5Z4KhGIU-&8(ht0)jC=CcH3R5iT%bct+8PdMq8Ph5^sU-_WgHAoWcZNOQ}Dtq4MkCguWM zD`C~D*?(&@S`ixg-C^UW%ZkYG?lW~sz-3?DVb8Dz_04CM2j-Dc1cjg!)^fQ~mdOGR zC|zRC!DYL$Coi=P(lkfqml_XkebwPK1G^NemyxKvJHoM zt}Dj_O&2AML;j~~j%G0TBppk^1+n8w(bjF-CU7!EdJr16w=gCajWZ}((9)%8EpAY6 zFQpPoLmWZx3$)`8AuyYvm;WHC?}7)8hsCgz$WyTa9-Prx}3FiDoPWME;4kcf!B$UQbknp{>YE~k!00gcHw^u)m*;1UM-GHk-Jeyb3mG?I; 
z)xk3C6~9*Kmg0Ui(Zl2NKZ$9^j8;yKwHk|l2y9uz4TyQ1o$Esd(MP62QuF-+5j(!EWx!q5eed=Ona;2wC*a1reIr22rgu)Av_7Zs< zV$p%%#a?r2`y0A{MygEcx+K|WKIt$sy|p?vVnzvD&DX7%BEcXMu)`Os>uP1&I`AI@3MAhrxq>Bq4)ELNs6oF3d(9+CluItmOOA_gs-aGNtgT5 ze(o=LI6^6tEfZj=t3mcex-9+`#J8`v3RhG&Fc2S4=075-W{f11yIHelF=i`Ad*KFg z8ZM31@grAOl3sWv%W34rhu?U8=+xD%-)`q2m$08%$P!2?#13CY%o* zzTnAc2wW1Y5PypFhq?{Qln^-6Ae;-qh87U{$%k7rc0JHjE%%yTqU>N&ncWt8)_R!;#!ST*<#Mf`x63pAeg%E9OWhQa0k5(vj=SHW(ilGAT(6OUXrCWC} z%%3gdV+*F}k9Pj9$xIt7L7RKH<4Jwux`&-dFHBlQRM4 zn&QXvkwS~rUNMu#vMmNwVBDl%A48b+a&Dpzj2?~qM*xTY?9H18M?K-oneqV)){aH0 zBwkR>25(Ab_s`u=&AE5dOp>@OQS4RCD9p?;1js(Qedo=LTI&}r+ zSPB_EDKsjoiCCj4Q25TV#G?;rXc=?HaWl&l_+~h%)PXTy zTVqqE++z7hxgt6!x#mSh((^yXYcUl>lmvZIeiuhFFD!A!rPS2jk3QubpIJ_jVK6Bo z%avY6nQdXR-rSd0;*vFW%y7paiUm=QX=<8!~X#ze)BGbup{Q6Z|h&_6Sv?zFjg zO5P1G4l=>$b+W%ME~En1tgN9H1Xv(*pbzlDG#XRZnpaeytzBf6)fgTf3)rX>+2^S% z@CK#DhcEBOI^(a(0O|Jpd;3OfVZ35<<=v}S<%oTr{elP?!VCj}loJn-B_b^Hs+J-= z5X%3jRO&`Qo$2{G6y?O39&bH~|I%SH-i1!6selBZ=WjA))fCle%G~Qli@#}LhOn;I zM2Vc>$_$lch{oR`3n-U@jENlq_7p+~}gp0{}V3SiHdrnEb zyFKW0V1`G^0IK_Qr{;fRNTEY}$?0nZ36A4Qkt?_!->OAhRqguq#e0tb>BQCfEX@;U zU>ldy^`|{K9Z1R-QZ*jCq8A35_{OmGDWpgRx_bo*@BnbF#rap%J$8Hzy(dn3jF{!e zD@G=BP&*VqK9V?3XJ==t<7Pz3h{r#{+7y9O+_>=Se;TP`Uk8p1`WU59JXwR*kx{X5 zFU>jmF)l7{H&q1RYu#cAl^h};8$3KXr}KH#IKHUG{^{xBI>cyXabbh$PbRa}<@Qi( zA_e#{d)rJgbFexe>xCw-ihyg(39!R$duK9X0T~;#FkWt3UngdzO9KKv6cvm(HSdF_ zye1)rLLH|PTI_=_ZU^YVnk`R^NWf}I=BI2-6_k2cPvqeGx6)N!bIsdhUQVkYQ!i_~ z+0-Cr7FA8`OHdkerFE$V2=zb+?Pu?cii%3SRr8X||>d?U_N! z{-PP^fy9}Xw)OP=`;gEj*NmBU9dAvVic43o!c{O58g-}MoGl7oG0#_aGZN?Wwdr(Y zPx9QthGL`%uA!*_c+S9rkBXyqz3m8=q@<($Q(#>1`pPH%@!;^??@xX{UIa2JlXi*? 
z%zly$(o1nD+I9715wOMP%&q+q|({*_KS_LWOa+l0abGR9DEfzZqH`PUE`EP!$ymdKIK3J6bf*dRx|fi zv#KjaCmaX+8x>BCWoJ7AN&c!p8iGyTew{h9H|NX}BbXkoJKaeqU|LdMO#ku4Td{i8 ztzpl3epoDqEf~TC1fJ45EIWl;eu2@%;0C2nzK+K1P6se@?4(#w%f{f%3c$uPh7VCZ z;AWG)vq4Nk*zSDa7yLU{7ES9)2aIu+-8Q!4RHBfNS+nF=oPR<>yX>RhD_OI z;kjxlGR-A!Nd})?(q}Z8Yb_1LX7eg7O}ZVzO<(EDSN}k2VdqrX{8gt`c2p!2et_?} z+5DG#*0;ysTYogm#u<_Hpsh$T#k<-13ACdg)sm9YF5@XHcSo$sEyeeOB)`r6RoK9$ zQsQxAG?XcK5_T1cjq@vgbU^u znepvR$;XxInLylO62LR6W)c2M>A~TP0&Hgd7r0|b1Chpv4=&Jhvsx4RhgOh_1SF`B z!#1Q)E6^z@3F1gCOJkS{wYj7&GK$m@Tv#t$Syqr(FbKa1c;s_qAv}2D?g+F)bb?S` zi9-Y49Xd6KOxKe+E)Nw9wdLGlCfVUWe?96Bze!7+Z`ed!Ya(5fAH_Vf1qgiKFYDA* z`ntHBIsXL$_M(sB%5G-700FfoHj)E)mhQr%OM__-kG@Mu=bO4w!?KIK(s4Yw8;PWh=b|?iQlwnAS)x)cL6)X06y=@(B zga~as*33V)&YF=erBi9n{G`N0XwxM*Gayc_UtCJ8@U#n1y?p$+_2u+^ANrUSo7oH- zwrTOMojZT!%%^!9>|6LP0S5H=;uMw2YlV*TB!;Nd=_Qm<>aj};cj5UR9M`eclD8U! zr-6*9iKU}|Zj;oBnu&IJ-FIaX7fLgh>4)m2(2prN={3Y}s}(pr4s;zj(Q&wQOnxhF>NKr$U&5KC>u36-D7>;m6<<9c(G4um8@o6)?K&0) zqL_E3{P`GCY)4a)@fP-3p7rW!(5J3Ghqt+C%t3qXs7Ji~aM0~s@BCzv(lZL)b-lU6 zJ(z8N&v_22cBPHQ2MG=RnaP49DSn*&XYi&2aiX5J3h~6R?;3spWRcWGg*zT%`~_6c z^VYU?NS;Qhv2}FxEj*;;{HV`c+c>`jF%lLg@pnIz*4X)@8jRWX^D!BbC||Yr_=cFb zbz;7CdhVm;a(tef-OkF|w73*Hq}*iqi>+qq)O5U0d zvP9m^UAR_&HdPxV-=h<`^3V-*c}M}m?u3gca# zef+oqw(|Ypry;g0%Gr28{ueXh^fYg-OCffTw?`yF1h>B=Drf%?nBw%dhIQrv6H z%h7jq;r|bWolXf6$km|*-T#Q|E^W?*Y^L?#F(VV^W7&f*eC~vI6h*D)+6BSvHDU`;18v!Aa&Qe6ur?F8OHD>r)73ehL zsi-N9ktUXy9pVT{ya42)5_ifS5VwKBUnImBaISuoWWt(e*(4G)Cm(gxFgDnblr;PJ z$||8RN*|p3bTBUCSLLT&&zY3JWPI3U>28aV36r*+{wp?l>ZrKm&D$NfowUirByZ~L zy{-QApHw_^@X$wdhd#P$`l0)$Xc`UE6+?d51@T*)-+7PY8v#+8=4+_E4n!kHo~t4u=$ zz8CeJkkScI&g~3Vbf9FG)gDZG<`tY!$5yw3-|Gfx^DFip{r*0+E7eiUdtE6!=yR8I z$OLG75@Ry-*0(0!-tWeIW=Ysa&%L=n7aS*u!=u*ohCLdirQ#F{Rig2bwy?S>EtaRG z_^y7~y<4}I<6JIi{OJ3@eRr*e!Sx7EqHRQqK@4XfnM8y0sIkC3&h&=BXD0iU2NkUT;{Bnf#K;FR7 zqnRk*Sc7TA`CbaVZh!BUhtbBo2w6EnkXlsWrzfHRgevn`Rl)BZOmq~PY;n51T7mA$ zPiN89u|%?sl8TH0TP}Msj)5Lxhk-Fr{Gv0oI-+*s>Tp+DU>_Dp@@_XRY+f2g?E=-?!n`|5lNTv 
zwrOt#j^{Epzc-D%qxb<4rd_@qKYr@I$`mn4Uk(hztl!>WkM!JI6ut^_nvJ6;dwGr7 z>(10oFQQW3j}bL8ZXy?o`8tiF_0F*2!#8{GiH*%j3|P7p{!w(%uhzi+_NrYS13q2p z|2iXV?eM+VDYHd`E1-S6q1TaV9y=JBz{j?~KMkdrG%j9mqcVOZhoAT}N*Ye6uTuGv z!P+_h0;CkQ6bxJ_BH{r9n}ikRdoP*>g&A!W${J$NspC4?e%UfnEfe#!*}r(`Y|xg& zt7pxURli9!cq#I;ZDz^08E%_)P%!jwYT%j7oA-(nQybb|4TY6|8EVH%v&)w*rGgg` zHT;Da(Rc-ZSP)FDlHWx5t_T zuZSj9;4BzETW5pD2tLES$kolwt}sc0lHJcA`B7`dLC3Ab7gz)@Eb5AFGAb^`qj8mJ zLkK1vy*(@%c)i{g78Wyod-gI9r<^5u3tAs@8Ib1}@)|18-nUy6@poY=UViNo&}*>? z2UHinHMOX-T3~nk$qr8o@5L+Q^Ly*wT~g1NUE*T+C%kkqTO*^2)!D)Egnghpfmd_p zziL3rjmqxOe`wF<0_xxGS1|WoBd2hizqtU6RfJ-v?B*r-#E&g|#zn6YB}N~l5}sap zBvkgwkC}|`j)~FXT7nm(J$d4Cry1WoRgV4^Z~Llg0t;2_z|gwKRkMYs!#ueRHAs zpi&kGBfrC6SIgQxs5ODTLF6sUbUV~`G4b{;J;KrlJ1*XpFfdQnb+;ckV(1S{@HTlm zB`kqEg!}NXJ+}?-4q~AWjj_dj7}>Sf81-4h{qagZTpX3(JK?~?fe_At^eFEJP|FF9 z#<4f<)@@YtDdH-YPYWph>am;!ga7Qq!9`>7YT_-PS&@ysd8lYa(>~%Uyl2w{^?Ay_ zX1snNTO(F*;yQiXuOVO?vlB%DP?6b2P-}S-SK)GqhMiLKJwZ-jB2L-1b`&^qXHBY( zCuqvjocACs(qfaNd1o|qb_-l6(gaA8MT4jf^{$BW55eX0BuS$DI3hig`H&Fwfkd{> zCUS{AkTg~BqKB3}WV`-i`z!WwxOXWB;Y)3`4PtboD2&VC*+#E7tY=PedG=+*NU(N~ zYzQey*+4OaW4*CP$Nf(5i4R_k=ERKMH7thKvd z$9?Rbb>4d-4w1xCcO>m1lw15gueRXO7O9KK4dPFU?Bufd4^vvJg z2UTgHDi8;c&MN#SDJ(^T?D30S0M%PH@@z%Ll|ioPzY(vp;BwTQS+icOZLOZF29NK@ zU6wvID`DJ92#^{3D5EaW;YmX(_Hlnd&|D}|_`N;F@Pl2Txcf8WUP?VBtHi*d0Ds&l zpA8d=J~LF*q!vN3tEJFNBOj;=87S}}XESu#(4O*{!G1H%=ZTRYmVOrKxxrYM za$X~my~**#$Yx*cRKcS7`P_=?oKSA%>bm<;o0FXAZ}93(Nj)*T^UL172X(bYOL5+i zqa%JP6rs9`@I0?s3~z5Ox@XLdNe~(eCz`s0aJW3pn|&1Rx6T274>g~3Va2Se8^Xex zb#i^e=bD!CdDDY%xHR*OK}`(+{ToqVVUSK3j=%j-qF($q`-n<-jLy4ro2DL)rYfmy>E42$$CH!G zU!k@&+_x;lu_S5 zNh12kmg{oKREii{j=Qsls>4N~!(2pa;1lMy&c1FJkaCw0WUW~36w^K_&yegxC zk==OA;+3buPDa(SiaJTFWkv5snV&v?u4z$MVW07+c-x?lKVlv+;s`U(A-=8Kv6E1@=Bj@?$l4lxBaWjv)uXxFuG=z55~u{ zH$@vj;Y1^tO6GF$Hu5NZUYxv|x>HOBc9#8{b>T6-4iGV;*(5*{o`(>5j=iZh7 z_a;p4BL1Pcw+A%Cr-izZuf@N-^r|^s3W^P3B+y3Rv<;NcQlu(ohm_cc!X!{qaSK%x z%Jkb4M;|bwbCNxLcn^1x0S;tgNO@0zC&5eCJD0-zB34C-|FQ5ts^8o_khFRT{f3b5 
za9oc+d;>wP5H2E8a9$u2IX$=shYSoLsd{P4{ZIN{I7=8A0l4D^08 zmH@{kZ(8ZgnFXIsWIHajlaUYl04CGt>`&KwGk^feJ0HLwL?{2m87m`}k@sW(Ixy3#fx6d7s#!*apH>T|Kv2idjqQh>B zBx5!zFxijv`Z(B6YdFh6(2&*-0GxHGn=C_8Ot0^Le{}w-`i$^rR!gMjU+tteW@fV3 z@r}r=7?)3=^1XS>?)9r}>a*j6XIKX&k-VAV$ybT`esJ^);kk(9Q*^WR&3Xz9>A19X#HcL!y2(yCd#fEnqe0B6C@_(p zv0Ud%+Q6J8r9WIqFiyw+0)Vt4GV!i6_kL?~nbt|{K+o08T+3qM)4+gg)xvK21GEa5;`KY>DVipyHEe< z@0b(%D$u+wF7fEm;(W&OkU1$1H)>{Hd*jn@=CPCzZH$Wi|}c z5s-!T;PC*Ve26@9@0)dUIgxk=vag<1)eg^zO8NmJXBYM`g5j1F}T44@C%Ux0=7CVu&4g z@6#GGf_S!oQy*VKZ7xnaj*EWh?fltI)(?~=k+6-P!glQVhHt>Qusk3_1htf*N==>( zb))bW77{E8CMTJ*x|=aHHAJz}IPulhtpOzdn4|j(=9-6Lf4&=5i8vOCSNFkEsuy*P zNwqfeUB<++weKp_G{OMT`(cFQTGl7<;Nbn#`~n`8SgpfuJp>`Chz&xEdyPQzB5Ob3 zY_xtm*0lU0_5})urTMUMvDdnne67W-A?=H_GEo*)pF5<_;h zshp?5JAwOS_!WTCpZT4JLeR&?DDr)>xL<*Lx19Mv4n2l(@b5B{_cPxS)L3Rp6f}SS z6xAArd$QFOu-L7wg1$zNX-!P=$VB(_DlpHFXUrBkELZu%Eo-9+Zd#7<*L?TAepp_S ziGf(N@#yuwj{TDfJdx1;^}>;K#l+&L7+ctW^F6>lJ^d}WBHDzm$-B=v6_iM2YjVUQ zer{8n7hPAt!h~ld6>Y{_z?O6R*7&m{FVj>8=}dqyAZqO-2>$GBC+a_u(1@UrMtcC28r9Me zBSe!-iP5IjlMUBlVVQ$n%6Z_WFBEGbr8_u;I(t+62xsLzU*GesX+p-2{^N3%y$U?h z-P~O7)oN}ij0w-=$P@F&C6se)%k>~*oa0wd=QW$`dIMxBE@>kBnsRgZX1!LzI_5*H z;lz|*aXt?%#|H}P=TJ!YIhr3bkxV`bc$ZKt_`S8-kH!ML1V&x;_Y|RU=}3pd!V=YGheg zck7LcBPTRRc91KsjL{4e@~_>(Vbl4<`vy>XA|7)Tiq`lClrLz}02ngv@E+oC`e3x++#7 zmZQYs9tqR#8@mwe-JLj}{!#z}nJI)rz8BsBqrIZfpT%Q{UnK)jsT)xAb{>~tFCIuF z%Yj<%HYt06q4oju#Hz*ks^@!K~!tT-Pi z))wH!iXs~gi;Gn$2b~_k8IqL{?6Qqy^m0FWMiU~L(ig0{}5*DfuFqi_ZMLxmY8`Y{**?=l1kF6MvPM0@G(U zft|!*_PGKUVZ|NX@nt_ERv2F*fWQWhwB4~3o`nWvt(q<}+v8`!MnpW$N(ThpM_Abt7?%Ix#E$Wk}=5G0)_;=dVt*u)b6C)EWdq zSpDnm{^PwYg_1Rmo{6y}=Uhq&MPUOekh@v3Z4fnZg)89gS+32ZO^Br8qY240Zo|)* z*jQ0nT8w2ROSP1WE2^w8bcWtI9cwzzaOyAm>ir~nmqqbqU* zSYWEy;xEaN!xf4Wf`>HtjIin&X%C2JS z&+>d&g}4&Mb?T!>B{6HpA&6o$({lX{(P8n^)Ov?M^QC++Esrq&ij~Hd0gjQy_JBZ7 zpSsH8H?Hc&(JdHLNd*cS95(WZAC_f}T-o0eJ=!L)qqOGWGm1iEkW{P49JdTH*YZ2u z-UTj7XJ@<7L(P)vEe(mN#PsL3bK|1hn45zh32huobSjcAj#l!L($g7hAW|Mg7GN^j 
zruGU;@%ERPC{Q|4NAUxphyrrL1S2uybxTx6-M97)62Bncqt8#O;e#MHa(h7q6+a?5 zvMsb@4NVSIjNA1ifHmf{01X&;>DqodF%JSzWhaMT)D9Au98XUMIXFjOEd+02mrza; z*x8X{O$HM&UEq%!1W{afs$J@U;zrch9s7jbx_2)f_@ok%w`{jIYPd)Y2OW1#br8|8 zMWX;lCc?hOlr5uZ2JR=I$+ApPtSKr~s341%57O`lTSl52Eo3G%?^lVy6?LwvX)u}7@cp z0xbI-HoCOvd*0^E>-M?yW3*aN1^a*V30T{)cQu~2j_{jq?(FQj!MRcY?jX90ety2U zrRV&P3II=ikqt0Wc|GebAh{T^DH#^a=sF$t{9N~1{BD4m8A1fHwj=h6@Yswa?+qkL z!PXEu=!+K<0dw(N)q!jdJ8%FHhx||B;cZIVCbd)PImey)gQ3#xGdBcIs_`()F0 z4c#A_seFCbPT4;10_ZGLEJhTHA}s^=PV8jDGIj1g`AJ{ogx$FXZ9;mRS)Wvi} zc~4Iu5EZv2Va|4vC>>?Rca$?oqM|Q87rQfd&?$;!N2Ki+U@U~H>_)?@O#6(F-C1h& zQSPP0iU{ycD7obnSxhF@;dJm4xSsuqZFv5! zrpG)>vwtT>{gCan5aJ0l9K}+jCQbT}N+>D464W-7)px&ra-M<99DAFef62d9*=;GZ z(J#~4bG%wC8tyy{gb$NuZgGF2^4`j&Y9}!#G-djzw$|<1b)dkWq*qg984eCG_PTV* znr_D5<*=(xM&HU77i@JMtJ>@-MiSd~(T{h>Xj$zG7JvTy869K=U@0iN0}9^JLj>CD zDonTSJzJiGT+5(c+Hw2=Z0rdOIP{SeDJLt{tG9j3jp%VszziMxX{mrn0;w9tOmq6)f7pNl zK`I+f#clfZ?zD$7N2en01)W=VHjR63MIhxPIgZ?JURc%9{_7{rmJ9#nBRm89Aj47R z_b2YwE2(;U<;s+ZmKTj^C1?=|lHnu-KnS4c*hH*iv`jjG3xwPKF$eN;%l zALrQe9Q}{PH6=B))d=#-GHU5oDZM;pgbUXl2d_%>Osnd5H{jHu_nnrKa;Aw_OP);N zjp(ebEDDZ!1vf6|1sJXj2@zL4yjk`(J#LOBfkHs=AYcu*Y10mD_?n!z>nm-T9f&m) z418{f75GUddpjKBw7eAzQ)%zhp0o#X=IaJ=&JlCUNLR$N8UgLk!RaC8fy~zx^)z-9 zpiDU7MnY7TJp^H4H94W*zKU#?p9M`&@J^s|`i3(n1GY9tu5%S>%VeRv%gMrgnM=lb z}SQSVa={9=nitqfJ`JZns zp4si=-&_FK0Uz+Sc@BW$n1BzeKSx%13jN_eR6-$g?33<<-1T1iVq{erzTAjD)R(G5 zx^RYMKi|v4D2ois?FGn;AUWIIJ8+4LL4@wCM>Jx?W%483y@eIV)HE!owr!VDr#QqJ zvTsbUbjmvPXp&8_t(f=ieZ-@x{h*uP!Y^kLWCxq7E%&v-YdSeWCK=u0nv+$ikovRx z6!^61u^hN`lxt%1D5frmP~FDiHFni~PbA^hsr&S9ctu~(qCSczfB1a@HbiF zuL|imGB4Ub{?YUP4}S78MY~YPwhi8$ScWo34$Hh(t#GZ+>&4?k7t%fm8b#+@{z%T| zP83Q1hpqE~%dvmM{*_Tg2-!2FVP?+~g{(@$N!$nCX;k*CdCNvJQ zoRG#=$Bv^;PH0`5^RNcWa`767d7RomZq=zL`HCrLHc*yY=(}AG);Mj1JQArSppS;t z!u}p~bwYk2m~@^prPU&j5rIXVkx@SvM0SsVL))JtqxM7So*0DNyzR-^Xr?a(1^pt( zXt#RXw)@DPhV0_>mFy3=u|@KU5|&6v|G$`Yj|_ytk;1`tEsY$dHJ;K)u9v=GIwcF- zV-a40NM>}b_$HxEh~*#C5Hg3Lv3R=qA&{>{NoPU033<48knUNr35I4f;p7}ODo*2k 
zb6ss2Sth4HeToFjBUA<#N16Qcl@>h5kb6FaLqWtrFNF1F`LVu&cwaQK(r1)Cnop3v zO@%8{Qvg-svb@-0uk&a$$uwpx9s$YgZi#Vi+osKxpVv!W0HUhw&59}EQ5~{>(w3%^ zyi(%exXs1c6980Q*pw;&fz}s5M>wAS<%n$83s7IZOMms?a0iFi0Now2m7_%~hg!wY zh2mM&VH{Kxd)C*O+*CubEBD95IRPW2f)Hgs&QId@C4wI60sZ#vFGw+o=hIIBBB|MP z`ja_7a@BaTl_RYgYkJD|tBi*_?3xwrdg4Kvw+W4HoqHzqD;fnllETPbP|0UqFtC&6 zC%1&or>|g9Tm(C@d2qA&Ma(U1*`Y&R#sc~f(F^BzLEexm72KxkfajJ>oZotBB-v|C;`nVE-PK$Q<41jmwy=q zBI++ID%JL-3{1PSul6#{;UHFMCBMZD@XVc)-FHA8tW=wo<#Dg?Ik#g%xKA7CzYU z21qqckHo#=Wv9PMyIJJtx&~X@*ywiAAK$vRLBFT<^>-$uJa6}rO=_s+GA%OmSF++m zb|}OeQb%9r-&ieNO2elKH%BTlNMf9WTJ$$661K*qEJ?_ze(`O8Z`&wzR#=6dNbIeKrQxR{Buq{}iBdRs(Ht;G?)h8%UcxzF^u)<1%7N9G? zh4db9@)IpRgP&njnOlsFO6xCvL+B~sJynHP91B!_n9_{RDz4aE&8pI$uS_&&^kZdW zM(KOgg(PuWRFgKQG$Ke|-lm>|i|BO+Nxw%Y9yPWA%7F4va zECHI6Dvs6eykez0$qP(*UB;$4#(dl)sle-^=X6GBUaRTv?7A|n`u>sRqv`vHZtm)+ z&M0W)ea=Y~Jg`Uj;kV;gHeBxlZrPIr$Zj{@C5#{++rnID`(h{Ay%%ZVGunFtOlQp zwPwiEZ`-Bu0^ArVm&I`f8uc^aBYmiKZ2OSjbORsF{(kK-r256IqnP)nY3qobY8u65 z19A(Qg*j&|IW0_TY3GRd7C7$xgchBTN*;4^ubA=Gsk85}1sQS6O1jW3aOkjf zN~5X}KVH&2AlKa`-@gmIo_^*WD2w13()0zh_;jPsNBtCUrAqbcb+;@ndg~`67fJlm zUBDBB-~!0GlDj0b+PkAnE+Ety_u}32_|2g5)vluYLgscXLm*V6E;HDF%g3~{UovUB zmsjfW)02ms&5!QCr#=|3jFqf4e_wojpftRZiKKT12oaVU3K#(hKsW_;058+)a`z|+ zh7QGzn9HPx&Lde7R03D8owY7*M(`rQ9V%-I6U4X_6KC1eGjyti8^YVF-Ru$^@U1(O zk_<60sg)?^`;b5Lm;Hn_F63TRkeHK&4S$`%5zWBr)x4YH@=Pq*>~igk=Jw0{HBm!p z(zL1H6x-BhI)RSX7v_&7M+=jKoK@hOJW`o@BbGA8)w8Kl%_#k^drN;F%y?y!l<~^L zHi?(eunfSYtP{2N587kSMbGcovBi( zrN5Uh`|;+WS&He{lV(IBFt=62KrumQsT}Go_Hk3_EJ}=r?;0qMZ4~W5B}(YKFS!t= zIsq|*{=9%;CVp%%Ei=X*SpyJG54uGq1KXbty{3jePXmJ9USMa}xl^Z&Rc)!!ViF%C zXkTA>b~z%H(t#P}!f+KMJPnPjp1B{94lpyl8a^*j&Riocr2ui&H~c-Z3)~Ta| z>K`*8xalqXltlx}dD2i997BSVZm@QkdbM|g355YpfsM#-&vK5**?!DR2F_tDtUTp$ zenYES_g+ul)fun13+X-$%(;nMIyf3u?@MW<$~5V>T`9bac98}iX_bt_!IFGT)Gvp7 zlre{xX*?_R2rEInAUI^EfGe=m29T2z*o8wLQQWk7^FXo>(f77mnd)SLW_wRx58dM! 
z^o4$I-n|E$P)KEFedy&x1>53Xc5VOo4jlIp3EjFD6(vJks#T#O2r&7bVe;49rN*8N zhmc`jFkNKWx>&W*9VH#tu2rjrZ;Qy1pVo04Sx%lsA+;{UvGXUN#}?A!z+R|Uh?i?p z+=&%}k}+|Z&_DB%20rG{Y2Pk>{D~Ui;r1u5LqPL&>DaMjM1@+>WHlCCd~LusfREs@ zf5V{>NdT%c8mN^a2$7A>234Dx;-tzmrq-EnG_fXjzpO>S>?nTLEREDJ;xF}&*mH@E zw1Td1b-^y{Rm2uEMs>THPd^LZW9&vNFe1<{X?C7xmK`yjpzWJ@#>JVh-4$u{2d|ZkNl5G<(Qa?OcTA7$MCl9{Bsp|K2Rmk^} zl7a2&RaE-_DjY2l*WFX7ZfJloEx^ZQbcbEBhE87JBMQEcrXfU2rw?%J)E~l&B&18n znNJBKDWst!Oi6;n{Df|Gg!LI+%^}iY8p5>q5fxhKu-hX$j|qG3PKDI#q{wl3Meczz zF$p+P_-um)`CjYIIubwl!|1GHhfl-4 z*K%fv+-`h;G9>dQFuEw*6OL7Z@6}8^e$*#Ar7lZ)IA95sx5VL3ht z)+Iiussf%J_o7>n_`Cs7RCy0Daj)l@Tc80X;fGGBEdb&A2BpsR-4|Ae^uMI{Ha1@vR{8E$M-1fMJR-@MSFI(Zg z&{R$j8aZl;kf88&YiSj>ivt)PMmG3oGK8&-EYsGrsnYkD&H(eUesP4piEsc6s}E0x$v~Od@i=R_quePx&WS{RH#30kE|ArC zLP6Xp5VoLgb9S@%#-qvJo|o|+SW!w`vaob_kAk36R0>EoX;3yHT>?fGN))zp?c0qX zJ2nhB&1`4OYJ$`9(z+@FimM&dWF$H;hH*X!hfoM$#_fC(Bs;)5gw%w)=ugfo6&Xn6 z#+`l1sUveJDFwPz=njOYe`?M96)y2;bK@t}S&G6#M@GS6^IaZ5-3J*w>qeFDXEmBn z!WvfbK?tLApYc(v=$cas2c|}pEz=}wh8!5fX;y_)&}=D#&b!q(q>-Vn&a5l5ziOR~ zZrHf7IJ=+;u^CfS!Fm!*p8jTkYB!x4IUQ-|az$#v*FTJX{4_*33-lVM5u%NX4zNnY z8fm$}1N0eQva)7>tCQF1d`P{pee4;XPlbhpc*3_&EtuiwN!;$xjv>(4(qwJ>{hNM@ zCJbvto*Mv5CgY4`nSF35VmqnKeU2`-Bf{BTosSt`#DknHzlS+x+^trfVlkU#GmGa73)S zac#=Y&6VK@@E*E4HnUC$uG$JJjiC&B!}WrlNxvER!T$S>C$x8!GjuZRe#kDHwY3;j zk5uhLmY=ch&*VlrIuUc8a0@M{*4KY@#@Wf>?w%gA_oV+C2{DBnk$DGds#axn8A}?1 zq)_JNhLnmba>8CfV;Z4LqV959QUkh|mSJ(D^G@NuFkt-n(-Ys8b0>7Oz^l%JYKg-! 
zwmR1T^k_qsgxO$C4(Ih?f1i)3S|kRxzsGMNQi#bRkN@s`M%rhZHZ}2@Fb4aI!|F!T zVH~lzjafsqj9OEsyl7NnyDRS~lmc)Ni0iROKZ-FbQPDXxokcwHMd&!Pj_SkXr*HXk z3ffrE^hJn?qTc6vesxUnH>OdI&-{=#ze&h+whXhh#ONlzQ}T1vUUk)Ph;IC0LtLJk zWn^`I;b|^IV(&y$qn4P_uUgU73>e#E#%#Nl2$Sotv9Uwh)Z#Zx5y&~ev*Ze675I|r z^@G4Jy7?PJh`8)%-~aBiff(faaCkAPOQDzdv*6zN*-?38KBjzahTzsuZ452Dfe^PK zmqkC;1E;6Nao=jB*~KFy>(Uop4s4P^ZIN||S}py(z<_i$!cs*^%YAT+We81P{pixL{%rI8lq5X9tni}j6yKQ|w4IGGU<6fDnGDkNyJ zVkbo}VRQaz90lZ!W`U&5FBL zzXvE;%v|rhjB`5iPH8n1Np>~{1qm47hi2t={$RrpnT0~*8bf!Pohr-N%H+DD-)j+;7NJNX;XLpI(;q*=3mt1N-Dv_=ANb;|_#4n)Kv` z7{q~VT;RJ1Hc{S#iw_6kscvRj1b;|v$8n#`Hjnnbl@>RP{@o|A7fbOiVODP$I*mQAW(kp5o6F-p~(CGzS zO#f@N3!~g(V(fMU-ASh_Ik|6=noo$vFrzP19upv87Gd;%bQC+Boz>zvX6nygzmBv` zVX!NtVpF zFnutCX2|auO+Q#;vomk^kA8~_6jEyUiaFiPWqG${hls5;I5sj-Ki)K}=ds1+fH1~x zN5&i~oJ(tM>O(sDHcKAuuw|XQR(P2o{o#8mh$|3CMTH!s&@#CL-j(6^Djs-~2Mjxf zr1yT(yteeiBv)?Pb^VaLoYC0o?zeS9!qhTYm0FE;(I4nSKqnrE+5-h_t=IM)L^u-H zAk^*wZKh`*Kh_L=*To(TS;RiWw;NXm1M?&B*ktd^p{wlDzg03Ni6tg2ZsS`9(?@&G z7UQ^|6uSWh*<7I`F`5W+9w(Y z?JW$4nffqfEB{a_h1E`%t?NOV1%AH1lW>9Rx$N8FoR9b5Q%XN=@}2!dXP;m6T7GK( z5(!&$=~=om!GwRM|ErHV?ZA1|k}^F>QQ@D&^z!9Bqd_Ze9jm|WFJ-K361}tA$sA-E zExHW*#TMp-OE)ZGIo2{ATVb-B#^}tK!r`g-59VD53aj7XVy(wE?FPnomLo$3zBx5p8|K=@7Z!&!-`BBD zMribew_W#*^Qt-if;|5iQKZTjy1FuW(AqO zNex+9;;Q*YT={__#T$>L`AUk$p^BLllqDd^Qp$aQh z-y%VMcEnJ#qmHdY!jG#%^p=q9L(dQGG=2ZqB>S&5$4A&0 zh&rF3b+vjyk8a&CjoO{#xe3t~z+}W%Z(=9CC-CqqDv^j+o@Yji=CQsNYpMU*5;Nj) zM&Dc`!22dtH7&m^Hty8P6p&i1yJ5?0|INt|oAsL|>NnQOsWRQOi_|x23TV=*v^4LO z${u12-)G^wGh!}HFNfy$Qz8xxW#8c@yo1au6ebxBIhT7_XJihG?U+wFd{&5N#XB+h z7UL~hv>}1owQGkLDv25RGDjy*X!zU{)5x^-sp8k?6yn&!v!Gw*vE)cN-IIVCdNHo(v_BD^=5s?+;y3j z$$`s!*56BdZDG_Mz42#^YeFdNto(#&nxFS=P(bECDU>j#V&lDzAn!7B z8meQ`JU4PN<1VFpQ~Cs6HSjLg`2BX)?bK;@mu=0|2VQKU?l;wPtZS`{V?JEFR=>-X zD;u8;>~gMC|0&z``}-TOGqatt_T9R$cjH&jv_3v8;)9oWVgB$%`AxH`#=l}}UCFMm zCA;Q5-d)oFRnDSUr*hw9&2kzsLW|>63{4_kfPO(L{?xIe8O zF%dJ`BZ@!Hi=^jOwLm1;INRPY+CvDrpyZAU_&~bm1 
zm}ki=*I8;50D>c4zm{R#LL9#+^x9d(E@;`WaRZpaXvUA98`J8S4I8oAc`+-wN&d;1radgj2tc zw%QvqLeahpP1yHU4W;?A0yTUxcUlg1GGQEp9w=nN4_wS!_l1VVE-LXQMQtZ&cnaj+i+qBE07O)(L>+Y-$)X8Kjt$ zp_7!NU=)297nr`5!~|qR_|vbSq?w4=lRR(hc!vkyINgggK4xcVwy3S0wPll8Pm*3U z=VO~zXtcRd%ZDtPv(!wjl4AC$V(7Qt$0H+$Y;*YBN7AN6Z=STtH~Rid&&JM9CqP%6 zyRpS!&88=Bs!%OrZChKrIl}FkY*MC|718q+x8CFK6n<}V`PuCZ%9VH>XnbFTV}(SSl9 zSMTz6sb@82u(3ClP)=bkIsWU4>Jcdvc8f{PqTf3#sq5H{vu4ZmDj*cMm^R>A5>j`s z^7fUdb@4U|315&<`)BXc>muj_aBW60m;d2NG)0Ka9sg&-a-&*6o z2m}`&ii?~0A$jk{jV=1-;(bit*>>N3U<9V8Y~r*q_)tK=j1jwg+v#H*C>}jqw`Rv^jH$Y2PzAuxp1phP;!Pyz9H%&Jx^?`u z>Mt2!LuYhXUyq{`o<$z@YebG0fN^tLLNg=ntk_LA*9P3^XkckI4m+X=oiE+-tLgLn zh0~kx_=s%lg%38FX#}(zZ}U#k|GjKHeXA3xo6@U%tbj&GmKz|rFi$X-MEw)SU@qz? zR?k33$EFOmVs{Kgtn0f%GoqxsYi1RS40q29KXc2Co^bJjI@VGgI%vjiEomMLB~mz6 z@u@#FIfv^Tn8|WnvQ@gcNL&$dnsd6Q{&*X+q~R^%Zlgpxm(I76i9%UJH;4cwrD0ve z3^7oV(m}xopk;`qEXMHYXiW+c>3%9=2#Q>%>Xk^&8F6H$4ii`TaZkJ+3|f1qDriko z=TjXBx#K_R6(CB1Cel>ud|p-HEa$=Vf~h< zz^~>HrdT|vl=9H#(W}580pqoW-X_#@x24?-ZdeB$^{Z#tYV#Fe?Qz2>h>K#DJbv{_ zFY@}AsV-BE8*-McB)n8mglz(RoBH_&YlsJ8kUSEOtD13 z`-af%7tkO0>+<8(neZgtp^kQUi?(tOsZ9{VdODy}Y3ULChfd6xs}9pA6vJT!+OR?J{>T^)_w@bN#w}17+nu$(};XUPe}q4~dw1+Dlg$}=yE6~m8Twp- z$LtW3Z$cSaaFUA5u2_uMiIYK_HdP4yruPlp+|Oe3mMc?xlHd9~&v?VZ-kT%JRX&>z1&9h<|}wZ^p%oF=>&mKT;f8Cy@6igj~n*Av=swF>BN(jdMOt^&NYv3Wm(@4 zyV%&QulB&rQeq~#Y2ia;(OFH$X)wf(Ll#*MADQ6A8C&hH7%+iP9xC{pd6-ah?)r5t z7^!pS&W-wf-#qgcScgH*iJ7-|lMr4V!PgjZ@~@c4Ukg|XPgwxWWwD@7W%Qo;o*P7t zMIt3KqfXv@bY6gQaQnA*o5RgaKmC0Heg}=fZ%-@oDcRFJ!n1*HQ0Z1WaxQe7I9On7 zs?V90eXC97>IzChMe)_GV)`lJ7{=?ExcJ`8Y*V=|39IiEn>XPaZOr>x26PB+H{RIs zvkc`@y>e&DEEzN_)HK!~mJZ^e!a`!8VF*TXYp!U?N^QM^BWp5?Q3h;@(vwg!i9XZh zRTv@ZZl_-lZU}D}BP^sBgP57JdiD9j8-KqMc!P~9v_nON%4Eu-O@;34=lw0SGWSW; z<(Tkk|KkWNlON6MF#hiTbvx*u`sEzov144qRsBtoW5e~#8>;4uyI;wGVj(sEg!k?w zysMxFDQBSg)AbLY(Xr3N-Mae%4R?$Ysx-%g^KN>M_9vhEhnFP2^D<0onQF*Ji|=MU zbs+svX)`kv1>15_P<`*QAuv$h4@wOFJbTBj&%+A}y^KHk1c6Lv+&?_K^re?BUyjUZ z9d}}YXL3h=S7wDFEgdqZ11#FGuNn%a&pnZ@WSCXjoyt~ 
zFMgS}Ix{N^UaW6xd-EHc;ktT6xUmRe;z_GeP2v1yWKMiWV>djSBmgmbuLTLs9WN3@ zod6u2IZSY!80XTLAaDVg!S@YC>OhK=+XqLtN1uiO^C@_i;sbu_4Cx}PgTs#<)(~<* z1{3PlNlD@YZj)EZbQ37OnvSRIq*N7I1{69xTu@#jL^WTnd(=u-$CpUPi^p@3i_!VxJ!QNtr8&Q_NupbIbZw7VAW;t!x{ye~eG~mtmT9^n0hx|FkmTxuL-}rEYy& zBggr1@1BIt93Ju2`oRi~kPBs=BCO}aXjCIIg9~|-``11U4Vyw9EmTEN99vPC@VnHAdx%ZyDV&YC7hqYMeSGL;qYtNB?UIg;w|+@rGCWb+q*O4H7TvlfJbYi_LXUH& z;n1*Am4MCF;e}6dtNofsT(_C_J&FL;r(|{5ylvYzOY?>TlYmGcXRb`nW$b_BAi@|rsfaKfcCKp-ua*A2%qQdg$%;Eoxy9&-`Uhl6i00<*85v0T zL8{P?Ovt}^A};GvNd*P^Zii*}Jz+y|B4Xub^WGfr2|LJ zyGI*tYkBcYWVJR`$0>$=i?2?Vp|ZH-Ba9{u-I_lICNLSWQp+MIPK=i!XZ^A~0->n^ z37Zn|#M|U&!NAzvv%${WZ@o`f&hGu(zqwAc&YIY#_6YZI0jm~WuUNyKG(C5Vc=SUJYXhy4LuFU$80s!mGQOWQPc!Q3g;b7tGrruT+~o z`=e{NCsPfd?KlcYJg@(gI$7Q9?Cku~`lvL3lD3AQK)}-hO7bUWRcMxO?Lg!Z4lBhL z=h)R>vsI@+WJ~(M6~%MqTOzMj8iimgLj+=&lvKQra(i?ESedv@jToCBT75o_1Zp>kYAjQ1s z6}E=$9ga+_&Yn3BoLmzGhIF(Qq48eYOH2-Q`hS0^lKqIY&$w|r%&te|1wmp3f@}c-m0F<4hql;af=7r3SnMJbH@tq}s)x0ZqN$?;-u0l6;esd?n ztadRC^SE1yu3vtN*@$yjo9)97Sm|6K=X&;WAM!Pl+N^0i6JqIsy5F^H>hR^u7r*GB zc;RG%7e*evNdOYTxAQ5YGEJ=TjeAl=RW51A~mbY{$KMK9so` zaqi~}076NvGkf0PQ8?4EPy$MWj^NYf7m%rM$i2yE<1r(pS%xKWy4W8~@6qFg z`;`=zJO&ssK$;4#J~X@PCMIdFYg=eF=w$k#k+{^@bvOpr^z6qd7CPXzOGy5~6M+cH zaEg)%Kcc3C@=UKBkNw#R;Fw7dZ#EZSq@2P&u-QIJ`>QwzVkAZt}52oW3sIo|e zWcsO|!^Qzjq!zac&RFfHO|?di8dYYrAFe(Fo6kA(=U1}odnOF(N~hIOmQo&*P|5%# z%}c)sGT9zpW%b;cU^O4wuhguD!?C^v#S;_r0o=~~`HyIO2vK=c&1$f{#Am|m=gIWn z99(A1vP|E{zJo}YR8%UQ&1%(R4$r3DN1)YdHlPp+L;4HwHvJ}d`7VCi$t1ZfAiZ^q z9qm!_$!>>?#f&f8ZNU}me*6mMzUtk-Z|pit_ieQE6O#XD^a_Bx64E>G1nraCQpoyz z`s$T~vsuh=y>l2k&v|~ns^3>P3}jwfWO`v4BvTVu9rpLVX_<8!`Mi{tZeD;pbmY`K z*=C--a2%nOtGIo*IysA!|KORBB4UeQv0*gbS|Mzq?60+H3hrxb=!Y2{pGkB1uN%7z z)-4-z;CHPUCBT6cD1BaACtI?m(E<7wU|7&H1@?8W6X3JL+ z&Bfy~WW&83fJ3x8p3`kn0@n7n4LR&IjvNX*qc!k%tKOwG4#8+>gt-ygx=-GvU%g>O zswwmo%s@#j9vyO%!sSXnJJW1ggihVMlTeb#D8$s%-!t4s15VLRt5fVWmhiqZK0dy{ zYZlM}e5LVuy=6~{)6c9^Ge5tfg!L6C8ZWnAhn zv5gGh3SI0(Hm?pb!20#JiSB)E2a(Q;5u)=F)|$@5(?YC+O9qBUzsouJb2Lu;b$~~7 
z4sa<(b*DwGW0A2m!bt2DnYkq3bIYK_e4?j=Z0GNI* z=;NE+kbXr?yx7CTZp=F2gR!YEicf|I&FaAr`mQ3%wBHgl2F;E#YSc|jlSM>TbqE26 z=F+M@J0`)^F5*3<8WhJB1>4C*tMkm_4#zX04`@v0R4AhTkhx9BvTaX&fJ`FX9Mw~@ zFAxK_@>gVcCV1+?7b8RuL_Ch){p6&jT`>;OhUpC*ZZgpWe6x1vadJxOUZ92%=NOVe z#cEm2(_6PXB&^hjC$O_UJtq{buhYyt&q9Sq`d*eufKXM>9VYgxfr4MwS$Vr(Ef0zU2>JTl7pO^vk+VOE>HqhVsE zwi5bvm5?M7EBzCX(TMP^D(a8y-L+FE#cEgG?|VysYQP>J%%Q1FV%3ZN8o#VTgM(aO zddS>95nke!X1KM9J6La504oJs5m6_KHl1x-FHyv$1y3A+y)a6UtbpMv%VC9!W)-QV zDUEAc3p67o2SRZIgV&#xm0I$l552_HJ4HAdHv?`q`<#cAS__7mKqPb%#2jG${H_A1 zW5Xt?7$ug50&}ryiH?4=5ff;V&iVzndm!#=-Dl-g3GI2MH+4QI>|q3;dC*D(v ze;jW*OHwby{tH9jM4Yf*Vxo3oXoQ>t1>>GDMMky})OR+VIx{YX7)41tm+4ZtY9P@z zA2PDTuZM^+<|YN5F>2J%KIr!3y?s4aaDrdHZnU%SQjQPzq1Uof=sKqakd#$*`ioM? z`N2__tc#;Z>$Gul*z(ghh{lXe|HFLT3}iuYb9wG?_P7J-r^Nk_D;hiGa_sa8)P~@D zAxlTt?vE?J6cbH|vYPNt=C+H&4Ash9TtH}QHNa<XVI&nMSn)4t#v_lHw-+D-!wVY2+JH(H%6ymB2ZFeyIacIqc+KE}2nPN)$tjX9XH}|E+FX&Gi z%q$K~PdOyG+H@cVZm&-6XkoZ;rG9gZuuJy-{k<*8a}`BeWDr)X;$p|INr#wAu)rKf zSgK@wGEF!AGrOv8?p`=SE8z{?N@Sk!vz|&_@8x!kU3HX-Ye*WwFt(liu2ib_ueCm4ie4<}W{X zUdGM>UF9W`tr@m;*S#LG8Q`DzKYm*W$NPt6AjjEZ?13FP?@QC1)M0FMZ0+Mg=R?b|Lh~tqB-Ka+Q$SmlrOnza> zC;>N%K*nH0Rj^v-Sg&^N6{Lz`45X5oH*te|pc&hh%}5gLqQ<#6AB2ny%ZbF zbe}M(tS!ykG@NLM)A;^QN0YD#vo(w@G8IW7U(N2(Z1gN<>A5T($`Pmc#tb@-?E2vF zaXkKHmsPNu&{bI8X3ops%A9LXxwu4}@^z?kjAjb&jT016jn$jah($2aX)YEv7CSp$ zFGGncS?zi{ep8`!2<$&`?4C7|QnX%&3Hq%U+722$x{f?@Yzr$jZ_ZW{f24EgDw61( z!(vsb+x8j-Md-tm85uzKv;jo2*SeL%d8lb3m*k8Z_r7<$<7ss#rze(!bM9ds!U+SZ zYqbcjxN4o^NBBP>!8MwR&+-{GXnC7j4BI@UwV0a7&_(j5q}Ew?3?Gi73ck3aarIBG6A1*DNMQ0d&GaVENYZ|BRkN&%C3wx_)a~0F zB3EJhg6+POSARRk#>NVBH0)$R_*H}GQ@=i)di&YaS{IO-uE_mIu{{I-Z~>TN+oY!;es2@b&{ZZ3v*)Gw)o+&)jJU?Edv*@P6ku;COk6ooE59ZdG!tS z&8Xi<2+kEqbh`+nYtGX%nwztW3Jd4JWKrx)0=ZA(cFym{fA(~nF{2vAiMwCW+bJl|n=h3#2<+Ar#4K#VM2t-V?&@sl!XR>S z_(GCccieBFHW9f^aGyICaxR9+iDucy?Y?6GMJ@NKQTsX#8jRUDd0e+VbRdAbknO8p zx$`j^mFECR@*pYo+WXVMeNHP-u<61kBW%aBPsYb}ny`jw{*3fw49#(oYg-0Bl5wKF 
z=IKc$``)C#*iUkOP}mP=SXb-+<8>=>4uWQ*bK^zCSc+;LF{2vDoLBLV%}Mn;MPYUi$6el1pDS$ih^w+ouOm(}V4jj1Yi3 ze)U{Re%%?_%W&{JXg~D~RMN2UhYJ{|j+=Frl`b0q7(lIj>8o50f^X?Ibl5QQ{*!4n zq2KzKP~#db+uNM?PpRW&qupmkZEkCp-ja$N8((<@kQL@a=cI0cGb1(4vOa({+|v)c z$BGCW6K#28#*@^3zkhxiCGC2#R3lYkK}$vi610ykK&qlGsUNIW5Cb12zO?+XbVknNf!?fBCVw;6_$9)qgT z=$26x;-HBQr%dXD)*}m*hiL()SZAaeMU0l%WAT(*67Si*0|Z7HK3tr6g26F) zw9IcGb4pj8BS7_oaHlvmv6OTeyHQPHkF7+6p-Z|al6zvO=#KaSbaV}%>~cnN&)7vat2Se1?48EpuveGd7 zE}^^8-tjY5qBsG-T|+fBf&EG^f-cZW_U^sK>So&YqGknyW1Aq!4MzmiG} zHyAO>rS_HS!7w9z;6_up?xSn-`oXcbR5dep?Hcpw=D>9`b_?2$ES${r9PMPk2JD+p z=sRRzkzd=z$*eRAkO8a@5fsy_{A10Nn0S$c+wZHZ-KQ;u26%CfHM&$Ns;9^_;1_u4 zbJ#v28Aiyq`1|KJ;xgmnB2zs5;&xjuMdc(-E7-9zv<=Kfv;I^?0ag_YEGcE6NaDEa zDffuflW-&!X{3AS#tl83aO6nT(ZhD;Z+|Q^Eq#0UVBr0hwX%$8qYDWkJ#kb1E^6WD zurbZa>|_iI`>GYyFsnyyGYlc@7Y^JiX2zkY2(RtvR!vxLFbSl&s7zxp1cXc6!=08D z-X5&@hNIv$AODOpw%CODD@$Bh^}wjpV} zB-Xoj?PADgYnx!zPcD!cA>@0}-z6{mURzYVoEik;0|ieZ_-4)#KC_t8h>d&QCv{Fj zOIE>3;qJk}!~xOy^Kn=HZ}-1%rzNN1WbLE+5&;Jgqnu+bqP0!yNfcdkjpPJ34)K~` zKb^1fYWJm}8_0C+3RHf;stYSD4!SaR^_n#UtgLE@6AD`-BLOzb6xs;VySSa~F)t`w zW-jPPn@}XN7BskxY6`82i*PV8?Q82kRk|OHJu>VubL6RmHZa@7H5#A-f6C=JGFWX41EiDzG}YjrYFkIx zp0DAPt3@6pE<%bz1y{S5YPy}g@BdzglF_0C758G2I(IiX%4f0T>;%$Ecf)L#Lhl@+ zhN7XNA-*ltujlx=Amj!>ePUKdX0U?F}(Ag zw_CM@5nPeIXi$cw)Rg=S zi|<-0ljPf{ufVyH@PQ#g04+XT+Mgj`7d0+7@IsBi)vsgs9XOy4i-HulG08F0k5}H8 zU%oxwB8sllkasP(NUq@Viw_?*fa;nN!02v29WhDEXSg1d=nSw))8q{#Yh~TOJItn4 z!zxY)?qLTG_y-5q#XBOPX}QS+@7zZ$qa@K3Cp8?XGZNV2V!`O>m!{7`4;Pp zGWS4C=tO8E@1ENLJNfiL2Hay}y@Cvp+C)u((9wJ0yDGF{#Lpu)cjC*{vRm61Y!0}u ztqLBuPq(G3!L-#%3WCEml^Zs~=BEPAm7FQ)Wh+V@!C7#=21R?LNZNbPIZqWIYOo_M z#4T9$vTk}cBO{#$%(QZucM$w%k>e{B2E)P3SJwHU{Z**Dkd+5;YTO?G0(hwtgS1I` ze4Y=;5MBa{O1y+*W+c^jCD={m?WX2_1|pTPomcxvMHK=QzUU_CzuzSgr{|T;^2D4q z{RMP0?N*pe`p=@blm6n)tD0pols}^##YG6OI@vn7b|kH+&Ka-`9?Fe9{nnCet_9W(XLd)XNjDC z!*iL3pnBP^MeA@F@K91(SU?^B@P-k=o6<)2t@3AQHj0BuGMPR+sML8#Jd8RW(L5n;1g$12B8lZ-GOf#LCGv?|xcXL< 
z%p8hb;UL0>pBPF%efw6E4yA}hYOmS9rxj5?Tz#Fn9E!Y#=~tl;zO^O1-?c_-dC;2+dGZ=^=nEajNXn8rHO6ag!QkDS(nC zgQrz@M9Ul#60&MV;Y?tLhPp6KH~eATu6}*J&i|FMv1>VZ)3-W#t&wcBt|~dLj z&%Pu`;v|^PYR8q_DlQOUm?}T<%Kyy;|AazwEedQY&#?McWWf|diqe8W9>)$3(gegkyFdt9Qea|G40pq{YCI{aB(BnSg2L;knAaz_Yrin3o@ zN5>EFmibY~(!HSSz=7}u#o%FAYSSiS$%bH_+MqMx2ui!h`7LBTsao1N|OQl9Ee zAmu(3ml_HOPSYEEO=?0oaknk|9sbnXIb3fbw^NL~=Diw8L1Gj4n6inqODI&b6v~r+ zzDl@^1nS8sa4{``YXa>*G~ElvFz$N(9OTH)0s)q@=IxgY8ue%0uQsg5;0qQ=D4lKK z2ch{PB)=fxZV=v}b?!fS(6U=M9fj#a{edW)UPf~1Wv@nYv~I;O4*wP=CT^Ip6Z@%s z`&xk=B1t65>y_74@`=0248(|>Cn|Zqq2-m#%uHW?8v7kMGBvo*+Pbprd#C?<+mb-B zCeiLxd@yirK=+qkuIjN*IxFDHw49`mPG2>fDRO6GaOXWLU@;`)6=QVc)DeaEFrQuz?h~EhmP3NCLHNq zwt`$$^IkX=UVyqNKD5v9kx|{J$W;8WG?Axqoedj4L-9nLQu{wE&;JFD2~kLk=@gf7 zB>_1%JU-$vmtK+7SvvGhWPV7ROif+#C3B>; zKS&G^wz#l=`}Orn7`*?+XH}6w+q&W$=p=lKP}>+?0n?`vVmj3$e|rUz#(EC7mgwAJ z-El-|06?ftzp{ruWIlQjNy+%Kt0>M*^)_Eg7>`v7eUJt{W;NN--OcbnkG(m3Xz=`1 zKzF1Wpk9gTULb@1n0fG-&v@`Skd*D0i38r(b&>hNbAA*~SA6&hDQ||f-juyTMJwg} z^_SMW3}R4Ih@1k-z;16LTgEmUbX0~&K*+43i0LKPrDOiHSkEtDvZWC4Wz#_)$a_d+ zx{V@nNQ8~_^*Fu$bFU{`4jEEQvul%H?m`Z4mpvF{rMYLJHh4#8%*91M={tloXNX$D zXq9&}xF!B4(9ZC9`-X8fwP4{x?Xv8~`kbWMx?sqPcqT~?`QgKHa&3`unEujgZ+6n- zi=HB9E5y-%M!fv9JPSuIfjw7F=LD4vf9JNSJA;Lbi!bu@Z=*W;PXO@OXP?UYKp(&| zKy+{IY(mdTG9Q)*$-=dp{0G;bt_SriDuT2zy(QM_$aEA(5>H0`Ec@8mYfyX%8cE|! 
zQQ-e^UisU3tXZ9FMjK-wXDhREG(x}a_2k0+IAR3IEq`Ujv)Z4_`AMw{4ZVH#0f^2m z2l`I}^%ld4k0IsyP~C7>m;%qS&mzE^p{=7=WAyHCSbH2eXJA#!h+3x0y^`A;8T`;F zLs8&C;=d(JN_C$xkffBrN=hz+o{^}r$<%jwwP_Q;E70m0|rpt)pAfi<>il{&^@jY6@}>zOM}NGZ!3XVm4Tm z%!!ckND92X<+4yD#~EBcgM;01;(xLPQD2#uIK?@Z z0^iVqG|V2X8x+bSpM%DLgAU}=d^LyM>)oSEpuJ9FQ3y$vW=K^mRK~U8``ZGLv$3khP~F4x04##Sn+nP`S{<5Gqb>ptU-^I1!_7 zkbQIl`r$knxUC0s6vf$?EMM2+f6K817b$;^0WmCx!WA@wHYn?xHnJbcrLCQvd4Nn_ zR=px!1)pP-GKmdDzV(nG+&v|BW6dX_4PK9k)LPm|E^G0)o{JJN_~DnYUoVmU`*{Ei zODhs;qdG+CQ#pNAiy`YMyI*Pvi1XTphoYmRL=G!)qgU_RR|y!M7rq+K^9ZI-9W4CHo)>!U^&Z2(T<$k`^%~;nmy7{8xIq zx{{TJhKH|!3m8U|r09r;YDq2`0clS}9nxvS9M9Pc2$go;rJb2oD09!(cUq>JbH9h8 z2umd`6B83+{MrUhwYAlR(0mxiRwZ@y47bA5{{i+^fv0i=hGZ>Rv-)ii+(&FP@pSmHKG2Yfxfl7SSM z`}O}6%Bv?j!A6xP0EbE|ocVLs=OK_3vIjJL;a*b)(mlU1kq z7*hy13M7qt`t;0MFK%a4z{IdWT!4Tzql3e`liy4sYlu(S0PI+cT$;ZDYQu+2{`?v0 zTmz_ZGcL$8eTs){d_oN_a+<4a>gUfn%}byE{#O6%;LaZcz2|!WTwAiQ!PiS^Cry4| z_+@>l<-0%*Xb$j!0`t&96+VmbW5~?obsDGi0m%}^Arc4yY;=lmBL?LV5>IPUh&nm# zt(rb)P&fLtex%9*B*CNRq?eJ+hVN!@j0zF};=y3hp{nht3XMci5*49CNG?fa%6o_f zIWkYCylBzz)z(#YVPs{vZbP+J08OYBuBh;vac1-K`z4q9?p<}hrW`ux38PgRPNQ{u zZO?CbY6tJuIWHrGy99%%IAOCortgcmiD{odV+6PO{rdj#34mSIIUeOG)O(_Iw3uZjRwdUl-H6=Dc`)`JzlhI zHG!4X^x9J%D|HL{H74D8#QwMk@KEZ z4?!dT#d}F#vPhmimMqCYu!E$n>o+?^*?8h)g~q(s854?_9k;a$g2Ce*z+JPDBt-1r zuiv$+Eqa$20&qMz)?4^b7K1DK9JGkCi72EC7egR)U$n>pI*1F-1M}OJU)ZoackUeL zIJmyo#EE{B4fX1&g?K6_xIMDx031!v!N9mi*$?+UK!+q6zx_tN-1ozhJVb0VY|+9L z&V()QGp*kU5xYtiE4~;sg+mTbo_56l5LiI`j?0*0HpPOsnM0*cvP)g&an4;)_IUj@ z{p;3h-ez>*9Epy9CtlPaePA8%&e3ic>7|B&(YE{A5m%DqZf>n!_HI+Jb0B2maL8I2x+$QpK{!Q7G(=qWw&k~O;}cA8U73S04jL3I zzS*>nj;D^x2x07p_~9EqzU3EYjI3Y)IqtY5_Xydnco#hL;Jc3|oz8-Rl@;&g#P@oz z<169GkW|XwjdSGlr%$&rWjoqULs3ct|9z&XIMaDhBT0lewFYgMK!B3HbI*olpXT#Z z4~~m`8kfZ!iC8w){uw8Z9I>Xelo{jDF=6^$3vfmCHkHeMVWSS|)WejeC!o?5!Y(sn zHJ|&kYxnLf%rOZdY*%}C1_QlIVwoJ^1nO6~3m}s;&%@AW|K%wnQhoC8Lw!&OheK}N zTUyKAn|>)S-U38ie?<8`y9HJ7IHXo??s$^6C7~(Mg?T<(3EELYW%;uSOlJv&Qf^kQ 
zTA}QJgI>M9FP{f)C$Ue$3W>cvQOUU5Lc*6$-MWRBzwE)pw>_8$N6wOWKe0YEA=y&B zT9iOoDx{k|18cDZQ^2h52s;t7%%t+4P%O~yZsGIt7|o3i@b&MjnSi{RJRL2Je!974 zcKWjme3IR92CJ;DE>R0LKXV=@_`hP^nv1E6#Ho9S41i5FZmbCm+)55X(|vbT5=Und zSCG$v`_g%|;&UjJGDY=rzd^d)4Pq}x9K+Pi|1GupfDH+g&Q^#WbL9E* zwb8tnJe(rXN_y*f!y5m&IHeV!{xF{OrnIy)8j|~9dHub3qR_?(@p9uxrZ%o$zrOW! z7T}Wn*b~i`=8`@+LTa$#ejW{Jt`uAg`}flDjyRpkkSR(p^Gz?`y*q>+4I5R9|6N*R zs+I(L{u29XkSz}V6|r@q4FbAMq@f`Oqc8ru4L%=}a+I6)`}c26EiF5X6DbdwFU6iB zvSpGo@-`XmuIKS7fCiB~^#|(_(J%aWZT0#HCL~c1>1pdLNLi>V?Z~~fMwV~d7H#4B zjfbqqDR`Gy6v88ndrXJ~b8sMJyeMXBe-sJSbQK!mo9ca@)UruOe1 zpLT%Lu$cyAG>rfGOhf<6_0Ik^66|S3M42wHpdL}F>?#Vn(njzX=XM_-CDtizB9&6F z-d<436A-hdFx9eG?+(h-sDXcC)5|>+cd{(WlDahIaNQ{uG8hzA{1Z4YAEMXM-YIng zu)#h~^HS5Lpv)36Q%XLZHgrl&R;yYbWBb0R3Mit(arx1s@nBuOy3b-{ZxL>?8CS0w zUAkCF>AOH5Xi8o_IXysbJY@hCHEnB3olz#GKaT~}?>KADpN-Uz`ti?!0+pNCV&Hfh zf-S%zRM0k~6fVl5Azv;(CVS;bG&rR;Lxg$S7!3^J(R`p?}ygzc~c zi+`TmGP*47=^Sy+T}|kvD5LgQ{l}v1p9>2EM`D>~5fVEN?d|pRBsl(YI(`41PB#;& z7rryDnHa|ZcR+Ctj%zWHUDVb6<&dUK=Oi89Odqjwd1zK%_VV^FozS$Dyvh}<0m{QZ zdIS@2lP;LrnOHuoOYicl+|!hDQkFUnmH*$R-nS1S?KllXF2I#N50q`0JqxWVC!L(z zX!wFS`v#yMQ@x&UAg`K1#v@aaYM@?f>c9Y=%PobeKl2zYOn7jt+F;Ml={dhK0J&B{JeS$vBHlK1?1 zIJHpUoyV{}kRu67%=W+MRZu>+`SWzoG+>Q1>c8ydfl+&B^}YwuitmJICgaT1@@H|E zv)qFAN+zGHQ@i$O(~@~N`yB+un1MI?5=vvOAyvvhwB{=k8MGX)z9(~}{y@mg?1~0s z$Bp~v`%lGZ+44W5O-;}5-(b0m;1>0 zti`aDz()(IJP|(Uu6AbgW!M;OS9?^5T-aQlc4Kb69ujEwZ=k9P{3LnD@cI`{ojiG& zt`VtAcU7uVFjG*{i8+ipDs2IShMDe zdnqeTEa3T%)9>t`Y4T=O(aDuXC;7ANFR!Lg3?%NcDuQtgJS27rtNqqFKKKQNA&mq$y$I=#e8q`}Xald~#vx8_J&t4=}|-rgPuLd$QPK;v@^%##hq(T?4|S- z_@Jn~c5=NP?ne}vO71rQP}(=HP4JQhw4q2fW&qle$*tz@Mj+UYB$qb^PVF83XW{xR+lLNRV&Tr& zsZo-Ru~@N**%TNUNX@){0~xB)H;X*(Ae%ID(%BV;R-vX_E?)lNHsIHQC0A6Jj?3#QbAc_paaANXFdJ{)~CDl>-u4OaD;p1S$v;qlSKtUti5XZ^w z*wSunxTcQbG?p7A`b|7`XOHVtvUjTQY^wS+<+ADI$>F&kubPrHU7ml90}`svQP7=E ze&nb%^a)ZnP-ZOz3n(23X`+wLv53s1ut=p4ga!gEeVv8NiAjK}Af`G(W7^Em5 zioF0zv13CKG>OIv3|K(KCz4pXd8dVqoT9 z-glR^)?V+UM_tKuCzF$DhbOQS2T-7yoFQUGv;SvPvgR|CKy9_{;Jv&&CzN4X9D$|H 
z4EX6S13$&6l8)QdVfgdU?i!DO0bZnsWqiHubLT`l>8xE>pf9Dgl%*>{c z+%uVCpQo4dDq~b1HGNeMOr9DF+E1*CYNJq0qQK)vQs~#|o*elN#NrE5_&N)Q? zGJLXhI(I(r70*hPj`*9FIsxtNZThBf9#j87{q8bHx|dA5A5KCQ2@(=Zp�B=GqIX zJUPG9KXDitUmx;Ke{lUcKBi^e8hoP*-eMg#Xbd);t1&gwQgY^SyZ+99aNW&p>I29t zufZ^w2;7>6G#IT1C@YA*e_K-V9Vx3yn=`3;?L)-%yc~9MHR2LT(x-_iy z&f9sX9uVQ7`m?sEccAyIJHkz0zrFxpF2^pmGngF!$<$4WNH+s*rodPAE7aT-+EvZb zPgz7GEhXtY69&m4(&EQYtI~%CJCcs51Yk&|kTR9N7XD7kFlVry>L|FM5;`1JBp>TW zJR#~Wv$4^?V4^*;p;CLZhF8z=2L~vwcW&`vh(d}P$cy^-_4q8pNbJ_K90D#}x-^_g zg*~0+H(z}j3>=%aIc2f?D3~-36Gw zoT;s;l6plE!wKuyP<8sLnXC-kKf%=8fgKhuSP-aSaCmAvgw0H4H}-$8yr2f{=w!W6 zE-US50FE)~xm9P(Uj!+?urjuf8(oziJ5Qm8wt9B^NCX2#y>mC-*Q+A^VWpSx?Ex)=uI7JXbb`y9euY z&-$$3FupkAq$(0vwl2I<9W<+Jdmo3PxRT04^%#GBXtEemlA8YbL`Y}XHguu)CG{vRou2b0;LM67X8Mo59j)D9e&zVx9W{GfGH*M zC{>)z;KjVpr%MUp8yO?(sg!z(MIYT<%d!m*F5ssr%ZpmQudLi*Hcyi`9b;kp4;Em% znLQ@`447P$LYIRz=be82m2FFw@c9;{d`dfg*G(irg_42aag0|qvk|S0wR z-=bq=l=om5f6X7;aOkb&D^~1$hamQl-q_eSm@EY30*28=YAH|oR3%}c#A2Q6;$ija z<-`zM_7ZX9sOD9X{KpvnFBC(c`t=eY!ao9#Xekrjv-|tFNgRHlE-WoQ@Pk>UIomdq zOgf`JBk-7RAX28h?xB2^pBN{{_x{@AGmTB+)PDv~PE{n>{3zIw^qgg`AiHixKQ;or zw&;BfF>|1?NcZwYsu+sT#Wh?m>O!;geOv%y#EXSq2X&2<{{ogp%xuVDT$TCR1eON{ z_i&X1mHeE7vy-9CWgWHA!qkYp#KTk}yJHNd=+g z*HxHA?C8~B?f7jfMhE#hF^OxZBsz_97lVL`3NKlR8t>HT~G>%F^ZwvvUkOSf* z`9)>|RBr7vb&9HGhYs;fUz)ya)6%8>7tZTPAa|2`lkT9GQqT`Lq;l$rc6E9E)5lW5 zUEpN4XO!=z&XBv-3^3b2*4&&xa)8%ivmM(h21e>)DJp9YOKgAX^mzX62t+9Cu+o)r%D4{B18Sg<}8cJwrEa+7x`|jE`%w zZQu0k^Xq@#e3e6#Roma!u8rjuRFU8C3Tl#wt+M`N2{=ChOFEs*o zQzY-FCsRe@VBto-@z37f*!wT*-b&YQn5+|Wg+fWy<6dy~3~rqpQMW3RU&xD*jSs+Fa3X=S#DDh5lci<@ z0L4Qw<=}!9zh?duV&rrE&J9Mk>z2#|kR@iKTT@f9rtyBwTa#?91U4nkD3pxOje#kn zW&N(P_Gr8`zhr6#}QwQabpYKiD7N(v?!A>}E@C()-@w_%Yj+dh2H!$ub zKQu`WElFg&>+b<{@YS#W{$Y98e~h&F8OAcx4$ncXMPQzH|EtdzqW*~_DsP`Yk(B`J zvGdCMXd%`rvsrUR zjAICped@lGMDc8OAxq&bmc${`ceBA$uV3#qQvdU1-mGS(NPv!&z)M%XKG8E8Ka4pr z!LaV$CLFto^O67)P%akoC`ka*y-ehLmwDE{Pxs|G&Hl;^e4B+|__alsXso8yL?F(E zIIK_$3a7n$rK;#P84>wq6bevl-|F)1l;RN-yr3bmf=^61wqLkE0(vYZ)6t})8!_8e 
zx|9?6OYX!-$ZQv<+X7Wcl?oL$_TVHVDIAd5FkpB!bJEhHGH(q(1>~R`zhumsMo$uj zy+98-HZdiiKR98hHfew{vmt+m~Ih;e8>ac{&VjR;(h-NRs`GF z&)DYdw$RX6zR?4iNDm5~8cuuMxLi;Fi42>1**518(nGW+!D{h<%RoB~DeJPhG=D4w zR@#!NFxJhI%y+zvxsw7+a?l+nGNY=JpvV`u57U_iB&~)4%L3R+FwNncggTmY_r$HT z-(srZQ}qT6a72}RlOHdl-)1Z#PI#d3+A)Yygp|ZI`p~hSo}Q-)i6Pmn`NfY@e(W{= zusLQQw}^@^ZXx>HEpSheM&77@>-ydDDt-l?yRClwaeDqsj^e&j z|9*8*AiOym=`5b@*gwrbi->r4N|an0Uy%O2z`4}q(xQ7~bG=9_v!Lj*x5jw_pR`vy-=P-Gf(aE)8*X3K zp<~Cb0RexK*WA&a;AoKam#tDilubPacJJNdMu4?7Z+QmURxV<)~>x(F=$Jnf6kIFUkydiXkQ4yEiOjl~!-#Ew zOs>D~pbF-3FhfSK&ZxQ@IK32fK+@=UdXd_)W9&yV{x}wSv$YQ!Z}zDG-Q$lhd26I} zEVxjiCR^j9t-1*5M{b zcNXC?NolxTfSA_ULz(Yu0_DVqdl4^N+23zwituF{((AN%;qQYyAd> z!3`;@%ule1#$o`@d_&-^K!tMZ%GbZc&cuD*ljG<`Px~IFN|MEA^_wZu5vPii+p_4}E#s;;M_m<@3WP}iI z`TpzI-=Y$Ko87@*MsC_Mq!&|3COsgh4K+*mdHDNJ1{f!2)&RMPkfmq34@`+DK~xU> zR8Ri~2neCq`v<@NAlZd62@+Tdhv^v_H}7xWSiC*x2gl@gYwvIon?(+8g%61k5JX=nwkw zXdz)<1YMG5X%b?v@dY_gxn(3B_0y-7DQ^+gs9rp|&1btD)a_nr0?f3mX^MD_dA>;Q-ZW41}kI zLfEwBw`QN3z?9Dchao0I!%tBVMUk0GSi22{q_Zb^HmX#F6cGJiMg6=a@AmE4R9h1c zPg_lz-`+!P!MkM^qo){rZ`G1;%W!PG{!29y`cCTam{m)6>ns{)Zf?Hi$*CTFa=y@* zvg~1#R=p<;vM+8;uhQrAt2J&zC@t8cDL*YT*@6m=n4flRDuokO!;`>j4lJFMZ^nX! 
zE7V-^f?5|y+Cn8pAHCN!NPRROv>nm-<~wf$A9!3?D6l=AMA{`>e3d4m{ysKZBx+v8C3ClMQ%&^6?Ref;<%URA!ndzahDm{&&WI3BHi zHE=*?E2U_7uv7_;$cwxS^~Up%gWjW2T$wtaBf>}D@x8rb(Ufa}GFp9;cWhmSwlCG; zTva-U&a8@G4ku&XWEsUrpn8UR_?HLDfzZ1QPlRmfXBBFo6u}$MvjKe( zxf&&2OEg5F3q6dCoVbs%>_ftyHu}iSr+?(BsCbCVJ*bg|c-ykYk+Rl-n-CPA_;)p{ z#g0W8w5qM>tXXk7pdn&|3L3%^c?|r+CKM1l7XED8<#QbG1k<6T)FU{6la!~JF`Dd1 z)wimubRhMjK=k0@2s;B<7||kzM9~`yXt%9@5hl=IR^z-OFrZYdFjsMK(%vsl77Im< zm63GHPMwBuD`+*Oeix*eEGbPiq7tE+s(BD@s*-`l+j~tMFz$-)Bp!k@^DfFj0;?Nk zwLk_fJ9a!kSt05{5Ku`dJizkv*PqyKM3^<__F<9CMO9RAX{j(P1O$7SWM^chUe~PN zQwjarFwA`R@ zqp5@zu!t(gop~<+p2Lax(|vGg5BZ@1PkXR_A$2Y0ZngkTLBiV6$^v0p(yHI|`|Jyc zvn$_w6<@h>r6hXystk$|PI~BvdMlOi2LkShA?;WcP$pM9wQCoPF~x9-yc|kiDrG}9 z^>~Qe(bOl|^cq$_o$}cMYdSksr6pemtH}Y-(ZpHx)^StDP-au#lwA2RSA7t{5jCEY zyo6|QJBhtB{IqGs-9_%V5APwWI*_qFi_cNINCJ|tW#w&?`e0kn7%mb$Psx!(-*E&= z7)OpBs_Tp>2%+>{IM*M zd#F zO-ek{)|+?l9KoYCjdLa=`$Ey{uyzjX+4t#pe&8MweI;65ym;~Jd}2u}mKW!Le1Nv&skG+L}L&kLyBf5@@{1#oSOe|4NA|) zXaxJar0s_tqzpH>*XAC-hiLch>LVptF%eYPCi6cs8BtsLwi7iruk~$8ZX1>Y8{KsN z+Zx^;;E$Hnq#XQtb6~;Bh;cyOfrZtRNtobJ2l)v)p?9m_(Xw8;bSY%+l0FlY;?LH6 z@SqcYf8@z>)xBEhG7yg^Pb;}p^*hbtwF_3PsPd{}ZC?$vvJE9c^qO>Hw7i}N1@G=v zRI+-Ac=j}Q_B5QGjHk>!>pOhJh-Xrhw+ecnoLqZfT3FHd(I88;UCtP)kPiMGu)ZHJ zqm=CMO7n>h4#5m;9)lzl42f5rg-mtf^MsDw?zeyW6SoOU^k((BlSIi|^bgWa!=GEs zyu9iCm9AFHUmFYU$@e(lo<(C2J2^RtA}W$@D%xWs;dl1z{iq#9_xbW{l?}j6+bxA^ zbqp$ClldzV@$m~w*7``@1?{TLVCsk^^|D!ddk*l1tKaqYmOf5Lhe6c0eV2z=bjwel zpR7G&6#Z?+<4YZQW7fxO|N6F%bL?*99I`QJX-28whe4|%ty*oaq^%&*_f!fTLK0|6Kc$6~uxL zmVVLv&b{<1W}D)fz1|N90lYtSZIjFCL?(bXmqF4O5S{mt^*6WaHxxuuozFnuA#Sb* z4Ku^jAHW@jNO#ez<%x+vjn3Hg=DUiQOQLaQ2Yj&J(YMRrYf7bki#v2S^kIiXXWoTU zeMU0_y&qQq)J3Y8Uj6zBCYQ+w+%d+euJ7pg3zWzz=NA%9z^u~o||vjP}&Dj8MoaF%DuC9)^0Li7>T*RWVFB3I@e5-aI`0Z%GPDJ6dd@i0#v- z`>o3Zw{}Y=|J5#=!WNLzZb@xDU>qp@gW-ThZY!SwPJp07$krH*zva57|Mey5tjo7U zR)@*Bl@E^FQEQq3^ULe~{EIIVnMAd}@LvR+1jZ7^tNaaXXr|?qQ6P~tEOWT7OeFAf z^QCxXqF99wI%H*=#(LOPx9B(QG`UXYmsvXGc}342FZ#sKry{kgICE~=d2WB9A~WcT 
z+`S!g|QZSHH@mp$G*}v?6o#_X$T;*1b=kL(Mj}{y2H=8DI99HBNASTRERt z(Pb(X46{^m(DI@-Z~(H9tN{)vVMeAgG~erEa(GsTwlX~+g+mxPjDQ*wk`AxzK+;%u zaA(mYVHyA7X@r0Op2n{g9|~$~qCq;QLN^H_ftX{q_YYOnb9O@AJJj@2796>a*j9 z)$6UvHB%mU)zT_j{p~*e=Iz^i=zWEO$4r4sGvC&4a*bsR8C4ujQ8Nx8Fmd|dUr{0q zMK~0QnLe1cmJ%d1hX)yp{`?v3o)8E0!JuPZ8SE^P-XVF(fhj9W4Kq)$9efV1+J2TT zKv8hRAz;tS_iu!bg(Vf42+zirFeNx!M$&1MQQ>g(=ube;&{ot+8NJW4m7p{N|M1dVf}aTYe|9RwjA&6Q zr8Sx1{EZofJ@95y@=9O=agwEG|Jiz4F0%jeqvid0jb2BLpq^OR1JH(1oy>3-nSAon5IV-$!rV;-lRlu`c z-tD=X3BVv*fBp6EZdc}_(qq9+&p%`zePia-@mNvr6JDJVqTaH-$2c1l8#2;V4)}w| z%1{c84^MW^>+cicM*1d3X{&E|cr99V_0?(0iV~)nb19az?wwRGK)j$1pFLyFoCDPU zanz(tQ{|F9;UG4DeOWYEZplE4A4tlYp`4{N)xgCB{O8n8cg-U9e;rZU+)P5x#Q(U;e@z)U!Sv;T{8;{(h;h$qb--lBgJ_0>ebEm^G@$cH>fCC3Is#= z1KaO@^{uHcn1XBmK^T3`s0fNEET}EpNgM8Z>SgtCjTgT8w-O#OmQuG(5lg5e{LL(L z9rp}vLjOETQiKQz0I1|i?DlyVf<^Dn7cYMvvG7Cn)!f|NvSWKGJ)b4NJUgo5_+_v@ z-~oHcaEIe_C;MPG)YfeC(^bSXk@wWi3PC=qZ=hjN2@Ncd9HEQ@WxF5{Y-h z$GmxSre8Nm4Ld%|fCFr$1>CZ|r;{B~Yb;MQn{WVcH5RvUElJD)xt4gM%ae{4|tHbIyRVpc*OrViBYJvnq|{ zLA_X7w43#Hsq7T)b9*Qbha9z~63xaTTA4wt6MT}tawJ?u_Go>FdR2Sw7BstFO=eI( zWLCd0ORGMYj9JpaR-->?Q|vkdl#0HT9=z=J=M4ys?RuFOU}*yV+jF~C1LKoVoS04N zaP-*6;)y|W_^|p$@d7eD!m(^Bue}%z8QC7Ky+>5Ey2Ec8wsPJY$sTt{E3G~|@K|Ev zRtgsB=cvY+_i~~=lzrFj>#x5Sl#Kjmv9#B0K+$99i4Eu7zgsr>6iQDzjD;V|I}BMh ziT$tdGi2hSDW)Pzf;@;?^kUVMmk%HA0he?KN|Y{|N{v=5%4_#_Mg-pCkJPu66W#J0 zo<9_E26B%3tE%Rp*6IhuNo$qCWydg3w;z-NhUgY%;0A<1w1Dq4&rQS=X}{<&DYETV z0?b#FAEMTMW+C!F^fGd!(iskB7TqjQ%YaHi+GzEv3Q(Q!-oCV)gy7zpHS+SA)FWoy z^tuKFNn(}Nn8mCl@kt5-0p^J;`ib8YlMoV2JM}W6V}}mlYQ0#xqnNjOb}M3?un*E< zN&P|NQ2l=P@e=L4j5Zb_wFY^@L9u=dtrq`kIM~0F&SH?##lm9w`n)>&%AKzSU z)<=cG;208?Rh4u{tV8_{SFSy;y`B8>sa|ZNf8jX?_vS;JcKlL(=C|)x|JM0T({D2R zbk(ywIbhh--fQN6);{<1DY}+p%It=D|MuI>!4^MQZ}{x<&zk?|zZPAV*QU(4rX1YD z&gS!i^a=5XRd}5ZDZb|1-1Xh6q+L=p24f8^%ig|UPXPr*cavouy~&1og2?f#y$E{lNV(5 zc9Qgyg!TiMmqDZ=?z*SL5n1C7oql5(Th3sf+v~}=?%Z~NwE8Yq$dlN$HsuxOwC?4o zH`^7#^!gCB^IC(e#p#Zb~WW_>S7-}P15&< zh5yz})$^;a(OFaMgsdTADwOm~f$P?dJoTX=6oebl3rVdy<}4VK7@ONy^`@_qH-`$# 
zx_STc*f~c~nMYMlVJtBl*;oYUj;#_E8-1Ydf&~YOY;MRFN4&@Cb41sT*P1h|Cz$uL z8^RSKY^b$fqFEM*#ND0FWW-;;ay4%B{ZUneDME0|iU9wQ{1AVcq5zeNVH$=tLe@Ct{Mb|V-)I-& zX^ffwwn%UbH~F0R=*(|1EM`4oOW3B!22jSlHLlKaka5$SwVXnch3(;f8o&HOGI`W_ zF{(}F;x!QpL(khc_pZe3#-RjZ+Sui99`{QF$4boiCBD)lX(_#d&*O6jmayGKcQ=N| znzX}nXLHb$nOY^!r@1zdV2TfPdPeZl=eMWFmR@WT#^LcnGj<^vk%7+<{{6UHXj|?s0El0A5N8iN0!dO>!}az{(7e8+I>Ov#cK-|E?ztqv~2Uy#B3|5 zyYifg^F&35^u+;1QO(=kwX&xFtEoBfh`SY?7;!1V>SZ)>wDlKfn;-HF-o|dz9(C}I zL3fvc=8lJL=eX=0Nz`rA55qs*jacw%dX_`Y6s!(XP3Zu_>}V(=tuY5UghYVVgq8d&ada#!cnj z@aKF`EFEO)7PUY196xRGZgf0_XT2XeH!)Ijyp5gJ;a3)<>|SZ2wImnV9Vj5gZ+t`X zs^5BAb4uXW70Fdh&Vh>UJ83JEY=B4+r$Hbx(|2gOVU-U6Xq9k-(+GRM_%1y}U;C1` z{8rP$Q@zz%t$Ix`xSn)}(PVh1HbvB&y1jefuzE_8-d6M4-#5}p`?WqL|I5$`ErngB z0&y%Uf1a0l`+J^8+1t;OezvPuIBRs?h_?A>K3XKG!F_*>h4nf)_Y$@~8YZUk)}EB; z@t6EIu-^7f$j#2qo>3kEQm$r)xFx-0ESb>(l#wzk(mf(+E}61*&`R7rwhr-K6;~C~ z)+%renyMTM6GCw>`|_*7x-&gF7AV8L8c4MC?n!%<^SpVxpUsjc1Ke)&Lig16Duylm zfKTVf&UJkC_HBaBPE%L4X%gEfmk;XYIMsHP_ZE8^S;nm0Ugr}ZGFt8KB?n3U zK2lAoI&pH|S$MR3POzlT4r*E8Tls2F;H!}wkDO-pD1f^!;Y4b~G{E6wCv2XKaJ})F zvh{iokXNfi-rFW$>u`>?yxkqX+NrJ07TkrWAMw78xS-cp2cB#*Q#dK?1DzMYnrEl{ z+Ch6xpCeScLkG^iJDOrz7NS>q&@L>)V}xL)GdsFir*6q zCh&U!ux!HbW_K~$c1%@kxh>sLakc43UEOg9!-l^HQuqI9yq>Ci#tVwPm};WT>@ib> zfH2#=;)PM%VgXvId?HqYNs4s9=}csW0ZI!`y|oupFCgGaoR!|mzDjvCbpH^wPbsif zq*E<9`{Wj0J8DH9& zUb*G3EgqGuRe+g9dDKRh+mA|1py)fmYgd(BZa+^ZkkrrWk}*lw zaf1qGwT!U|jnq78|G}I5f+LVnOV-I_*qPWgY0kuo5$Ht`s;-)sl(zHH~%42 z>5({Hclw%@XjeCl&B+CR0!r4u)N}#IDBdEwxy+gsjc#(E{+naSnR51qd5sM^#zAyt zN3=hWzQW*-7Jd{mrwT%o37+6ZKr+yNJF%)pOSf;|o44n(C}Yc2HU>7QRHJOlPh82# z*9glJwJc{OifOhNdq7|h<}Fcljas}LM-sj6-MeQG>&A+e9?Fn;VVTF{5;GXR>D1%w zbFbHrhM#$RWAKK}o5w=6l_BPgDw$QXAeay&#Wx!tkFUUNOWxa~ieIj;+ZS&L`%#CGv-D68k2u=?(3UaL`8A?weBtD?MLMfUZlC9y) z%gw~rKrAcB;o-}lw6))@-7WX{#THdsY9tI%UmHCc=9>O%S@;)u*RL6xye??xx@xMK zF8HOsfP3WgSx&F36oWr3Q>Z&X!7bXWba&b9=-^;&rp&knCZmy6pb1jWYVf8wA0#74 z!f^%m^`TruemrSI>ZxB*Ac2n4ySmBDW5-xG`Z%|N(=pYGEJ1bPAe$LA=$YeId(50gGc8YyFO%y8mkH@&>3oM2#`xH)X|r>bkfeu~Sa 
zx%c#@PMwM}%?ZRPfDnbADq&@YOgAj_M@J#EPe2mKm9?&4&5ayz#Kcf2>+UvTI?7Wk zDh>+YghI7@+;Lh;r5$y?18}le`Sk0c$34^3ApyIxuRLOiP1Z~zWuSp}-{^}Is@dbv z(ARaMw*_uK_fAxn`i3f@niWTp?7^`*479E<_%UyMUK}L|E zm9t4YSqioTIRa?Hzmyo1L>5oj=9Td#;%9HAOLlo2x*VaI%xXU_5*?d`cXLQ4Hj_J> zN)?GCkDR>&47T*>)Tl+8%#(C;ov6Q}RDZ?y zw>N~O2QGW%;hmm-_nDN#1RI4?a9|PH%$l{m|bz+Y*O$VrQY}FGgch`O#$qL^} zvj)G?2QsMa>-ymn4U;A~>3Aw!#KKmL$AHI&IGy2jjZ(ok;G*;Xd9!|#oA((J(5l?N z1$z%-X~KbA4%HC~R250pvay22mYF^4QDig!`~=>{KE?4E<%dlJ)Ct9r>Ei$_D{jyH z9Nxw{TmP+RPxbAuJS?WCc$rk>sg_$I_L4=EL=7|x(d z?caos=YhHm;OF+S+Q>K`=e4wz9&kmMJNu&@KxQ@{4}9PnrKQEAfIOAWUfXoPkod42 z6d|TEDS=CI@aqYTS4f>GS{1fc>XuZ>Lt(A#OF5u9WydBNYH;{E2Or_{JIX#rck@x|b}a^!PyYn3+9t?vJ3Xf~Z5s zr|lkcj?@@K4Kd538H?$r;4uQ*(o!0)tUL_x+p<+F@jz&(x9q6#QRhUF0-c^ug~9gR zJW$&rJyIPxq;~zd-j*LHI8HgogmOG>*HZ>T8`TW(Iuo-_u6%z~Fm_QM!4=;^6KnS| z4B1%J2ZL_v&VSv#{QRYXez9LKmrDgbPTr%V-@N(;%E&MzfMIIiznw>FDq-JK|Ni^$ zHqI+}bob8r7I>`*B-4x!4(fizl)xz`nkg~EXj3t}fd(_c!h154t_Cl*BFyRQWVe>$ zr%lCOmRMM6KeHIs{pf)Sw}(zBzpj|M#LHkvwTWAmvPH?PCyUlbHr2BW%1du9U^e>H z3xjGqcI+sujm%s!)Eh_+Ia=%>194QZ`i9@%Zmn!|weci*Ysqv38cyWwn`EjJa!S}O zkj?l5e@mC#e;2Etvfee`&KH|F89R`y{S4b{N1h9aVz4j~>VAsJO>l(M%dA?~fwuJ6 zj+-XfuSl{8$*G|v+B}B?$Q??GuJ05KyY&}CVX`tzcW5V5z1hMTwO-$r>RaFR1@Pd; z-a3M{lhlTQgvjFAc2Ba=HV`kTy{H?I$$X^>tzQq|pMKh&Y)@8!cR zyf<69X+_4LU1e&A!uZU)d=Kx^#9W!-K$3DbJw1Id7OJzahZ)%mEeE?0lo)7GgMaL7 z>aCDYB|#Uj8Tp0Hgii@IU6E8b4ubZ>E=qe;$`&&_dmh=wt?4oX4b&WxKk)S`ZYR~T zh=jWHh;d#+=<~Xigx4b}vHJ=iDgd>6Ly45~oizN64Lt?D4N?d|f=p*LwEbpd0&FhM zjSfkkP7G+>*2~hi!8N2;8g`l%xi!j?Kx zqyRx0quT{pi!4o?EYf&+^2a%W3zcz30ku&MiROXN*WoQwJ{?=UjGr-&DuGlG=j53g zOafo+sp{Vo3QJW+>X?=Kc@S>Cwe;_y}0+|jlnNX zwl-}m^L>Qy65_jPBEXwORy}c@OB!Ky^Bg{5V~MTJljqOF+*+*24BFIPZP;I(?yzLi zIXgR^nN&8=s#$QtWk~Wwk#e~T6^O7j*>F-Dt3mX8rLks1S_|=~f1sDgwJGg0n-iQd z&K-YXR%wTnKdx1Fu-$O2vNu^Te%{&lBkYD|{E2zLVshN}qF}QOBpj{i8S7fM3l4T` zQ3(M?w<($xzFbalcWU0P?wM=6I@Sj2oYAN`CKZ!OWy9^(QV-DQ2ZT*tKY7tkzVImx zj5`R_A@i}@^EkZ{*4QfM)pio7TTu2{SYRHSVGYCPb1uP zXxcX1Zc=vrcWJbzfQf|j+}vt-?T3;&Gb&LfZ|+vNf2W4@cgZ^DW*_nV<;!{TT^DeW 
z6seSpwRuL3xvgyK$%QT(-vG>7z1(^KvtF*KO)k&qJWA-x#d{x!r2!*0%BC$sU?TDi zO2)-GUf=r>fXihDQ6gW`aSAxR|$^4}`aQ4&117+-sBXG8WzV`s=SJ1_#xIxs;b*6(hN)qIP7x^_o(E z=HTcl<)L~02RA2e+nRehYlrC%@w>iQpZ93d<6-O1eXYCV>_+b38ME5~3ZeL5w)<}N z>K0u--I|r(D~-jVpI888RQ~GWFA>j+inhkzpmsh6l$Tj6_}7th{{FTb8Z0~fw8vtM z)+_u+`kZ`=kHR*x-=HaNya8!FTRmzavL5h?erc2khPw;|e5Dj?B1(qHdbP^*`T#zl}qC z*Gr6kHB4Z&O{IjZ1DsoJ_m}e!C0cvyq|oKAF=&d08@<+R?S?n!O_5M{jdYF++%ft0 zyU!m4L*UQ6b4{-!Ey7Wqoz+_uF@P!}Z%6xNw{i#vv0r{isEJS7?$v+(giaN=DI{|o z&OmbN&>`x~dbc3$JMn@*5G!j%RR%2=OF7~80@;Wz%@j5QI)jH=*+bf+rI-_pip>4* z*`CEva8X|uYgHTZ(_4lc^*A&Yfbm^g9o=S3U1z;rvF~oAFseJ zSS41c(4=f$WNfPRiBC=QnCX6PN)<|aw=p)}-z!`RW4C__x$^NO@*P>%YO@ zKRq^JS~o?*D4~X@PU6L8u3x9hW<5DvR;%OP@r&JZ_=p5g9=5<&YtL#x>!D2V;SR#bbj2Vj_RcAJD*)kHb<0uuGaqEm- z7k?EG$@rw@1OR)VQI^{}OrwN~dZ)j_lWM#NNNq%iPMzku_dlLj7@SFQZA<)%ve|9c zODWXSl$EcC@4S?NhpAl6)vH&t^vYB+I5hM8cqUcC^S284)Kg_2+1QrExPXXPpy4idNzH7&8v3i+S9MCo(VzE&VP}mjMm&)Bg&j>Z4WfsYAU`6BdR~ zdKCFxiU~T6!#dh$zYg6NS&mD`Z1;04VEgnbU|#Nlm3v49sm)79r4~+2aNQg;NTDVs zZdDi=8$UZ)9Ik(6s_TM9ivSLtZp(3DYRl$Z?yhcZ0R1)x?vT>Y_6ub0f{N-8goh&$ z-vfX>mXg(A+`}82&XTx1f|1w>yc~Jb7Eiow2ak@C0z@GPHQnSLW!^0oUdGYw9EI!a z+Vx29+SKM+jG~3>)!pQ)CQU*@&sWdB)LyszVmo5cfF!16A_PCnpMq$ChV#I?Dhr8q z-Saf5>X@L%OYVFNA8|#;oZPh3S-3Ca&aUr8ueC;V@uoO^UNCdS zbM)4NKD1Sus=jZ1ZGZgf0a31WcBzhwEE#XQ*|p1Bnxf$|A|gFs8yy`AMzX1tXs1|F zfLj9YBg96l#s75&DBn~{sN8q*^Be)0v|3xFVX)MqyVYpy6vVL-4mE1|%1$Gflm*@| z@s#Z3m?IM_8S&@vMcDXqDa?+)X3vI|^(}?142O)NM$DmA!n+}kJ&}@`nYs3s=}Bp7 zUf*3xU+lR1fB*sPe*H99moe-}>k1==zvC7YZd?`&`A9dSJTfJt(zo*A*HE^Z?p5>@ z2I1+bc~~+JmF8k7YgfZ^N~h2*XQ3Jtj|RML5I?| zXk?s=w#`=oV*xN{^>bQC`{ibHe>X}FX1MyFUbw`aC))>rBBDcE&OWt|jyg5rGReRI zmv;L(Uw`#gyE~+I+@N;FRNr6u)*4O{g2?TlcMUq^c0nGzOy?u>X7DpfKn?%xhOSkV zIK8^t>=d7Ls{VAH+LEcYcH_s7->{ff4(_}RI4WfBITVGhi=^nvj%*4O7i#8nD(16c z{?w7rWEPQ~8Jw;Z;$(QcN2gA|`SL}@p z@D2L|2`I*ZZ!hZPB9_T}-^Zo2yvGsr-))sY+r6d1)&a4q^K}HC+BZHOI#+h~7&8VM zIT3xMf%bJGNWR4ldmwKNRJ)wf>|*WYGsh~Gj9hlew|W#avbtwA>4ACb?OdS$WCX`8 
z$&Dn~N*vHUr=DGDlvjnP!oZ{+^j|aDL$6$ZM|)#$ZN7k;Vrs?k-umxFj0R6Qnr8^o zoej>{>(Py7dAF_G|E*DY`kPOuxHc+c7!M&8C#h0^rGpiYq z@gM=ay-1)VlJ-Prg|~Oqj#q>CRnEJ2Y9A_Ht?L&HD9|#8)V{av7G80GMY`9=5V5}% zf_cn+Mt+4jaj&LBehM{`zU5}X^$v3y{G>GezVvUdFGIpC8^h-MxX?!#-xfJkinaKV zadO$lt=&Wff_TGqd4YhaVlTO}ay~QgdgOj-Eyab2{~n8mSo99kLtx-9bEq;W;6(*Y ztJZVW3^7<1Cuc&WvS|g!JGm0~WS-aQ%pm~si zx3qirCQIHuK5sYrqBxSmQ`ucO{ZEgr7TZW7BTUcM;J$eYXm!LAFdK_L8T03LW^!X< zS4bh4lC{8_(@Ydj&DX^n7hhWztavmgqowi#;{;`l^SHPV!5l2L`HxZB?=O&p4Di%A z^Cqo_``r^q1crg&$B9KeOr-&a4SkG<5R6(Z08UiCwaM!VDw6tQrU5o?U9m6%nMEpu zVlBhGSky)mrDaGYElArRE4D=Ma(DDIw=LpFQv}F1$i8R*t!DWg+D`mO@yF^xOf!9Z zO+Rhtz(b5BMo3u;0WE`#V|;-C+-y3bmpLN}B%mfcHqV69%Z?BTVtG|D{GTjs65e9Y z>=ipu;5AkA?j>SUccV;UTDOx4F7lr1!1i_Di_td8+Y1{W(G3V4NeQQ=_0fjYgk!T? zMN3XvLq*3j%Mn9}@W$mxzgm)q{IN~uyTv=Ib-F+IBmlvkzOmXlFz{{Hs0GlH+1drRCmGhP0}NM&s|;!=?!|1C>4*S!nuyTh zO+;}cc35X?YgP;&ITH26$`9mf%vfG;@>%VlilrYSj$EcsNX$KSyS)EM5ig!Eay=3# zNM*-ZQfTt8TC82Iy>ar&3AzOhvR3D0p1sKN#s?l_*hb;X(=m^d*}!9HT;s}V$T8~d zsuW>czt;l{kwhIG-=v$N*grcgJco{*;CRE zjqv6mQvyi7c=19+^Y-xx%o%N5Ur45V#`reK&!$`!T$DJRyS-cd=wegwKb`Bwx^0cU z_K(=qCh%Dm37IEpf9j5PDp{Bw|BOR{FTx;(jg2DiqBRpjA||Kc^zsBzwPjZ4qAZid ziz?64dpFg>=8l7N9wmyS26d)#(YbizPU;1OY2i69RR|C*`(3yuj(-d-&0@|9m(C-W z!8(DiXCS^zj{OSkN!3) z-{w&Ki2)*kcd6{Vs2o{eeXs(k5nLyt4Ch&NRyC(T`L0c0bJ+#F=$>E&#oDEu>Eg)ucTX_n z@qsNp_LP^K{p;PBQ6P}X1dBY7a{#Mhc0u4X)5qkOwy7$6grVOR8odOxlL5H6>a%#{ zq+Zte^2;v;CT^X+*xrr|BZDV`(b!pyuOSka5HDobhAcbJu(fWNlRl27hrA~sy8oFJ zD2lR6Ynr(2_)uwE=T}I;a9)r8CbgiTz@n~3XI3GozBBxd>yU2vvO=EZERR~3v|tWw ze+x>~8HRb6ocJ?+dfN6ryFoZ&MT9_{aI;aQTM5)pjK;CQ5AlM!;IADiYWNU1Oc`?l z^h*|^oiHDrcVB?{&-%$I(bAz`)y-GRAQ5&>YI0C6ze^N-ml7tP9S zvE<&pd+1nA#8?uQlQG8xkbh)!Q>M5O<6yqJld5aB`(7_gPR5WU0mL3%gfPNQk@Z=) zvBTCkcW0m+;&Mj(-SAETy}5$>a>5=gktHWaK~_E^1-CwKM6A-bMG+Y zE`dp2KIMa$iO>mHc@J52P)yTU#!e9TV?gh4S*$cDS%ET(%$kD?YH``qF}sHNPGPG^ zuA4DqhK#2a7M~!U%J@1ij2W%uI04jeeI?P85TppQwrzPs%|eR?O^l=F{6GsUWxN!B zK=q@=x0l5uM-lBM4n%@jhotadBp;*_P)g26UJ7CE^!K zdg*(h4>x8f{M~1v0_xd(}XJ+%p-6Whx;lV_q+<}Zsv7XfYGm7T` 
zrm|jp9;0ZZetbQ%V&=SghZ>4S5gr0C92BDj_Kc`}!~qy+QzT_-@9yJ(PQ;sB>LGSa zA|?}jur(?bNBCNCR?SA3inJniOhuNAj!^SIMMEtki$IY;u5)#5!%ajcKq~4+9SL^x z_DFbUx8j721B2|uE{H$oNy#`)2k!XUa)-%?8FeZ*x3)|r9od_LnJa^i9)*9@|0Mt4 zFFtVKyeag(F(8_-N%s~?MKQLbk-JLo?=;{WGBm(+uV)FZSE@j zk#H*ltBLCxbk-W@?SF5Cs?5KC!jLrha=Z0yIUZM!cB&3Y(6DeN4Kk}2sE~Ed#y!4Z z7+;iPRa@a(TQ__()u(*xfnOS`CA{_@%`R!Y?a0zGGu9Hh#W8ZkjJ0A7LA*=){s(@* z{QKA9SptZbuvpAq>;IT10h}IR{2v_s!poV$m$=ZM?S-kT!KDg5b|2(f`yyUD!PRx#Z1ZiFE6-KmdikUN z^MtOA57Q|ceTDeS(UvGAlF1Msej%P6gmmbyx1jmjjX(8ixJe`Bqgu^i>^|29)<43> z9SBid6+BuY>IQ82N+Y2CM-Jh*{4S!Pn`=O$A6m-qux--8%9c{eXf6|mZ>BY6DQ=Hr z66J``lWTyuvf?^x8<9PZ?5eE&uKp`*6Gj#mD|B^qQuxx7w!{IkM6$0%E-|NL?*KQr z?&+R@FIUP|eL~R}%26kR)v;rO6%yCcwtUmH>5&D_eCV=Cb>L?U+f(v|ue?Tu9s8ul zX$mho$5T7p`CAQh_fM3J$?uX^8y2Rj`BzW886zYF4cH>mY5xdfq=apS8*}NhGV2z< zhG#EeE-4Qc*tU-rU1I&;YiJn#!lcQjQdX2aSo68B;GsnLq(Z9TdSuxK!+-9?2rZh7 z>ch@*C-6?h{&AdHP`9|vy#>7cVjXc^9KG6){F2K#pCbtcWMGsEYuDZoY4v{(`loL~ zt1D9i3OB$?T!7??{KB5I@h(?Bqqf4={8ri7Pv0J{L7_1Q;q;NA`Ly_QbPHiGuTBsi zDaTrEE(LtJUB3g2E+AmuRElqH!(Z^p(e#F@=8i*tXx8l50t5cSlPXIVho);_&=WdvL$L%F=Gw97w!rm=9Z z46!0wP|+;eid1@XRnVt<|I6XPgt4@a+i8jwy1*X5evTXiQ1^QMoP>8x@_k?QQ~l;R zzYW=^yC?kA+X0*8U9(?`#i#QKaNWu~O$a+jRsjjm9Q^v_cM1P2-?|YzF5@a#iAvhP z^3prwkW<#LUsr}JheDWcQ^@G6Jpa0Mpb>CS8h25ot4N>w$=~^dy4HQEUl`rQ&zHxa z#P-w)Rj~x9fFRHNr=36x1M4_flD>NGum3#jSZW9nRSS-+oWOj}6yNH_m#$p7Q@5DL zOXkLsv~3ZPdVXKJdku}G(TVP3bcYW93+!KKu?!HXsX9jE$;aUzWVFEE|n}YX16<2e#F+TAh2^R8v*>OP%ffG>_ z*8S%?uO#N6wd!1zC`&pe_eur;sJ!+~;~2?}cPInJQB0)}4GQzJw9-fG9<})dLX~t1 z;xY;how&GRO1 z1v_^>dxg}{C)nvi{S(qi$peGf3pXcHCe<;{9u)-udEi)G?68>H9xCH4Q?{{|3VfT*ZcNgYlomkG`YYum*1`HlWR3p>g?#PvAk zBR@P@z!Lmx>L2DmU!~z}71(vS5Wpu-oUl;Ic>y5z3`8xEiwdZt1QCFB>+%cDM;U9) z6@ikn`6uyNv?#U7913n#DN9)nhJypkyByf?UK_qEpdSSu?=LQxLRpz1vtFEwY}XLW z0zzbL5LDI;EJ_tgH|+*ts0XGJRsLzjdemx9t#1z0E^cHnpUa{v8ja5n-R^4nTU>6-Wf3jb=Wmm3=c ztcDmWNrj5#Td<-uFJUAEIZlf4pNe2V?Ozz47OaUUc-y#T-R1d3+tAv`Sw(eLAJAP? 
zXao728}h{&;nL#yrIX&^uJ@-J*_%*>RqQD z=gV|hJ@x5SzVBo%v*d<^yz19iG)2t$Bz^ZGdd_^~dXCgD`Ude|3~V6lE-pI&_9>JF zJKp{86b%j6Ntj29(FLQ*(@#Iayrk zNS>X%!P=EW0t-LOoVD90io>(+Tpq&{>})E;jELsAA0a1zA>??=iR&aen6v_s5IZK~ z_&A`R%OwKrtkYaomzDcAcp+Le-PpRC5w|Qh(574&?-p?|D47tm-$if&NYjctNp+s_;?#)EP<~w`+p&ORT?|0j-IqNY z#&vN#2rP=L%eA&~>%^qy#~ZWnP+EO@W}MvWG!fDSUjii22(z^iW5aQ z<}wz&pD%gFP`-ndRV_PmpbOYQ-I*X_R4eF3?yj(~GKTux7j2ZswipKH>jiA;ttXffjGDN!Rb0?sUJG%Msz=LyQj>PutVb?zT} zqEEdB?iVSj#NDhuqN;zy97)1774kN`R-K#kdKRe$dnztrE zH7hY%P&$4k>HE~r8`HMNc$xhWY?f`FOY#sD)s*H$jFw=x=hR(rMxE;!a1|;7McpV3 z=(sJ{T0*OKiXw#7+dpe0^#gtc51J*)ac0d*`zYysGFTm&D|~L9^IJ!Gzhe!?vL$evE?VV zNCZ{5;kihg9bT7R8h)bwfBbs(%f6pe-x~sjnMyon=jr&?d{jjW@f6hE`sf0OfBg>s z5@7hLZ|gi+=orepl>256-#rrlNb1fz19blH-%0uW(*_BcO9z=lYU!>7iO7X3l)CQr z|6PM}L6biH&a3e?_*=iN+-A-2VC(<1{O_8y z;iE?ztnNG9-sQe7hlWPw$~N^9$^qiGG55s7wH@Z4|4gak;XMwvw@D@Ie$%_5 z6_lq}`FvA-iMCsbUU4B*m}~T|C6+3aSLeTIYcPBC1rGE6^k?_3dQZ;>`gdmfn=XXr zb4@Ny8l|$rOZQymr9R&rl-$wqr_!_WlQ;hT{otN#-vh>!4(80QI8AF@4gBo+Su^WV zfXcgNFYXPduvDtzCnO>j^-4u8X+OVJ<0ta7QT^%#{3{Q_$n0)mYtm2o21A4=zqPPQ z!5!n@KYUrg@rUs=d5fybShlFDC&MK2;nZ9IdK9+@ji@V}{L=4zKlpjTiWeufr+&!T zu2T4ZEY!?A=Uclb?w>2s7_g=OSxif~?Nf129}k7#ty{H}kd&cu2T#R5_@7?K@4H#` zcX-=VfMp_?<1WU3hCnwiTRr@LKC}Ie`r~j9JMCJM?`8x>6{LW_v4vfDOPT)DBq@O! 
z{=8B*Jl>u6w6;%D`QH2eTTcR>hH}`?4pql}8Ms1yq=wN!mHO@fzL)VsBI+8{^RD9{ zN$%oQk%yOEkm3ik%4v|*PXBt|AKKQhrldb$JBpI-%pFBc3{w0wY5vbIP84qW*AvOP z-f;DoI(R?b)0V+QRU`)n7rei*^bOc`tN*?F!%G^Tb+2dIL*6TG2A3XFfO?IR__LCs zxhCU2t!NF6_|(P^p1kUPkytya9+O;eRGh6ivu0!C1L1#joEx6=BT@7uzc|%H@D0`7 zm2a6oB;;5Z- z(|%$HDN+#|g<^4@bo+l|n_t70j$J)@={-ZFVJ_Lr>3WVr9ew`4x8Ly1*~3K1#}_J6 z1rfkX$pw1rUr*F9q~VV8cko<<#ZoMP78BinW#y~?y-L5(YYkU9M@sd9_m|D&uvC?T zd~pp?%DjIlG0B|BwH%8aobYa5}EDMDeCC{oRsVJulrvV;nWin2zw%nYp*rNx$|jch4v zO26lII_Y$~|Nry&e&?RyET7Nk{eEA|>$Y&KWQQkq4uCs;}IR;ORv*Z z=j7ZvtrmIPJ(Yn~zl*dMxx}|yWsgr6Y6vEAb`5PfF!*6*)~R_6wzmu-jz84caqk8s z1>rkgk_W2P1lao+dYsH<>pt=eQHk2KIiXoUr%7Vol#|i)g79>z?h`qDs7okPE8Pq= zUu7Ib?}vW^3Z>Yg)dZRJox)+CCtUI4FKl#?0Ep^0k;4OF^^ z)O((f)*go&IAtAv5>xR3Z7`RDe2&$yzjci4g{Tm$($S+2|4(k$du(qi;Vl}#M`q|-Shlz;I7st2Q?SG zO^&ME1{{0tSY6p0O?K%x8K-lx={-(+&r1TS3izV6ThK#s9kwl^C|&Jod>~};9amvy z=*WcG7^<}-DO1e$aWNROOy_`U z$Sd&V=pNC*b9=p9(={TV?5|!A-Ay`N>&WJf0~vOv-Kdiad(-FFdlxVP-_YIG6M90wUsh;-Q}Yk+Lo2mAY)2IR zwGMCvsMHkf9N_1wVOXr|X@iJ<3m^@c10kt8cZ>D>1iFxFKAyT&k7pW(tIsbN|5Bdprl8YW7k~ZxxK>MS)*F{TR3eTjFQlg?IU+J!O zSXZqp@=0YHbSHzMDm774Ti!SA$dA=eamb08m2t>{g~^ntq_tP&4AN9UhS^ch z)?c^x^YhC)Tap}Z*arS_@u}T(Hf>mosvE)KMz&mZ6%9u6xICV@tFAR0;`)pBMvf3c zyHGH(+66bT5%Lyhx`Au;`>sjjG%e#DRoQMiJ~j*n*4Q@bS_2jod*pX`+orFI05y z+BJ!uW_BOGnsQnVTm6GJa(>&1{CyN?u&>e!+AWu)?k8((z9yF{m+XRxUia=-!2E1n zjS0hG$~#+QGk31sqlyn37I>EI7?{7!POTKSIP_r6bv>===i0~O*n7r5e~7tMIU@>G z!0>O4y_c1$-FpWw)%v34M!@(^DsebF$#`o#KWqO=li=<8Jw0R+SSzES98D?)zN_(E zoK)H?%$UJ}NZvr!q?8;>Y+{Y`Tm42mFoR0lL1vNEeuGhsJ2|{hYqTb9Xtg-29o?2A z)!5Wo3|kOO)QBjvzR}){$C~F}74Iuk1nOZ|xa*p6NC)kAzHLJ1ablgOHm0t|Fm0hN zu}AlD+m6<5!o#)34Y>b@)Uj2?!sboKXpN_{V~6(XG@$5svGQ@#X`8pe>%Fz^QM!P+hYO1Lnn)UwW=bG=c#04s{Cb9vHsP5gp_ny781K84P*L1A4JK(6* z8BqzqjLsq6YH0W{hzl@ z7Z)36F1uU1k-U z!vJw3N8(!Vkm}!wWkP1dV9#KN66H9yCVeNQG@g)&p}*$w*!pfF>wv&viqWg<7Wl`E z=7FywD@r5Zr3HT23}uYFkJ(Cgv;zRMtOHab$5eN}R@jnuO33Si2j5R!R-Tj8M>y!t zhblZjGrc3B|5h7kmE5W(_m+R5nt%RYRo#$vx>sU3g(~df8N@LdW=V&9&u+; 
z+E|s^V%)&IW6Rqqyep0X@Qz^&j)Zy9`C`9FdnjzFz>VwDwd>k(wI@Z=v2PsLdYO|t z7fseA0GAq;s>oe@jP)iQ-^5lXue0nNZDMlP4^k@yK?E7hEM3P4z@dc|6({a2zO@f1 zY%<-}v{swFDy8aLvg2-9$3f3d*+~#LwzQ!_@%R|cXo9NUeH+{VC_gOM;|_`O+T_nB z?GG$};vjIi+McS5JAp+FM`k=8J8awaXxg|z6I#iXRQObDel5LGw)H0jOQ*o=xLqm9A*W)Jt~)An7o%Xk2~QeXc#4m4HROlsFNC8%yWk0r zQ}y9Rf3u#biXVE@()GI89Afzok7GV`#p$cmw`HL|EIC|DVLRF#PomETdO|4yUp`iN zT}gkw-BpLpgCNJ^8#A{m6Ho)&!Dg)Cay+J$eVeH`-0UoqhP?Co(&qy7koB$dS^8{6 zBjZV~9{fAAIW%lU8qUu+cw=c`NnTH{=)$*Lptaq-b}Hcs0s6P6{w$%F^X1-gxy&+L zmcUKi11wbk3_np@;V~@$gJ*=56ZllJewKb8rKmZ?(AVY+X^;*Jw~cc`hY7V8V;_?+ z+1p8qn%Zc4C&Vy$Oq35zyQ%^TDe!C9GdtugL zmUlnn#*fcAOkGi*0-G#Hx-?Z2jiuzW-DGk}lC;~-N!@R_W=^$Qc9*{mUyqE*vFwxx zDd@c*sLpV|E9P?7lI($vjEaUpZ+}8r@%G2|YnCui@ZBzjqQ#Tlt6JZD5_klHSuwCs zT8yNHFO5_mpVJ=y7yP51?OI80Jl~wTo98>;IAK`Rshjb~w-9M_zcyLN@;lg88A^u} zGye231Fs@2jC`>a5J~21D9wr8aL+6 zBYRKs$`-|avo0qaK0dgou_EXuGja>@LR;9a`)~mwIVeykjyB|>fo+SE)bJ&o8Az#! z*`GB#-SfACQ1*aDR&8hf*+HgxTvA& z(4Dx98!jn>eU-G~U&h%_sd1DiU++PIebQ743xwMzZaPXHAPiVC{tY-JA9Z=SdsW2Z z6jGEau4ge~T!+c@ih?(X#^M;lKWxdAz@v6dogF*cJo(Jl(V5Il62}|A1j?wBETQaN zD{OH0*U<`<;C3|3<$>>yRF~y;GSPS~3TRDq&x_Mn#Jq>Ug+I=7{^>^{9#0*oCs7UX zrL%h$GXtSh-+NVLDnvF?VPjw}Xh zV{VLSwis-f;^^T0fu2h#Zk?ZOQtw`xNHEa@F9>J}WRLcoYsh3q)!11bJ9p<%W%Xie zT@n|ylCdQxY6GWlz5?P2A;r{WTmsBGE>EU_v~5YYdnc9i#ji&=owrqZUvb(xb+L+# zU2=73qddk3lPP33=%YxFg+IOI$>Dz+s??;iMhr6lS|fZ6IZ*!Sq(YHCjJ!#o1fc9y zq!-#LcB!PcOf)2H#voLYh_h8H3YpQY|g`tH$(gLFA;#H&XadPrc! 
zK*8|A0wX1njMFjLqJJ`Nvi%FosZ6ABRpH4DwQnY#`*m+XErrKo-Y`z)mT?{}LDEB~ z-fA;Xk)B4&S^!Vg1pbJT0u{Y{<+T$1@h7%j(q!S|I1Yd{24% z{FHxrQ@_$?l#xO?&=PA{FF4nC!MRrx*{`Qr*K2KSn?9vKwCAc;izlgbV=CUq=+k3U zBggiUC&jswfligF9(oIa7F3AbcAvrJkN%0uOLIamqe^NRUyPJ!rpvzz;yn zqdBB5KtB8U>tV|qD7;@`8z+kIFC#Kfyh|_Fm_Xyf?8UJlq=X+?w%~K`a7WvTa671a z;AxDo(=`zN7XL~RJ5#(Ar?9{3D9HuL+^iFifxVqE76C*a-Q~avk|w5{4d)!1LQ>Wb!P-Z35_${+)qID(GyY6K~uOpYEUmdfXU#}VjemR^?lFsuT_>5A(s2>~qc+{MH;c(-Q9K`@L7 zfqt?@vmZK?#d(#DNPpiiue_oxaDlodSyzkl`U=~pk}-*)5eFx+UXxw5i=tZ?d$>J2 z{W#(>1p*1$>A}#qkKb4W5^P|qEic?!Km9}T)G`BN9IrqB`GX8cD2k#Os?@ zF{SD9Gq6*~qsV7v+RWNlsv)E~Dz)62WT}4-JG@ZpO;Rxfl^BoX;hSTeQiIx5pNYf6 zeBQr|StHZ?_goGq^*9skRq??cLo4YBoKT7ETDz9c@$DYnjqy_3Ga2wM`_1{L%c85l z!nC{+j_4QeMR72x&4;2@XN92*H`x${p3TVP?W%M55<`+5ak*A%d(wdD2Wu#h>L4!0 zl@Pn_F?QKh3C2^+z3SJx?!h9zei!$Z~yvlgUX1Q zZd)m@)TmM1ta|T1vh)HO|9{srDzwmDkOI6Ro&ayB(w+5mP99iL1GXufuD#H&Oli}Q zgFA|~Q`^J#@s1(<>AE*nl%ho^n&di8e?O(ise8XGg=1FoRazxubf4C}3N2h^8YKDZ zHamJ!<&?Zj81Gp#=cv#EOr1?hXd})RCu+1toI&;TIZj=5i;^R&et$lxbdq?GuoCf& z4CU4NPRIDJ)?yiR@00W=&Dp<;atjd*S<+?!NOZ6BzzywpS~g}D%w0M^li<^> zF=$rR{fuRF*aWc85#U#?G!QiowA-dS(f$BGk zpQCWOQ`!$z1XsQUIW;eKZPn8%+&Hvh0*U5Ur4*4Mq?7DT7uycX*P=hlC=A=HwMwN%GSOz}R<#9c|8lbR8ux zFCcilOg3{m_7(nrCKmVcyfYkieY(Fq$X%`=opFrBu9He!vpjU(oJcaalcZ)KQ?Qc? 
zT*PJBxLoP0Qa>l%>HEvZ8FbAJ%vq-|Wdskhw<~Z>9Ron1CTF0Mvc>Q9h+cM5%5N{S zm;6K;?!3xRNwIKI^9vITq|`;E?guYLiqvz*Ar*8R@QBDpug1*}d@l2!XaI8ngpNHRg39~r;Z9ew&dWvT}pOOkIw z&hsaXm(kkWjN+L(SmqQ=Jj-G$hR^%c^Zl*6v)HF9X(>45`h9IhP)FF6f20FOHj>nA z$K&N>)t->=3!eBbdr*3wh&KAUqnGYNnK(~9x`*J4L}?$Lt@pffQabE=gNHG>Z~RrV zL@7!TjExy*_$avLlG_XA>RQuQdz&H&5$*lv&tq80u)58pJjzuMBJfKBunlC0Hy>(< z@iZ<;pVKU0pvUuWun=-g!=|@_XNL5m53O`hfb&#pe&k@k#H&)CS{U0*rIxb!uD$i8 zVhTV8Ou|Lb1$la5jG1*PWBe0awV^$%&K8BjvW)!G=U_t&q67_8b41ff!6Y4bz(2i$y^EKNtWW=0wcoJO<{ zWG1M#st-qG^VY4Ai_1C4Nu)RGGP{fSi7f69@9%Volz5`64GkC!Rt|a98(dx!>kL@< zi}d3V>nNo}TMRXgu@v$_l1hk8pGUIWz#$O@Zve9+w zPS*I8Cb{gfbNC7|Uf8VREWTT7}Y!f_!bqzmFbQe$s4iHYHvYl@)zBNzUT7F9> zDTMsZnV_N5ySQGz@NleJe5q3fH{Nk`*cXcHmh|o~Nq?d0wCgx2L_wQ}CJ%#FWVlb? zQ4yEDd{w5$8?TmLdCGPpe_Hp~jkZyy%hq1+JnGNey%(6>{&`gRsS{JeyQi1x4V!Z@ z!tBJpshvgXjD0?9bpSV|G6M57SBoY*(TW^_}@pn|#b>0drRenNQE zZL&J7f+9UL+=i&F<@7&CvM?Q9^b86!qTc{`4V~stOp04*W6xSc zhBvEUR|p=6?q$xFsjn`K5x^91{rYa5m{DW)de&JHT3g|5mHzMNUs7y98$9{tTl`D& zB;chfr^}(^YOk-auR=$2c1Y%?V0?6oBqj{Tr_b;8y}`2APTRL{7b*kcs#HFTsVI+x zHPhbCZZA)E)|{tJ6L6;jvgSxR*k>Mz*Z6DDRrU~ChoL1*KASPuT$s9VEst&wMXSWj zOe-NUgBZYPbz_Zoa!32w5bN{zKfwP1)AIqMZy9eR(L*-%k42fcIZS|lyYcS_XsA+4 zkFw}Di^Mq-Nejl*?F0Hb=Im(h_)++@bEie{o+$t(vPaUze4epIpQj=PO*fq=&I!l1 z%&%~e?Ed+y*1dP8jB1LgC8VqyHC$!PzIW6NAbxZm{EOSbN!x&q+$O*{Jlxzl_49gb zFMG9|er{>{q#huTr^~x_>n1~=kXE5m<+#0(rA*Krf$Qg(!Fy8lA!x*{7oKKJz4WP< z(kZD_30EVE3`dW@UMtgJLhSd#6#|DNPaZr_?|bY4374?8$Y3fH>K4aY^zqc1#+wx| zuc1M!WN>}fmF9Nl4ONA=PDXNHzV@nkr=1t(A%4oVB6~eDCu0*3BUrkxp)p9`sU#o zE2!#q($F{H5CljSjw-&$3V>_^^c9UqG*X1zhXh@w5jm6UV4{ZUT=oQ7^u}lg_;nv~ z(Jw-5uuYf?{LnU6afGj=wrtdBuS)Ibm)`ijv6I)LF~fgm1d@F#$6Xo9@FT<@wx`QQ zXigRpf)Y(BJQM@1;Q~9+QapH>w*ouM3l7 z*h$13@t49o#s1F=T`1BXK-uQ-v2uKbHD$2t7|rmINHsgXCn?E$@}%tI3wV7PDH`EF zFF!vYg{~1%c9+-DcO3?Ece6sOR8yxy-l%36|HPvA7-b<_x(U@J_wP3=FwR$Ld9+~059yK04Ow-!$W)`oC))p>xBC5q&iO zm)sRfKC<&%7}cnUC)~PaP4eD9suNnx+>p>6i};PqcoDILeA&LPx5K}-y&f{*XUdq! 
zA~?|Crwk*Agw@4mOXHzB@RVk+M)vv8w1;AwH^&Mae14Gx*pLtJ0}X} zHe=T^XaG{jjBD4S(@eJvXh(P4KM%Kb?=5Y{mTlT1L#v}I2D=-%$NlSAD$nVUKmMp6 zw|b`;Ogc_JQ1GQJt`9?{Jfnk$4jpT?;=~EpsGNICl{rg;cUEpUc3cTmz{gRdJOsTt zte4}`=Q}!fW#C$_(2(Eizq!Jk;gOb2o4&btzERAaK}4C8>vd~| zPv$2lgv5Q@!)6kbXerkwOEyhI!;$dMKR+>V4ve3QgBX)*1|Zhb=RWW^#2J@gp5kH)X2V~0FOLvF zILGG*QPUB-RcKP1AIDe;ACSz5;&eiMcX8!HFDIOh$h-GcEd{xPm$U-}HU1hR2wlT_ z?~=@H_xc)!c^Oz$2eVga8tM(D%Tp+?NT_AZ`_b!V#}APxPp$V-2u(uzsmdXLDB9rj z$;)0@Wb+V;M7WLjh*UUqYdqV~(#uQ*vr6jC4+0XugcQ;g6Xm>UoJvd>#N3V^<Z0r;v3OwWyJ)%?GnMbT}2O?c-gbP)LPtxAaF z3u&*Oh-ko(!R^}pi1@1+y|N5BudxJfYZHko#z-~wvb_ZQ&2I@gE9a&Q^LpmRwVYeD zU?Q?%K<(`8Y+*Z93LnW$blQ(}CupG9lt!JZi%NzcQ6ikXrkDOxrb*ed1|Lu(YMbWr z$De;Ti+g_SWY09lvvVvi{-<^8fJXx%aT$eklgB3IO33RTQgz87tT0#FcZS5D{<7ta zs_G3GFbR?-y)T<7+WOY7yMi443sFH*H(~yi-lgvwzy9x3VPxQ+np9yj;plO+P&C46 z_+8BLKJrG9DwnBcSO{c{j(Vl9Mc4}%$fZ?17Zn3)m*H#y8MpnaN#`KEuifvfutx+- zW+{sn^lAV(qYPa$;dPCvNii50gmI9@ig%B&iT**=W!l+O6CLWIywrOG=&Gz2w2^o_Wj5`qNM{I5L!6T z$w_3NgzgQaboB7Oz;24R`-%SM5?sx_bNU4H^e@4=5AfV+3Hcv~*JW+vhKiz=@m#ZT z+mpfzW%6m~yfvaK086J|XS1J(CdRkCi!cp^!_inkLs+xaFZ3>J{}NSqE0pT=3b0Rl~K; zQ+ZSzEc8oKx^-$-Tamky)I<;SBG>>91GK%<(>rIj+G7=Jye`<)52060UT1F`mlX}= zAUy`N{edqr>hwRXL1$y*?2DWJ+aRPg)slpG+xWppVzCNBgcU?LjL;LJn5on;F)<&& z>ij<$-M#njr;n@wZo~X~PM?OE7O~b2D*9JMZ*NRXo#9clN9LB(CKH@y#GSUNA4y?(VEAo7ElXnj~v`cQ=Yz*L>iCK8JN+0W?SozgW(EE z3$WeoRj0iw&U5BP;_iQZ;-mZ+RCD_JX=H?Adny@Vc>V$hfBNJ?s)vxhl?kb-=464s zP!22gV9&va!b8|wL-wP&D(yeK=X_+NQ`kykF3Ni-OKntQ&qJZX9!1~lC!e}-Y{bV$ zJ>)AmKZR5g``cDUY~Y7Wad>b*_gDCLZ-)&ZNm;xtg>#aGT(V&GOFAav))SVR>%!0|g4k{QYgm4!7IrbB9V&k%^lgViMMx5SUytuG-DtQFc$v*mhGAcjh zSv&BI%2(Teg#{M)CW$yoef-z^Kf(5Te+#mUnNKgwruXFdtm3V>TR<{278xMIQh6j!dmn{ey3&i?;L$1-~gC~ zQs{z&o=DV>{>Se~gghi-H6ogzTF?UdZQC}x@Yg;{fPSlA(2Ndf2p1p}&UHTjymQ;O zLnQS-@{a8^tsnVkCKd5E<=jn3!#awhCUsZ*NNe0U)tIAhSX;(@5ldA$}H1+w`LD&%a; z*$Y1NkdZNk=S~@lIG6RJIY3^a4kkEGjE&9L8}zxeG+GIrB?f02)SM}co|1uM zm95C#tO#E8jU;YncVqy12t6SMhnZd1!@()Y0GQZBa>_MPL?OaLe8Zpal6y0S6iohF z>Re)R2*mcun(epXvV(KI8pH0nmbhk2iz?dCs^_q=0TExH-2gkW 
z|6ZpyYf9_)%=z;pa744YtT05kKw?}5vm}3wX=4zVw16iY`$tRip+2qynwxl@1@RVF zE31&1c?Rm8m8AmbL7wdPOCYiI0gy()pF|tBTihZ90iKFXI~>6Sc0qqa#MG~23N2bc zZYiLZ*zJZ$QL+_dZ_d3^)eE576d%C~iky0IuF@VCAu>F|&l=XNmSVga!5n`*7KB0_ z?Cq@(azwI#Pve~U=A6*&rYGl5&6!cip%Pv^H2O@r8Kp`n>C?kA^;ge?e?7&%RokwD zfJqXiHek}mW_nM&699WUMW{UwA3j|4o`)TZ8-d{}v2lU^_w_O4tT96ar=gbL@<1}! zBW1rD^QeQ-4sJ&k0ti6BZw;>=EJMO@DnCm`c%(vS1-sZo+B;z2K;is98Wpug39MDP zz3hF}qE-Fx#unIIdke`(Xpsj|gB+4zwbg)M+h%5TC0n?B<%%oJw;Z_snHjZ?+JQ-o z8Z*X{571B@ZZkb4Hg-6~_rlRdViJb=-9Os|*)ufVI}>pV?J3ds^1_`6GHeE1LMQ`L zKXtcDt^n_L@9y{i103sK8zQcWj42q9@Y+qKc&DZOA1U!`5RlDWHkI%!EA`Ogk=0`| zh(k70oyF=+jPYD91gXToo0Jqd9x`3q=Tl#iPly^{iz1B>JxUE=NW`H$zMjgGwHkN3 z)5f~MT@ELf@*(b29gOwB@GghtW$7AE$ofU*8m6$o-R|H#p;V=2^S6c&URu15yL3so zJK^j-?e^lA)8(EOBZe~T{zAQ6yRbTvHqgIBp*M*2p zuaNNP5NZ$-{Q@J5_t68mman9~6Tu{B$?7+ol_H*7ZQWUWM4BFOpk=+=04=G|wT~fTJrb*)lzt+h zKkj5R%9Gj4-%J$^1aUk99C6l#0aNlgiOqPx;e=UG1P0Uay6Kq*w=1mK2Y?|$L4QS; z2A;7<9={A|Kn(Yze-7I?uduKST1+4?{qDS@_lW<G}K59(qAMo=~s_GM0LIMh^R z=$o)%9Z1mWyzY7>+Un4hnJu5AdcZ*L_qRS!gxEjZg|vpgz%(EIPSiP?`!+zf{KNKx z?=}+GLjKWI@=JhXL}F2s5A-NrEfnNryYaU1Z`&+ylw-2!q3KGs>*bvT^)D^^K=(+C zTLK~$`nTjT--#CJnIELXW7Mc|2E`DlB_t=0<5)>xCRI?T+lGAk<(HwcUwrXJIHMGD z+;;BTB_a)kuVbk-Tw`1U6mXwG=5jzg`8EL^ff}s%3WhsslT(fYIur1O>@=LBnBeiS z4aYWa>?@mMKITZshh-w}ApG@vMWz^sJ$T_k*dK*6lCj)yO7iMpz^&-$=upNZ#*bNA zN72iNDCn`E)=DWL3)X``&uSrWt?}x!KDhpnhVCzm&I4jcM7*NYNZbcGa=OgSxgIJ= z=lr~4+9m16jma4q8A6dngUxm1=1^`uz^aLef|f*bpZn}`e$$fU>hnbogr>Veo#Qci zG*xHFr=;?9W=YS?H^*iseJ8tCir-*Zmgha(*}RWHBG^ik{4=B31|ksp!7{?;Mpg!s zFiqwW&Ok^tBjWkcQR5QZstCR?6kZ)2jSFcr*Jy_jdmSu+jhhqG1P0L1Xuw(fZpdW4 z3N2?EW7~tlUUWUKn}^#S={mM`Yo&)jHT46aq@{Gu=RSOR0B|W3colyVAhOw5m;Dc8 zmcL(2{dkBFE^~BA$VCrV&_JqAPu%)&cvs_(N^MFU?Gu=u)9MFDnbSL zz&y7ngp}_d?%g0VTq<-ae`oLPyX#eP=hOJTnca*+h4j#}Rocq}6*niEXRN81D*`NR?oNJRJQ^UeBcPHT%^o(Vf@t;_3Vhjn{aw0yCDGM; zw^YDYq5V`hcA_U}ie%P^w1jYksIvFBdZQ1QE-N;kzp#W;R;eNs7EjD0cnQO=BP|o8 zwEF7_;LorC^F7W%XDc623y))tXQ~S|P4cdGX-D!2I0gMUxg1bT9Sl)ed`3pUv=RsD 
zurQ$5B~gUb^)dB_aD19h0XAuOfNUvWOG1*)xMuw;@4%NO10s65%8|)?`n3Gw2lS-V zL}MtDW;P3nA=SY^*zy1T;OOR$#-)hVK@}N#vkwoM6`>wwY8LC z!ErNS!aK-752qvV=5hdpX-*5 zGvlINtx$X{g(Fm0a)p1B03-T*en~J*<}y~GOh8pcq~=+9OCAwhXS7;81YW67K;Yx7;w|53@6A^*B7{+^Uc zK~E@b@t^6GY@49;hf^xZ@~4bM%i0vmNYGzUzkC$c2HUR=c(i*Bv8R|D3>e2~i@3O{ zNQSB{sa_#-Hi70vF9YayMd1U0TvV@^ya{1ub3?QUd(T)U zi-J`BP?`xK!&* z(U!o#K;;}fCGyrHv52`SXgT={#m2$2X3e5i@i#ORx`0B9!V1fEO=1)x=xrL}LtSss zcB-T%?f6B)me@?nmDl7*Ls?~{8gBBy_D5!AKstJy5PFe`m3->KD|TRq4knF>!so1KxbrZa zOhK4u=`OhvTz`YHT~t(*_%w-JA2eI`@?1lbOWe3i-ua(ywjO-DR3Z$a$35g;URJh< zkf4dH^jN(9ErY^OA^CS`;jv|H-ypC=ePvl#DV9RWJSPYA@6u%yZfN9$yZ8^g;=qGa zyV|p7kNI_hBtcMRP30Ps;5A0*t8C(y8St%0|M%COSKhB3{hC#c!uQ-P%|> zf@YQf`-j53_pI^hnTibt0pSWH7uXv6OTg$CSOl}v;kPQiVDSjw~AC8Q1e z-c1`YXpqC>tL{Jl{Ih?_IOfTD7rhFHkYNZpP*PMB|9W=`Wd=FbdW8sK?|M44KujVa zkwy9vDf*Ut#!ig9L-Qt5Jl8?*q@@LmVkm@-n zifTfaD+^|N+K7e(l?{Vi)Y=7$UlixRW5<5Cc{66bxL67=z1^L?t83$*{GK)EwrBv_ z2LfX5ac05%!$*#&j(xZ`^c8`4bn~I(+zb*{hBh+Ky|Tj!%h81bX;i-ZsSUW#4ocj7rx5_!;%+*YEZA4$%?| z%UWNTHlblZ=_$PD;Qz-++9Ncvl!m3KE_o*&zx;AV_J21+88xW*_uKeCBn%wcZ{xOY z2iQh)33jBcEo{x+-rklK?ndVpgnOyu8vVTAfp$*{uh(f!i{vLAmhL(zusa2I|9OoX zTN#a|e}`Cg+bnB@2#+EgxQ*+v_Zp01cPM-*ZR z+crL0NYZace@v>A^w%6|`SsVE^0J(NI`YewtqGm}x@4uR3O#m8pnzfj@EL)BqiHTle>*Z0$64TNwgg4=}(FRAj1|rTj zB>_^oWUu?s?2Qt?9NMrRn2+JD63|r}0usGK=LZUB@`MS{(pbK_ajqvZ+~)jcd^X4F zO4k=2M%c8f-Havm*LPSiaVMH1@kvRedFYa#9NjgbYj=Pr@#zGiBIK${@M=nb`Wumn z$fJB3m-)HEeivmi;fIDVU;4nL_(LqK0anVb|GV(M(ORq-?Rci7m!g#1Zs!*(w6X_0eM*va#_7rnItlVk4Yu1+ zf9-ZR+NIMSPauQXe{D8&S{*<8n?FZ1N-$aA;o(8++VL?P1VqwwV&PISF{(WTT1D$Q(FJy%hPQ2Mx(7u`0cN`3CM10ILj-qyMg?;6j zbtLeT2#FTX%SUqUfqf*cHCN`L3%OJ1>Ph2UPOh!kSR(CY(h`e(6E=8y>qA(gLL7hJ zPZ~YEzs7d7yhjTjC;AbSC&H^NvUNE07PkN*oVuAj+W3LsCbeaW^1JW8lakFJUpAI1 z4`5^edV}0f5Fh+KsUflC$?1sP^)_2c1HhW75x%o7Uc4AQ(6~#NM7N^y$?;Eqi$bq2 z{?ermX^)K_WgF=0TQG#Z2;7PYutS=Q$p#irsrPDdkd^1caBwieR0_!+9H4o z)y|&NqZdxEMaRq5Lx&D|EMG2-kHWf67$vcVWN(Bxb2*D*aJCeTA2lkKHn6_GT>L*8 zZd9W_>{`ThG^WBI$lqmdSB1CD1F!(8ap9cV3jPqs(-SN`+!m{J$Q?AscH|FwwKulE 
zeEG7}!UU2R{%29HrP$o;7P!M9c3_u|9f!l?v*dIM8EEz-PlJUQ(&{%i7%2yxK76_- zo*kU`;9C~{TQOZy$bkDN7~`^k`&C;awOD7628-hob4XqSN+o@K`Gz9#1B#tox8id; zoMMX!!2_3OWz&pDlsvHY2-+M30TdMyJm}=o*Ev)zr=mGvUVL7nt+W}l;wd}`U%ASh zA>T3I44oxWE0A^?390vf7M+I9M!QwxWJru8~MpI(hM%Z%F} zT{ayiYex#Hv6+Mqn^vqC8j=qXIw2kQcxnYUm_&K#ew#N36I~{NRnUZ)4lbZyzAT4$ zO4#(l7r%c@N5X!RhQ##rzN?l{I2%if8iid5X3BX4rbGaC)=9X+KV6J?TTLp>#I`6V zj#9H5T%q>2+rdG_jkZ;llER&_v2BW`EvYqsuxHL*Jfa|JfkvPGUKlPv1ZS zN3hfHlREm4K9X~aQ2ACRs@oxk8%?MQO}4h~$|lFFmom>S@*5;HgcPFzA+B*6l`1-* zkaU|GlF=HDeI>wZ?=Kb?7q_4Zh9&Q$k_LMkbwpB4bZ(}5y!ca6QtDo;hExV@N0u*q zM0bNqN+*KofRc;@ofA1p@BkjeVA>}hZhtBur!)oy=Pp|av53@aD@#wud4>+(NFmH8 zkCfWTx39}+MD>&jv9Yn+oF2Gmk_~`Dk5-E$BU?`NTj9N$E>$>UrDi6Mkf9!e2+hBb z4Nv+j;k1{|HUPhb%a>{5dM+IpshjmI0^TmcAtpbBvn63mU=6nG<`TlemgiHfxYYiDF+ z3!)EH5J@@0$c{{W2guRJGL9VO*>IXAM_Ks!m11KtT^-kV&cQQBc)=J6KxOxmik0*Y zi?RT)`s7KI9^$4m2zkf0q@~Yo!i}3ZWi~|OVXCGbRDe3o5#Er-?Zbx;H!1g3tEKuP zpdQJxT3Nl(eZ#{aAIts>p*|1j^hAeCbL@oEz$dU=H-hHhz4c^>dKao6i~YSGu7u304J++oxoSxK-m^!@#PcnBk8m}^7ur*Ou9A! 
zOw@BdxYBiRCN91Ea0mIET^qzsJH-rCgLCYmF|V96UCQ&PXt$eGTxE^~M}VR6em6t| ziGroHD>*3j8i%=Oaq}UsceOxYE|lW?Xth8H`v!_teDC(Q`_*O>s5W9Q93Hw9EM(*LwBVHtD^sF#B`L;;ElHAG%OUW=CU;G8wlNYAp1h+g|$ZHnRba zga}GIu-NN@!M%SyuRA(PB)q1SDNfkMH3Z8~`D9}1-naH!7(}!$Y+F=JO8gtNS)Elf zY63-M#WpnDLjKdVX(wLglrT!6zI(~4Z!-)YL#Hxb!^eTdL z?U(=d+i#)Yxc+xlA^OSa6`}2cw>>zu64$6w37%nln)%?bJ_Y2ZqAIVPLnmTK1TYo% z(ywZtB&7gN5ldttfzVi!Ol+EgU2Oet`Y0}Ry}+Jh&80Z6e1u^W3kka7aPz`{9jl^5 zX>@68#VGk8DWKqZL*N_UX1H?y#ZA_-HnWzmzDH9+FY`%Bsiq_0Q%ay3OnF=d(J>tCY`bB7#RZojcC~PhkvfIIsr`s% zp2DlUSBL;G8m|ElheM=SsS)rgPIxx=V?Q68s*@g>n`8PmAvFAL?m699l?trWmogp| zuI-=x*vH02%=rv#wGHLcO3hJ17BS18RKqSLt*LHAYdVT07Mz6WXtY|2LCkS(`|sAm zqKYSX+_!CU-ECa7Y~^pYW{&95-za+L<>hl^DYF0@YJY?@HnMq#p!^%jw++_Oux7F3(H+$6P zN}k252X;}wk~wU(a-X?gU`1JhkVpM)QMEz}#nmRA8J^%3bnOf*dL<_!mv>j0TUl*I zqeEY)1OE=y`Qj&4U#zsDYAXat>X@ebGLtS;SvB5z`QuxmIIt%(82IM4i?0$a;0pD@ z;tRDKHEE$C57&IX_wdo$UbnHs$-{)VNrspa?(_GT$@+IWZ7L@Hs>2@Hy#2n)PaEhF zSjUg;e`m9>L8^+0jB*d`U7v^vt2eGXKO17Vw{l;4cid0lBn&x~%+W$+O9NX0lj&Us zRIZukihNdUTipDE@xU{we++zib*QhBFgV$4L@PhY0weC(S3etr@2VE@sL&n>L**l? 
zr)qlwqJe~5CCtTtyHn?mV~14T@e~=(P8s~hUf2oLP`-2(>Jzl%rq{S*!>T)Wr@(QU z!15?;7oAix)&G*YV8|@=bZ?-+_Np8B6WI|x8uX^a|FNNxd>eZ5e1h4YH9p{1^#LD% zr95arDk-VA_jQ~o&q}z9YuxeARd+m5csp*B(=sqVmFNocK3sxOa{lhCv(>IvZM7Tk zidLEIyi1T(sZm07e3^(>_(W=aU}Mz>CYQ#KFji|m07-(c60U+tb?bk>k$9-aGhO+U z_L*MEVBXAE$S4TgN2_y%G`M$MOJ8WJj@7t_&#UesgNe!QU{U=E*v6@iSh0cd?Hwc3 z*S#cDgQ`VXMkCYoXYdpVH>}E;;QSH1TY6ug~; zuqp4Cm9RX{B*C9d)ctot)mtI(o1%TvLk91H)iau|e<@!o6R7g@S}J@){>Ks%n!Q-Y(1_a@|}D!nvJiRbRvG->X*peNp74 z`jjpv=hPPPqP-~>JfY%XN2oqbyWESywC;oT@z(D7 ztVG!iV|EVcEf=KO#6nzTLHB^}sm@+o_5Ql!pqtfh9)kM(gU6E1b+?Fj)m_fBXT_qQ zM%_v!gHt95y%h<$1J&yqPsOh4lKUATV^IdTl3r7N5sDgk|BuE;8Nt^zbVZRL-`!8n z9#nTkqhr;=lwM^RuVg=lbj+nSVi(<8RqNz7L^2Q9r%I`7dcxRC!5?=5I&nUR( zM8hi;rJDBfcD(beT#|u83dFFo`wE1ulBd=Z_7157B)Ui)LWto=Te2Uv7QbcD#job~ zRD}x$|M{ZjuS3N1RPu$gEO~#mUmuU36W-FQ;ODMuIU$e>M66EXr#@beV(heG30AW- zhiS+Pp^1J8EhaI|IJL(UyEaJ-ln3&K^PJ~_N)|&76LQqNr~b!}U--hnTgm#+uJJ4?2DNv?xvNm>R!PNz!wr`^no!zcsHm`360!mgqMehXkV?$L)N3 z{>|HGOJa8L;qKeM`gkQl@3fJ#qQS^e%fzPVi~d~KYS8Q2Pld?XBgmZ0uIg2_{Zj}Nu6F8u2^AH08th1c+rkz~$)ajyC^{--wi4eO-&fOq~g;;TDd{;aPO z#kD-)Bb|6dcq1!cK-bzUsq+|TK_C;I{yHXCJ^Co6`9o1ARqs;_vDl~e(qAoxdRo{Z z-5GXtfn+LV&z@h_KC1wdn1uzjUNrvWhrIKH_E>iDThp-e+RD;E1T<7axY)@ITzq%K zMd$}1mfulWSlIn&n~$FyqTdTarplvzVWq@Z$R+g_8c<^H{H0J{iJooE}>3(@)L(cu$?Pg-7X3osl> zkTvBQk$SwtuWzE?{%quOBI`ho>HK>8bv}5y#&70p1bjr?g;XRlzwCe)@`;HA>02?g z^SkQ);y2a5nB}pmrSr&V;21&_E<%54fI3SL2{k&siYNTis@K<`!MFS9XX~c9)Sw^P z=v$p%rCW*C*JOa>WyxH;MsMKwyjzP~?9;i@&DB5n_MY*}-Hq~~3^${=yOq{(L#+O? 
z%xK9{M5<%nHti@MKNGYtth3*!eS3y#eo|pA%~gFo%icGirPj9k64fn)U2A39g6tno}U z`a`s1!vaRruwg-p2{fNaK0FeYpEZIkXoYTpl?s?#2^& zmO#OG;NhgI4=5B3CJqa=?DB0Zjs5nHbs!*c2hP>UzA-!ApAN1>^1P!oJzu|my-BML zOD6$43Li3w&OYGshXbxUO|Y`sPc>Aq6@X4P7HU5|9>Q&Cfoor6RP}4#`A)$x4>Z3N zlr2x4HfMVpNTSKcgoSWV`y+LPp1Hf>XNSM0N}K?_pTFd$!Vf7Nm3!KM+yw@MbPYrE z2&|T*QPEHdIzvNuX@g&_HP>jBT>y!N`2akRWf{i*8fy*rN8`i=@2U)afEM|E)N zyJ@iDuSZn@F(o5mPP?cu-6$>)4#zpz|83yJe$Sudt_2klV%X2B51oOkJ-QE?Ul{UY zuA`$qf$_`2bjKAxtsL)FnkGsN8qo%!oBye<_h+q;P?F@JL>S-X`FH7B@ge*?%!XYw zQ3dnO`-o=lZLYd##n{oh8}UK6Nr&4CAF)Vp6rW>D6%eK;K|gC_+eeX|#3K+kVk6>) zJWW11pI#=>PN>uI<0+cm=i_nrejuE=1Mhj)PW-bzrH4d`?NQYJv%aE%4b|2XdW`eM z76^B)GJ(;8?bKQvh}h~>Te{iJbq}S}GU1CBCRh#z>cvz57_Z8Fcd?VP8BuA!O!+T~ zjv1v3^i>Mf!X_(;PFb!Ce)!ysPlOm>UWcESmlc?#eA#KIX%G8{z!=CTe8~m+TpvNM zAhDVxAnM?Vh#nlaD>QOXtB!uXW3_8OL|=dY?Nj6x149oT8mks=Ujdjz*?}On=tmjj zfrmM=xDu}F;-~S|A~~Xy<~8rBjqk6-BZ{$!Z69LUrGpA#hn_R>aio$0_*nlWcoR&a z!tgCbuj;atin8$$oI#y<*kGJC*6?2~5u~gs`^Q%~y}Z!Mg)a4wqp2I#)bjW5>Gdw@ z-Tg*hlUhqix=d4w^3>dmBgLf*8-1dd&nA;UwzW zD`wa2asKUtN>XIu<_4=zq~xai6>i!%ef9&PE*5fFm3r{&toAs}o?`NT?V^bKbIqDH z8p=ojj7oJ@4r+}rvD}QWzSd52DOrZXNIu@I>|HIBt( zcrGs!U~KO&dW-O~Z0~J1oY**Kk6kU(!)WdGCYf0b^y|SV!o)-ne%Pl zE9ZXnm|4Zb{wQ*cDj_juZJ;I-p4fWR-~OZy4hiYlbm}5AF0ks=JZcTWVi&{8z{xsU zDChB0A}S#4GXXSUYq(3}J`4KIjjZ|@)_x{KD)N^?kt>wE!{Vy%?5}DA(%lgyyvdAj zsB?*XE#nmz>WZ)Hx%|;+l^V-AljuYDp`5DmPzFi@6Zqlfeq{n7TPhys_r|rZUKPOd z1`bY8V_~C5GUX=*EzEQu$E@nd;S5XA)E@Q&P@-l{51RokS&N^@eO{-5fRwsVqNDZ% zkBw(&H7PUj-DQWb#(Xwp=$qGXF5bFZ9ch;NR{A3Ej=4`OLSMJJ>)lY%LbRPV@RCN- zIIZgODh$R{o#nsiugN7$vV*w1P5Eq*h4`g*t*b$z1-h#B(%I_VeOsp`YSgN$5An(o zRokpL!*SQs(>tgnQT;tlarJwTLW{sI(@hBeF4g(y{XPw_|6i)I|0?%BDN(X2;;MXTglqVtIy$vpl)s(xTJvA&0=P5I+jlh6H@p+edrF zSAQm?=%{Ma=jubo7!07ZdOJK&h%D*(xf8oOXpSQxtJ^hyQpP8HX78MOf(R$#F z;rELZm{@Gsm}<1Zz)HK8-gWnpBf29vmY5@S{Fuz1Jt`9jxe>xUQCq3`3TjuC!BzkH z;jgt{Sp7$eepM-a^`BiBu6?4_f27FzD{PO}ZC|mPH*cP(aCSqKoJQ&tvIu{gvYlSI zz%#c4FAfcb$Km7ab3_HAMG7>@w|R3HY|1hC-3po|EX0MI9z?6M-qht7X~1nlg_^ 
z!n;^QyUaZF^T+EjmU%*~OM!d(dC{{KTjdVtgGPdv-_s~wKnYaf68PT6v)DHX+vvE12 z)e=By1R<$zlGgs9I!bseNlr8(F(vzYE}wHOMKqX1l_i%Ju<`1$TQTLT;Nu9Xtt5@KOPHmAIeD^~*6-x<^gU72$>XL}K{ zc4N0oo+@%8Oi0f-W>Og<$@QDA)3xz!Z1C70Zc$dSaCT=pjy6*O=+A;LRR>eU zm*5xm`29Y~T+U;CzL4CvBg?H)-N8;`su@qYbFa<) z%L=?cIM99)o$#(|hbe~qTUFJ_D%4bMfL6P0cn_I3%j4ucmA2%^%VXK;fzX(3juRv> zEIhn_^fQzLR76njMhw;ZB#Q{iyVFroMAKg7%bpD)4Hk_ng6XWFw;xTOVzyEB{ozid z{(dV?>0Aa=!~;b}?p!`YG%H%HGx_jhL#E?Y_WChS{MI-@)<8wGdq;w1dSrOgJMy19 zs-J(fM3;7XVh(p592NC?>YH-3Ce+G-#CgX^Bh$N9Ls?f3Th3cr36@>>q6Ce=`Mo@s zefdv_0F1Es-|tvZj!exB+cLC2+$=wrkFQv? z;)O`CEDd}S3FqqBYwtxNuY{dDNVI*_QdDlyedgO6DV|#>6MMYjD(ZVriYFplGualunsu}@rkFWl=QkwFV~M^$Td2}B(6 z1}Jut3xTd@XV4K8^N_t=u1)(W^D}`1;&uAsh<=JOHDEOHCDQ=CM3Fo}Lyh9d%<=(bE zOXvF1NV=hCp38qqq}O7d)xk+-&-MGoK@`mpeCg)6{&yBxP_v%UdPDQRT~yA6q5DJd zwbmV~zxMFFkJkM(Qcd%EdFF3-C_J~Ao~Kv-Z&nk*FAv>Zs*=+YG-TY z-TznGl}9yoXW`IR?Htj}0X#OAf*cOa7SoYw}lk2`}%x-*WH$ z?!Dg!8$i7Az9KdGI!NqR_p%Ryb5iSvaUzS}wG?=i-yK8Z^_ub)dCbUNNtEs2^di8; zJqN9SvX`wbE!_arNu41_BIx-v`&UTNzgSez5|A1Ah{YUJf}6Q9eRLb=nF_A_Ms}L_ckynC`CfGEbS%Q z=n-l5r9UM6gI2S8?2mCRBZk}Ads|z@nj0I6msAIUoge`3Oqe1O_)jIbl8z!JubGMfawE~LZ&7P~OXF8$5rKGk8m zai*Y<6Zk<%Lp7G4y}ynNh1Qde;b1ExeGdm@AW#l)b(Du9)`r&L9pM)KXI|3;+L-~fcHw6o9axHzy{ujUDmRRB9v&-M10a;2K@o~ zJ7@7rF{dZq-I+cT@#WK_3?D|p1!>SnowHKAC)wS7r1*D1^R&S9A0psCr=j)UorY<@ zv?r7ND7f;z*Z|9r)$O4*fr5x;P8Lg0E#J{ybL4Grn5!)(mdkH}{HFbN=0 z%(M-WE>=NWRMn^^^+I1xDbNL2Psl#%;;)kruw3et6xSm((!JO{X486gu!bGNi9)vr zKmRfx#kIEoHEVb zqG~{}AYvfHa(x}^OzESuKN*{^_yMF#JEm{R8IEfr)qIV=9UA7cS36Z`I7c!|kW>=% zds)^Y##F%XxILk&C+U*o-_BV3{+2CU(!vjRxS(!mj3)8z+KBq2Y} zF@NYen$$OBzsqZ2q1B?AuWXlF1zPyNvs%B*YUzjb=J0$ke3SRhJ zKUbO>`;nR1>Eox~TRhutj{{@Bj@x^&H9uRK@ar1Wp=X7CXCktX$Qp}>M~{q5&GrDF zqI1XKUfT-wZ3qhyJD$6K{)hG)rZv+)a#~3+p^7Zx2i;(mI^hSFL5)PF8^(khd7jfnr z*Yt4pdE_m(AmgM$>GWbg>_7&|sdb|sIE{ET?=AE~wdZg3qjM*~F7=}AYW0bPg>KHy zr)W142;3bV9eL*FzTg3F3*n^4B__H8)ei+(3aDg;h8`i1t2?Rx`AUPIujt7PEd@5n z)INS3kRwplijH%{bCW=wz4O!sU~LOo??9dsX!z@+*i53Zrg;_?ewR1bVjiWVoaxS> 
z1II@N__mj#PlS@2i&_;*Wfq(_89vg!gwNvqz1lRhHO!p!DS`i`G2VfBmo6=;s;aUM zHVQaaQc{AE=|kp{FM@6Q*kB|+)(U)uRsZIT&U0A(48Ib9Xk7cvrQU(*VKd~H{|?m3 ze9d83K3SYS*gr9{x?gWF){40~?auJA20KU-`VmA7tVRvu7q0Na>X&g@>P4QL!2fmV6NmF5Xvk+jE?bf|razCz*6)61mg- z2uap97M*Y|(<;=7XF%fNG>Y!6`PAo5ib${|JEeh~u&v3-$)x$W48;A+AtYi;k<5s{ zq`!iXv{z7e9}!I%))P~UE+ha+X9d_`@Bsg~)Es;VgjXVUca8}`?e{8hI2g=Ao%&gW z2_6ITI-9BXfT=|yR)pr(UJ7cY#==5yk3DUNqq`ElBcg5ut((9Abfq=6Hk!Q}m+!P9 zW+8bOE?9x%t_BSSX$-_hA0hNqp)D{gY^Ho`|G+>Uxm#d$P(w@2&CO|;BAI$|S7#Tk z%$+zowtPF)4kRWfGJG2w8x<P6(_O7j39MokSzw=H zcG8Na+Nsj84nx$21Satv`CJxtkKQR-0v`m=*;;8d;%{th90ep5-B!a#%fNU^Mdy2C zW9>^c1C3ja>~+xuDb5=|v(F$&M|2wT6A}_c-Ka39GL|w-x)Uc(q=$Vd-@&uA^hfUj zcos!y&tmwv9ZF0SC|Vd)WIdlr7o0PJ^B(b@Jf(UHDEXQxys)6&8zxNPUQ%+3B6L@IZ#xk zyeB^%xu3Y7uG_snyoP`Tv^Mz^e<1F_V^&~Dz+Hi90vmysLF<|i%xy*)Iv zxvgz4^=E5dUH^QtTVs8N{A750ZD_J05mC<$NCPZ{%RpM=1FsL0+7KS(I%|1P4}wYB z^3XvBT5u6?iX~_^9BZyXNW2Jx=!g2LUuEcifKzr%GebF3+tD3MKtG9S^T4P5YSXo# zdV^TVvf|XF>B6O7Rco`csoE z{XnkLZYyI){J!@%Pnv7Ar64?VL9bJii9A}e{aSu`xf|UN^2}7on`EN9LV4US?Gs4Z z9~H&OWU^<&!~S%4mYz8vgXp7+K?l!jZe%q{bt)7Jkq$3ZJ}*>0hiN~eKx;*kh5nS$ z+EW(3#{L_P4v|4)A#XQ&{G|g|is~A5U*vQ(z1dptr2ex@rD$nMNA|MRh5eY*HT`2i zlKxN4J9(m3lF{IUNE7WdFgRwjQ*#vthyI_h1#XJJKB#xwG-nm=?R}VHl)rqa(|wWY QG#%r6`>iit7y0#n07|0G)c^nh literal 177867 zcmbq+c_5W**Z!m4>YT=NP9sgHq9PHI%$nO2GLxaf5Q;J+GIcuDDUI8dS%b)&Aw!`_ zX3Cf%8W2L6$?&_@raJHY{rTIk&Zlh8zMuPE_gdF=t?Pci^Qy`_W=!Ro$}r3f*3PYJ z4D*`_!%RH(%VhkEI$z=({GZ7Oly+=oM(KZ{8R4D`^C!dFx=G_i;OAP0>zWN|67_Y# zT$}#pI{b^pVVz$tOp<;j{`)^WR6Spv6x=Dc;iA(YhaWGoR6ZzR`aI=-d%diP|u8k7vi#Ay4(_ql4_&=H(B5rg>m z>rqwE-aYOA`S(B3_dVC{O#k11vkm3{Kfk@`Q2zhEZQT@q(f|Fo!c+erm%Eg|m~%aM zzPgWjp6jh@Qg>Znx6E_p?)=Zyi3N*)-FLn;-M%&HaJ6W`+7x}g6f1-FjXwr3ZJ{@S4+329=u=Y z`c2Hd^mfg?AD42yIyL*iJ)QR7IR8>^ooH54EI&F>QghsBr{+Z2=3H^_C zO#Ms}x{9UEvW9z=N`hs}QjdMW558Zh|Hejbb&3gwX|#Cq_pfO!kx@29;#D7q`|3hI z_*cg44Ob6W);00gEKM*eNILZLz}K_?ImFyi4)SMtu57qGpW%_q!20F+RbnZ|X*RQ|KK7{U5F<+0LCix7xnSz_$D4j_|!HR(GSL1rKg^U+Qz< 
z-t}c$&VG!Hip_s?YT|m!%IMNetr)$A&xs|<9d9$#_w3p8-0S<*nHlWy$Z>8yJxeZk z&yL03QgEqG@0GFB&I$n3);W0i^`F#}?6$5S7 zZUIIkoxXlDjy7yzw=XA~f*cxq-&mJOwMMJs*9!A61OBbC4}`Xyn_1-48sp3BCvtH6 zWx2smpR-0s{G{wH{AT~0<>_Y-dV&tWa6WEcR*C$mOomfW{=qA}Y?h|bqytVH{ zMn+!DV|0Ei3s)(*8y@UmAoM`c$fY|aZ)Jcn*3!J}&ThY-lOXuAIA*oO*hk=sa^D2y zhnrQDW^nt9nX{FIGsGm`p15^(I-_*`&hCIlwvtd@twUXq&)U5!S!ZW04UW10TA(BI zo7KOsSUoj6Ziz1^OFWwOao*WkYLRmaCs9SNyu0ovon;~NtRVYvt+wfwG8YlL|0ARw@2rAwMo~biL;Dr?W=aP9}YAI zRNI!wsv8&>G#Y=u7^5_A_t=$P8KKPn_ce}9K`b_MKoervtWxLTCDHH+`}XMVKQp(p z{`4$UZ%r*0TV~*+r(}bddVU}k8_sKAp4#ep9@#eM#{Txwu`_Q~VwtjZSbDk?b|TGu zX~xMuo16*zL>$}GniAUXzcGD&k(+|oPkf}7?jF?jcB}HC`|0F8y&9Wmb5Wu z8-Hs{Dd}(<{U*9c8*k-N|=n<|iO+ z-MMpTWs$#l0p-Bnwl}^QwFz!Eq-fCV}xN=aZhtbf@!t+do z3YDp$kE`Ta?2C&eMf>X9-2VNMMn8VtZKNwi=}p!^k+s^McX5AlMYJJD3m-9Z>8UW; z^&`uuX3f2T5?8Z~GXkBBE0qJU(@ygc&fM8r8M7Y6LJavDH8H{rvlNtEWhmW|(PtD>GR-EyAD%+uhKRIrbAXS3|AUYv0Kz1tOzzfQ4|Dvy^>{m$#PQ-I{BR``Nad?G4JDK|Ff$wOG2t zc&Ps?iyd_QHH&WTF6v!cXZ{*0ZV$ganO1!)XEA^Z=lwhvs}s$d&dfK=4c&CQ6E}v$ zZJ5^-;%10J&M2O`Ci?0niMJacd`?iJ+B|cyl$Z1$%!^0kwfu#`!=cWCu(skLXf_T>nyAdqR*|`evSev=Lp8T0DlS+#NXq2&j14;<9B<=57eKPz{$l*<2i*BelFZ9Q zfv0xi?*zIGT?ZN-Vu}b~m=~Sw$nbR?9&8A-EzWZru5xQAS6AtApUs3SivdF8b?DEx zcu(o^b2As`U;mFgUv^c;{1rPc)8Oc(VKlo(3Ut+OTfrAC(-VvrcWZEjr zxxCL#f=WxAzdYv6Lk&_x=rnF+1b_9HunxYf9&U!8M{7q99djaZr^DllhtLoFsTefYhD znMZV}KS#be$l-;)$s>0zlnh(vpkvRu8!o5pjL95qvCZf` zW0KKR8E|kINI@vcHvUlZp_j{9odZoFzLH~MU+$)UXIGxwx6htiW=isgxb*6>jkD)! 
z7rJvz>_q68UXkt9i3rJNz6`HTE?1Kr{B)}e8Rd@K=!mXUbC@SD7P}DpEU<34-2uzS zZ>z}cW7Y3X<4nzrt@m_x{bBTlYu_p7irl8CZ(~Jdomt1jF6H~LEuw0Gufoy@6*^+d zZC-4eVjet8Rb_2Pcd2Agxq8=Y%Q%zppyP_24NbS20&SHELO2h7%`s8_g(-I!Nt(j> z7_mcBmO39rhHS3*4KR0AwTZ7m>CJ5(pMgYx*IE`;WsHt|3u|q!imO|m*IBvO+PnBz zK7Y6%+m`eDjMp1=6x$dZiZ#*kn*pWP$ye&z7Rd~K zd79S$-lGgl#hdrxihM9(^o+i@Yiy;MP`NU%v9f~jsMIxUpo)ZbvgxaJXacHP>%hzT zIQPkxFG4pBG^@DzpP#*IB}>Zg)7AY?&xVLB`s+_uTRmuF^L}`KNeS3d)U@zA5FXPqd%R9@uEj9*qu)?g%c8X7 zb_L5wvJ$YiO$ct~ZD&H>F=AT@wO|bckg|DMog?2m!nNZJ3G@SbGCgPHXZ$F{G%_}( z8=OdP;LFolp1g04q!?fb_r)yABMO1>y-=A%PkgDTG|plJy$P)fK5>8`PtTe0WmBC+ zH;vt|zv1FCCDi)?lV zpLl=XCTC2T&^a{N)9MR$$;;9Le;&enc9KpIIMqG-28NjiQl8jdsjkX%Ud${LG38T&)T`x|!Z7cI35PaBEXeRb2{| z$=jaF`A{)}-ykBSuSmZCXN3)NQPEc=hxrC}d#JigE|JC}+qJ#!+1n+1vbRvSucj+z zAh}Y{^78Jr&o3_yz?LZfy2v2iE)OY5ylRML^PJm^s)OLArlu7)QPmo$V(pfw?G9?4 zDfKCIsAP0Vr5NNj&~0RZ#m3Ya4|X?q$xLZz8X3z8Mo84Y3Du?n#5oW=#UQ5}a!MST zL$9pTo9`?-`iP6gCfy+}YsBewlDT@L2BX0JQ-1ogrRUSFE2gb6DMI4LIu5n=wqV(& zh!;OeTT?mke3{PN6$1^zZn{#9=T`;imbmsN0`(?n#oX9(?#_-YORxlXfTA{qB00vd z;`jS)Jazz@&wAIP{^?6(Hp&m=sjzu(ZNHR{bR9s1@06`PuE<%O*H`yjVV+MPMj1oIneHw_|KF^d>^n6F_z*OWz90X-#Yr4kB+0svn5v2QJ7(_11t zqN~DHLevu7v0;RvlbPJFu_+)R42+^{mDW%ivp0pZqGWsOhs?xEV6!0LKqpNtEuUT& zpnS=XSGSa+b_E`PZSL4pp-t)KBKP{0zDBN&P!~jV{EeKyZo~S8G|*P#P-iMTgS~A5 z_LaJ;uvd3?H5Qen*)|0_ymjQ3vOA=gbV#JGqp7c_Dr-c1mR$cn%7#M&C2qxc0<8Rj zhzRdjAAfVimdYUT|Vuw_b5c*;`>8g1+9VEE1wPxa!nSzTGL7a+ghrtyzT0;ty zrQ5_t=MTy*avS+>Lxlyom z*I4fN$j6-eN`FEx~z- zeQ!^i<$Lk7*huO%xCQgqNzM9KBoUm6*y2UU>IffW8N$Tgx{VHJbq&qeKe`D!s7sc; z3c2av%3as50=o$kk4!-Hnqeacw1dzk6!j+$aYYZAy^=E)ZV-e*cEtQ4 zbfu5aW~&6Y4o;jUvkq0xn)Xii?_;!V-wl84Jb<-_yew?DPT#B{zDqpf0!X#_=4a+= zO=V7wzKaAIRaVGxy3=qL?uOME^X^JY^{X#B--F_d2z!P;WWk4S~ zq7%#2luWSn0atFvyaR|wZ|wUF@K<-6@1SE|qGeph{mN7n_%8dJ4Q~#6 zf`7SLW@<)hB|pdR?v)8CK8l(irXyIK{PJlH61>*NY8%nRi*^LD5Uh1fTW z9|khsA~x*WYaCB?u8lygG>%EG^lwVHs!gwO>aflnXe=);7tB9ovfjR>oY=HjY-FT= z+{z^j565q!vb*j2n4Z7yUBJ5in}q;;c_Ak6?tD-;97{@q@YFM#iviC|`!B99zTVU) 
z0+P!bSE+R_QW`})DNVh3^FU@u64g+%#zH@g)4VRz#XJ*}u6yv3xZ(#;Pf02&s5hQS zQD^4MxJ;5r-ny3$h z%kwm>Q2(7xi~Kcttm7X~wmm#qbQKDwp2oN$Kkqz#Zy8txi@gZztBV-Or=>B5(9JKK zTVEtmS4i?Gn3{s*_cn#c%YS}fp=i{i$fnc>k<%{7LQ6yABfzW@Az52e@*vqKzQ3@> z6vAW)Y1+gD?4?xL(MXTeoD09adun+J8=+0ANk`}P{Ug$KmRWDTma$OBz0WKTH#~ua zhd1b1#tg{a2eY0gJ*G52T5b>W;d38^)ZjTUQ?8w}$aa_cH|$Fx(AZG@ajNT{eF$Pg z^7FNO@9VF&2Uf_DLG7zP;IY=CNCk}#h-Y=S^{1xXLCd1rA}YoPA7_SAT-PcSqnE4~ z+LL`iB*b;lJSHvOu#5T=fGLidv>hEQE#5vHa1KeqOjq_JsQ1=?R{AS_s7vo^-x$XX ztqBIPLD5;rVlO`aYMK1V*DIWOj{s>1J12+8S_cfg2AOT&vF|Ko-YqQD!^#s#I);am zC8~`*Wxjn*q%>;#@!Ag4FmJ8M=dO6l zy12Ah0fENRTWssbU>V2GCzsWQcln8iRi;9^?s7S7V$$&B@)*>M&#ggH3~PfbAAOO< zM3g}038ZYq5{2l3dY?-BF?;vV5RQzrxReD>b;kAbk^9vvwGHewp?`XU5j8-#EEb50 zRf~;=(iQ3Tr|4MEP`+G93*dvJ0z^r3v?Kt*F2% zohBceu_3)d&3lM1XuV#73dHTnFb&^ZXM)>_=f!-5iZHJ_4SS9XtNzi)lSl=(tukzWA>6Ac z70VGa&^wge(h@NG5W3yQx*Vu6_WN_ju$d0;_Dek8DRr*WtWv0yX9lZhF*8bh_FDNh z_!`NvI$vy=1=N<#nX{kUz7j#Mp{eQp976cJ3F4FcGX>VHA$l`?shkWTg}u3J=f^y5 znG;`@%l54dIILzVbKu!O+nh2deFY9F?tAS}=aN#H3f^U_uf2MX#U-=iK*^HV9tn~; ze+@R;Us@z-lX=D9NALv_Y%1+VsOn>X2>yx>O9Wb3IncR&1SlZp3;z zuqm5bU*TP=(4)Z@6(w`m`bVdrR#dxwYirWIg?!gsm*p0dh6OD$>hEZd66SMmfD(7T z;~Q7BtP!IfxNAL_e{yHMOG^y@XwqrKlW0vk^J?{31)V zsnuYCA*9ud3ni?ATS?tQ;w?Q<=hC-(TXvwlsV@EZ3!643FG6=iMiQ`c<2N z-jdgIf(6g7EFBHXA&oZ;$&<|s$d?B}Z6HQ`soT)8C4{VkM*&i!#771_7tK2}S6g3X z$5IzN^P%@1!i_i1E%9$U2JGu<{=lK*tq1R3dwYB0rycqTd0S}zKF|rwqeESvF>pUQm%&9?wq^qoL8)wax8dz4dRlchwr<)Xi3j^FTgeN%MsDhg%p=GJ+YPmr%4Y{al^i% z%Ml2AT5RuvB%BB+>hrv>#^9Uncj;XcTjJ9FQtuLf zW;4=L34Z36pP&C7riB(0Y~(esPB4ivtzgzk0MiQ{Q3>355Bc(pLTH=l;uTYIA-?7K>Bc!WFL-^)42ha}9oL<8(28r2I&|HJdpvm&(zm}>+N3K*hnNHBaWJ-m|`y|R4b@%nidE80gwwr6gC8X315WH1!a)=6@0GN#IKKo z+n1(V*JV~?jO!1-iYKN=+7fb*EwsQpsFk{wG0CK?`-+Hg_`##001$k(mt5RLcgKN2 z*)6C`{rmaRIXjTHWsKT6ES5fAxX}9HHB{%)`Ts5Gq-=AMasvb$~pn5yM^|mDfT5xtcfSh;^kJ8#?4Qhfea{6?GTh95!WAn<(;x;y#PK7!P{5EyL1ExC z;f;z;P$adCAPmVm2lC1x!x4~5L#LM0p+KcBMtTe6)dkSxj1E=12=F>n!QQPW$|%gT zDwbBJ6B;BE-GB;Edm=P{V|-j80WWCCf)|wZ4naWmA?ysoVLUQemDTt)XK4wL549FH 
z9C>X_<_1)L?~LBpQG%m#V+f*L^M>kb2dZr?X`5k1@R1&?bM{TFCdPSb`%=zY4E^R< zD#_HH`!!};0RfBuiNsOJ$8_{}%2;tMT{w3=h{(ma)=?%HJ#X~ar0*H?U( zS?i}n(eK{MFXI3|YEhxc6h-qzR( z1(D4wE-ucoS(GvHx37b;vPU0W`7zQjZa-R2diM|Oz77U$0+frt9T6K_OkI)k%zwR^ z?|Z}YAGku2lVkyd(6HRc62Ujvau56tW{M%3xEO^kKicdsq1u)9 zqA$*YPyCs#S^b0K%^~+}GtdN;8>%}|+0f3_@ppqSN2#mMxfZ?2I$eBlMH+C=w(LbRBF4UWZ1B2;xPITaiXt19TeZPu3T}X^xtO z7U3>mA%g^{jt%Ir=i+`cHJeZzgbZ@B=Nsw!$KfKaJ-y9kW?XOgpo^FKW6JT(zD?gz z(u*(MD4*E%+UR1B^0Io-H6;rKr&GJjk4;br^2cEwaSJa1gm5&Cn$al6b{47fhr4Ds z6l+l+9}N&a@=3+-e<}*E7VHdWU|N8$M_oHTLwH#Pkx8>eHXJ^XW{LrM{TPtc@z2)* z@c0Hgo9CcKPL#Q02~r_13v-HmTM#(r=&C>0j243m@cTWT=pMf!R2B{Y(MG5Nz5WMd zXx*Xn319jKof?d6{hPyyyOHk`5WL2oyEo*aqvUgH;*>jps4O-xtT4J#ul3hsidL;R zE8fK>HH3xsfOYH-pjE4=N;-?vQ5ZmpL z09%>&`;SN^3748)Y-bdz6$=Xs8zA{6EP;xNi3yB< zsBLd4f|lx7IKBp=5NAR|cvUYZ737=fjQtvt?B)7A9@HWUT3k5R8h zW?0eaAKh-b>5^(K9C)VDj<@!Hk)@8omn#bp?aU)IUo2^SVVDCe*NWtcMlK#Yy?tX{OZCL(vQ(LkE`ZQ3@eLGW4){7amF4 zLj@1k8iwJCr08+`_NGqMB;TtLSb?wjy%1ACk-YIXj^Hd<@E-X)bO zlLaLv4N$i{jsGc0?9@bJq2=uj1XjW2iA9ljGEU)bh6?En;jur|mSGA!cSQdP3B z@04Y^jV4)C2%rHJR<+`a3>!c)y|{;qap!ybL;HcW1CcuUIdwf_?o20(6*{8iP6Kh5 zWr+e7c7Lv34@|Ax*IwII7BV764Twxl2>Cn{zVE^71`f626kfD#WV&B$A*K61S}5>o zg$ADhV74vWEy2fvsWk}ZA7a3a`(VAW+kjGY$y6gAD_lc@55uk7pVv)*$)*W3PiJ*< zra6+N9BJlp$ohs}jc|sKp;I$Ju~dZ%>M=E*oGl>_gp>f%=!L)LObqD9 z87)f`8r#4+Snr;v2l-7?n~8rGnJw+7C7* z3A+uOmxvOn8yXt@5wOb73#-sJ8s2M*%ouB13-#g-!U-#L>FjjDYlMHH@ClI=g(gX2 zbW*7zi%k$(**<)CfDajx9f3}|kjKq>KUYs%fzb!qPgx#R4Z~Ge z-&S31{4w`haxdCQ>mUMxEDp=G61~DI#)(THN(PE?7d?A!*^J?geJ?4XM^Ka-uWp%% zPJ(TrNOMh^E#RIGc}#>zs6cWyaC-lV+br;%9D=p(3v`%|V5qsYOuw(#ZcJ4XIk`Zp zqJ|_fq*W7GA#-^wqpzVAwH{fqakJudrC0)0woS#!jew@tfL0=v?a_v8gYzU@jKoHo z4x|-%$aE3vN3g>Jp>byQ9V1tYP})Z}ZyvmhouKc=XbxX*x;J~}u3QkttGv~44I~0) z7agm`n8b~AD^XbVKA8CEHcHz9c)2uvl^EM*HPk%bGD}vBMYw>ROC;aGHxj=Qb_8*R zq#0QTd0E&cXoQ7(NzP@M@}-n7$<>EY`yBstCdZ!zrCNj>r$LtJXH|2r+owS83nH+9 zM!WIud7CW`ogg#$<{PTvJkV|;#Dl@o4I%4@S$rITvrkZ$DB9~_u+EfTNLEFNVSJ@Q zf3a+n{`zco*+a*!v6fVvEqN!1uibrKLme4M{AtpK{S=Dg+e5c5HKzBrR_#UK-fz8G 
zxl$>7!nr;hKwA!Z1$OhDG99mtM_(!3sfnPaggTahc`s0mt{5<42U zv9xz*7s4*PVl#G(;eRTxfzjr}8RW4kG1LJ_z7{7M8ampqaa+Yd2J zyaF+$(q2&f%;eFi5?8h{T!InF)W~^;1HzwWuzTDe5AOI@Ck!g+@rp z32kNbv=3bIS1X`ll19gg9Xii}&&!k?d4(3Ck1^UuXExtZ7N?{~zAz{yJ0T+_pfZt> z-C*ahr?O2T%oISuFtoJ7uCyN+MOSURF`OI4WCS9y*w9Ga1z6<__QUZ4Y-n#@%C#e3 z&dGnwU2$a>@ShDj9=`rUhjA(D9cJ1Okvs5;+~Dd}A7^<#s|sr*u~aDl=^cDq=$@*hOfN`5?b>0)|R7gycp@t2^0Z z)^BAbpiDYc){u=4OoPx--sr)O;-`_ZU0GMMW_r{=xoLXfP;6LQN9ev!?^2Be)ja)K z=J$SIc;tEGeSg!!m#%uISr(;*5XHAYhWvD zZ{7Or$PDFvT})%<=*Y<<)d|}WyX0~43J8dTc<~8ypbafS{LqU(v6L#Yv9T#G?HA^* zp?S0b6p}jG8_kfoIBj5oA;t;Ua>onVNtYLUW#Ok!v)CEBRH|-N7Dkd=P|EZHl zhfSBXZ7lI?{!;dl6DCZUvtR)kA|kLT+s(Kb z)w@U4*E03WUxp_#?uzGcjM*Nrk^F*WPio<4&r3Ku11-U0;*oFG%fh%a*@ug<+Od86 zS!8VSBER)HX_c8SCuu(*ql?7T6|OFtd9Js;HVRGoPx;LD+VjfP;cc zWMyQ^Z)}<30?w8Wh&@AaA`{wKe@Bmb6jVPR!_WU76C@Y~KCN)&#kkfkl7)7y^yH-^ z-16eaX{5KCfq{!oPvc^?&~h(0A+1nKP4p87AK|B+Gheh_peC znLndOvAC#cHw+aoF*%_S!&vNC!;DC%VFt5gzPN*1-hKE`=HLo0hVANfx@2get0KWf z2~AumBD(C`NC7PXTovfy38|>4oL+;Zqgu_kYSkJ!Im4!s5QkXGSU=}gvgVPTc__V(>&xH)sOz)79>5^7Nu8&g=g>lSR@4Udq{?9hB=vC$!j*yb^h zTOZS`nD;Y-F;+Nz6batl{H-IaO4#k<=f=i6Fy{#f3hqMrivSQY-N(mh9L~ACxD4I; zxl5M{2Nd0@si|3Ln4%--gb1q$Q<{HzDsJEXra`8&?keR#Rcwe$%3SV>W+?Jf$KL;4 zXqx!f>C>7SPWHE8KvBm}LTD4Yzsi@e_1)8ic%7l&PCs<4*v zQo(LjhYv6zA;GTmBa0ex1RSwARAzC+PC4YtFA{C$PW$=an(HfQq&4OFLi#VTrhz4KMp>~)<^?4sS11?Fp?4SGn2tF) zuzDg>FF_{ufmCeS$v#-l(&wt*OZ_8j%G>>4KZv^yoxp^sAo>%k55K;5@LX}Qtfk!& zMq?w&uLiC(hleK;0=)*_CH(TnuONhFz%$18bm(hCfF|DZ$Acx9?EJGB%6v)ZQy7Y> z9ICH6H#fIlqUoj2#~D+UdzN6qEPkSczVOao!9|WZYvj%|DfeUvYMJR!pYDBYZ$a zDuA<B<5&5}u=0sq+ zC7cIw52I^Wt=fUhXeldSZmEik1GDq_v=1Ov_0F#2>(j^Yg;C8M7#Ki}E%|g&@!jiW zOT}HgyoyZK4RXKe)+O4egUqY{kXQ)0D6=rjsZ}$6+isy#V1}yL8Pm%PeKY5TeKC4 zaF0UJhP>t9y#NaC8xFq;1$bzM@Hq5#26I$tLjdhx_U-uib(66hRP(N5DV~1%acojG z2{&eeroLXf-Ce9HQh1B=5#RtGDZ7YM_KW}KBlI&-Gh&Yl0@=9JQC7wvdJApBFqy2I$5x)7&)X|5~r^716QbfGGwPPV| z#VErQ?b}V)PP0zfI&%y zq#Svpr4^+~R#$uAO&*=e%%j>jS#DYA>CLehMa@BCl2olpL5(2}d7;jC4vlyV; z3?8waY`ej3a}s~CT 
z)vCZ}t%OWnvZXJ9%`C`5ljj1|ID;qOxO(-s7~RC}1R4>T6^Ujlw-T{aciGu-peD-+ z1ZP1m_9vgQ&77XEdNgi&g+7}zp0EJW311;kM8(H{xu01Hw2cwsB)PylMW){#WijDR z8@_nfn_fga`heXZxZIguC~kWpHmDvy-aDgp%B&?CFgs0;oWN+z!$e-VbV(E21OZqU49(&0_OLRYbmerE}T?gJl6GV;$~-X(+Pk7I7YqTOSZo zvD#ST7N&jd6)~MwOjRRI&U})Dunt#bc5)MF*EfR+SsjK(?{d*+K zm8kPPfU>JsWDa#IH7T5Z@j{iezgT|%y}tx*c6Zy2$1Nav84_wo%R{0eNV}10VTvMi zCn!{3_4B(65kmzz09YMd%%47&F;PZkq@@HT&b3I6M5i*+Fe4o+kC*B)YG4#Edxl^7 z_19k~&*x(D|3Xi{8OLK@Ak_I-aCjLXg~{5x>|ba?br-;qmzURV)aSKegXrq;-%pg4 zl}$D&n3s&|^{V(k?g`6RuH3F7H*lLb4*&o)$20rDwd@0Xpz?ft_iq1DKp%x_RUMtx z^XJdMwVp;3`hKy$%MH4wS<4*t+gIYC*kc<5+Lc;*FsnB5SapB~GBoKw|H(aY4_^^TclIk$o6=3IilqvW}N%mjKxwKC^#FyAXKZ76>3ybhPNQBVB`DZmKD3D7*(G|(#Llle_((w z#0xOZv$cJ1;iZ{6V+Nt{SkNu61AJ&gSOWm~&l5I`M$1$UZK5*BcGk4DO`ZT>04`bI z4Gpb7+k=D^34L<1CKvOkiLr468qDcO-~Gkg3>Ux)bj_#7PGG$51BmHD!}aDR%+Alx zPZcf|e0jk?KgbqSb7{WeiA~570{6Obe&wVr`Z0XRC|ehwwP_>uA&h6I>!}P0s;aqFOiLO>S^KDpMn@YiibS7KyUSzc8Fzr zdQv=~EYwlPAw_2fx9C>3MzVz_A0OXcG>a%;6$?OqyJ7aigA-(By{})7z&vtt&h(#X zi)nxEy+)g~h(FS}H;Rg?f$Rzb?u9=5)^PPKV!0VP>WoR*qq2;x(5(nG4 zih^bLf@iIVYT%w=2}O|DKMYOgc9<6SZC^tt7P`;AyeRpqEEe%`2~3!3DWy#08G`U^ zy60`%xARag%xC_!nNsds*To{LTcNIIA2&soNj{h}6QMKrjUS`>J0STQY3aQvC!$jL z)kn=(Q4%Lmy-kw!=5ll2#V7dfT6wFC6HqDda2X1GD7m~WUxzmG zffuk7%LyIjZWxZnhV#qM;ixWp>-z1;rNz?sb&9c3)ejuF2@ZatK?YGqH5Qq9rG$h8 zm1KoU_i$pU=hMs6`%ltmt11V{x#&ULeu4Ajd@L}kK}d*zgO*3RAvA@BhXa`yN%OSH7?1(jx4w82pxm@xKHZV42LraU^MH9x|$P<~kzP>)7vbFr~En;;@ zYA69M<##=~2)96_J$|?@)4)2T$A^;O`0601&D{zRAUMwhM1+@x1pBhHp`ZscEf1uS z)V;XfN*p?}|N4F#OAW^|y{=vj#hGJ{PLh8;&pG->?aaOnRz@tbqrcU%JmLn9@?SWG z6qHYymbjCvtE)tG2-FY}5fLXe)+^F%%_ua<#*eoCEgW>?(HOhj1yPqlJLGsB_%*Zu zP0;5in;ADMWdwtq8wTMa0_|t^ZB#}3_8MJWlH|sAFQ|mrO$2mNOo0nx3^V*O3MO>t z%5zWOt&>8U?H)8@B-YK3_+1|6t7*P}4)J1`?-)C4}$ThY z_$i9U=>dg5!=b$t3R({V4D3wX*tE1XFptsuB@4t3U4O?p5vbD`amzKh#r@_+r;ovfNVZCVe^OwEuQ2%_<^peH$j4b0)@ z=4a7Lqwx1dHKnGe9%{(?M=w_(i%EC5rC=YkWJaQ z(MQq7!wAYDXlg>lqjBM7QUP6~CtQA*Fq&Pgqmz>b8j((rn~)`~;tg<2rz;XR_J67Do&I_RKfuy3A2TIzOgX|g&zeYZ?0Opm5hAedmxz7Tf;`7}Tn0#N< 
zP63jQ9%74uEc;+d;1jwDGPpqJht{-sl}d0qNGUWLL4c__Y1j->xJC?CZxy)aKz~0t zY?PN0SLhHrRHfM-^Krl-jEFYck`goa_v=E_qXLfG>T+0%kuUt$$Wsp2rfZU%jba*P z`R?62e@WXr7M0PCd(C9v*7uh^dB1C5oR-f&xdzRVyuv~ax+jQd{{AS)|I_8PxLqFk~RtWgZhM# zB%SQDLa0T~8TrVle#M=NTh-`5Slt7*UJ(VwTMSCF7QR~wSc1jxAl(1iYcP%8v>48SfRFQ1j0fo

b|c*%190j1Yt%3y!fh zKx`>6rPjCp(69B!gUAuv?XaS?B0uRjjVom#9wUipp_+i!&^JX$MiS(W{Z&;JnRQaQ=5&8 zX5L!C87=;^?ln|xB9LKWVUkr(o<0=^H-K1M_K5TTN5arGKVh~j(9pHV16!J$2oJK^ z%jeIR3rDceiL0s6hqT=aC&NJiXD@Y#Y1$wtB=lKSMej|C1qF>k;QPE5hoB9K^?+#B zDNX{wUo<>1 z#av>e3<9uHFPm=7{STz28^~VkuR_YW2e^0_XJ5mPXP{QD;p00)a1c?n>C4`o^bKAL zVfNjh_Mj7xfivg%1qGhG*f-S61eFs{oe6z>J(7$pyl?4HS1RD`y0k!KGw=-(sHoHo z#(g=JD%zM^xBlJRQ5Q$05TeY5F@?olw9q2ir(Yc0hzj8_o~#1xZOzwK&Lk+F%X@cp z9FhCjx zDTtxJG427xchG7UmqPRs|G6C&MqzH+Z{mS2)$ob^66eDA=(9Ty0CLYxJNJK4eV+L9sl4{`_m~^xY@%_?KUCe#c>QIp!6MULl7SpxCGuL zfcZl6gmlX7+qd5fuDf?|2LXB~oT9$aqfaAI@O%2BCMjee|CswbMtKNbi{j)*j}*wc zfIfmagdF$IW^DVhra&18nmv3!E^s2z*#l}^;^oDHGXV;G7+~g2n2}nbIEx}N2t*UE z6B1Gez@nZDmdIn6XQd|#&M~8ISa9;6KE3s-gaaVFB!8BA4SfVSkH~?)h4fSZWZ}2U zkUmgeHm0Sgb0#J`9#0X0=(PitQiMbRn%QQYuqcP$n~xP79PEUtkK%4 zwu&fUu+|lBZle!c#wJhNv-Tp4f@81Hjf6W1DFFg5{T|dOY}X*7%K@HxaEj_KP(y-_ zAifCB0qS5W-T$~65)ubFO`E7X@lUcP1R+bR-UPIWLRZETW@buwpm%?w<;^KnxQFMk zbR^j|s}T;(zAzU>R>;PP$n%mVLXg6-De{d)Ym>rphywSbYGu`a#NZ93mz>LJXMFhB z(&D7;Hj_rLFkt-Or6F}Mn4uHQ8MOZ2;ga~l=hGp4Jwq>y`C{~iKreZNHg43}zkfg3Nrr}pty@1sdoagMXQjnKU#AD;;iKIg z;0r-QeRBP?sU2sZ(qA1~0spBh^~dblL6#7HiSbt4X3d^h05;PWfwWEJ9o9|hDoEU8 zSdW*LnVH!F#)AGfzED$4G=!M?nebaRL$9Ld-t7=Mb32^;cFtc-y%s!Thj0>45Eo;j z2G|JsI)b_ix(Of8hRh!+g>&~7&M|F8%Lx13)7Q7EBh#nCVz}tWbUZ1hoP<<-%5Xan zEnagy{<}!jEB~uV+zzp7fz2%%87$`#9v$el9!-ZzuoU9Q=ou}RbME&6f$TFp^Y`Cl zLh`zQer57cLBZNWQO0{0(1oBoG&K@i@mkOfzX=e^h4bh4=<5ffy?UVrr$=UEGw{pE z=pzb!Jv<5w4Dj@mThyVu76;g>iAOA+qC?>i(#OE>lj}bv3xzbVpr9D?N5ex>oD8D} zeS<&|C9BIEU!;EIfLyB-%bu1BSdI=~`v&g;T)zX-gIi?)w$L5bK6EI8JOH#ksN50m zK@-6J>A6`%y$~p67`8noG@v&%J)ImBm|t*6@+o*^+(JEvG@rIV2ERNAgFYF^4rEb_ znhCA>seXqJRpZFp*_n$Y@#sGJ;^cq+`2~+?*n$@9IvT4NsZKBwG|8jgewFvTs|!Hk zsB|p)?hwEiVA=6ny$$n4t#fpH<{ioGsm5%F-xZJON6QT`bK2Aq70yWjb*F*elc+58WgqhC1XE!td~@ z)N7L)3hhj7>}MZgqgZ@fmy`p|g^Ye^p#iEwT{Ko`Rzdp<5VQv-=(~uSvtt@&zRbUd zka_+30!Y{JDl4%+!pj1$hXu^_APt1s7>)moHjNG~g$;+W3GT_}*fEi@fMG~7#3Oeh zja!sc@2BN;aui~}92FWG3XlFTp$RyUq=`XNn{NNp$#d;J6e01#)c|!HUz%}-vJ(|K 
zYn*!UXU+67KnduA^|oK(p^Y8SSDXy&{uvCFHj_Wkp~IT zAr;LLBzo=)JiQgli`KO`h>A4!slWU}mN;_L(6PwTbCXnW0z;zDwvrM7I1E|GuYGIB zChuu`qYVVy0y6`2Ma4Awe*ggO@n1*-(OtHZdKO5o|0lXN#0qXSJdgkG1@Kv0h7Ouj zLDlf^Fx1bvJUjx4)zQ!#ybdg;3x|q=1^9yjy1e-Yg|KKSTZQi^)c>LjJ2z?TvW3&9nPmMvL`dI&${nU&@@1-~ynhn2{l)uC=ST5AmA??A3 zP~CPMUol2mhQ7i^LN;-E#%d6IdSJUm=8A_zLlZpL0QeU*k`EpN7{dtre{ni~{62}M zaHU9(O5&(nTyQfu&-f1D^P3qONNrR_#S4^pX3xF_9Ua>p-D=T*B1$}PtCEPFA4Z?R zcuDmXQVuvO(hCx-6To`K&27`h zlg8@xKi#V9icz@5@d#56FogqI48W=)V$W0J-R^H3;9=IhtCs!g;(_%zQUd0^4X{yM zYLWsshn2!Wm+m^8&T9_jGze-rg9>;=Kc0&cH#!h9dX1OkESx+e{X-(ME7Ct73%+_a zOw)xGzbkP2qKl0cM|iKnN^Gm$t_JTOD)9fK>rLQl&foX{gE3|>8Y4^Aj1ZGGTQW3^ z`C|azI8H_DCSrb_biBLpJ41=OXNz#T&D239h`aSOxGxPcWAOG|CK0e=> zk<^pFX|cqx7qpZ`)%F@I{E-x7v5HN;?Uv!oSV;L>(q_eYm>+|Hdl$+2W3TDER| zf+7yARrRs{)Ozr-FWyM8`yPmZtTHFSAcYJekVgW|$x@>m*~r+Kg0k@PA2r*z*Q0<( zL@D|ueUiNhkY7`pPzA6~64Xn26>Snt(XF1)tGHbYOeq>vkkJCrA1}jmM2Mv#=0K@bsK${}Hhd z>aYg-3!t#9JRdGv6)5Mn67j-bHv|LN9vkP+xZh3Dn<9h4lIRSB)C| zL?w(>;B-ymhl*bL-MBH54C)n0^`fU2g0Ki%A~cPZ)K4F9EhSU}A2qS?TX$X9BSjp?$qX!8t7NLzPY}AJFX&Km2e3 zo(!o}%Ls}hGcgkEmf(;yW35X`S@PiUZ(f~7gy5FM&`4}9H4YAqQU!-y_{Adn{OJ;# zh5bG%VKq&~ssQrXHzIJC9VS%@&;z219z;l)nVGkCe|z{VVMdB!{r`2y*yuoO@HN88 z4Fgkey;8rQ{M)Tv%(#gxFjil8*t7e;o{}~+|rr5QOm|RdeJl!6c!U)PK;ne40 z3!I#sh?dn{L4sD@fdi+|FzAK<)`GOg$EY8qWc5GsKW6XS(|AOlkWANxp97pIdeAIM zCGBVn7K7lBI`-{;Mn_diaY#BD0+iJYp7j;V>B7V2eq*Tkn{9F|`Fxju6SH7p7$Yi=5C zBx9tGvR&Y%C#u-%=-1~=e=%VcHz;Z$rJYX9}UxkY#<4drr`4O za{S`tV9N;{6)Dak%6EikSqNw(Wd=U~<+VKXqEak?(wmIPOIV!VN(pL^pgqP(8r~I1 z?Xcf8den>0r#eQTIlGXQhJ!;>#_LZ-(OfyeJHf)G__othzWn6rwH@zjD>)UnoCOxa zpccN-Qw)|066vS7agv9V;>Ibd@7}db2VO>c%DK6tB$wmVNYMbqf%2{-jScERawGG9 zyzc9EW(;0an%pyV^zlI?XHUW46am`{-5@ZcehhAF)9rc&sq7si60odWxtmZ}T2 z_cGUvoofE^ZS6JFapOid@itme-y&dC-|u(*`b)qcKbU<#*}M15zw(0Hltj1Gj96It zr*o4Z0$Vqk&~)^|o!wLqng{4=C)PjWQ@=-X)$g5(mulw*^y^WSGTkaa&NkstzMo%C z+@ZMD;Z~16h47FL#B>sK${uRz+X>1~cUG&eYP?b5bxx~n&-5=b!u5J!97)jSmKVRA zpj2($x;1l6*-Y^g&dx_W_QlRQsl2@fFq}!X9ke6~khuS&i8xnrywwP8@8H<$C{0aG 
zSHjhh{$(dMoas4!sLRW3Jtv=>b17urtu2eV(G?$w1%l0vj9gesMkv^#v5MgZ9ja-7 zh}{!sG|Tl<`7$wd{GLhxGH65ZjM~A_qOfw3-C0RP1mWW;uO z-ek9q3m*hO(Jw{X1)8+({fHhLr@3-9Y#&h_)BOH>ZKVKm+}yoEHPmhTNZw1+2!8{P zKVsMB6!-O$2Ya4&fps(G^t6X>gjGK71Y}uyaeCe=YOQN#1OVV&S7L1W{r88l`NPf3 zDuCKgO%77&1YsJmyHmnne*IxRPHiy+D)8k;0Lja-;I?U`1VP*_K|ANnIlQG+uS##9 zx);(L^Y8oEVE-Obh^8*{)H((Jv=B}x-G%q%dMWzb6g)XE&Y-K30;~lzKK%@PHU&+o zQ_#i8MSwR73goa)5`4{i=1l#o8Vaq}v?2(ticW?^2~VKV2Gb;^xc&>VO%endY)Nly zhzTiW60o>M#_00yUXTFht1CXZqNo1JpG&_u{jxc26ykz0 z=e~}Sz*(|of|2+RT3l(=S)J-Cyo1gEDZ_eS!RjOWH?}P>q)MgI$=Lrq53O_h%?|3B za#}s(ge6`=EA2Ry9A;2Dd#fzTL(m)5Y~FlsNH9%!!U6*qoS(W@F{F?dBr!!ONcK`z z!UPYdWNM|JJf$UMN}fov_e!RGX|fXhD|ubNhb?!;y+tIQ+>Q4_t=bS$U&;|B9kk33M zD=E$3OD?H*Fmxo?hVd@lBQkG}^CD2po{|I51L##0w9^X8Nk%0Jz{Pe^uB4(Ys17ZT)BGfjv3kv$P;`>yJ&^ObP#Q9qz4-N1+G%hD zLW_b?#^2=V@f}aY>~}z}xCr9Qd;XN_N{IDHTmsR%5QlrtiYgyvst)ZGdi^s~-+Zwg zo_-56qdcUhY+$nIf9!^H=nyfep*MsKkpH-sQkZELj(bU$d9~tvn(^!&Zr~rW-M%}D zLMATu`V){_s^vnRkoricPX2o9oD?0Qtquv(9`!mg@S^+sd5z^F=ln8yi>CBh5r@RB zAdlUFddm@{2`11%_yE*QO6zOaaU(S68%0aJEVvkD(UGB9c+xA{8CWQ&cEpRb2^Dj{)^b2D*rBM&5s_iLr5_SSIihF*8tqQ$PjIcr{dbm{R( zJ_YlsLm#UNuQ(Ya(MxTD3YMaAz>f69PTU$qv1k$sL^-*+(&)9j=M-a7q?b1QGhyL@ z9U1BA9bf2=OM?R!NCOP}Y6L~};?QP_97Cb)%eo0-#MW8fspn~pcdB`S^iK);->+zF zmjAf=1g!uPR|&~Tw(ZRjIhEgJyC}|CHf!hq@3Vi1C(|K2 zp-Or>IzPK!kJKhvFVx%m;Raac(>v&o!lw+t3{YaauQ$~7EH3F3hy_97AsqE#+;l8_ zDfn=$0vdXz_aLqkt|l%)I)tFE&z7}k<2x zMHf>YPgzHVdbFPrVr@W@CQK}K81l7ediQSKZV&VNnJG@vn@!P7Y=XuG*e1gI5l{yh zA!YpqdGY$Zc+A>}rK?T<@r^MFSho|D+ItQiI$cEx@)R=`j7ZaValUW8=5KkHe5;{B zb>w3oaMDbtt6-7D*k|}{f{U+2hE>n_8-+(!tWS3cWQ)#NM_5qye?Ua{Had$`QH}(4 zl2%>6^7OMOLRv`$SEemSe%#QwrNFJisb*bF(Iq*r+gXvbhUZQq&6D^aTm`T4@Y5mM zG3c%+AwqF*(}R zy=O`j{Q|6qfQD zU{+3u_l9qY6})%1@o!*sf7i|1d7ATCZ25S8vg4k~m(jgv6OvDiC3FbR^bEC_|F~-c zh>)92#ac#4ky5(07_2rQ7}siFGljMinuK5jLbRo|Ezx>5k)FgnAnT7eG*E1XA21dH z)8j0LV+h+A&S;omDxR|j9f-fNT%k~2SyL4*jof!KX5RuCLl3sZUac zb480im2d46%nT76b%0f5ptCE)F}65+dGYz#d)QS zxpe~vEjEJRDK9GhQ^<<-P0{Ey+#2R~zjcxIa+<)IegtS 
zZKi$s(x65SpS$L)w#ACXBHVyE7gt&@TxJq336W*WNwdFh*eVob{S4hqB~u5wieDi_ z2%z%GK^9$`_|!8hm4*hPh!H6WbLQR(kLz-FrRXHh%n7C zfOS4#WnX#J_@nPD-0yx=9}1qoBTF!ha5+XV<{9@CBRG(WE2btW%tWQ z4vGUsTjH!h{w(JQj2R94_ayzop(w@vQ=DAl)_H&;rz;VYR1M>-y?>W&kIL)pm2k&!{)&IJTl0kggI%)w{^g{q)T#GO2%FB&*H^@Xj%dmx_nINOmK_52uSk({yn;MEGF z-mOV$z#@{*%$Ta8lbk5x6xPyKnNK8_jijG z&R0Q%h3Uy}Pz4a)<%5en8`eva(_FGq-awMN*QURnhx-I);R!DWqo=iajkJ!B>$W-Y z=P!Vyo|M;C6lKmN_wScNJYXz2AAySAc*!Jf)A1@L5X$q}C{=*;g^8Am-@${uOutsd zEJCTr5|b|c2sPzP%4t;DtayRMKCs}4tWVQv)mbTx2*Ob{FzDysR*^H1=s^NCe4~z1 z+#;dz?b!|w7%E3>BoZ9B(8jZelVwCsXkYF8DWJ~y52lK^OJHYsS$vV?XsSV zrPctS;s}a2?jYEHO_d>gqlO|To8aU)C}S{3NfjXd9mplMI>AQ9T$4Lov+H2COUUid z5vr&4BEm_Cf)5F)bj6m6DiCF{oa}6yWdhV{yP&gZb7tE4kt)KsKnzUKxSc=B7Y=KR zZ<1DNiItVY@DSDgt&{3`+6eH(`BPfp_DfKMnKZ4b;={M-UK>xdyB(Suo+yFe|byRRAHN8XhHutBY9T46(g!2P`;D z*%+TFIC$fxO)m^>rFho4$#>t;zj4sE&Bn$pdj`Pt*{9#Go{{MVsB}Wz2AYSaAnl>R z5p!1PDXxzSpYvCJ_5gi*$d>dk-%U$Pdoh_Osc6K_@Z6P~UHsnEYR^A@@GS)=jh_)Q z^BuD6D0_;LqNdOZ6M{ABu0w+&+<2g6AGKnIrmezrwKR*tRNJW|Tmr~Et-9Cu+y#gm z4DGG!55eodZ~>^rK!GEGNPkyPQSnGRL8Qr0CF2`q;sk$>JsrWGDhltuc^?tqw`rH3 z){Fa!R-;$R&-oUO&>jD^Nlj!5mh*#$)E}Ve3phrXE(P?Ls}}X z_vDtCsaq;W)Q(e$=7w~Tu;ZSMWE+ICts~THsCj!y#$*RABi!E~6@?VCQ)Qy0Qry^S zf*)uX*$}v0p4tkN{BH>Ed|9pnOM~MYsD_Xv)dgJnY);O=78HT3q4ZdSV+y2m+Dbpz zrk+ACoJeuSs|UZR0Hz00DH}&sPoAw-@=u{fQQhCeV~Ozw=dAlXpLaF!V7vNmZ=ZHO z5O2o~Qg}vfCEL+9uo^JDQ^KQ#bqj|R5stury+zL-e_X#_wmim#BF}N!JWW)LG4YVR zT1P2pHKoR`N`{7<;3m#r_M_rl8S;Tr+#|K-gs7jT@K_E#&V^q6jMd&P$5P}Rf z@DdxVl-^FZ?KBBqOlPj}BYcg4SGaLb`gtyeC>x!ccK)bSW>k-S(?aQ>C3 zIFvup-ZYH}08i9QNeH-y%z59=)FBeLe$DOld}RAq`Rb@i;W}SDzwHy^wIc%9iYQT& z{ui7wrVI639ix0WO;6D*5nj+?43!B7U-3lXZ1_exO4(I3Rpzo#mu(8uBd&N9AJ=(} zYELc(%<~{|e2}yo7JkxL3aVIl&JV~rxA1BC-}t}T+%_8_CR3^HB26r6UJ`oduK?;S z5Xb1nU{_p|KZ)0Y9fam9n$Vcye2SC1e}_FyzH@(2l#dV{vI=*&Rjs_a|BG!($MKLleLzqaZ=Wn2vK2FYPj^7r$>4|xi}%_G{&5J z%Lb4!h$5N9smMr72{dlWp^UkMvv9m0Qpz~SzEzw;D;%}pphRrlK^H}df|y!1Mqza5 zFJgyhe~wp&HTII%%E#y_?6VWNgMAIPxFix`V(!&okM`_oxQvR++%WZfL8Y32o%dOO 
z!ssT3egP|Ak#Tk1rXie_fWLvsDBr4<=Y8#s?A z#=brxbktI$3XS?2O0GeJXf?gl5X ztFmEZO*8VqUZxEcF|qRN6*0rNCDc=R-Hod={>i_oUylC?V!WldP=~29`sP#1J%+eb zuw!ayUA*2g1cFyATo)1`$;4>|Sh51Gm1a``kvoi7YmG$SBc5<_z_{jMwK1Cn3gx$`WnO2)y*uvL`VVZ;Q3cW}&?3&xTU|SV3jUB{ zfKaUc-Bu5vVw%GqzZNZC8s?lw4FL&!e#_0q3SQwofUPNTh#RC1Ma{XuLj!w zS5~#pC$bS|(Uy9S8F-m`(}0P87r$|}|Ip!XcfIPMP|T!1cOCSK$l?hh92`k((E?}> zV*HTLTLvJebaOh_kPWzh|9(&F%ja~Q>>AAJAdB^SMUnm29EksZ_rH09?Gd(KkVibE z#rW~tbObBK{iYzvc``;=nB-F$Ey$R^Fj0 zG!E`}tffG26|SM(6*PFKPDYeR(hD8Mho8J`I?pd~FP_`}MDDK@6;Ik-ap&H|H3-mXkd^gvwzG?W@aY9$8M4G3p=VVl08!sE}P!$d+oz| zxfO(6J*Qtd2|4lft5>gV(*y`vok^SJ{XKe{G&&6`Yu>)SU#}{DTTE3=@bSNR^IX3mh4&3j~1xnGOCtwZ-&JD_%B8?ehB(+(&$0+eEO$5k5Jk^L8d>G zV$?-7WUtUZD-)bW;fxiXBnmO$-+cKk-f7mCm;QpK z7fdZ~1(j52a9bzCp9Q9nANp`b(07)^lD?lq9xt^dlC&IKU!i@cNs}g%=zd=D;_q*j z7Gl*C4)vlBS5oe}k8^|W!C#!ek<$lmX)yHvx||v&fCCw7-BDNAMv@f>=k(#jhhj-E ztg;wJUMr}UGI7R1`V!*TH&vlw-I+S?WU4e4Kt^hqO#RCu>ix4^*Xu1?woF=Mq-2jI zoQZUg3aBkDPVkQ}4(a^)0iT7Cb4bsiaK@V6^wxnaHXe$kN?OvTO%dRpTXU&Dv7h>* zz3JFLxp=^HSZ7jg70tNf7a8!mO+^Js?X14o{>wk1V;~Ppz^7g)**ivT9mW|*>#$Jt ztQM3puv|(@qVY+PKTS(bjo>$WkNUTYXECSEMmf=BmZH_EDzuI=oPSrd*X(F(nt9YH z7LmvXFr$<7L`y`*Do#d8SxU6jFQ(A_mi%7t;);CB@7na0(yH0cA4g9HYmS?b=1XScu(y~vOHd&3;qnn6boJ6Y`9ko*Jr1HtQg&FFl3JB1lap5F8`;<1Z-4pA@ zoFj31MRYa=)Jvq#fV+d>vJ3PlH3*|BDfV$+g}q3kA^uiyASQfXK~AmWo&WXPKEXTU znfe(7>0X+51xS`}0=)_*aF9MKHGDaAMbC`t+oVMVg>MoAxANAzOrtjbIUKr;+-egwjE_seVzTYSu}lQ66UNJz}{$2#7A!H z9D5RIC)wOvBi-8{+qSf?a(d~DOE zk_D!gauiIKbwZe3cx|9N2WlD0Jc8=pG6UFR4Y$YW4Y!wWcuxG;2ZK_?@- zcNO)X&2k&Dl{*lZ(1ork8;Ch$2&}fGk&U-Q3au~I|M<83`8FWKw%D?u!s5a~>{;BK z6Tj;XSXc9Dzz+Myt$X_wgmYzkI3*En+FpvNb=U9Z)&7KmvtBt_;FGJhMYU4?KMxB& zEP*<9J7Nyw^H)KqnzwE1YgZE&<7HF4N&?aaoWQwZsfnvK2n(;fqqV(_#tFk+YrrVS z@V62RrmEgE+odf?wTJJzNjO0IsQ64+Pct0%8Md!5lQ@D;v z!!rFDY0w~z+Sb72#j42Ty}H!4TWV`<$SKowp##BeZ2LB4 zu?{s>6qHgIcWTwD)aU?Vv%!U;Cr_R<3g5nEiwuf-{$SOUl$62AWz(l$%i1`%4X}pm zsVd>--aNZ~cmH=4?>lw>;`s_bF_=z3GUAx7N^&U*h@y%joT;rz>YoZ_v4H3fhyoW? 
zYm^N}a+f5Q6oLVCU>CbcNJS^fS)|@_E2xtf1ZOFQHEE^SH{~j`Nbqk5q@xMaWe+Nx`{KXLe#iEv~XN4H8S6x{F0STw1z(;};j&jVC> zOyqDDFk##k{bR4ucncsAs5&xiQ^AlyONcC`Y4~Q}5>6*^rWJYqE~l*MGa_wAe?T&k zOE>Z=te;xG7t@>Qw`K+Rcu`jLRmx}Rc6R4tjpw+hux7jKI1lIvO~-ld(K$F|*+^M) zq{J=W4yFJEBlR)I^(?0e8@m9bHA#WXi%9L7(2&{HkvtRZ}J5Bgl zpuAJ5fM29;Fq7T~ZnmsFdAeQr0eyQf@kKnTG_(O|B!l}(36R4s#B|Yni7(~7Xw_aC zZi}Z1qwR=SCy4ouFP^xrwwl+w5(pkyjmCw91d>?7ZASEm05dR$xi-mk2#UBid7D%t z866c2{wI zc8wp8cv)X@E6GRl-otkFO-n!6!C>I)gWoyOZ|eFMs!g_L8kaxiT&m+3hz;?j+Itfm_M3a-t1eVAGOE8g75i~MwjkA7L{;NmyWLJ7)WUx4 z-d($FiPAGZrP8OD`R9%uZVS?gs(g#WF4*0ODr?g-P-}G2?wwX9ML)MX4Zp?wq*>=K z0|pF84Tmrtp1%(KY7B_ic1}#Uv>Hz7Mi9`{8W3JL-AQ9?%g$G1xZ>se(-XFDUL;X+ z+jjRiGK4d-->;o;-L#dEO% z7V&Fpi^?zq+v_)M+=eC4r&8|ZM@swW)~)(NzO*4Qw2Ne20xEoSzS1iHR6g-;?z+$3 zO1o196&HxcCnQ}x{h|urmCuoos>tVTHCP}P>3_iGZqQ7Vb{tZpm(2P0Spm0UQ}cxW zVu&g0&hLE*@Cy2X-IlkjpfO1yk?X0^g*IMQz`u#b0Q-w&C|M&nA`Ly=Pn68=_?Gde%;FtaAPmbyVZD8cs1cPEad>G*RNz?X~>^i&QNF&U@#Y} zQd%X^CakvjTr^KbJxoR`g}%iaFTP)?8+#;GCv2o^#{+~fGl@cati6WS5S6_6Tq5qD zSm@Mn)CN+J6N}Gfba&U}H4|x@CAU~;he%WqhTNg|j`jxqldYzXsC?B$3h>q)StlGz zQaX(n;DyKdWm4IX7)G&cQ+VXckEf(WqD&;|3+p<=u06=3zFnLd{-sm-A%Z+;u6Die zV5UdZR_qY&1vqH;v@9Cf;_py_YPKV{!$zkScoGg_tWpL5hSYE@MaC_# zd<|f|NKkS{Q~_Liak4$auPw{o2+zW)_bSB5a7E6puBPEvq1Z}88kT*CUuxf`5?e?e zU8iNAS=OSd9vtit`ZgHqrXb_@Xg!nQ;AWLi0GocoJ>&&~M3V^B;&aKgrtrEG-mkI5 z;6#?C5(mo=oK4R3d()ic0j5nLM+>e%s!F%}9lY8NzJDaT!!o?ERKG{mehMRIk|~qI z)$9?j(X82HO2u)xZ22kSO@*&;b4wvyn?ly8Oe7vEVc<^SPgzK?NG2!J^_1Z(aSOs% zV&PvwYn^OlWOSwS-ge!)Y>9ldXX`f|#e|~$v;#|>Z`>7^`YQ#NMGTqjsjd>Bi0NH6 zD{gOl0KJrMVv<{{atARDI>_p@nJVMEMLoW4*6jZMam=D6382?amt-a`YTr{jhDZh; zz#-y^K8`OsCWTw#1@itmvmDp{4}@WTysVx>3EXIkoG(MW430{VUXvE3^<($Uf*+L- zU?#p_g=>)Br;IC*AG)ndx-nCoh?UbA26sMg^|(g1cg}mjKzsy-GN3GjfgZ*0X}4o( zLIAc5Ty8s6fXD`w&LB_a3H9QucXTj(yt;tpkrqVd#jAL@nFlCZ7a!M?#fkVG8W(ZB zeeX^~mKs6YR=PyuTm{MiH=9C7aj5>hjWTRi=!09buDQka$nfZ3@soSX;a?g^72KL4 zmfOownc11kW=*#lrVMKJz{FJ2Z^uc1q7b|uTf-5YvSl&c+p|P9E*@NK*RvCyWq|Uc 
z^P8(gg~zqww^XQ>OnukS0BVwJUakNwVcw^@B)Wu*G_I|Rdsr5)pD--@fy0U-k3X>E zB9PqdqrWdmvGi_pP6YvwZ4h!~n*WG(nISP(IP3xrz8TK7IwA7|tecU7b8>T+-sVx@ z4iCOnHVWcs`@)|t8ApHmR+fZMP6JvQ-=tkS%SEyYAzRrglU>xPvl_4UEU<)dBJyDI zt{mUpl;hOxoLK5a#-JX-Hru-Wy{R|W+{yAJN7-O`3oFjYodgCDn_Kq_8E? z`lS#!-wW?sAKXKXyMTpr%?D~8wZYiJMVRgOF2eib`S(6w%P0Y-!R=CK3L_$8{8O!d zSecB=%WgW&#C6iMcw+p=Dy)U5>bd4isUPaT?WrsZKV&;UpW`$hSvc#kpD*x-D76G9NKKeu9jS1De8L6l+!Aldc|dDDM#V$|O&QP7WP z0!iYz$&S))Zh3Q#g&Vf?*k9G$W!p=BkF)kL)!>m+g>?9qmfoLc)M);l^A%S&+)i|^ z&GIICXR1QcZ?0uo`})E3h8n7!tTo^o3i6^3(_`&A3>6i3|A=%7#B>M*?lc+*QswI3=|jgdui~|z-j9_cBejtp;b%ePMrFr?uP;DTv{~L|xAA_{>-|Os z^|V)`h7AK#-B|i$y0%1tsM&nZ@%b50?6;0ele?mKbR|PGh@WpAsqLqHy)Mf(CwaL{-&%ZN)iDKr}0-Q4p=sY^4DJ`PvW94cdx1vCrCi@5W_7^pOR37oLk^5x(B2V z`|rK3Fv#fj5@fNJ-n7F2A%+(ggIW3fJlk0)@yDd&^hmVxPyb-;F?*#u7X0w~>Gq4m zQcDnLaUco^vYaWb9!1V~pgl`Z&cqr?wO!H~a7t@pgkOMJM;K;XQD(jo_1A%Z!`5r;j_cke z;q!T0MphKR+mfM~=utzk5<-4)%NfOR6X6S;z_PaJiQKjNgbHqDCd^?dpa&b<3_~WP`$w;^!MaDFQFy$gayi1c2!abA0ICDVo8sEZ_SmFRfN8} zQ##)DZ^mnMQVXyW9_u&NrAs^{$XKw53|EYSHdG|Yjw(^ z%fd{EECIo$h(;iN6g;V*hN+I>jiL+J9O`kkJ4m6lQKn_A7OYP&AHn^kl^CJ(*|?i# zBmArEjX*W7+)T?{bId3E|^4)NSUX>ndo+02xSc;+@@&HaEg54zVyTadW!y-Ha-`VW|5^Ou z!`;c(fbEr~1Me3U6i7LB?N3DgL2J7%q-#<7D>JU7S5`fQV>6-VCFguf^y&)7)b62KOOCNJMbXE&)^uu7iH;&_RhW~UIt-{w)Ip>Ls!O-UIt(w zKuh|2n`r7D##CeHh9^pKMu?bhw>NMDQJ;7q($_z9H5N87H2r;}vU}g`E3J?8Fg<2Y zo2uPjyO-;RP+M7XG6Rpy>9i}4$|Cyh;N*~7^)dvbCHhyOi<7Thn(%hmNaL48oS+Xq zIMIm*Q4p>!@u}aSL5HVV)kT}OwVEjJ4-WbEfvug@GADye|J3_Dhb8oFx`t^Jh`|l& zqs>KDk70sJ@fpxP?R_Zq3<}C`_bf@jyi<eheO*Vrnp5!m0{>f!Lz ztSlZpTT2?L{{I-^)DJ_)t(^1t>}l1^!P_k<8JRS0n|_w_nfjq^nq#jwpNmZWyKhq&4=jHm{TOc~`7$#z3tclm_sNg+ zb>6F#Tl#J(fR^Y~h_apze$H61KbV&VPesa$_=n~~gh~Bdnios!5 z{>*6eo(Pz$Z%%1kY-TkX@Pv!HH-gVbQ(3gze^08}c=$l+!vq!aVFzYb(X=p5#MVw3 z=ogP@Z*PCcscXVn8(AHiFW1U&4;{LY-thdE+zTOJwd`&EB%~Unr@tcelMKDUkmVK> z$a4YF2?w_@V}{g6p^I2#45#Jhxl0?bKN0CXdGhzITTS-7)xYHYsc9W~ff#8dhFuJf z^n1{AN2o3QQZ)@rp=kdDpyZ{3SKCHL#8)TeXQmz{8tsAkI3=wDAwzDE@Jkf|OR1Ms 
z-3Q4gjL!L=V!YFb?_7m~MG)@bF)&nxbI1PLGjZskiN?m# z>>jtiDGm%0y`=LrkDlVY?cMhzREdF-XjcgH(p4*^OzeHZQ$O^<*sVmUqN=yYpp3cR z77b&m6LuFv*ET?pv<|S{BlDoJvI{ z`De2Qz|rlrYOD+_yz9A{ed$*-IwZAMkO3iW`}W!06DZ#m22W)mM0T5c&Zjsf^DQ9cegqOQcu=7}hlY z@78E}0}e|#PD6;rma1Pe`*Lq1+Z7jGdp;i#9#sIP_1M_W_s;mVXw&A^gALW37~`Fyrx)8V)_{Z_^-;Y`~-@40hwNo@9Pt!E+;g#AsWq&<> zYzc~x$X6#b^V4P>v=|Tym$Y)B7C*tnFLxR%ydrN6*xZSP#nZQk8ojRF@>L_|CvxNS zJi?FO2eJ}TS3vkZb8Dev?|NMhw4-&2MoU=1KJ8-xQ#y|wJJzO(wA-rp{y#yU5-;Oc zPg3T;?Fu&tMe)$q7d-oAulR#q2qpwqIbFSUnNUWW`^(EnCG*0qYyZyw_T}M!ZrPDg zsLjIXp(-%ARBcD$B-2S1*F>F9#!O|^`~sH&zG<0$5)@UqjIW;%64*ZR+P?j2-Y4vj z)!m(~QK)&|bo55)S?9B-@IZ1=cbHmdz){5O%78QAF=G{r8^D^J#Ma;5T)IC3l^8KF zQjh4q!G2Ju#wSX;N9pM3yrK@ayJTJW$nwg{OxF$La=cjhnuHb;U{$Y4@JI^Tx3UjD zi@^)AiB%JtHESk=6!Z%WIj)O2454>0!G#8HzDR)V?lXH*_ znJ&Ce1dK4QGDrYXDDiNczio1YyNeE}{KvBPX@oRQ^Z$x_>@G|Tvt8c12L=V`wd|zH zk&tn<=MSO~22N5*Zu$cUB(JQ%~u#n&~bzC#!FKzakjnV3q6oFBbhsfgh)G%zK#>FQMN{>aaTRS2baibiYd6+xyosV-~@+ z8MEve?ZY9Q^?fbRJc#sPG0V;&N)DjzH6G4+&*KB!4XdHih#GEu%O&b+QbqwGD|CRV z3a{il*HBwq`(D{hNESM=nVGYw7RG$2j$6)l*nZ60-9g+bLCSnbqOs|;c|>HF1DfBZ z#QY@M-`__E!JqC2lv@Y=yAkfv3r`8`BdIdo(&nlXhNp{!j-+$3uX%jBPs|YkrwX(- zr61cSK==8@4i3S1%&cXrF&8;l7005d7xP1k697C-rsl+($k<^H%lT^B+A@oW*CH4E zAtM{{*9oL#>IsMVr(=3lQ%oc$5aI_n;FbI<{Ymn#c)iT`Yjr<4Iy(0ETd~iP?0{YddjG-(s)xgY^|%QwYO9Ctk}C#Cf~eqqrc-|qWh#}097H& zsxakNE1Mlssg9C|C{^TD?jwYurTdjk2L?)Mo5qPuVmGxnzqpxVNxLFl4Xk@ktcLZk6BFPMF)I~+Ee0nZ*IHs^xD482xD)u5%z@a;3%G*83Oix-ob zKJ1#Jp*vw;-(P^g2Zw`KiW;1u7d7e>pNYd4I)Yyt+u!$#U0y)2Gmz88TJ2d2m)~7CWB>Q4G|` zdVchuG8U-6^bHCd} zEI;2=PI3D{2~exk9O;u0inAP68Mw<}5)meVH7XLjtn+02rE`Ag%uCb>PEl;WT`J;l z!eljcE1b-?OKwOv#hcwzXFUXOg_E&}(=n@mzkWL>mJXXd7eW(xp@ z8D^MFT(EaE5v224%eY?QEwmf?68&&kBZ0hw1Ex@(5#2!}0LU|6_~NX~OP`0h1lEcuC;vJf zDU>~d?+LVHO8PO;Dg*x4^;Ma*5CbnB8oJ`{P^u_~Uf+K6jr+S!-QH<@swQ#6(Oy7A zC9OP7#$9}hkD5)OXQ_M)`{<>a@xJ+Z0vYALtqt=YkxhBM;QlW*P1JyiUZj;5@Y`Mn%0EE3n$j-S)4 zW-V))bNWX4Cz;!+b*O6g<)xkPmBknO7O#DL5nl6@aIfaATkj2gkL+PTuVrUH9?@|i 
zP3l;_Y7H|k@|*ODn)AtRSMtSa&dwiZ=561)HLR3*wug!X&$v+QbHCG%n0tRm8~od2 z@`o&!39uv0A;#4B&aK7!g?*}ijn=}R=)n{(qw*^+t*foE^6egZc8YD;wJ?HjTi+tk zzAJgFR;f;dN?(LzRPI$<11L4^jAif&TClC|7UN{Jv|v|#u}T?y<@ltpp!k%}ACeHwx3M_}Z%78mOAVP>5%*eNAx@jc zC|rsh{bjBNg?`aYOWo~i*^0Dhf}>mqZS zE14JKzqWh)`$$wmi-+LTLQNIDC-Q+x9Q$sD2|)}89TQg}zB;$C@KB(KEV!l~phP0J zx~md*VF_3?-gL4%H+-TP5}BSXbUyCwWAfCN&6^*A%bJ>&7EEul$PZBf(q?3p-kDoc z62Z~P*oMc1_8-ws(}fxfB1q?kG!)37aUlVTR2n0@{N5Hw-+<(BSfquJ#YGKIJO#{C z(Txx+iVn;^B(EJttQ~f94UNQya*Gn!t_$2a!AITCijt`aDS;AxB$z%4i1vmD{cY{F z1#rBjWq$yzK2jrMew+A3KBDFEW#*g=oB=4yD7dV(3#}rNG7#bpa2E{i6~b+$F!Cd4 zDtNRhTtssT-)ZEGLBY2mrs1Aj*r)hrUruhBGeEo0<~f=JOGV69l(D#OHuKa$gT_(V z1H?K9R9*gew@afw|N85{i_(XLsG9xZKfe;!kCpGCCOPm;Up_H^Lkr!O z1`SUHv^FpFTz~I=!IwW=+FgVyMy!nzCR#x-$4u(R(xKf=bgcgSiOG@>FWVKyg{}Ra zpxo=#=lU6b`2pLP?5+hGB>gq}3rhx=^%!%bNCAE&jME{%OU|KhalOvlqA^dink!-) zhV5vmNY^+dZMivmkMndF5e~}4SMqVYg`e_gm z0jkYHk@rxmZHmbtCqVh)Flo*Kb-${SDgSkk4%O@ZeqSlfy#Ky^+gL@MGv2iR;gc^s z^c32W1au;uxj9ez^9(k8G}wX^t2TIFVTcXJPM4XPs1z$Lc$;K$)2Kfi$ zf548HSEFf-NUZn@i6iVqe~i~OLnr~z>AM?6V(@{^A9r?BP z!K#`5ld3)+uDTW&#bIq} zBU~0qlGLqRJ+obsu8D46z>I5q;iye8{Ynv2E4=s0FO2D3cY=mJrXjfCO4i2+F<>rTJ|n0K+inb@VXn&q@Jpux2Zr-X z;RauOK&?@wI~{vJXIP)J$O_=$MLC0$8;t#iDK3cG$M#r{o($;IfWs?K+JQ0jI!dDcXE&V{o{>_#5VNmVjl_J;@g-_GCBSF^ zS@>(FfWG{?z9Po~K{PXD=XGdJKQNL@<=Os5CC%I&G(!Jl5kf@W0r}WRjJz5>W{j~4 z1WX1@IB-W-yOZ53`uEfR$+lZC~xz@0ydrll$ zdH3>FJV~QN8VWOaq}dWLAFL^Aq}bQFx=3_M_H#SDvjW$3OoPnV0x1NA=5Sd^tB>0G zE?WtyUoO|?Kgwqu^W3W^5`K$3N2zoulIFpB(eEelq1gq|eFJq``OE_h2lbaD>^GgO zCZG?J#_#szoicLWhoq#;5-Yo&s!(h&p@<~Xp_q)AhY!sm#7z=Xuk5?1 zxo{g~>dUpYYZ^9cbP`G&E^SC z8$VwTv*paLQyYKSGUhR6lt`p6nk*_%?Qr_;-?VA0SDcTZ-)Sn`!cP?UCTat+tF#kJ zyN@(!&=EmP%zH1FfbF{j=-a0~;^nwO3Zv&g;6-?WaaX2PD!FiB5*?`<97I#&8cp)C zn+XUXLST}FSeXc-r*^tAbJeT<{_y8jAM>Swd~^y_yn%DBD;8&5Nqz!CXCif2F>dN* z_Z&dfBxDCJ&4+@0gdUdUXZov|jx$sBrT0UHT<;8m@Lmbd*yIsRB9pOKaME|`2*E{2 zBL*!MSUA zfZ7YS`8YgNrRw0pgR>CW5VaaX0}Q0$)w4vZACL-@ys}_PNFkVCKZR-E(vCtBFiu5b 
z;~1?n`}cgO@V=(*v7$^YUAu9*_6kK9S?DR~NYKUi>c~)PV$S|Nz4owd5J(F@BVBd@#9uDB5rk;IT>I2flJdeGpm{Tyg`Eo32w2ipD5j2zE9t>iX|7Cg3T(bK;4el^W27a*z?@&5h?qG@hDkLBtqT) z{9>~@JK9ke%rG|Sqk*8{>QBcf1nRVS>cdA(d6EZm2S6o?$93`3(!l@&u}RqsAj$`_ zF{jZ-g4}iowso2zm=*zJXQc!TT(rF>DmYh3P+fxvHjGu+;S;im$aeIXFdwwOyHX|} ztgWfASMTfZQX~hgR|QDV%})clB02r#8RK#uRi@9&4He3Vd@O4Ve! zHqbz2Fm&Cf8X|3B{#o+_&QDI|dv5zNLPx=r%)!r!hPslsyrC_M9GN&uoF`ILxhuyg zjA~H)?je3J&k*k)$e`B zHxvj)i;p(yICo7m#WFyn0Q|xBr~`P(3amh1!kMT2a72Q(s{t0do4*g%0WBC74MeLW4M) zDV!`6P2`=p{B{l@yop-V7ow)fOK~peuktcH4|EQ!Arn-_Vh7hAj=+)t&UeD*Gl|g@ zO1lO5hqF$1g?25K_=A8k$0^;V0`Ve_@aD<+V;rAnhscQaAVaHbU!wZRdp*jTjP^n( zr4ivmt0WS{s7o-@?F2c9?#gLss6E`%B;Vzq3XW|&cU$^xpmesWqAV^^AlTmLS8R_c zAJt=}$eSXGK=v&p7haU6lCXg^W5`U#K(ha~v&DZxMCZEfo_o4(>S{;bNG=Mv2M|l; zIIM)n;fTZoGOugmAw6Z!+0juj{zSb#@5v|BL}m6CZLpKTsC7h_hLKQySaI!qQDl=c z#+50KQKb8gdiO$GZkJpJG(md~E4VZ0X?zcLV=p!wP$M?K+zL6iq}Q z#&PQ?rGN`I+LipL4uNTCLUIEoc-xH)7wT`kaJE3#^7K9&{g03+IDYeW=bYpj_T;^0 zoF_>0{)|oY)YFK7YhK<>-azt#cH&?OSyj?aDO?r0*6zMXAmzg*nqE#(wI?3)))9#| zpg-?h^ZFT}7CDn&3ke(^ohvW+M>s&h?%p$>=GY>E;|!RE4>eUQzDO1hhh{K#MPc;t zHgVaOL)O;*lfKk*p4}(VIaaU1*FvJZ9SzG?gaJf+VRNh)k54kTiF!-CS-7|54XKE6 z;_N(nb4jIBU{vbJ>EDjx>H`lIU-!7fkXwrH1JcSI$UsF#Pcmi=zvoUIiR2@r8c7DC zdW1@uM5TZg_mZ>gD?Vg`FUfQw0ITt;M~tx&^=8nWZrvMr7DQ6v7K<-LJW-+eNMW1d z%*pbueyuPET%R}elBfcJ?qs;5^U|e#StEr)eQMy{i$n&w zx$4ZBGj(;xV!$Y^`Aur|=3HioX!uJZKATjsux7bummHQsROT-y^ zQ6aM)zgwoSh}3VkP6h{Cl;BUXH;3lqctoQ^h;2ZR-a5j_k*1?R8X5i8iX0ik(6&?k z(hnaJc}HP6`S{dRI2+>*g)&95rc}?a5eE)X!Gf@jZE*o+<+*=V`dNHQVYR5uERqRb zWUbrQ5SXRVRt}=qt_5F5Luj9#J>P`i*g)VXYYbp`@3Hm5TTNa!X)XE7^XGHlH2O&S z*bOrH^F!HaADyKjF&mG)n{1|fPVM)sv!wHCMM!SzLV7b~8sZ^)G{jC*4ZDZjhPwV7 zB)vlT;`EG+2`FJ|^v1D)rC%1<;I-vnPe^-3HbGvq<0G6q&)NsckZ~%J8wW8|&lOg0 zU(II1T+LY?7yiv6uGdzO#)!VF^-eR@|1~@@KNBxfF7h*%9pe}`N%g%4vWUSP)VOtG z<2Iso0k772H4M53YK+TIPEu?uw6mMxUlpRagAS#6(>p+1+ z&CeqOumB^l1hXD*iqeXwp?Jj&CLPZv^x(v(KiX9N@_AXNqWWxW-OU70~ha`LlR^k+u<}h(C-F z;R5OZN7_PL3Hd|jtqerEPwgFXi&H2%B}4ZW9-56mUHsr!N1+|;Q~N=yzc59O8?|_x 
z_fLd9I)j?f0Lcc@!sDN$pu{eiGO_UhbvEF=hUh>`y^k`wDkmq$F`n@?dyXvae8%$# z@nC3J*uB89bG(p!ou-cev53=&Ws)e%6~B%Bio+pL`alRpHX_O)^WR}r-TE6ta(Wu0 ztEI4u&23vHda6hl_&|017`jDHv17M0|8DJ;(Ek(ed^STbejmlw$6TV?(qFT8K+C@EX>cHL zOn7V!!?y0#rMHm4{bgd8m~}EKg5*r@y`*xY-Ql&XE9wdl=q^}}qKHkF2sgnl@KzZp zVm8ULp{u+AwCnb8f*!K(i3A6dDz-NyE>4ODi^=qbhHxX%| zN5m{2sgty=k`S0+_#`}Pbv%FlcL{|WZjJrd*m@on-D=6&IVjuzzc%TT0DhzsW1f$k zUhgLCM8+}roQVL8V!BYTV@o@~m606y`_;XFUcX*SRWgGP`k%e)E=NO;4*{}ExJRuj z5ENkwi{dsl_io1P*O%eBBgeGUfU|Lm0(WAyU4bLe&5UcR?Hr5^{*?{94LC2 zfEH2$waJ2dF@R=8s`PcB(dY?C5uwq!{lo$;=jOj5l94&M=V`ijtZ}2{jLb!#N0~u0A`Yx} zsb>2Qj#xK*6r23*O_HWvbdH~i5aT2F_Kzb#kbX-wL}@~7ov;)xU%uS5)-v4D3ik5^ z09e6Sh^XdS7%Oz6B+X%CbP=kcwlKxk)^_7Mw-qarWp+Fagxz{pi2=KKMyOA3vTAEC ziOM^y7TfYmWv zTgs@2%1VWXXpn3YWtEXt*((&4k<82zLUu~Fk`-lUhW_{Kot*Re-M)Xf@Auq3=Nw1x z_v`h1Uf1J#T#xH<35D@DXjNUtMCQ)%&E}LjN4wg$=w|w9jA1|+dDXNe%MuTtr8hTo zWyncWhMlSU1{2hGxr_HH9n)C;1wN*$!OVh@FIts}%w~LODAqm#vfn=J45Dg!Zs@ag z?9qb(COf!DSFT-ayE442+1W{$c#x?3OACwqyFPEd=;r7_+0$1O|01Y0&gaWX_KJQY ze;zkb_Hw~i4xF4kGy^6#{i>P#pEKYpHsGiZ8Z=pY?9idx z+-Y22b@%H+1q6*>vSP&@<`6mcht-2mp6)!iC3KeHOOo`kl$&t914H|*!0GrFq3sCE zR|E{h_K)pXHc00j=7+$jOqirhJm&A+y}Q0~6{Tt@A_`&T=!D%tw#7p3X@{DZFXFhhO}>nKEUj=JtyJ*OU%gtt^Wqq8^oz?O z|A}BJvM}s_A{ebymU{cBP*u~O{aNP9WT@=wRaXz&9@MN&n>J2UMC=OxXIxdVRcP5@ zhF6}2m|nA6h&i$sVNB|9l4c|k7N6Y|r~q3}S*S( z(9(cIH1#5wfRI27*&#{`c@k5mgfm+O$JkB|MW;>=6SABAa+dxpy|WRNfbA@uwwc0Y zNcT_F->`;1r{{DoEeeBDJb{(C*=Nbyo+IQrp!~a1o6itIj3BX&)pxQqaP5Q_E_KhL zZtLLCAc4Ij+C~|=U^C%3cXoKAOHC=Cq3?%*&^eJ8`sv%=mE8`A!^5IrfJTT}N$1*C zO&T_ADBp)35@keg$Ck<^3r>xY=yH%XH9e*`8cb*FH9c5?gdfDvhOZ@xuiSU(-7#m- zhvP>Gzy!w~E=^|QBzM6&WBZkxyoGw;L${+yZ&?(-R~*`12XIU>k53sV!(ZqX#9E|P zTfSPlE(Sl_L$qbq(5rs><9dixMnV;4e^*4dsZ}Xs-%t z*j2Eg&-v&JYlBgiqZ6~)phLYNVJf{jIVjrn@k|T!V8?aQoQ&iX4+YNH46MM`$hg4$ zUd7+n3>EU7x@rh?&BJO=W{f!4AVkcffqEFqgR5nOF6PwBU%Vv|jKAn*-ZW5FK~qR- zXlcdJ;1Jqq!wULiCjj(1kWf;;i6S94iqG073rH1-vf;(|NfXA87rTl)j#c=UEhD!! 
zX`-w${P#;&L*VC5A`r5BiEfOa1`7I7HW*3s7@u~PDpf+h zaZfOOiY>X6dWAwX9O~+|aqd^1jpA+vFP+RnP z|5HS-@)&>`jVzrG=(cQWKrJR1<%0+)2pEq)&xK#HW5ECL^a4leNx=k*aRxKX+Ip2f zCLyv#$1Faq@}S4=9vOi&kq;38vrOlp85SLNWW}CFV@866ngMEk%3m)(nffsvz1fR< z#(24_m(pWqz3Mh>xQA5eeQQUb)H8%ePwF%|xGK6~)kqS7n59s24@-t^$eX>@cJUm$n>d5GC0t z?hwhvLA!g|$ZV9jq4kcH~pVN*zmhvTuu~LS7FcA&=MtXAOv|ZeA=WEcH+x( z?nu9W7ggsZr77oxlQvek;YH`kw6dk*WuEbt&6~y2O2qqvR{97zNDzCyqL12_FJGjT z4_2?pJsZ(x?SoWp`Z+LjVY>8;*0K~R*1 zd3%%ik&`cpM2So8+&!50CRU-4mLUgc!(06OC0p{!Pc78~6u#;n@A7>RFS74FxiRpZ_9d$wF4cp6N8T9?8}ah; z2ZSUdT7(ngZ$2*L5bU8wOr6g6&yXcwl;FrHRR)(dyV_hkGUl>+?wn|XvJdg@cvyY? ztKiwgsHpkId1RhjoLZB{!~XzNW~E4~rJxL6ekCzB28!hPKY`SipHbY`jV)`Yn63Rc z80h{t0=|Kg70&{dh=oWQ`N|64F)p!CiBfAn`cQS&jHlVzk$k86i-?Wz4PxFrIZsk5 z31G^d_|ft0MxJ=~V-ihCQGc=hqWcv`lee;-5P1}_jUQh z@iIz<$1SIiZK?V^e4>t0dr~S-6?XMlkE!ed?hRZJNkxm@FjxAULI0vWD`D`*>AT42 z+riuOI75aEr0(=P%Q(BfD}O{V8_?yB%dg2B`#R-FDA2J4UvMx)gGtdK1aSbGho|TmIwDAtpL_5u}zI5y* zm+^g}%SJUX&-Y!%<$wbpgiHG6TeE;E;Q2OqMl|ime4s|Vx<1o;=<)mHSJNA^)PmYk z#-J18f$So6k~%2MO*WVliMa z1p%9s0%YJDYra=2%dKG=Q)rczvXI@ebg|%}|j5P5`ljR1THEWx-An?NZwj)}5 zlBXnuPG!&2^9)ck-RwBJz|f+XRO##c4QMtQDJwnLEH#-j^|xq#A%0|C%Zlqr6{hys zUMlCgbaDFIhpiyJVf5=4$tHK9qCl9c)Lmf^Sk3OJP;Pj%oZt`s%bN~MvBim*s?21a zw(yX&@kUDl28ZzC+bP*;l{hJPa=_#H=gH;t>qH_4k>957l{wd@eE;YnYBSa1LkMBx zmhNQ}LnI?rsL+b~FYxDw5R)Gl7gG|nh7Dpwl-ya?iwf5(KE2*=1s& zL!9W(1l5hz-}|m;G~!vc`nq|PZm9+6Dqn6ou5?>ZIqQX~U;|d}z@bVbJ9o@^P>*F8 zCBYH8(PDo^#5zM}@zUsUnA}c!YSB}w?|kn0F6sRc!B9Oq%P`goX|TbRw-$`Xgd~-W z%3%M=@KzSgoruj?3_r7GL=(lf&|z=q5f&m<;#c;(HsoKa?YK6&rO-_LLZ!%r@M4b( zi0KVAOi7|)AVVLj#px5%fW|^+l(8U{jPdc=)PE((na{UFM8nL-rhWPGYunFXzdj>U z|D?SnO8i}9Uz?uQ58fnF*6gCKvW7-h_vw0}p~8dPYQ>oHtER{^V00Z$Pgdr@VP2l< z%*&LzL;RU(!${m!UE&c!+)7+vM#d56OW}7LzeFuVfR=O&owuQG-e{n&=xS-4u3fR^ zOXpLx6F>YmQnKd;n3swMSJhs(XaSgc+}4tIX{CvP!ICqv$_|ud z*Wm~g{Ltl3{z*k!1HKoU41u!v&W-bWD+^m7i%!4nM2Zo~lkm&a(U|}4Sqey$LjnV3 zo`iC~6m=b?B`!=iBEIyc_0rGKKoABMero)+HN#G=I^%a11rTTuIpy`}ejEW^u}hU! 
zy-L%?sra~^4o)63Pq$Dm9Sn$0?@DEHhp98_w0u`mQ(M6Ka0)r4eMAXqYrBe? zS~WV@Wj{MagsUe=N^|3J*6h>N73mP;@4DN}VjMJ8a?`0IrkkYPFavMVYMizBa zt&VqYCAGGgii?a!RlJND1X&Eg8V6Zz=)$yu`Oz1`>Z|HxHNVLREV=4Z+`v4vC0 zAV{!TYrcuDqPQfYeiKVHsqfLtrOw3?SIkc-)^6Mh5Lr5yzSaJLEzuFs4YVFGAc7V# z9s7VMQzSh@ZG}(WqZ>d|rn6?9pehIb({n9qc8}tPf1EdvqDe`tbI5w9aE3ITJ$_6i z(Wxj#%Ke@wC!6<=0g|gc`E{k6xA07pfvFu#Cv5r4gd|jr)J;~c^$0_(50sXLHnarvup=76JUBffMQJbYyCT6aVLCgx2aR zZ-GDO9BR4NGE;qin+2A({hj&_$lRz?uf1p6on1~C-v1F7t)FoC=-W)MuAat&56Asl z-LS|$)qX<9Bf7npbRV*+=|RJ!o<9-OePdP= z@#krP+4#YO2Mc%i{B?Qz-kh)0kW}hi#Gy?@?!P~-OY)RvE$ej9S1M*zWZ{u&vps2-A}7pv)E}9=2n*yGh4#cZ zP%Ji!{T_oOrPZrp@-X>`sn)$K^?frb`9en@^St%+=!>$pKXAr~${6wBDNsNm+crp1d74j6#-ILrQW-^aC0eutH}gwv0s8piQP~|^9a-yY zT(?1kI9m0%oEFGpVGph$c zZiLdzN4K&1kyKpNQ>HCcQV~6_i@}eVJ&R*$ljyf?drj->`}a4Suk0qCINYt#U#Fq5 zf{Qiwt$1cNmY=Hb4BEKthR9L3> znoRGZgd)~4jB=|lzIu;PJ&G2n$Itf`7=;};zoNPlW4r6GMj|4JPT3ne>(dhxJs95- zt(*)`NPZ0Zi0a!(Hm+2ymme?uEQq1cMr^qTlP?1N31RrF_}oq z5rpNrMMX7r=L2g&&WJBKL^4G~mNjYCitFOVyK+43e}rc@EPm9SbLN2}#HDPe4SUdW zpKvx+tO7~EF`Qb}!d}p}S7$|eu3gIZ1ht2m@InoMH_Jwjlb#g?>28|-yO~YdBp@rI z68;%oXy(}^)@ssZVuZ9j3xn(3{#}<`4b^i)-??(NYVEDH$eCjj95yFX5!w!eyp1Ul ztQvRaO#1DB@B&iI63aW$0J_5#jf8==rA-1kKhb;t;lmw}jCGCZVrkD8#7(FL-{hdf zp>?K|c0-VmakD>%^x4EfZYHAyvg9-mamP?5Wr&vhAH>6dNdFVl*!hm8YQq}DmW5V#7PY>tjuWW(&bG~wWXk~{omfUk=}N47Xi?=e;8TM-@n z&Z)DLJoI}w?6BQYzX@dogWLU{$}rlm1evP~x%7cDY?1ISP=}aGQZN)?_m0=txY{MCt|+>JeeY zzTdYqUSb4h52#j|=HZeUd#_AH_xkngLjdoTlfa<%raneUx>CavuZ|%d-hpqw zQKPns{2o$efS?2xq0Bn>3m6}ZURIQ4v?0QG?;cmBWd$WN<8R&N7yO^N9yPk%kVb@A z97OdwYyvWZ(pxhkv)%9Ps1!d;7)hT;ET=FP+gYRC-Tv}OM1Ik7pX!k)5kqP1h*&}B zb)>!S4{1gsV#yRoC1UmCk3aMB@*w<&@y21VWh51mV?w7L>wH9y!L+xo7043+x+PR6 zn3gCM($5!Jj&16Sh7%^i4x!yt+?V3`)DOE=&)%h>p`lO5q9hyMXLPLkSD}p&(nrzx z!~38g!MrUPPRSlCn~FrJM>>?LW2(JJeo0Uv+&sd0E2hNiE{k3Wh_h{NmsawP6ph^+ zT~*M=tyvYc0Wo4u(n7|NqWd^HZd}o*vs;AW-D+Am=@f6OcjjkZw#;+d8!oE$rED-| z{bg39TN#x6fzPdo5Ml_4Ib|RCsDDxmJkqI`2Ftr2^86T8p0RY}CwqI!(V(TmoF1m;m^Mh0GV!&T`wo;S#9lRB# 
z_4>`58(}k%1|x21X#@<{?jE-zp!Z93_(4I+PQRS{WrLYd+B!g0h$+7E9Yt-z#EjU+ zrY2r+v%9giyEqNdblBmras#PX3T`s!g)S3k{lk!bh>2fUK%3ZxZC|(@^1@&<5V%Ly(LF*={kUVsmpMd@rx=1FT*T3ZA?03hwAUKblZ z>bh0Bk7m8x!lIoX^)z|a*SV8jIn32m#Rb`2=NkH^ceaTxhAu2BnTM)l%i)smWqXU$ z=83#jBmuG!Ib;Gi_!dnZV1+?=dQKpdm13wt$dT|Yd^7re8J+;0jlEb-L9;1Ks|2{= zy}|J1_J(hVqk-!FKDit*osq$lt&t6;SWRrxzI}MopfZYwW8c2PxZCQpMw-T^?BqU^ zGnf0nbm>xZX(lzUA-ySy0cIQemcC5Ct}N4xVJnF2U^YqxN9`cQ#X*m%j_n1#KX*EL z6|;U5WaUa$=Lpz@t@7%2H(S2-A6YiIU_jCM?nF$4(4vEo&e>oSa)*J`bZ`Kz z|Bq-cj|68BqKu6k*5keYe!o334>WG;tiGI}HR8ACjZR+L3Zjk@N!QYpQKwu2?^V;n zcp{3S2@S9Pq!`4i=Ydr9j5-ib2n^E%Qpqphy$251;(Qf<>f=&65xEE25)H)92^!M`ZxQiB1CWJ$39v68;%^u&(qQ{7m!lA`aJcr+*n_J-@3n0H)OdNGOu+eY*VL6d#J@FE@pq8AuFMg}U;RooO>@ne$oU*A3>s{}Xznh!oey35{e+6A3L zhckZNB;fr#wC2v^nfS~ei&$n9={M}!Sk-tmh?+Tun%a5@38|zu5mke#{;kuq z$D-qZojH7TCZTirrp{P`arnL$fs5|Hq-Q-6OS%zW?-;Kl|E6RWYslV7!tw| z(-l>2h?yztR2f0A4ue(rELLlP?%nxSl^{3a${`j9ijL&mOsY5WawKbohm}#@Kltwj zqSzZa@cLgkRC0m7PN?_m7w%HD(w|zP~;ZQxPl9xJ|k`cGUvmk7R*Gfm<@yLOyd03cej&oy73VX$A8qZjAQMW`nQHAwGLM_4PT z>2z{M^$m~`ktuJKCxO??m?Msy`)DG?+^rnt!oR>VaW(by8<|&yHw;aHNb^LZL0fr| zbdvRFBWU?U*Z1i0Sl_i1Q+7)~PFh18fpFC zokw21CmRoJHcmZf63heoRWLCy_r(@_aS|4PLWt;gRzk~D@DE|c6MDz&9)KuHi~+{~ zBQ1v#P=wbBG0lJheURwsoEw`V)d-UM=`ZVuvd|eGKHgbG%?8E&zG;-wRMi zE&!U@7k_F9CBnDs^!%H|S&u{a8l|!lNVfZlVJeN7j;$Kcy#lt7m;U_OGs`fLNXN;~ zEG4wj944{^9n3@8DW`*BMLGN<@eBzrdh}{ZE_RozNgc5|O%6dq3e!#So@m-biEKoS z726-X^9QJ@8or%zvGI%;uDx7#2q@@Grxf|RZU27V1L)aQ2wpsXA|uUIlM>=gfg_3Z ziX&>2kUwA4gNX{SdA-!%=%l4+NUjCVf$XcZ7ky0m<#B^sKU(b1d4^N9uua=8l@j#9 zVv}I|@eD(VzB_94^?MjM?B&I6V$vrKsY^F$dx{gEw1t?G`WRd!3k=ac$Q~}*Cq+(O z)?RSC#pU}9DTWC9*L1gXO4_{8P&GN&TybDP292|ciKEe?fk-&pG81togqrrt-!`#j zI=D%-NRw?p?s7yJs6@okSm*}ZkCdZRQE#Yj@&SgCxG1rfQ$QfC|6&WDJE1MbLWN&H z=aF2+u?fgmBbPR)IN{28F=ul_L=DqDJWpX+wiW*z*QZ}U$p-nBitT_V^l)Nt47 znhh1^g#+2<&a4mtDLsGM(&@O+#ZXD^He$pG=?ink3h@+?>d5qAJlzyHByl^SL(&52 zu&M~Ru3XBz#H<&SP^zic2ra^3P72AtY*&2z)VvBnJ$&QF*Cij4vuk2vvb4iG^tvUq z132%SwyLMcw@w5gy?=Wo7-q&ufdq}=t9#FF+ex}fjvPO_2cz<*lvAuznd5<8 
zX-)jw$5w4*eAEg6Zm}>a5ohQvyI?QZJE%%2^KN#5Sy~x$^Wx7`l}DK|b{6y{69lsZ zrIRQ2@8AD6;)~Ff3bV_cgR?B$RByr0wg19t7)n^|zi>L}sU{jD!NJIi^Yuw&CTUuv zgwE0xA=GaECy6$KtZ=V4YUWw5i=g9p`YrVFFO5BvM{9c6s8Nrn70sMAE0%~YaVcr; zFIfbtk~)Rkyd3GMx?9Q+EmGZ0T0WZf>WvE(iI`YY^0p-ded{xD;0g*J4^?fKAHU;0 zdq)`-Ag7^#cx#(nt5Ku;k~pmA4nmfnBM=)3^!2<-+D2W0DSgc4fan?G$Q=7=oQFpq z8$vTk&$1*@IFQdX>Gu%oWHTcXTtaZ;ZZqE#f~hd2nB$>q&&)0ZN21~tg3?p%9UL4y zr?c-~ekI@gFPJFozhD-G84`vIw>8QrdaVr&2Qjncr|$#@EL1-Dz_j~1gi0Z}4i;XS||7X%kUVMx}5QytD*`*w;EYiFM>r6*iJ3we%gm*~>% zgAP-Mq6Ho~khCY2fob3R7WbwD4OOd~xi@GDrya_ro;k}^Ah-XhGv9CJ)-DLJE15Oq?eH?w=P&QfLuey~e~ z86U$u8YIB1MvI6{_Tx~`Y&7v$DrVLpA9&3weR~JbT2GR!6Qt)#Y!MS!p)1fD$i%EE z0_5zvd)bL_e&q)Kv8baMgOF8S`$RZ8v!W8C4+s*!l@i<&Vv^TQg;g(&vX|T zbDywUK?l?R=k!H(qBrbA?^_C0{t#U&4p0VqT~Y5c^=mc)MddzZFa{7UC&~uXf!FYA z$6zHRlUb@NTI!c4Po9*#(X?ZQg;QmuMHUG`0AE{CjwuciTrWj}jsl@^DxJ6vDEDO2 z*w?q4)4s#R8`&c%-bfxE9`$o+rgy=zMB2*(Dk+JYl6~O~(Y4ZR6mhx`CA&Oe{a?#q z3C!wlpvxn_+0=iqV}`=y_3r)t5aA|K^Y-_8v>@eI+8SERvfEhIVo#vC;YH8#A+^;v zn?L-wDnUpI+V}E}Ykn+tM!bp=4B8i&?{g^q^K0Z!(i}uZ_?fr6u`!XsDxJh8tQIxI zZP!y6fQX|Vb(}uJO!%O&s8RG=wYrRl46+UB+cG1w6^jo21pw?QsM?6TAK*dR5#1Ku z03bqEF30h@MF>IE=p*t_m;+ivs4Mh@eLa7$jM*$Zc3&YYdVRg?yCINYQ_)ttZ(_V*eH>}r%WkMFB*2fd)a+NYlbT7;j zl(>XZm2WLY&o;g1T8c3Nu=n0#|cf-uqq zB_Z}gZDB7t#x~$y5nu8DI{Xv&(eisyI_KOaG>T`MBp%wAv9uAecA0EdYzR4Ut5 ze#8kN;%BPEJsc{A8|agS{!f1{xxkVk2!@_9wbF8LL+6r^HjFzfC8?3c-v>j!=`%Nd z75Q5ApafL#Stpk=Nw#$vk!SOT>Ndk3H9EQ|B*aHX#BfRg!rNFcelQJb6%|=#zLpa7 zAOinoZjKnn(d1i8fpmN2tnB16GQ}pHLQzl_(vEFJJobSx^v_SYC~#E5VbH;e{|4mS zqDK_-4GNqvU6fWJXy=KiKd*((lSJ;fHa~wndXKxRyPJ;#C}RYWMm$vM2}=JAvYHK= zj-?1mNozg3{y8+FM`nNZ4Vl2(yUn_--jWnG5pnM6F#F4RDYS}bpIiaO3snOY*)M+x ziB}ddAXBFDwXs%HJO;sI%AtpKm#BCjX%^0idl*X?bu6AQ4Oty6(lA2!Kh0>!>9_og zQ0_(iPmLTFCDD;EJq%9ShA*aHCblRMnF{x)jV{ z+_y$=krtD-_HfK(eKVcD3HIiSez9=EOfSjQ3O)T{bLXB(xaO81W^fYs#VN?mnp&f8 zDUT9#uqD+jMbjNr51yge^1Mc?vic8#Ifml=o~rY*YW@0Geyh)4d;j(83|`(kqlLl2 zAIF%>O-%BlxZqC?6Y^W;=~%ZDFPAOavRAKpEU?)Rv=b3njRy!Lw-ci51V~np%?#C> 
z^l+BVRp(~n4Io6LYy*MHZVwqa#2IQa9bvp+9cf$Tf}EvI-VyUx1WxPMOWWW0FDlR1 zZ__42emhmX$CxpeVT82~?x||x!tb14S&4Sw0;vG*IfVP6Nw+7i{V(E8^puxkwwnJ` z)b4QP?NyCAU}qdE5pJotG7QW@`1R{}CA|zOP&Pp7y24_@N{Nk926fvlH z+ZoGjx;Rg;_TC`NNHJ@*Dm;8A2-$~bw8i?4b9{`6#CPk z|29B(+Fp;^z55M)kSr|amM3r-=<xG0?2Dc+x!fn~3&^tn*?Y@`3mOm>nq~Rd7xNl$P z9aYAO4-;=`og}==k_^u86%kQJM(iw=&qfa)Hf-$<7FQ9jxagKYp696}QCh+Tg)2Bm zKMB#`)TigGs0e2nk_=4>pBP+%vnNKhg{dO3mD~D{)VYxvt-vg#LYar7?V(s(jvc#8 zTIw=FM5wIsKoGz9auXJ|SmpV(2mNO9--`f1>ySemap*jQpC!&an>cs2qsB831J~%~ z&mT z!P!CYES9X=BHF?ZjzZS;iB%OpOei~{7#deoYzNBJ2OYy9#Ccs=`y|>233-!El;nL< zL2LWJYL@%*ET2hSB7`ZtOr2)ds&^^3YQ3(M*u<~2-B>m-r%UsboJ~h%J>H3Fb!UH= z|I5SBrPI_`tAy%|I-Y+4BE20zl=;iPB909oCg+e}8u5H-#)4)`yGeEL9ukW&xE9z# zg}_Yh!=-?JkvtWDyVz4~rkP(c>->>2Up8ZbRMzMs=@&PjaD=|{X^)8=2b`1W-lInE zUxo|h0(1!;d`ZPN?cZyaODGiG7P8RgstAH>G|yBxs z%8(L+T1gx8%(ti2i1r&ct0~!SWW@51K-x0wE$gHZ6|JdMl@PO}^qs74 z#juk|0TJsgPr&xrlYPr4SRpTf^g8C@e<&mhru{S~RbM4krBnf^?4`ZawIl%rJ!$V> zC=G@Ou2-Q4Vfclbu!@E6rfZlE#r*kl%We9mrde8F3k!wU{w!&>Y18EM3loQtwjt+t zNk(~}Qcak7GoF)(SuxdaWA*yx_5}#WuGkz`Y|DtDnnXROOmwZJ45ZKGj)#+CTlMeN z2pf)KiSZF{6we%jRk|pv#h=|DJ7DJO1`MpM95einqzeitkEVCK|G}yBd&H;hPu$8L zCWS18nT&o&4;Ho<;flBE<-ma?pE5Z(5=q9vu1c-*{EdIS|(O9^@T5kIIrqH3VLGGtG{r(tJ zcjWC0CD5Y9`C@s>pC8YDH`4UX*lP^{oEukFR)R+VgmySVbBHU^lE5J&J6tefA0ik2 zLQr@MIf$+m74~q7QQ>6erMl>plFGJ|MKc)qGq;I&C+pIsQ}w?)`qUlNy{v8AAB9DK zEmo7VlWjC7_5apXQIp+(^6v<41V(q!h`Iw6d86B>P@F-UQ7_i1^YoZx%fRnfM(bKh zZ45roND2(}_2&(>HZ>iN!geJ*W^*f%q9LU5;e!T^_}f{P(HX|C*W+BLtY2LEb~=sz zH)`BS)IUfC^aGW42CkJzppp%#zDpEfGBq9=8v3tV!nHCEVtC}2swI?2HwUPAj^{4w zm@c7}7zbiXL;OCeVkg*lc^FEH`}L&w*Eatz`LPH9?Hw5P(HwhY>pses{$WzUi*pT4 zD3Nro7jp>+T%o*m#q$T!xr10q8C59_d4BsjGqg`+LbPM5Q~}a@CTwxEY->kHN9Vz- z^%eTt6(A*5@#+_%T4f|5^rIjGDqJ>Q4gI6xvRy43e}y=txw?J_?TLhh1b<|bGTjt1 zK{^-|0iSt1;E^j)3qeF};D-mZLQfoq@aCO^G*^Z;WfHnY(T%|0J6RZQ`6K$kI3|xo zR?GKNTC5-F(L|M|mCZYlUdXx@iQ81DR$)vJ$Q}k^hrys}hyE}1(Ns;Agh-4+pjfk} zX;l#@ev1?rNX6ET#1Ku6qlse%jUzp59K+Av*rlYMe)!N?LI7OW5TMYxqs-SV*Y55J 
zn~~e<4$%1MDb%!5KInQ;Yip3!kz-S!VO4gRCjg`EMZd+Lb%L0OD#`$tR&CNUcb9yV zWSdT8xZfXE26}=*Qyu8o%2cCxyn#s5mX)dQBplaT;+=)|xF9ahQ>974#&D*~znjZ-UTHW6{@z7RR}xgy+{lmSMbJh$=Vxl%;vl zQZkf6OGx&Y2vcC&hEpsCr*z~+9E-+SeADG6MV}XiBC1_0A;4+K46@^PYu=dZJ+Gv* zW4l+bw=fILl&sMm(Wb6cj#TbzS;m0LNJPg!4ZyO&3m29r4A%YA@)!CHF+-_-YH2?h z0+CzNSiP^`yc_eukZaefS5z+Ki*lJDBq3VD#rS3H?t~fC(A2z8-AL~hEmF~K)0q~P z+TOiwAbg~X);AS*Kj;&yboZ6r`(kmJ+zZ{UwDklkNPmFK$a2PUhp==DQXh*UUYibG zOL@T%inl$o$?F~PXAiA0>cxRAc9pfge{Wg6d_&qD;Do893Y}{nhNBXX`Za?V;&RxN zR!Z-@^>u2r&&9u=6yL_5f6eOE+u`-ZCSz$IRE{DkmKSM_1mtdAeCrAHQ*JM_5=qQ2 zyqwSY2GDgSmu#rAgs(G-RZwULR_Vgi8?+xg>gRCk)pD@tX+5d{ezY#pCSLK$-+g8^4N5Ad|zi7MZrSP>9;?og~&mRkDB=P zn0w*-w+b(Fhi_gvdQy+MXTN{bEcqM$&CmwdE7>-ZsYV8(M6FMG(&x+vb+bHJx!r-k zKW(!fIB*yYICDM*{E>&;*UT3B3dRRSMTh13?b>#&$;+Z5>YF2l2I;ufe= z6aeK!f9BnjglmEiN-nsE$AYW`uo8h4CuR%}%*-od)22PrXaMm5U9+$#I$p7%613Fg z#HM>y_OC&cNEjLjVLccvEy{?QPM^dW2e^82UuPooET>{K>p_E(3e<=QX9_k|Oajrd z7UApKB(mwoV{>jd{rmiQvOLUsB~SLxV~)-VP>#6~jX(iHYYS^n$BMHO6n>ufjZh+A zOEZi|`1Zt`c7mQs=|eb~;G(N57FBIZf@6KaR26^s|2BjsYDl3fl8WjrRRP5EXMkGd zfUc9DEtYW#VG}W`{3o@0w{EhLN{mXynte$XS6LuQA6OnDr=~+!xR49pocYvvR0NFC zQscG<6mD$!wXduIqz|*5h;1d6_{5VXHMnh; z&G?#vfkbXG_2;)qge0@$GoZ9ZOOJR?40S+``$Vz`7#%`9zP2J{a#NK~G3UN|@ZP<9 z|EAqQ7**+%muhOKBQ;+ISVH0;VGLQoZJOw5$4dbb<8;7|iJ`;6gm_xU(0Yju3Q|B5 zfxc@)3t);5_o*si@>-6fhC7U&x#x<RepI+MF?mA7 zd&ZZaev(>m8e)ovAB|<=34Phafdi{b?dK&dzqRO6S_|mcR;h zv)zcQMm$~w7SSLOYZ>aQ-&e=;icEH`o7u{O3(Rut?ZTc>iPw0*bBQGii(PgWV_41U z*oTGk&Y^yB175+qqEq(Hrp=%}eN0?&teiJ)FaZw5RAS5_?&^vg|$#Nn46CX*O z{m`b$b|D!rCx!G66K`ca=VYQbDE*~)d@-S?YbAWN=x)TV+4`guuiFB#bdib^xY?p5 z5KV84|B_+GG7l^!WT4aK#xj7yUKvWV%+bF;)}y->F-BK{jj7Cwrp80A>_fR7%ad!+ zd2$pS=vX(Ch!#}yw^+5hSG1qx?bMmH;f1#oZ&HS!O^ur=LmKhcGo|$^O^bT%oTa)U zWHsR%&JEL*=}@lC?I*>*XPkx*Q&qtO)y7>eshlvk0tD||n8N{XZ9P%i2_PUuX5_#6 zdTMkP!JPx0tEl3lzVD1DbB@XUzDRTUQ>6hcA{!LUJpom1^YWfwoxO z%A_*%NXogBOGiMgOGQCR9P$BPOmIkYb=h?AZ5mXO4Jvqj14>cX2owI{I;k>R*Xr+T zw?FZ8A0|0AwjZ}bsDRvEjp)%Zk~>-)TL<+Ofm9mu1any!CWb1ExwwXM@4O(i{j=v) 
z8ZaOv-GwMETtuVa0kn1p5o9v>R5}R23o4XX9-7hNlWH9s3v}qWwBjLrq?O22)y?x$ z6GuYn^}Q##82G0`pU$Aql7E6v5#9GvLA05d46D(T+VBvn_3sYG;PD&r3LPR|O$L$-SjVDW^00f9gi zwN}zq5%yb-VvxGYqr7M}1gSD7No*P$3VI$QHrG|E@_h6cK&BpH6SGiAvb@1q8*w6S zqYW)L3=v*JM8HBZZk ziben#`vsx&TSA$Gc*)Dv$ zZgf*oeVbSW&HTYS3gmiYQNeS`}k+>A;+uq`1TcYY+qbq8nKZ83m`W#2{MOI_B&b2um&91lg5K0RQOVAuFy$=l3!_9KFBH9bX$>dq6$x#@LJen%9 z%>a+wabhEP%=eMB0Ib4bV&wp-j3;P8XvjLvT^F@y4Wy>|DL%RB>DGXcR=?hiEc_NrX3vfpopnWjv0KiSXS_WOwL_8>VKeFja7y z=)xWF2^9<%sm5Apk^SrD83uk|2JP#usv68UJEb+XV*Cus>SPBz8&Mr}`D81fZ-C6j z$;IZLhRZ@zPm+(A(HuXU-9e1IDd1IZLC9xsQikkTB{?3_!#I?#%<~Lla(tbxP;a!n zhSP(%d^znXS4Gu&ILD&YzwA$Au2ls!Xp2=CeUx4++o>q+Mal?Sy^G-dil&#yZ{b^g zr*}|}dHo9^8mD>i*7uTZBs$7(T2kf83hX_Gh;r@RdH@K;7gNQ)_SCYiWg?4OL=Ke9 z@^)Qu`)A;nw_RH=eG}N^o5jW8pjw(`DwKD(sBN!NO{?0`^VVfk`?}faEbhC!h4s(- zH?}+Lc{hBU|mo7C&K(XZo&4)16KY4ZkhygI^ zvE|nwKArrsJj&;Fadn+X_$j9#mVBRouYKp?eR_Y^Q(uz(?%hhlp$<|Bw=Mhkx1(Ja zLaDW)SJ`QWVB%`q)xdor(b0N6e!OZ`_%IfQDYS&)97e2w2 zS%6NHpH)^O;a@uuuDdolJVg7H?eD?b$+a3cUe1(!muu0nu~k?I+01Wyx1h5b84<;U zmy~?Z#oh<}KYM>GD%#2B2o`13glSs@@HW007PFc^9fey%0?j!pb-g>hqO>Zr!>ysj(>;_yaxw&Ghvvqh$ZsF5Zqbp2OiX&raFX zOl>92k=xF3ad9`Zvg(zsmsULjcyUefsU=hoP{OPrc*cl_jY#)Vs zu_uDxqbNd4ZSFCr0Torml%c(QXRJrd?1!#hRxLwnGp^Q+(xECl6-{-ncXJnYrC9+nf)m-EC$Np}6F$ z)=sq5PPvKaL&ef>TKjb04jnqUxVx9b1$DzUjOT}7`IgOOO+U!TTm07bn>OVVPS@P~ zXYr_hC{0ilhH!G^g*)5WRL610;Y>hh>T#WmRU5Ww+qODgIf*8Ove(^L9zWihfhT3k zlqoqEFI`&PtDWi*OpKu#$%V}L*lvM*;m1a59)E(PHxJPjM@RD_#C*N^49^|pg&bj^ ziCM0O0I@zCQ8>Uw6u4@1Lp{R^>ea3L6eAXbdst{7DXM4!kL}d%u}8J1LRKGSoaGkA+$4&gUxv85U54TJODm z^JX&KpPb=Wd3l#(Q+`05oZ}AHCdeYOL&Q=};e1q>GUb#14Pd}BV18QM09Axn{l&kW zam2im*bygY@ngo0Evtxn_AVy-$N4Q`m3aPbMxM~%2llnItBu?A5|D?4v=*NzR)t`x z?7TQMRKr}0azk(b&0!4*R$5wGmFHDMMp4IHOE#_@+E^OgUlla1yX+RDsf#bTb({NB zi<7P>g9i`pJiM>26K#7K>Ymwoc}sv}OQ!Yh+t+W>qIeD?XKGJExE4~e0%hB zI`h@5{&_j7uj2OSPSzeOCRMoZX((oc2GwNbAYuxNRoy{7e}8KZfS&a3ad(2F8v>~H zU1#`ic>$bNPpLbqx(l{d^WJ;R0El~8m2VeO$%xIT71GiI9)y=Xp~Z`%c1A`9@eIo< 
zkW_yjGEcta%(oz~Tyn0FiHZ7LL@)D%M^7_C&p>z+^BXGh?`fi*;k5HK2-aV^I9wMjxDuvisg@H`LR#}#%l zC2dnu3nidL+qG-!78Vw|`1ma2JM847RewB?`^g^?3@Zd_x-%-OJOx!Us!SB9Y5_E6 zrfoeG8L3bmA(vwzis>o>YkIo3kw>%B4#&1BC$20YoKE%~O}WgBbvM!@Ph0YM7+;kmtg{OY+T|2hJWb zT|+73PI6D2UeJJGP7-J^HMF5m6YgdX9S?IeGX+KVs;ygVthOvaa^%Ru{Cjoy-pQyT z-)N5Cuz7PR_cVu))N8;1HM%w1ckQYuzwPa#hWLpx_G3n??oBsSZnmAfU9GI=CR0i- zW#K>s`+eZY{cxI8l6g|bNaF;2_5L^=si~Tlk>Zx11=Vqm!htYRZAlNHiTXUrOkdwU zxyYS)bGUokwr!UrFZ!qi3zeQez4T5j9%TxVdY(~8y|pwBnlO7lnUt+Z#gqcmlwDA; z9NOOX_>LVqyz5XXH+Kq`$*~1o{%@eW)vHzo@z`9y(-C#?^&RzY7Qwf+re+21m0*k= zL%1RSS7P_<*?Hgqdrs^)JbDOk_l+bq4p>Zqz6gE^J8E zY6iRa?hWPhxIxD$K~zjGs1X(~U%v0?Q6ig+gLJ&}#Fj9!9%HTpH@rA_edwN~6hL{n@p`{8|27FM*WjY%>`;%> zD@Prf^=YiM_^Al@9GM9Bd;p8+0HguxUyrxeSzQ@*;=Ge5XYYH`-22Iy?%W7$lR_MQ zJiX(u6xZbKc-z0xcs{aBN%}iI7oQ8m@+O?Bpm}P&dzX*Qw|Ir#tBiipf%|k89gg8; zjE{eV`}gl3ICyaW>C@fk;tBKZ+qb6Ox}CaT)4n6A)58Nbb0r2F)Oq;l`6jD2Y*<1Y zkG<-RQCH5}w5cJZpXE6z6-}suRKIRD?B(Rt(9zLR{mPh0nYXyK=WgGw!e@;+#zo(FNgtjB7H7_qF;qaqMND}Yk==A!wyXpyq(V?Xlhm^AvIKl_2dpDH|gF)0KX$niM9?+R^bT7#CvwEaDM^w1`# zXm#|~zyWL?#bZ=-wA3zin-w7;J9X;Rv_*?ucfQjwTgXW@Z&UZ&rAw9hu)TnW3`?zA zvt|)^FB_MhMf?Ek{{26|@g_fhtgW*;=K2xS3JDHsvuDrdwY#7#B_=v9SUJhPU?(&I zt5r3*L^m@s7M?uWm9EX|b?ZWiA&l6#5I^M+DXQAE>-0AKTc6FQJ$p9cbzfxC`2EM{ z{V_z$hBv)P%Yt{ll43fWcdOOizk`DLa(x_h9T)&Qd~$l0sTl~;jArQxw0Q9@&jXOZj87O zU13*}Btn-jzeTS}08(mds?k6H1oIA`jMFXolXZ3+a~wOi1(@T*v&(hJJpX)~vQD6v ztnbxboA>5TmVf48|Ji^MdFa3ZReZgZM~=FDxr)3r#wEABs9w2pGL_!@pWml+3;f*( z`gZ2;pPje%m{WDagaZGp*+pL_IE)z4YxL+Qyth|(6Pl~3sb#-~m5tHRf4roIFf?+VC?ArG=xJX5*87BfjS=6yDI zH8QG&cd&U8pz}aNLhPigJjD;-+A0M!L)^JRNSZ97&j5E|-a~5XQ)2e?^z)~mS{MB&DssnNhzXoB3JKi}x-av3b;TlB?JQZUOvK1%79?_t>!EXJ=yvJS zfHbFJUXikAsdP9x1%CexSZlt2bW)ARjkmNe!WPtxG=V-{QP>gUg1KwQ>m!H$0^T24 z5xZyj%7l6I${=wf^ty5#6ViT9>z=wiVI{aa7`C(OMH!9$Mz+|Kzlj5nDyDKX-5GBm z_9An~?VEvgCuP!bJG*(?wl$$SJj~NSHZyl(eFUwpk27bZilSeAn*&g zHOQL=9i{4H{3pMjiA=)0>4Gg=8keIavYZb-BZZb#E?v4L$4F7YybBT%j0tV;*`_3H z@;8!w?S>7PVT91lzpyg#aRH?;U~dthFBvOV3Gshm#+}>T*hPyfpsO+nzcG?TWzcrd 
zo6BE@b`JPvB-k5il_Wx(fh&P!DNOHYACH}cC4v;SEkMx@M_0_F%6y-C2elw=_l!0sPO@mb*s_^o~kX!BKz_L3L^5IC7P71i$~xW(4x&|%Z9l;}?)Q4}&kX0XAK=fJl0-DXW* z?oP#1jzqQnG(9)=-VD2EN*B$o$p|MWce?1A*So`~EeS7#E(=9u8v7<`#yf4`-Ja9y zLwG^*Fb_kTv|H$xF_w7(gaL|2v*wh;b?@_jX15)Eu?2Av9y_z5i88HgYJAx99wHyE zb;)Jy*kKczmb3kG5?bVoL{#`LXM;(Onp6rO zu9-e@;od`h*U`{?v$=Zz(gV^v^X{ox?GecZfx1&JT&RE>uvV9;o(o={4)|_@H_LLG z4K8Sz)}su>qP1b8y+7XPSTyQ9xjcdoGw?A^KcxD3%;;9k`nd~*nFR=)aOdc8sqX}$ zX-|y+jEhoV26Ufczv<5z{n)*39Cg#%DGk zr}#?G@OUfgL{J~|Buq{3%)CG7vEqX28S1F;{)iq67ZPdV|}GV$BqpHK3=-(%D{9}#j1C& zAptCaghg%LHVGE!>a}Zcx=#c%0ONs*W0u1Jlv5}ig6IDFkvV&m0ugfDFY_jlWpQsE zHSKO%R6w&9ynuEv|z0~vvWI&S?oY!h@w zE~)QkX67PT=A`!4oXm{!hYufq&)~KT1fCKEVN9u535?PIh&zLEt2S<23g88|oQbvG zSI5ceL4&LrB#nW;mWPJw-v3;s>z>QB6q?=A#`BByKr5T9q!OxD0QPXhr{lKS!X6yQl-x=R-QanAxU4S6ND)$j!>N>0FpitgFk&wo zn|*f@6z@mD$mn_(@%vt(_&)N>;7n@l5(??wU3GNIN<%m!cau5Xj;ouSy-!cC6_bvq z-tD(>WSIzi;5EZcc!lHAti z(F3=u$@E{tsJ)?7zNXf_N-nW(PZ;X?mUN}op15w1QLDDyH1XSzqo4>W9Q+rJ*FK#i zr8AUA^X}anQ%+hA8nmv{hom#3&IF8j{*kxc!IJ|r6#zKI(HGCmY};U9@^-|YZX1>^ zwO(LRKd#G$*b~17v-PtfX}6+_~4tk*9CH*;BK9`$G+~j?w2E_zMcS)vfE7FUO8{ z9SRg!U3nmBW7UtYwZ-_UHxS1*ZW45JSlqVY;EJddL&-#C^iMv45mBEvgQ85mD3=Ov zA-N=DT11Em)8`a2&T(18DR5+K>+(5#(w4Va54tw4g%{w|4Dw=T^+G_~hHGsN4t7iZ z+^nDnf(Tui%P8E`eq0SwS$2ND^Sj7pOOq<4SG4i=@i~{AtT4Q<1YtNH%eBf>i zb-wN8(2N`ATf5DgPg+!hNWV~z4fg4m0L`$PI0@x^p-0)`qBZDsnkn2~&TAi9iw@Xj zmQbXs-?~G!VRm23TNxMW8{|M@0nC{0ilpd<+43Y}EqKwkRjb&^v)s3AwtMvD^$uPN zBu-yghwXsj>eZKf$46{jv!(*n>Q73{lo_U1kb~5vg>n{2Ma%q)yts~uJ>QyvYx{@HgSwTfAry15h&Odi#ut7pVxrW@A?@Ms^>_MbxI z5!z|10Cl|mij{^)+|68ouH%YKGe3e>8(>63dqIN^HTP)EHfSen(4}s z`3rQLcTRT3u}ppcCl9T&W5#(Ub4 zmBl+TB0VY|^F~T7PIt)QGpD+tl6?Jo4Qd-3NB5ENG^XCguQ~dbnlVNU2Z6dODy!D6 z<+k#(rrjS4R9R#j{PDZ@>@DdBJ^IJKjQ=+B3fF(h7m69Z`!udo=u$>)Egs5;50Bf{ zq2KZuH*Kn*bm+xnNI+1YGo>rpizZJ4pf|0K@{}KGB`m~@WJIpbK0KVx?NLFU zTr!uEX5Ps1AIvRu@24;9l9AC9N4Y!3X=(POryNWFsrhE#UEjq^%V=iWF&laBpEB8G z+gG^1)cPk9#*3#^m!}MIU6GSWGvfTpy*{ftVr4xjXU}J?a?6ueiXXf%-;gk8$+HF( 
zCeqZ*q3I~)wJel=cydPk{o~I*td~;PTg|@b&SfiBU_%iTBPe3|(3%pUYoBfoC%y!+ zy<&!i(dIqPBJ*v2OorT>ymuj;3k@Veb>UOzIIUPyO7HL=%@Ue|M8&jxIW8>qz&5`l z>9j3e!Pqn^QjljGp^51{T!lGptVWJ(jQ+9KeN*3u$$`bcTHbm|Y`b*rS|75Q(d1{B zcD4JkFB%g1R=b)5!iQb>4dv(SXVdZ86I3@#reFAh(99w4S%6m{VnV&!_oJzWjcDem zS`!?0@udGrZ}C-Dd=p^KlONRvW=;>?R zG*f)nd>RrkU@P;tk5^i;8mi(GL%D4~!U#n%F_rOvR+Q$=oAcBJG;amb2bSc4}pR5zMo0%)?%?_s_=$Cv}rOT^ZyEnx2X7r)YTXq1KGM7B2 z3a_0CTVZ{5^@a^2c25}V;bC=n+SIAujR&vG4q5sM;Mf1K!JTd4;mtokj9&l)zogm% z;SmKq_HPTJWZ!+R+}AhGWWA4f`$2gM#l$Q)GSbD|DPhXgJKosPes@~J#2@Zqe3zKb zL*h>`cFKt@s~C0YFg|1cpZiFh)DLihGeAgSq2MUk^x;3)qPo-PEZoJ?wnOiWe`Xdp zFF`Ky*C8X|?WAo9WRO|3l%Y0dlJDPdz+vu1I$<`eERE34!vq)hjxR?bQDer?6U(Wl zwBQs5tQI03h>44Pii9(4_>zfwaMbV_l?$Njo;`nV)$t3&fqFC9VMnK6?!Menr)D;r zHV1Aa!ZXRgS9OIR-pbxxDX(51A6x#a|+j~J1|ZAj^Ju$2EiF|8V0#rh6j>Hv&q&c>>XI*`Ut z%ZKd{1W4CZxhs)h&i0!9=7Xw3f-Pj4t@4Bf2UYEwI*NrRJ0`fhH_uGuNyYxawZ&@l zfp~hg+THyv3lk0>?k$<%3k9Gw!8bP1pFMzqX-Rdk9AW;O_nvAns*(b7Iyr)LF@UDIP<7AdwcrNee@S=b9v!$hn4ZnHXmRh*$%SVZc{?LB zQx(a$dajh{Bux<^CY&-<6N)EaV`xt~rXpi4wKA<89rf}?J2=dVbBUY=#O56wY#(d_ z2DRniCASsYDv_T~m4Z`0icr5?XJuFI>({Ow$*pL*aLBfxgoGC>>f7XyiZPh{#*JQb|Wn9V#?aiGV7~;wne7Nu;9`QmElK15TNK7cHyqwyR+dQ zl8Vq48Q}O2oFR&-HlUIOE@00+%{s~%9o}mdsgk*=DKsixSQc(X3t5zy<2QvmMx*@} z%DKcl4IV#x_RJl(l(2r#An)boTE(9Q%9o(svy=sP52|C{v?e$>*u0!GF5aKOJpJ&zDF?`RxsslxTS^=EO6xDG#<^iJAsmd3 zryeqyXZwyF_V?$2?xYryMbE^<@Xl!|{(+b%9qw&Eabo_F$=vjH+JVP=?7ryYKKbr| zYKpY*XzeLaq)AT+|9rgq*VE~Pr`^EpkdyVyis(`JgJbvty-)9>oS7XX1R*Za+ z-lK;|+@2FnGgaH9;(K@gSW(yoRg|mPPVJd`Hit9&BS_2z_5-{g5&>_zPhxE`e&btv z4pCxXz+9X0vu4d&@9C2(9bw=~7|E$z*P(YOcn*0$TJlF$^eNRgC^K4(z z$8;JO7|2JLE^EP5lY7Ufs#UTdQtus@1{Y*9_|%jNZtl#-=bBTv9ywk?zOldV?#=Ci z0cPf%5*}C1p2XIL2D5!V5bJw$Bnyel+SWGu_{e$l3|CbA_&2QZ^=rNHc@g2T!ZoSr zC2dv|g6bM!g^bwcIJJUi_SM~e8o`87GE2R;Y_Zw>UKl@H4tK~*^i0SGpKzWtrydiV zz%d3+``l&R$=Xyw8FXG|3?F>RQb$8zR{{9}hgZWK-jSo*k&+eU_~gmI>4J551S5xr`>0W4AkOQsl?7YB0Xwp_8 zrmRXx=!LM=uj{$8DkuYjv(HA2nK`qQ0L7RdS*8cQauTD&g(|>Pc1HL 
zBZ&g-NnmNy^e?BDvcyuMNyO5_BEys6J7p^yYH4}11{*+ExCk!gPte&hs~3Ni?`Pd6 z{J!GmitB}>W4i64-0DylS_xAFrXl-?Le zpnRgUb8BEyL08Y;ys5!zRXj;&R&t1QXP&_fUtE-5o!{ZE)MhlD ze0AaOC{lVwl@yI2j@;I4oS9WqI8iLRw%iFgh3RbA=Myxj%Ga@|85IRcX6fyLuV{8?0jdTLgS}B4dA_&Z`JmIfy zH(sdEiKrcabl&m)EnvjC<37wV`_`~}Ovi+Oq~?u<1q+S8E^dx#T;4HsLK~a)wzYgw z2X+9(A6#l9mA7Pdi^W`X-Xm5v4)NqIE&Nmo8RIFlAitddcCkgqeg2v zoz8=GiF~;1=ccHr4lBwFd&#oJrmgH~WiD$oPy%{*KO~TDF_um?qyR*Auf32xoF@R@ zT|7g$P=sCPJ(d2X#^?LFxvB-YA9=kHKdWQkMKsiSUwF9f^rkcnb7oIQ%bj|Ly{hP` zSuBgc;~?f7sh^O^Sj9W-hcdE(+%F|Py@nzTV7`|buWtUp)ThoT162cX7P7Bhh&(j1aG&83HC*wK1G)rBuUtbVqRd7CZf<3_i? z-qSoyd*a#<{piM-rgKhmpPaFaO3k_YAa42^cIl-BO`TT$=*h{`nK|h8_FB`XO(SGV zR}}L@j+=>e2%UlTT<;!jP6v9**uShxUuFjA4uwomPay%ykbq7Lw3f$$1qsu(Uy@)45-(uKLLIa z$!;t|03-y%NOL?sr4}{y+aSB~jU>K+ig}fiE*gMJhCRfCl*jXgaF-LELk_@)_9DswDWEBhsJaG>8r6VCh^L=l9hj`1C?D@G>jRy_ zXvrrXIj0d&3p<75r2cwyHfL25+NXp_uJG81&oo9fgr^r1(-o%>cXXvf)+HKeWo3D; zUtddDgUpsmxq0*WqmnDRj4Q6+purm2_fH53r{=$^i#{rp#Rp;x4DAA(1OgbOADX`~ z3nu_MjiB@a-lVZCVFy-GN{ppZk1t$R@DyQ6K_E}zcwZxh7p+g)8(9)i_2d{;_0)#B!z{vD zPd}pW-(GA>>XA;>3;}s3bHy5~YRA~x+ow_Kb(@`Ml(hV-2HkT>b|ptR5t7+6r>IpM zHx99bbkMbALSY&4Bo^FGF(ok|0(G%BT7_ZO$JSmjgE(Pz)h~V2#}ZYtC+a*>bD=iT zT7J8DlbZw8K1~8u=ZWA)^5HzZR1T735WW>pmVlVQY|$CthV2s4Xb?fc zhkHZc!YlCni{Ui2g+VObFQE+{IB?)omm#STW4r=2jvP4>MrQcRH}c&3M`zXO-q2_z zFVGN-m-1ukx#lFhQEaNa^&n}O_4)hnhEN|3kLbc zy;dRB9_@w9xGte@{Qpw7@eqLoppk9K50*k$WLd@DbEue;Y&QyYl!q+Ll)8Uc0-&4jpru7%i7u3%+a>73Oh=OFa}= zlek!`kUaPHndTH{J#g!GCF3@a{vEnj`91PTxpY`r@Oo40nNNq z5GWL(PKdM2?pLAdQY!-BV*H0#)RXm+9_zY>@#{3i108eB=vLb2FcE(J|pNzT@ zRrseae*g15*S$W1I?j&*Ka5@{R24nWViH(Y#d_4J3qc)5Nu4BL5n#ldbA4CG$`~_7 zucAxB;p7Xn?B*nVs%RRsr-7`!IVMw=G_rOea|Spxfx|+7%!kDNLRMCNtT4s0WEKB= zCGld7A;X4UWZRSMt;SJ+o_Kv@V`I8U>97bOyFYl|@-^ElpE^b&J`7b6@%8N4Q&Mf> zSO5NHPpWRz?iu~4yg#i>8c2$#i0h-5KvwflNipSWp0GzQUB5nPL&MIYM~>8ixX(_! 
zK#EJ+a>!x0-%~E-oCvP|5&HQ zxWy)io~0kEE7DV<#9UU8c9?x)~^&mr`Hu-i`t6O)gE6MkSXm=Kh zH(1=qw7tT}#*ucf>cEzSlEKfxQBvHiuSq(%QvckD* z+e1QJ;Bk=}dC|+*{Zf*=->;>2G5UPQ2V{U3Zr-`GW2`im1dT$c><91TQk0qesM|26 zahU<`^EIANP$ihQMjlALR$C!iil^_#iFKej*UM1q369W1^LC?`Vd<@9B{{PcQyVl2 zeRpyHZ#|!o?8qPdp-YYq5$@(`uRj*d$Qfyxo>MlB;pdOX=@0 z)`=Y;v~3nWMN*1n|Ik_{L;kb9JGu)5kyEo~%osQMF+V(+$4vlR_z}D@w-0^yd^hPOd6Y9+Z0LuEGV*iBUk#G3 z>-)6hfo9Hp&r!er@faaf6h453Q!}i#LU!EtmbK4a&en-RiF2P;5#|m(Agcm_gNZ<` zJ9VnfJxsw#VSVrUFHJfA!<{BqR746`NS%n}zap@&y(g#D`VraE=$N+Fer0J zv+|u$2iqCQ2<2ZVp*;KEjfF}I(oBxl;H?|X+psz8d#A%J=Zvmlz365ElKVsNdZC6` zUXtSwyp@S&H7Uqu1Wy*P&Ce-;zAJA5K#iXzPfSgvhVr)fOFZjMqY$KThqs~K^21i+ z7*U~(0Z!NN!*OG>aCeWjTQz|_d4cm?QHV|bppCtCT==Ys;gp(G$Dtg8A8a4#k%Znm zv#=v_dR2};;nS0Rno$_Nn4rDoDv^+8fC}uEZDq^qDkJywJr5r61Qp1DH8rFg*R5N( zNcVHHkjS*riiuRKQ1tpe*MmleN{>|L(QQOhRx zH^9Uz9lmlm5JtMGwMixfd!r@Lv09V;2M(lynUhM}?%q{ujE|Wbwsl>@9wHott#v*x zuNlyZDbP1?qU=W&pC}4u-CipF4Ajas3qQ?W!zvA@z?S0-@fT;bNIW1H90o3rh$&x&eH*kXT;yCLuFhwVfyNy&%ibR-so1!`)!vpwyNqM#y`Xl53s2@Uy#iLDnL^m(LnsbOJTPq>w_H8*2c3e9W9j(fqdRpx* zpgud&^2e%~Y4x{~nC-|ZJ>%y4SWS`bFQU0P*g3UH)agA9VC%59gefiw3#hx)zFe=E zPv(uNE!|xB;a;s8)1NgmHm+>Lbn7Nv*!Pcn-MqT~ySy%13h!=wFWj1S`2G#=JGBie8N?4_4_KlX>;3rkRV(%(&ZXTXfR5 z@jh=c#aE&G_OZCw9R8`G%)^)$c3^;=`6ua+L4{NypHIUb!g=acZw`}Ze09mW$k{h? 
z9|^MONp$e4?0N3}{CV><_WZ-NX$_)BI;3sqDO0K;DCZV~!9FG84WRZGOoHPzpCf|S zS=6FOz(gW!MH*hsx-{<&WM&$yGCf!nC$zrtai~X(F&{m;p@`NqHdVA-_N95lw49%H zzF%1#8o5sEaBQfJ`tYDo^`@&MHD{#w=-SwHy6CE*xmU+!-RfBLwlB8~o?M4OYdT!> zyp_xOE`P5MwzLVk>Aa-kNk-CD+eHqmRKBk`x}hNC#QbS5A5BGv>BT&ydiUs0@tD{?yPnS)** z&v$qUbfKv=<=C-fo(v0dopvR)N;Nc0CjJ&rLdCg*~+X+k%2SbvO?Oa?(mK7=3bNQbjlh8~*fFe7n4kkBbwjy@-t{ zMCGKEgA;O+tU>_0mHQ^!x2I8p>V9ih3z|4AV~3*|uJ?|$7F&W+Zkzqm7BA-Yrh~3~ zrf@>!@v%)}Hh`wv45;zXKd)SdB=b3gIQ_B@j%+F1I&c-7F>13#&0qQWwlMJ(=gr7g zGW^R+K-q$LB>10qZQ3M}sfO~gsAvF)R%WM}qin>yD0^OZ(cvC&ur#*LnmN-vrscvT z+;Z`FI6c3;^t7HXA`XM>Ns&6UpjPQP^B&at#QCtdd)5c^s*#2$)46^Lbm*LUjFuEWaADjZ$E3xvOI1e2H)ztN!}8v@ zaQPp$!?3y(fS5qt4pWbMA!i5454cY3iXTg}jcJJsWY`$GAEE})vf-u(aN2_RwjaBE z1lT(SkAGqdROk>lZrbz$a3O0{8vC@gfHgLPhHgD$g26*rD~Y+3$>tyS7UUdS%YNQR z{@9ot1f9sZndwIYXf98%+x(R9+#vm{8FAm7RpUHsmLG7Gq-Ibt1mi!u?SIqxZFtF5 z$0a@;@WlJ^UM*}f+fo(R<)D*E$l*05GFa-<*At;L4NA$%7BEol?Pii)-$ak{lgGvz z8HmHi*`}G-?IPPqCf<6W&$l`PnYxl5 zCpzXYv7tTGm`KpnBmsA&4kuE_(wP#9UziZ#f>qbbS#jr1L$*~JO&NQu7SUEBZ_T?w zdC7Y=d9k8)+S@YTZPfHcQiQfRlP}nr-zd4~@To~%M3KU9qPS($&J+||bCdCs4X>h0 z;tC2qB6fvVzZC*r7#TUbj$~}7-R1#Xkb=BkBZG@TgQ7G?4cDM07IW^K6?a3AuaC)Q zy~qiPC+9vu>}bCJhy8#h6ANxnA#2du$^q;TQnu}W1yEi1N#nG7zCUj$^?fbRc24^d z*`Pt_it)nWyFNgzVJB0HwcExm?B{?&v7`-zj!8#VI{&0+-nX6Aqn?8hEQaZDrITw< zD7`4;R12v$zkgU+G3v|*@_NmVm#ppBg0YCT!kX8w-%7vuK;*G?jjXM^MIQcg16(Ve zrekn=85&0j2@bA|%y`Da>=>U6=J+}m6=ri1hoBZ~|HE7&58y)T!-*tKE4JHtTq?^Z z_z!Cb2}cXjp%@_0)QoKXE~@e}Sc<2I__zYBuj` zRsKV?DLKP(!u1%^q)(}BHKA+Z<&RI-wp1-d`DR-VyMDv8t^SY=2#dioP*F}tK`6v5 z%6Ci0f1h7cr+KAT$Z2Ed;E-^27MQcmbf+piLPEm1YgXl)6vG^U(%*sPeQ-h3)8o5b ztywQ!u|-!BxQa(e8Z)bmB(wNVxXhTtqzS zylC9;u9?{IhzFm|1$0&Ob4n|BvMKa89(dO&*%UymyV-w&T^NrNT+q{BO>(KbHxoTU$^c;c{H z6SqX$0!O>~9R0gYR<@A!h*74X?;JE+$Hl6L`+h=_6EybNu`CqMy4v;R*oY{H&Aaht ze`vyxhBR}ho~`$do8n9j#($7~Tn;w07rOf1e!6_!x0Y0en{GEhn z0WXv+s3Xi_JbrNnuxs?kA0n+ZwZiL6^i&|T~ zwHdVg)sP~Cb9lL+>m6Fyk|Qi>genceaY1?GgJq_yS(`u(7>=oq&yA2itd%ZJ#djgd 
zATXpj)mylet%1E&qr?gBHHt&_8%}IQGC+S%Ggtifqc;1aV3MX3)d-fkyNp%=R5To# zc{H-<8C>)5{O)p5QDqzRYu}*1pswY=xKZ(k$4(LM%rsj?#p{{XpHa|OwYCIDB*iRr z3jxl+0j<_78gC-pd$Y(wElx27ReGRB_D6h8P18D_kiLR9+mvLDQ^q~S;-YJajp>G)lgceR-s z-O!>507gvQNDv6~Lf{J923q3~nn@i)_o-4z1g|2T#6Y(+nR9&)9?Ubh7AFLQd2^Pi z&LXLTmJk&;i}-V;v;U{i?WeXggd$FFAOk0k^<|!)M!8hW+-$RP?E;@-aOBXT9h?3; zztS)1EQd?DOMTJ?{q_^Iv5~e98ql0B@8-)>%*>X6H17C;@I(Kt)8cUpkC9;u2xvN> z`Yo2s=zFomrXm{TLcuc~u=*Vhcp+n9$fH@%b=#F#Jeh>^dhblgu1OOJcvM^KrfGsH zjsDsg){3UDq2`{w8%8Hr*Ps2JyH`J+Q#j1_Ds3ayv9LxxSBH1tR(`OvJrezF8kwa< zp=1YGT4i2cZ1d=@anJSRy9nE??f1PSLwXE(QDBg!bDO$0c+;VK2}!;$_0spxF49Z; z8IeYXzp_^KB;gMLsYNbq)&Y{kp&l3d1@C43N*P5&kRumKw&U#Tmg^pErBgx&^?>bT zzzbjB5NO8R9D@c-u(%NmcotE5A|ZjB-bc~Y^ajWL?RVitW)$g>$FTJzqKxZKJS@jM zMuI1p+4!(6iE)}8pQF+I^RgR7644rwKJ2gykREi$whz3FE!7&TqGqHt_AkGv$j{ov zK)1n4b8Px01^#p6O)#vMpxK5%!+2Nl^Lz`p^dp`y3_;UadFe)7pjLGkfD z$*(1OQL5K^OqnE9o~*=w=k##9T@xHne0l`Iq?&h~F=H~VTyZu%wl`$FUov6OLhYeq zedz5ZFnJ5fBU~(+c=|RmdGz=D)F~zMGzJfLIGx&0Ye-JfF}4_@Q?2wMRs${;k-OfF z$=1&wd+>lX9VtrcDUJItHSVOfJZ&)Cko?N6uDkrUY5=4sw<)R;_H9u3GpjlZG%S8OOirbIkE!0 z`7#tTt&ua3B}dF2RqPQJ4gn~iJY_K`mgnj(B7GO^#9;BqNdnm6RFdXv9{dvi2s!25 zMab#GODV|4{0~!po!c&InwgK1nt-!*6w$-jyEJ)3wFT#!3FHyx>Xm@OLKku^l1L>5 zNJeN*Lz@w-?Cq%zD{DcWjmUr2?YCx$1@*X<)Fy3A+bdQFNE%FjHioifUP+IBa()d? z0oqdxcSJe?Wy0UB8rn)dp)dBe>vG>-T{L0h`c+eVQ(be&<6Us5^kim>3fd}tmzCBw zJ%9Cc#I*%`cJ1oq=CG~F`Hr7Ydz5`_O?@b;y7-e-gG@#$NP{ee7+2^q^o6*tFr;J+ z1_+!#b%6i)Wf80Bss`V%8N6GXKv8HThp5gzP(lCupvXD3@w_-Qfenu_PPGeC*tGMS&c*mhKL10F*n9Z-3HlH=o5>SoZB(wAP%kUQ0|y z{)t?~B(2LY=8ws|Upyx>#XE94Z-Cg;cB!$zFQRWRu4j3s7+P{@hz# zT`Mpkpw0K|4)D+5RA~&B_Y6KVRPI&(vgG_J)3U`bKc&=O)g+*sdlS}Rs@>NKMI z-ZrDQZ}(Z01c#DlS+u{44bfz#J((E}p{Fl};cxO>r_waB!WU)a@7i;mqhnBRSEfJS z9=%I9DcW}V?40%EcE2hG4%psr+21%isAyiD*O$ytb``CyfuxGQ9o|8mDT zkLlH|+*W5Uo)FY?P3#%X4XK)b5lI=FGS?*@uzj^Fs?5Y;&!+MR(Orgo4!+E-tyFr! 
zwiV}r!i*vAcN(PW+&!*WIx8nScx*1-jP4htWKiq?I-)01tQ6+3O4^^)Z2_bYm5S=r*5zi-R+mO!P$E@pF$baP&kEkWm0f^N+F*-rDH@m)3!bfc+`4yrll=9eD7JUFXcuGJ)rV$JN&2vAp$NH?GSzsh%6C8q#T>#3}Zghj+T=A!BK4 z$i&Fs+78gRsN&VNz2U?Z0j)e?WUNi@7rKzC$A`5W$ZqUt*kg!&dVyKdE}_&>B?Xnc zXD<70y>Xem>Eq;;y$205Evo39Rxu>nSm;_;YvMNLPs~rYdFsn4zb@C?yS`o6JqNOH zT%RoCD(GN{iDuy`WRPMay#CH!+#EIBiV{Bg zpJ~%1K@?%r$(i?UXSO823onJjF1{}FHUx-Qe%Y(9ja~ zov7YK{$E5CR5q@8rwjz$hn+g4`Bp2)eV+jdWG=q1HVQtvDa`*< z3lsg0v+b|$HED?qJ0|W~)Y6Ha{fyK4X=c<YS?N08T!(VA1mP>c8VUc{+rLq$7p!v?plB*?NP zSFKr76<{6EwDm$ub2pokZ}k)^0`o!);MeEEWxRLJBQa}sZd?cU`Xl}R_Klk~c}g4o zqV%AJUO}@kwWO^go0;5gEtGrLK4~pCMbCdIiSX~2-R`q1erU5&gla2nrEFtPfZu#w z@Viwo;mmC+Qh$Tg0BP4_A<(8x zhe(qbbL)&+P~$VvSjJ_$mY0`TRWMHySB>`>vu_=!Lk;7-2=Di91!oK)v%Y9CGK@Oj zH&-U-oPLjo_b|s6-6omt=;%jU!FHmW zZF4cRnNH)gwcEBYpY0~(A=spOOYZ3jkp(b5ixfHPf-PlBBac-xQDHjw(29`RxZ77U1ZUM=~*LMOdHF#{2g>Siymi=?1 zG1|CC(2HoOh`sIMaS88_96e815t46Mb0#Z%T6mF0vlqSLS=dmP`#rP14F+Pn$zHHX z)#~j$R~P76fbOIaJM4d|)vQ^6mI+3s8s#=6p^zLn$J+n61X{}rWW4+ZFc0Z{E;&_ z5EXG9c7yJ>ettMbkRzz0%z<5za{m0P>c3<(dDCZ9f(7bg#}nL58-&LS4`-<`!wTQL z>EPUh)ua>g?>W6SqBd#rH$n*Z+G6rXmVrofM3iUdKY{TB0*bp{c`*+e{K$w|6=ZL! 
z>uc%l`F=QaSbnt5;f7QwLC0n~JKIciVj+^tTQAtfy^3D23l2&O{-Z2N-4%MaiHm00 zB)SHS7*lR%LAhhm-C-~?{JzUhMcOWg3rf)JP#Pu;(d*pK<`0U(A)}wob z_syus>1!jBa{Y`>+pf?*sKal714buJu(1A;q|Iomc?RJ(Bflkwm2KB=H>0605VJ3B zMDz7|FM5yIs>5?-$5&A#X@=i+#-7E zm-iE} zq9U}L{!YdEUEU+w=Qe!|3~X-CX2yk8IYgI(J=lT55U29B`r&Zcv9-QYF+3p`4~`1m zr7c&Gl*AvM-n>~e9;pg;CaOQ){jslSWxbvyZc>jQx5R~M0IYg3HigXYEJ^7xG0P`O}fj^y~gWay2hQ9`wJ<2)bWNfVZZ+I8SyqI-!70r?F9j5x+1da_Z-5nI9V`q3b zCnt^32KyK6BpF_p3o~55`Jva7?l+Oj6;2wd2p^-RBunK#Xe-J zF<8lq{X5ut8*+Wu57HfCX=&YY$3guKAmNne(x8#TS42j%e5VviUzFuZfAL)A1K`b} zj$&Y7<=r#-1o`HrDTdfD}8qiYo|TK zu1Qr94-LwI@zz+3iRrg0De6t7V&xeh7>9F51 z?geKujIpSjg}&*tF|mdY9W-OSoyYBHdo|$WduNyRq9y_V2fg`=uRgT7w$14+S6&#c z_#G?RxiWgcI2XGV`F)Tj**z?7SBZu~;do5Jvd5 z3Q3N3;ZYW01UJyDNNjW1f@`cENXqg1Wxtg#e*qRASlL`Q5#utuWe?+FiKtK9mB^ZW zXj+qBw{^c*xwwwvk9kp+Q=BY}n{$fSuZw(nX36JvRJ|&i!_v>dkNkFeQVGliCE z#Kl+&nGFHK@NW^#jeq$!5m1Ue&(&kBhg#XqW(>&dGUyok^Y^k>HP0z_X^f&W@4gYH z`=~d6a|G+R+}CFB!Em~lP*~OI1kRK(vV|Q}xu9;?chl%F1>Hnx<%PS%=@!p8eZ^IT z!4X3J;Ua=hlES56cN$a*+PBNvXun7uC}@Lc2X5V3gCTU{6)8v#xs$zblz!CfyVWag zpQyiG%PBMC9QBDY@`B0zF(RY^)9J(wn$2FXsw*&ku5_Bzn%GyGgTMw0UCp%)#1%zFf)}q(mzhVJ1(RJ|s({;u5tOF$9D+_i z<}OE%Z+tilcrvK;GR!J`TC#p3E3tZ);F{egyI>g!n0^|PpbpCq9OHL_Ps(Zd`xWgf z905kgZ{xZ#Y-+4y_{k2@&H+o%8K0)jCkM1weOgIE{5eC%K#^ku}RF+*<0> zQK^&|_W2GaaIS4XDYcqyFboY%J&6K6>)gtU{u7f|)PZJ))o`fUrU?f}G++Ls1^vbYQcedxE}?d z%*Kj&f2H9lX*jQj#=0pg-O9nikN)+CwQkr$j31^{}v7*vmiv5OX+VPbgz>) zWv197l4Nj9rQn&h7I0!~fq}>pu*sKR5O0{uMtu6})xn!5A(%RzoH=I4`YW7~o=BW4 zP2+iTqoYhOQ&-Jpk|Msnt@ZU)!KF2>J)hXzuDGCz0`Tku%A{JzUXw~^YXA1o0?GjG zRWxW=@g#~AWm#|=X)!aWj$WuvQaR|Fz;^jLB8PX zc4FoQ4auT(O5C~4BUfkbFjAJ_d%NJ*$89(GI#qB;L+sr$RIa-!j1cS#<-R0DsHA z&ALuq!jT12y$BaiQP8(-TUGdGYuElo8Q(=e-#Uakbo{BgDgf!DJEK21q;-q*$-BuL zar#F>n-A(`X=!PS!iT%3-msw$Ye3DrDh)~lymoEBlp{J-z2Od{o|`NP5y7h!iZ9M; z>c>6;H3B%pBZz9&-MZV*LqJYhkB&K4#e~RLnmxa)31zy)itj~(h1vw*m5N}{E;oUl z&X4I^bt?VzDHWEHh&`rEnIaQR@Ty6z`1!?T*xvrlA(*t5K$E|Q*U#O3O39vU`A{4iA&_zxFmUaf&FK zhgCxvvT*N{+)Z+6d&y6zaUZv83*Y< 
z6SW7<_0qSZhv(c^XX!Q;y$A_Jz-NFDu&{w360oLZLXLoXl2bPx%&u4q_7b$T4hW*I zzS|dyjmasVD$>bzK_xPOei1fUf`!0-ZB4B=ahm$U0Ajy0Kg0^SK)9EDo^EX|Z$F@% zweY2D{b0>aaC6pCm}yi63$?a+2XX5Yi!vef0gBorx}DG0~UI=mnpVXYx%l>Q0w3l!qV3&e^^~k{4GJ`O|jS^hxG$@ zWeM4*5}qQD(;%n^`uCT0w;iv5!FZ%?-DQlCPEWt@#}AD8Zh|}wIv^=spFU)6r}L^V z5e&Dq_@;Q>bHr6q1Q?`2VU=*7shu~g4O6ybr&;Nu*h>8aQC|Os*)mSWQ;CTk3-6xu zSl0Uf$ZS4J1RCq6eI9)qtQ&?IA!P945t_Y?jq~C3)KJ7(9F$_T_BK3AaA34Z_<_MC z;`bAqpWB8Ytx~u>;XB@J_$I9)VcO~Fm|M1CEp-)|X^$=zRHj}SkQ2Ex-_eAXwI|ZU z=`IVfH6|y@Oxi2H^w4^MyP$!_YLtr#2*QEhqGfalgvR#v-Cgf9|9+?oIjot{Txq-L z#f}_u;}%8@$DoD6P}+8SCF#)OrAu9Sn!49(_E}!`(d9(eBk+L{BSwVZcVxOV0dd?* zyh?I)lfGJKkR?pQCi?9+lJI;2mHo10Um9y4$95gUSqT=WtADi{j0nR(KRSAA#0j%IkuHLrK?&2}K(~-q#z~_*&e-1j) zCFxpOm>y;-z#i$6jgJW$x-TNaTV$>TP2F}6Y4p`5U$xpaX2DF4ad2pOpfF52#c!f% zMPL;DE1)JliADvtLI+gc#XuR;`(GwY*Oxd%sFtC`Zw=6atbxO}VcO-R@5)=J#b4(^ zd*`d^Zn->#w5bW-!m4mSsSG_VQ$1Dd0FHX`Mu8*Oq30t^gvJ(^iPMh}zD-&pmn#5(NM?C%dG`<4u}2 zZA(KJczf#4ks85k$x(vGexD|!8HUwifkaevc)8-+mk(kQQjEHNx+ zc7O}wP=dLh`&C-9x_sSsag6T~AOl9SnfE3~Iv1l3J0-V3FKV6JNN;h=VgRhR`ubMW zAG4^RCRhCEih#^6x0lV5r)9q_f!Uv=bUYuK7>B0ht4qPPYuBW=I~ig9oRZz`E0g@G zJ9^C#j8f73eiXR-bNG+}Z24)3LE5mPyqEhEEgUSn=<>t%eL8+E9Hd^!D-cJh$3|hL z!>IL<#{6vfC^@3yetc{f17wFRp?FSW=02!R(QgT`+%$^{W zKmZV?ux!jcQHvx%8;@WW6vvdMHJwZ6u=nWEPGa|xw`4R;Q`59F|NQY|rEf>$#)P<< zH2+Oud~q{{S&8*$E$ZNi>N-|80Y8}$)(Y5+uQnJN3x`)2!b~*E3&_q$s52P6CwWqQ z=?}EilP?G8is=6M@#ErTY1rW38NT40vf8k65XlT*(Li|433BJfHuhP19f6F=jVwou z7{pIZF>C_P2CK5FbTxs+<`l2WV|_}85j;`tL=8PCp7!>|1>u|F!83ti4GTrcLvlL7 z7KGo^dRKRti7M>J;J}!nwtJodcGYM%a?eIE+I-sfV&4JHV$1gJHBXQCXNl~i5h25& zcJH1LJoHrHdGR8}6S%yd#@Kn0H9sg>^zq6h$W6ByP^*7Bpxt@G(55BcE1) zL5hvML}n;Xk^yH{Z=%SQ8I}wPk`V?PG7m;5S0tn`7|oeyhkeMw9{0CLn^1p74Qv%{ zE8CyqwGP~`N~IxejMfP5$+4vMiG%ByT{ZBj+bwjeSJ<$);r475Z%(GkSVefZ|d6HlJfq3=d)C^5&B zpXFJK^BNNwggHVC5=)M6rG*;^+77Pj!s7k?3J);1uy7^^5Ea}nA_x&$bZ~Sktde(D z3Z$dmFhG($BRnywtbFP=_eGhTlwEBuo&SFLJvIW5GaKMTM)!;ATdT(>`|arlc!Kag ziYTxq4%?hd-Xjtpil~9m&sc3w)~ZWBQVm|G`QX9(pxhZT3gPzCq|ZjT02|KM@^2}B 
zUitd4mF$KtoUnxis>mN%ANNl>&#Pq!)Pq`srDj^Eh+J3}82Fx|kmII93RYoPu5SyV z`-y2Vb$E2pm8zMZzvuy_aTVwI#Vv#}8?9K|p>J!xekh}|!>qOwb=3Z|N0*>j`Lb^- z#R||P!BN&nwlP4A7!_j}4lOZ2&rEJ@_@}9pCV8^^!s&x7W-QUAAqwgO{Z`Moj7Sc8zQ*oqKaEMOn!Dllu+^op;WosSGKDR3XIFs0-Yb1SS{x z@FQ(0-A&I=xyVsv!4ahEzmfYHtsU%Z2v+P&R-K%>IxOT0m;i8q?b-p;BDERI`D;di zJR$pGPdA;!;zq~c%d?`Q??;c8>grKzo>hWIa^W`DKGQdSmv_1WtQ<8A_&bd~7!~zL zmnZXLO`3`eak%ykHaxlGv6Cl7AV@fJc60OPi=>`EKj7_0u>Cp=6q(F+mU?(|U|>~6 zRP3T*AXOgPOa>y@Tj*;|6%yChTL2fG8p}gzvos-z_M&1PGqKG9P(Wq*x=?odFsu%flTv;AX?GAw$p*^YC^q*O--Pw!j>8ZaE zY0;NpwBx;6P(?U3QXO!+YhxACkol*4(7&VEL^3Y+*@+wdt6G2Ta|kBKK3K0c)zkuw zjJZm0IQ_Uy>1$4r77S$picZJP5DM32?zgq^39n`AZF0OaI<#5Zl=_EEu1ZRHZF`IC zDY?LEmW^xi3>tywi@u6KsNBNxubgY-zcT8EjLIM1tIDWC4D=n9Vib7HfQzQQs z?rP@ygJ69Kblay-AD93x=+Cv9H<0&NKU($!T3{A5HA= zx8@Py@=*EP;%C8h#Flmoe)Z3b>&i&UV^?*qN1y*sWg!zEY!)O*cI&l;h4brWGls9y zjjAusHCu{64XxLIV@<_V{>`b!56Kj>hE&%|iDC3wC<~HCNWY9piz*5a+!5SPpkmS# zk(+}bYYetBFP;tdoDSkC@rHiQ6O3BdjPLT7LP)B?u?DSXEgXW2Tj=52EKHp!1rsW?<%>4(Hp{_S!Cz;mz7ib(m4x@O-e1E_eFA0ivqHu-$N;- zO^3NyfSzn7Zr1R2f#-<;lu|hVq!SvL)FLIaUatoR#zA+jF`b&=<7E2T1+u!LntCb5g7YdC#!P9Ed4j)zyTM69K@mq z0(H#%A70=-Kw1rROZnz39fpO4;jIW>B>fb)d7@W))MR)_k5xF2i4!5GN(28EwEeaak9Su*#c)1xj2;k;_Jwv zFg9)kw~OFGx3oW_E!x?5RX&Xm2e7Lnj;K)GNU@5SAo6`*nr>paBHlH(TWhH2)I@(O z{&UDPIW7c<7b8=0B0%jux1Y)_suND@b8pv$^()`Ec=l<`J62#2ORG_GHW>yQQFr~% zk$CfpxGOHM2laZ}cI|3FFt%OGd0@YBe3!MX8t|E#a1kKz3MH%Zk1F5Nym*8q1I3Am z(%vT6MBMu(iY%P&*a6@7RI0WjeXVd&r5h%&9{5+(h1PANN7MQYi;1a6mLyA+BvA_F z$dO>~lPGx@`$r2uj1aA1Ax8Z@FIqMHJq-6Uw=Trq^-w5^^GUGQ6;Hr`NfaKHG*0_# zl6cTOR2cDWF?a5T8?bb9On@i>RD=<3pp7HrBcSj^Zk zSW!Ij)j>K`Nof9cNjBGtnMNo&AQ|gNEiz`N2OlF0R)QSGsKe@-eySiQRonOL})p!wni%hU*6LLx_A{YoXZtO9pT7%6%GQpD0)G+w{9k0cnxTg8;k zJr~KGoPNzwfcI<2i1)luq6 zG{p?fu_d`XmEm8E&TfptnL$0B2JGiv*;&y2{`IwkXI|RenoM6sK~W@*!IgbRH~RG# z7?4v#L1=LvPH(Hg%l~^_-vX|@kkABk5NoUPomz*|N*7uy=Cw@1ZetU~r&PXr4#p_} z`%al7OEKHFOP8Y;R|x5O_wMG>u>hoSoj%=u-hO`C&bu;{$b|*J17*O0QoJKU+r_X) zoVF`BqQ-hAEK_(&`K-uSrcmx&iHZLD$edf$=`!pS4s{z`Q{qNY2gzOLFsG$71zP{$ 
zaJ@f^%D zg7<5URwDU;DGK)Sy<@Wt`7tM2|Hz5P) zoK}3T4$#~12)Kj@w!zf|mWLoR^uOB}8A&u~!(EWQ%4kE7nxXIRLTq9C%U)$mlH*1` zuiOkH z{mb~xf#KGH*C!L{>89zHVxiZB0$i0OnP7uU?7@Jxa&D{I_P1yvk|x?0iUNQH6W8j} zl;a6x#FejTAG!t3PgA`WPlQQ=%K*Zxy=X&NvOsL54!u)+gMZCrU%YJD(Eo`*Ik=j5 z^QfMooa$WqZ25nzOn_(~ps4?rM)m_SIa|GEjVup{LqS2|41?YvBu+ekXbgF>$f{K) zfW15H4X@3#QIUthaZ(XG=ko~1xUWTb)a8ul+H$0bl9WwBhOiENg~eI7ay4t|GWJ$< z50$mP_`cJFEF0+lJMgq=Q=N{xJaL)CPWI$tk~#-ZEkR^3rU?aF)Ufz{uZt`0C4T<6 z=lb`GXEE>8#;n_%n`S%It-k%ZMm8=(-c3Jf=^LPJarO31n*(RFvTt7Rwfbc0ZO7Gr z>uH!-*}Zc+bT+7=uEz29TQ7YozYx?%U@I!z23~%jDl?Pm|SBAz0EB#H*#~wLMO?FVV8rRaRlfk&|Q>fcR}%T@TJ(dFLWI(&uBuY@x15jblyZA+;bC>ck$KkH{?349bsEM zFDyL5i81udmxjr#zENI(RbG&ib+%~%HJX;Tb~HQYO+v!sp7iE&I$xbCpB&T;v=@id%_Mp~nL%Ch{^5hymU?Zl%7MLdlZruS2nwoPnptQ$ z=gc)!Ob6^nril|Bl-zAPLwdU1&cb7(nATj=KM$kZ9dfo}WcuTrJW7J8KR=aVfE|sO zhBlUO6Bj?NPPx_O&ll3ofncy-2IjFjeEt0?gzr?H%mII!?Bgq(DcXT8owZb6~t{UyJ8<*Ec<{2Gk|9fB+ z_6-EQr|IF3es`5K2L0WV` z$xezE#i+3x}Wq0iu~7&fIUY@QJ#1<;qZs zp#6qMyq|TZ2V|LVu(^t9d~VN$02=|74D;Iblqq^6_@ofdU>rb>!Xp?G056zx;1Fkj z<6U)?kI6u8qlmj%un#A>F~R08rU6~ZoaH-LzS0U{>DwR@M$)+X!-@o_B}+|+;mG3l0m{rU*D)n>Hk82N;TZu_~x*$DKBbB_l zmGNXjw@ga09x!F7AVYGY8y{hZMf1(&f%LsfP2CfTS7)bldBsa+gUq8Et<$o9-jfBD zm#df-VcX#^9bNVw{@qsGsIT81=H(Sx|Q3

rwUbI3$WCGkar|)# z-(bj@r6tiK6YWgD6V;SKeDcRswXvt>hH;ChOfs&#Y9(|r>DO4uK-u__hS80GRnWax zFU*s+(I=8PEBp9Q>^SuS5eVUJ8z*EO<1fVBw_bAxhaaMIrD^C4-}_@S~J7FU7lXp8{>y@5jrxSucUL&_LP3p#=*U^86n z>*2BSR#bN#?4#|5JbLu#EyLZ|{*hdwb^{G1 z^BAGgm7`l06FvJDiyt`QrqPp3#t#d2lJ6Q3zk;9Q`-pr-;t5nAJU?#&taEA8h9_8fNb0+Co`;e{5nDOc%7jE1bNijBZdB1Y^mX-h1 z`~QA1a>%_+7tC4aBi{Mqy8``Jy0 z=ylFpv}h{?vwAhFrc4GEcHl=*5aE-5kgRhew4Qp+$L$+bhaEg#3`clY8+!AsN7ckD(J zI7RzkI)u19!E81sWg&eD;4T5(wBJ-WSyq8}qlud_7|V|RbzPoq z#5T$e=zr|-DeI4pKOGQr1O0v1v&8|A7C)=Qu{x3p^1uDDmpFrP;KiFaQEW~psI%R) zT!F{eMje|@n#-C-vE7CE(CXaC82eCJqO!G67@gn+ePS-YMceo3u#q$Gn}6RI7m*7#EwjM zRNRGX??{BLO-#Jj)z>~koz|=(c&pCJ9-thhj(36kvKTeWBl`@&C7Ggu6b+Pf4L&G7 zvS;)XZdgz7EkE7$Zjruosu+hHIy6EJ&5RpFPf0X*oYfDNXEv65-afl90buYR20qS( z^*&f;zp&iQBOk_qZe2JJ> z_I=KPztDqoIXI1Dv$qe<6!HpilZh=KNk!OPBWJTuZOA@QyZ=6={3nzUY=8Di2IoPE zL1=wYr5MKl9{BQSK9Q{cZ?CmwZ)WmvK1x!?n}Hk*rj5*Q&YX8urXdGUqmCifO6FjL z-s;mf-2K(f*1fR80RNMK-LAQ!WK}lhS9w_$U~|!nWaaN&-G>nV)d;jYO7hx*Eb-#& zvL8}>r}Lj4(d+m>I}quxWG7x;3L)Ep7Ov`g)yn=t@+r<_19A^JmKe`DY}nQRqu{%; zb1rc?2bP8>C>7q{h|R>w;gpdQ@FYN0{*Zib(wE&6qX&dq&(i*R|NbbofGw-Hr(yY* z@KXOnG?$GfqDBlRY7~O2YT-bLyy9cAPT56S-r%W(P*BNP0-lfp-O{H25&kd-H*tt< z>aIz_ri~E!N1;2|dy`}}il2!J}j0%JWft6O$WzHQNU*wTth!o0BHgVVq_R(CbD z^l|nZEt?u4eI^qFTykG!WrcI0C|0dw7ZE8vP|>#N>0%ieRZUTSRVA0bo7W4JB1bqv zF_|z^1BT?kJO6$La9uPSt-h@*euz{#5}hXUDrcw#8y;M&CuAFm+z5C*c^>@Lx)uZy zc|Es+%eA6dV#MoO@Gp8;^Z$DS=R7@s8j5jZKraC72x{3Mb4t)79>LxtoZV1uJk^O+ z#jd>a8u4z60ZWcSrZ~?a?CckjHFea_tXwriOfFFYNOl0IGvIbQLY8V(t7dQR^UTsu z->xDr$6o~ftybg42dQwQU|0|(|64&77UqQfIeFQe7l+9X?obDQT|Z5VK-x))o$?6q zZ7TiT0x@iXe45Z*xKrnre?6iLC1Va`c^^4oozdmRcYxv3vkQi!TDr{x1s4i|vrHfJ za#DpBU_~)fh-F!RV?Se&NiA;wGj3RF%DF-D#%7&C2#|cM5=KJ^Epnd>|erCre^Uk?ttR^+q#QKPgcGu)L zpVJ`-?;f8voSZd`f}HdoyoQ7}0z@)rUYo6NAxcDb-lxy$%Efi4dNu}?Lo9})*%-2c z5DV;ngiI(BVOu27)z4z`!x`AX6LhVXMmQP@suZJ-O9Rx^YS5spjw(-VOw0k4!>EuY z^pl2Lj2N-}{|NmjFd*IBs^7=s_Yd1g?ZIwUXYSnhtFP8je7k8nm)#GqJkHB}o(t_d z4(jQyuHm$;jAf580L}YnkSjVse4mXoB}g4A^ruzkp8D(LNk1Nt6cvIF2_T_a_Le5* 
z7*It~q|^C55hC(rSI5|*+w*wtek2Mw@kRm98KsP8bG1N;C*3&IBwD&mzA5?{*(}5k zG?_7xG>Y{+DH$zok)R5FDx!#2y0qeQt0SoT4BEBJSmlYXmXs&wPo zOX}v*mnvO^|3p|9@)a_j>?*}TIF@oe*J&O^vD188#WQl)@h`MTF`Intn(D;a{rmQb zCNr0MWdS~McpHfd$B*-Mqlk&;CPRB;3btYG)0P_YTjMOuUy%)l{@ z>VVi6#8H#~_VHMd*Plm?90P@hcrv5p>E5@gUuV$kYPcV-tF_*N=itZpOGaRa3}ZgKIyje=SPW; ztSXHPS|fyHSYC!VXk6$8L8;?-2w4~T?RHW++sv7j6pLydGIVv?noQbJY)0IeolNiHkcTBO`yV=eFV`OJN&5 z?p_4#iC7W`lD}(SU#569+FTKBRFg0!CrDR+%_sLf32f$yVA(cA@^@Z8OlU%9fWlFN zkt+pK;oGQboagu8homB>k14gHY*0P~kjXo61RC7yg-B8k&K&KMg7u)(n-ZmLi(+~L;DE*G(L3Fb>gOYa zkAfLl@JXODUdb0o^?T8t3GL$AF)J?UN<1S|b6@q3jMiF+_Jk4~LKEPiTJkYQ{E3%iwLNQ!&#W&# zv$gbp{N`xq_lc-7M$OjJx4n%{D^kq;_<#NV@`G{OUyo~l)pBPDro28(ht|r<$%$aw zG`YSE=>bPMh2ail^R~yQn%d!HO<9F{!zW_w(IcFwasp7Q@JKrEoTymt#8^I=)1C!w z#U@}|{N^5P+~ZDab~#XCGDr$1=;iy=%e|;!DL%&{%N@%qM%6ojZPv1NLoP+7MJ98E z160#x3jzeK>~<#w4FRvHmJDa__8QSh|igRRR8N&yPTL0`+$<1xQ+;omfRphL!u!<@huef+pph=t#D zyfwf(=?_V!4;Lv%gk|7rPWG(KOkHXdo>wxB#q4_da~x-<z^3FJHXS;rmL>RQifB<7Yj2p)yI4#Am+Sh@i|gcEl3z-vDcnHokVBEIjJS7u3EMp^Be7B zONXs5e`Adi%a)zH%BTd&Pn!woJp^AHD09zB5pRoA&~1Vngtpv$AhT{f`?2%wx|aBR zFhcuAW5;ghFp72nHNYnsm)4U96kopoYu~<_1)J+)BC%lA+;>xr-#Hn-n^yY2|JQE} zvOUW#BOsGmk2{#~7{#K=KD{-44}1V}ycwy%8_B0TW3vUwe=Q}&ijt_hUUje3S5&kl zK{i}H074M?%WDp-`o(S1$rd3nQqxO*EC$4GvCUrHbH~rev4b|7gA@f!jxtw6ne?RF zY?Qyz{=L7I$Di`s;$=Q-)?p~w!=$CJW}apvfv3LqU){v8no<<&?^$S0L39@$M=%f; z(XK+`T3X!7mB7VO@!tFVm7l?C(WGGY!26NpIeNhAx#Ymg_fm@^%uHL9a=l0I#HOmLs8{V2RCnayV3O zB2-Iy_AK_y3(JaP!x_Nih;RZBr1}O1Bh;DM*WE6{S_{V7b?B6-*ge-tQr zA!$?h{*6j+ipklp{v}1j<=ceL*i z@>CKReVVynY#8+O$f!>j{eEDLU{==dM@2X-;}Kx)r|T|bNunKDS5UvlCvKD@2O<<#nh@EjXg#>B;z~%SSdQXX0N{hd2MZvdJ?PCI|%)C%_Y_2+uI`0SyRsW`L^aH9}7<5PDh=&HL_*7 z381L*0~#Z!T_>+BUf8;?n~6G;tOEUj&GB`E4e!Jp`>p&6Eg}iRxfMHQHVLDggm~!J zEe&m%i}6rk1zu&k!P!>_WU+NByLW?;S?8|IpkUDgt-?&FkSh1X>}z=fTzvY zzxDwcjstL==5)oY$5l=M1N+11jNa;+qp%-~YIq@^J9xsS&fMdpw|^^71T4<7&1T5m z5o!+9U?y8}&%ouUU>8fDtk=gkAk!`IdX~XP=J0^GrAwY1q3j}58<6+0!T$NaxELw} 
z(jd9w!G??l@db}wNV~JSnhKZIB45mT9ETC{%pLWsxoA5yTedW4*>dmd(F5-90W0+yzI4}xiwK4sc;$q16j_^2W~t$)qPr%U6+|fl8o-58Mc}%c&{D-ocb~& zn&hpkdH;DY*8a#py0ZDcvWO7y85jP0Gml>QP5+vVpYuRjTzbcD(l9{|YQxE{YmUnF zHo5X7)dHJ}bD>}}0kae34|2iiQ>S+73eWuU^JiNId6uF9mTd%20=rb}a&rWg6z=Mf zECI`LwCnd|Ow^sn=LCdr-Fmj0K7Y>DroN_i`VJdQ&wR|ld|w8sF1T~SOYw^S8{SE} ztI^`>y3%{dr`xT16;c5verNzlprN6WO4)Qb%&NV)HTP6@mT<-liLc&~T-0Rgc8IO_ zUCs)Z%2{7$B!8#`s0J{^koGvWZW7cv+p|YsNk;kk*9(}%iP8LsQoKVDQ>kG4YAFGI zsS<8{c4%vkh{+zT_E~X)j*tB0OJXXaJgv$Y~yi7%06Gh_ooAkUvSZ@ca!rVa2UtFbOpjCn=Rpqjvd{E%+a z-Mc5_OV%`#ublK%R5VQ3kyn7NM!kCX;7+=gE_Bklx}&QqhN)yscCer3sf6DXR7o8V zZ2I}x?Q{UH;i3GwU zaT`R-z&884<=mJ+WF>L{b2G{g;L`6&ST>_$Vk7y9t~pSku=}im0A0*D{7Zw42#r{( z{w{6)is{(pRmI<0fa*4t;`uX!ymvh!Ymjux0Nh5spY_I#n#2 zHiA=gSYlm94Q^m0ZmU(hqPH<@8@l+_J6GuP8PP3 zHN)O6RzPFrUqyAkN!N)k{XH??sXMZ=q6!Dgn%aNzu_+g)IgY8w)B+mNWI9$bc>q9N zb4eL$)w1GFxs%2GL~P36(eex3M_|S_YAJOh>1dSg;5!t9QX9$xJ##qzJ0uF)xyycD zw0AJOA9d9Zq=GmSb}{e2o5%`YRPp{PnhU>OBYVWtKm2_+61s8nX)rKO>- z;sf|+fO{dp%*aF#zL-_!iW0Jl^|JMHtTgR}L5^%!h-r?P^3*wB2|7iO2K3oZq2S+t54~`=F|M$VA zW)YD^#d18{W14$Wx_|qU+CLxI)T!kU40G-OePH>yhyJez)_)?AP)${T zP}D~Iswbtc+57JM(0p9G|C7Wq^upQ-ar8kvZs8To zOCK6;MnG8q`_Ozf0bgjI4%RfU^ow@Wq zbh-}tE;IzkkLKLA0ox{F-@&Q5%*Bz_2(vW)#>OdC$@DS78F=D7ILDQJ)lHnDPn^JT z|0KtZhyoA3l(j+ps|m>Et4p=gVqev*FY$IkZSi)C*=kE;QX$tL&ZP+lN6>h@O;l1=Qp<=@<)g8(}&ejTcbvwbE~< zByXcPYaExV)+PP7APW4_w_$01`rD0NflwmmsRl=5f~aea@>am+kW3#V1RcUxWe|Oc zwj{EA2|>V6`3Moe3=oe%)zM+Xgr@hlel&xvI1H|l%{K?dQG=r48XL|I6zqfme{WN_ zSkCS9Q~%D7On6pb*(MSJSH@5D$7PVKJ|z4u@E_0wdOk}c-rbm`3&2H2iMht{nVJNb zbs#b)K^jGGb&lyrKuoq#<9o@olJP2>@3lRiQs!FIyxV*Ka#W{e58komNM##;@(h}= z6z889s8^$S7BqTxDG__QJvRY38A+T@r77gZw8VN+pdsoQmMHmB!w zv_3RcM6uXIGPgN|XD6kj=n+@FS~zcB($@}otd(p=D{fs;%zYKLz@#|ONldkhprmqm zKGItTjD)^~R7^V4_R3-IhtS5lod6a9k=kCwzy#b;wSoI<$pL&bRXJvW%Gn?^!)l7S;^F;{fl zS7Ot>&te@+s`brtrEox~fS}4P)`6IzkXNJ^FZ2OJ~MU-l!? 
zY*EHo5{YRx*}TO&{U<+TxmwHx}l!lPz`2f2o5%S z^p+GWix}?yOzI(ZFoqqZXggD^w#bIE6nhiXHUAyKE_pJEeP7sv-a#tKNnSm89z=CWt5#M2G~M`G*{4`20RU`8#oi zMbcymLBH(fnOpMXeaQ*hcFe6AZGt!`m?NddK8-l%IS4h&Uf%@6!73M#hX``aY^a;x|?i~(C-7CPa)ZDH`qKtG&2^xH>-BW{!pYF%~xXb)G zglOZ>xrYmN#(z&n-%sH!(lj}+08Dlt5!zu?pB`F%FCNraOlcc)q{-f%Tn$1`b?{|T zJIFb(pPB3k0==Xqy66po(Ibd&v=!_zD*7;F_ulmLy>JMzn|mRDCsHVj22IbpR%ijz zT5J{W?Q@d>=F3nXU~Zucs9^%kxhyCA(C0@pkGiG=nMT-#tAm%%y}q~CO#4;$87X{p zA=1vux5~?TCH_*;mslA~vqt@V>rYTq6^L9QA4`fz91GOJ;8KuBVO*I+<}xmO+J&Nz z*K_V1+R7Fa4_L&P&$7+b!SpAD0YC}dFC-t*-h=J4KBb#Bm$Llj>VSCCx}a56YCwuk zqb|ND2n7^V)0`HRzoNs&9`EbE87=O*2+Z)be=X^NKQd5$k3mc?kW4#8Ol(dNVdBigjZ# zPH#Q6;*b#RrABSjUrYfCa~(@&^mqL0x6PQ?7hay(QCHW6#i!OCGv-#u1LZ;uaLG@n z5?VQ_SLM`9ceZt zjGVdSV^^G|F+;z0?M;Ix9mIujzPo#ozqDcOd!O;(@Zrv)BlO-Qy#|D>?)y=NO`v=! z`E(BP#-1(EA)m2&Z8GDCqgJA);zgzl(9*{B4QICV9M6(3LQD)u-Fwn!+U7(_j8Jm# zx`|CriQ5VgE)fIXS!!l>t?|)f8s-t%4uQ6zHd^*#Y5C?=3I?KAxC=#+FWN-Lxp0;n z_HWc?bwx#NQj5_Ar4B$P_lwf812VL*FtOO{Ro5F3@q z;e3Z2s?GAZpo}y4BX#iNXG_=bol=qNs}d_j2#RkbfRQ_N1omRk$(>6!?EM{N%|z5P zxlcgwcS7Z;M5(m9*3`H{%GYc9wQVcrs=9TiPEQ4-W;pP z-t0m(Kd1lPJE4EKpY-+dJQO=yPtHL~Y4k0#vAH!1L;GyAa>WH$+1}2ZE9l(kDdhoJ zlzk%3z;f^kCfckvk)5#T^mrTxr&0R__)7l6QxKsK`VTAeMgSI}J@0Zx0178(Z>T2P zFv6ieZJJn&S#b@(t8M85J~=m?9hko?xQ}7eP^e~rd>85mIYdM(LPeSk>;`Ww`<;sG z2%kMLr3itu1SVaPf&sA=`QI;sDwp0IM14q=djW+ru^U!VK*YIh5x^>~bL zxQd!~28@avTnqDFm?Gu84rR&XA4@8F7fup#bYNZ)-{jF>zi$P-521SmTcMexPqy@M z*s?M2_$QIfj?1k7>OR^8vsWu8Qq_OE8Fl!u#=;*Zo^8jfLG;>udqgJlntUnr6NkS@ z{=9dN(qH;LZ#W({!dt#Ya>dXy0p9ufG&CQQ?aB(#TC+maH2=Fnw{Bz5`Q4@LmK>O; zkc)y?f~^4kzaLmSj6`KDxD{o8@wf%QJ39+e&b9vOS&&f!a;_7)9GFpS0v9aX1m6Db zs`YfhznIcvln^NBoy@IadB}3|ujO(X>jhY+aPKg3*N5y?V=IPRjqra6H{}}F?-hOkh^X+syICT8pB)Zq!4wsvKUw-?> z^24884R=ldt=WWYJrXWIs6I+J+MrQm>+J)z0?k5375~V7{B7ZpBUuw|$L5_$pEKw5 zr*k)R7f!yFdv5fheV>*MbPStxzBj@94+vc`iwY`4b|E7^XlrZ0xLI_^IFE=)0cG|O zZtL|fUjVQeg)7D!RQ^{W5T`gf$=HLcJf_urt<0gPlf`Eml_Q(38A7yeJ9n-l{vG6I zi+_w@CHMXc(E$LuOtK@6ryNVX;!{%_u|ozUo-l-!p!fB{;v%s*<{-n0eJxwtgc;fZ 
zrbFdNIPgy%#-^EJb$bMzlwryU< zKMJZ42nVr7Hx`TM1}3d|+ephYe|H+voG^~v;0HdU0@~H{8xpZ0!-wa&tsQeuFMJ8 znj^#@@2^Sjn~m{TgiYQ=V)Rmm8ZC>7j`0NNez9KLvw=EEbL{6&G=Ht9110|Q<*HQn zmtght-QO{C_0H75PK`o8F40Dc@C)*&wx>5U;hfrAOycMP-1G%;uyn`s4h7uF zV{q`zTC_+WcoQrbPd(cg`QXfbQ69{G$={&(s;p4RXV95=`WX;H9&pm1OP;b2A*HrG z{s&+^}*=>6i3H&>y1_#$kTCN8~ZL6 zy`v6JYq&tF=G`iyoDzd%L`&kn{RjES5)bGv8J`vAi2(x4s@NUSYTX>O)zJ@Q0!v;| z#)T4Reh2Itb53^xk5|5^gzP_I3y`VvL47yvvmLW#ctt*1<>6wh{m@S)$ejXnR6 zVcPr|HP~gIpcN^Xc^*~TG?LOY^zCf)xZ?j7M6(HHu$R2$+PADl8Gri3h>gDPOM1F~A4`sBiUO#u7jz0DzP1G#gh(!RhoO!NBcD(b6JX zOfl5Z;iNhA;bDw#LHN{fXKYsedq<~&(BP&S54PZ4s{;y6H+Od#j+FWAbLnh#>wh?v z#M}@c+bZz$Y|ZK<4`L)i_29jX$2pv4*Xr#(`VbTnI3T^();V|XoD9XH|6vUw7rp%y z;O=98{dEbU&N0rw9$Tfp>AU1ZTMFwbIPp?>Orb9fhl(pK*W>6%#FU!m3dM^&>3QVQ zSoPj!Cc9Cz)Ow8iWbC;2tPuGKd2Gy&5yMKJoOq4^a?AsZhfJtK4zAV6aKN(9HA$N> z7`^uEipn7}kf8vOc^84NwW)%PVH3(4iW$;#(t}5Z&g@ zyU4*U?~G4DyS8l=@uIg+UW(g#EMcNi(kP?pR0BcL(VyN-y4JxSTO9Qh``%9RY^f1? zp~+Wo-KtL(KZuO<7$pMtM7l;F(5b|Ax7TgMa?qy*U$>9zpn1*k0A zYl;;Idu|_tWl<@vVaPKRnZ&h;NAps8RKGPBBQPF+lV=koQoG*iAkT^8{E`G^w2Zdl z*40=&;%6v+&S%X#>dTCbizD#d+TAlqdj~%RuzwpoS}U;r!%BCo)kifu9mssa zb?Q~Oxc&(vnRd;0F+CC2MP$c>q_S7U08+$C2%6mAKWi=aeLUlacoG7@qONPHFpqpa zU5R40g}+Jy1rDa88-XEpFqK3Ab5Y!~$?j?;g5xgSZ8XAo<&ObmVLW5t(FB&W8?^fX z=3Wp)#vPwp4f&s<+2Or*U$?fRlv?B{w~F9}DernVt5~U##F#RkRcyjL?`lAs4>>Nb zmk|U0p^6Zn`gZNsFl8F$!deQe=M)PvGmS34wgVh#bh^w%dRL9f5#v=(rrRWA%wu^; z7}~&K8aB0e)kZuJJRbH0zgkb^Y1(9noMP+{al;(2qCluKv1t(HZ+3R}b8g}TOy3b; zP1JGdZ#Q|QOm-`K*>2=uazdtriB41dWq&6puU@~t2Fc1+5*3%egXJzqxB&WNuUM16 zG6hm9$b+T0+S|JthlfH;!Z3&_cr|Iaxgb?vkV-JWehvZI-Y;}2XtliZu zt!uAbxzg#w7;HkN$>7@q5#-j7)PhSWWmX>qy431CFsp{5Zmcs~$031G$Pfx|Z9LS^w z-V+)DZVGem7i>cmc+9h7t%=8Wt$ z-1Y%faop)S`-6>aJKdUjZ~#J^;gl!X+14kcvc-b^%(~6^ebT9*>3I)CPK^J_r6;z3 zKMhI4;Jv8dH+^{;v$Qd~6}cI*I_0`}va&~E#l3)*G!|B=Tv@YTTAi9Tw|y7eXVbX* zn@VyiUM+9kyJx?on9EQ<`x7Yf_CFq;P0eLeYsTXC3WhQfsrl6Y9W5G6n?AiNzPm|p zDdC1XcUrRu5M~Q$f*cZnj;U)(4>s8og<8f~y z0l#O|h5Eyjc8pBpU$6q|@@K&&3CH}KY;Z2h^gDxSTZ%a^ncfio?=)qK0!qYG7lqn) 
zoj5cG1OzBDTA0e$n^^yxo)#03< z#Xb9WC2AR*?z9eUYV!1xF*WnC4}-BqV{wmo(w>t1$cJwjGjBN1K5pr!>XL3_%rNbxx6lPrf8bQwanX{#{T4wxmaeSn}w$8A51`7$tr~BD6PD&uOJ2$K0?>D$A4v9lW+K z*ucK#_QZSlnvjp})EweK#@Za_ZS6-&kzhB!Ik*FMrmMs3X5bON*%W92jjy>3+N2i3 z$g7ef)1FZmSTB77_?{rr&e5ZH^_tlPz4WtAMx)No&+^%Jql!W#=(IE_#umg+({C=l zrDtd7eQKf5|0cOdCf6o&WMaR+`_+3P0YUfNO$1l1DN1dZzDbSs)Ee_41&Al)l{L7S z_fS=`H~}NWAAFWti1aZip^%v0(4z^3@dnhG5677P$y_v{IEn^Z{`K1^agrb0uS=Jx z8`ZbOZsi1>Lhg4Aw)Ff;60w9uRH&R2$p$AhvJhT3I`#I=o7=!B7Wapz#6Qu=(M1aa zFd34#gPvIj&Pf!Nc>JR!v0qts!ttwi3!cWA^XF@4ai~4#$*UhBkgq~hhfGHl&6It+ z*uN2@Lo+k5(mKz50SPJEX;2t_@O7f559A zy?bAk6YoilE|lYyNj^BKDyc?J-4DuyH+OvOT2wJSIrN?<1cjIqQ;$_tZ|gaRhU ztYe`m3MeOITW7R05@_-g;>e+SDvLR7$B?x%jN-_LrHd%)WK3jWD@{#Xo!4u0M3dq_ zyow3OS9fT|z!*#@Vm!XvPtCO5-u>UYKcz+jI}6fxFoQiIJ8CK7AdVDf0~v{D&&a>T z8Y<=d%=Fj`3AMH;9}rn=Oor{&$gM|z(zxPE3MTK7(IwoXRz@qDDIZYdh8^y9mlR$y z?!d!`4t3l!%V6t~BVB3Hw&+sbE_mcrcQ>~N_J%by#va-kR8aL5sC`n@C#J!s=86xsHFG>86A^#>EDkKly0(?ErU0td#>-P?Ut^fBN|G z!_&p-8B|y@;+{Q}Hh!b&9nj}v81MuRfV)l6?;jFEV_;AvN9(a6#I)ZwX6kECv#uWC zR&DuG^Tj?`Ej9JH=~Mlt*4Rh)kG?Rsk$He^CM2JnK{6MV*#+3gW_RDqNT&*7kdX~y=PVph4##$kxTdYomZKX%emF=WsHBiO%tE@_m&(4E$W27dGA+Wyf7qn zRJ=EyWVeqP=rDTe=XT34bp3HUL^C?yPtSQtabYh}6)PsBQ<4^*EY$BtF9l^(=k13} z9meFBu){I}rDt9`eiF`mc>)=+%krjI(@FyW$?7mp^&&f4rN0t-!v!~>ghFz?rhapu zoXPX7lw4CoqtVm%i|_$vJ^7sq2lm1+$9q0a!I{6`Py|aNJyb>g!qQ2(kih|-6x9^+ z*utwAdLRMp^!0ISuFa%THM0m1Z5ae(ku3)Gl39i<^fA6OYRs5FPpDQCiMcDb_k;7@WL_P0=lHPmtCcBlj!D#-{iK+-6KzVA1o7RlFNNx}H<$3}`UxJqLDH@)w8` zJ}Mp>AnV=A?_Z*}eW;S)lu6Z$%<7kj8#lOhlJHSLt{ zh&ICeEwy}fokI!cBStiiYSWuJf++!alT&LbRz0*7{Te07>eiNBmdCuHr(qRdTbGCf z9CI`BZ4OZP*QG4Kfdj~$zzQqll;M&kf*v$UhT*pCw{#!{>n(p{aTDh&+umyz+t@j2 zWu?3Ybxj7Wa%=+yVD$}9>3x%3m7_PFfECj_nadosuozSmNgiL|CY)O%1;-k&b`}}l zbHE9`oMP~~ppQ_qa*^jorG#Q=J7bi-RKej5v&-_K-5u8C_g3TD&;7U#xJ!d~2J zw|R4;I{ztLK39TAyh@Xd%+Ya*en^s-66>*?dO)LEwQ4Q4V061-gMJWES^w!#00;>0 zz{4}DeUkZlwhV+hnpz05TSckUWcXXO|GT z$H;_z?A>d$-J|Kay;ugMo}SZM5yTfJe{Eo()9(CN85xy0$^+kIWrg&7cpdR;!$ys) 
zU*x|?EEosT7Ik@P=!Q4FqE9p$U~z%dcF#wak+|ctFYUhlI-EY{W*Pl!YW`SZzW>3b zqVe15Kj_qav}x$9YDZrr=d@CFiTE+OlE$rXIuVVQuP`HN$)NGH&X?BM{7~lVPtrE^ zaqOeCg<**L5}mK5msP8F?I9*6&~I@}I}r_rz45;XDPoJO8`@2oQ`O`Ok zU!mn4vHb7oTN~}}{^{|$O+cTLHA^;$nBQMZj<@hv)d5eQD$Y$* zy}05<0u_%bzARq`m`jBS*DiBs)X#2xpP245=1mPR6hcXBS{dkszgl(fmC4c_^Xqo| zaeAF^`k&f9Wc6Ca6~*#Qp@Tz_QKzj&KG*6^#ooX^^1_?N-MV!e!>lJta}7l>KXU!F zKh(03<&}MjaV8={l8gMr09Fsc*^`InmA_dNB9Wfmj7K^V<{VnU8IDP!C zL%|*Nk&v9;#jqbws-wk6S%co@F3xsQl+vl@ce>N`%#@xRc<#NGULg|>KnpyW)cpW=(S*X zon5Y&dIEQkC_dw?hnG6oX({6c+6w6IJ1!*ju>0%OYS7gQ0Fm=Z-5^sF5)*4+nxmJ) zsqMs)laa})PM13RudwcVJoYGjpiF^vz5os}6^h&}fW}-eFQ4>rQOS zW%7%hcj^OlvzAi{iWg@Fw~{3^j}ySJkyDeYz8l14*=TYF==V^}tTIu1-%h3R`wS*hu6a8Kej5sg|!E*!KGmnU^wL%4N==A$+m! z$PHvt9h?-+kO_zlajQ-4;Xe!fZRpSl8k%ODSo!d-k;M=?lf=l@XX@3~B?CN*#-+v5tC`heu4%x{VAqHKB`*w`YORdweAE#&$`y!A87D1s+CY-d ztSpHQ8x0Z&4qFxHl9BuRHYD-i@!g2$q?g~x`%T+~jw#FqhH;BSUM*_)Ft`pK0j>_-`GdAy7?wPtR`}tgj;4HLL;N0PvZf^C9)&%b zwLR41Rc7WCHX=aX@h6$XqVu3RT96rpw8>pRG`XLNRwCXH+HvTwP5{)R^^d+<;F!wf{2AuWbDB;UG~2+0IB0j>0aw&LJdzMq|(tjk^J$%D3zq|lp( zM~je+rF0@=MByG~SQmlf5`Q~w{Gc6j)|hF*N)(ZYCj_4+7ucxoQECZoNYi|e-OqoY zGpDQm^&>X|sSf5^{Mt(Ku3`h;yEd9KKxl*r56v;jw{c$e&X+WgpB`_gJ80?Dni(OKJ`;J$C=}p3(Qs9{PuWJmZ;UgDDo@q&keNhyZ%ed9neD zspj|kt^2PfrX|qTn1)o!+LpoQoloxK@!HQwv^LE-X|`bEWUe~wKPDZ9JME2lsa8qh^51HSKoK8iir%g`b+^9W}N$D#k zaVkg^(5}{j?;Nw-Nn!0xoK|NtetY~wK97MUgQ9vvWH$EOWUTF{Lo2E65%dbly>kYP z@hC1l88*wwsh$4yt4D8sD)f0yoB}Fqn2ec+)8KVmHIL5txu&$4Kh|aBo}RS;rq^A# zFwiT(9J9VQ(vam6-$O1Z0p|T<)KIAAfH}9iAUOR%E#ME{RYT>l5aC-oFTFC?$ zy&Q7qw%D?LjEgJzF(EA$_-%e}*^}c}k1*Kvj?0E&A<;sE3lNu+PHx?woBH0qIFb(Ci^LY~Zq&%%L+v;gP8`E`k?;mXAl;X!e?`Yc( znopqUrCH0h#eZ>-%baL3$e9Mbn!CgCS#o0D9u^99hYy#htgV&;^})@u$t{ZJ=r2{g z*hGHg{*F0u0&L8-YpjoV4>&u78A~13+D`4&C_Hl8h}Qe&BMftzkgVUDmE71b#B-Q( z8(bE#MF8c~{I4yWHLLl!dGqG_TRZ;ReBAEE#dF?2Iom#bvb}v1i1fAdU006r8^rhq zJag7jT2@kE8mf_Rhs+jz2cs@X6F%0o3TFeA;&k=F1=pmwjHXjN^WxHI4wq5eaM`0V% zW;z1agmqqCPxH{muT~A2&$X%1FJ|95KkHa2RqA|1M2hzs2ga~gq2BS%XSzxT7si2a 
z7iJXf^7LFUlE}3JZUrx*4s7Iy2ftOsyni3{VF*BjS#lg8dsGz8eAtDlFiD}?9~emw z>neO8EW4^y@#OS+Coe2|yC>`kt;FVUaVOU^o}hmqvyEzn{kVOziqMz}$>TmtAr7}h z?!TEFwf3tCOZCUQ1#Li2o7@`|g8W()xy@|Iu8Kht6fy08pEAXZ=1?IZWU`S$%2%4Q zp?^S`6rNF2rd4DQ!QQ-Sicd>aD~Yi4Ycnfd+0olqoJ^TfP+Rf}vpt=cLXY*>6kcG_ zR#~ex-F9i==kVZVS9bKE#b}gsF@p9D9eOvNdlY!G4h2(nMaDO0QTeJw7@XA|j6y$r zuA--|0wHB@{cCu7)T&jL+!rqNo{zgxaMBa&xI;c&Ij1O#>*DiJSCP2_GAG-m^8rry zODN>nVQTxp8eSYBwW?f2o%B8B$iyAY<*^tYXhy%6s+0IuFtf%W!sg!nZUKgqJ2k$? zTuG8YpN=(ELG+JfMi97$?9Z2R-eJC>N+6oh8^S8mO>oX7Pn7%jlE6pe#J$jT) zA8o?9JxjEm162;Mzn}PXQq~ARIa2l)(gv==b%;Y>6zH`<6n6AfP6Xw*^CK9Q0-rP+ z1ZTD(0%)BAgRq7FSECw9I^}D9EA|1#W5(gKl(B8+4%<4T|>l zZhc_D(ja@aTSTk>tc4AVrcvjOfBd0oU$=`&CF7}BL}PU(l*aMx=g4X=`}n>oY)MnqZ%-oI3TUc(djy* z6OBhj(?QZ7NxR=Eb4FcpaOi!tTG8ltDg;qLobX(<=+vWH1Lr)8sf7-sFLP?y*;Lgw zksb~L(W9CXMMi*|*F9PnN1WWsnx2OKP$vHI-@L18TTeX?=99dP3S49+UI|)DV44kH z7XaqUAVMJ&2Tj&qjqc3Gc|QpECi}PZ7A!EZIz(oFMa(n~@7TN7p#NN>;?Fi`^XUev zOL`UfCO6lh(>ieLn&tzi*QgzPG&-iYexqj1#!c0Qpg8~LZ|`1a1DVzx$4 znKi5PEVQ$J541HkVUlVPJXNjMQtQ|_{R;N`T4Nli?oQuvCil?v)7)E`p@|1!kA&P# zegA5b?{Mv$$-fV0X7qT6xvUI}TxAif9?8p z(Ss0Zdp3KzJ`(vz(5qMya((|OVdp6i2A^erWv7Mj$TaH12oOA&MhCUy7y-Aj}VQ-;p6ng{|J=AxF}AusDisb=o_4-i98-5Irs|(-@m`hw*vZ} z8v5}tju^TQ_VhHOZAGtcTYWWJA8csq0rm_7MuIjJN+{{=^}&pfk+I^`F^aeUMOeNO z7yJ=(WKeucNi%L-Gu&M}pM4AE=G)<@Q2HW}pnqr(xFT?QX*K_nJ=};D$xtv;Din0m zZsb2n^XzO1OQHfrDW`SsEK(jA%=W$zl@|L-Odd{-^LdQR!8XmI+eO7ngPEeBC+Yj^ zuh*k~XY8*uR;nDa^%5tJ{;o#2xS8FYMp6K%7XH17uYN9?2~O?;xf87N#Va8s`0%yS z*aUI7mUJ2z+Tq!7hcH09WBa#lvz%H&@$LNo6=kRt)LpT}wq8Q4Lp*lZ{(T zHBC2l6+)TQZ@Sok89AAp_fd*i%5iUCA}uq zARfA$HO3l6G6~nc@Y^CQ82nH~{t0uJ=EWhLLu_xmr+{szK^YU(;1=|}UZUe30UTLw zwX)z+odB#fa?w7#lnQa{?XZ2XUHU6!(VSl+mY{DC>=adTJ7!RFWbeZ7!>#cJQk!zX zO+S#+KZx@n$gjPScyTLBdc2=7e0U886^VF3hPrXpQl?0;K~sX@ZBhzQdjlAEL^7Cy zqMDRhM_)f;zp@W2TbwXXD-eL^HDpGg^vL+eRZ=s*hpp|kM@zjzQT+nQx zcu><1VWvg!`*1Tl*f#3cb6$AU+{#xsNad=bv>LFa0S8UzyPl+;ZipsQX6EP`b4%8u zC&&Ck^enXVYLVqK-)1;cI?edoKJ{9kF1S3CA-^gU4t1BT1IgDIhQy?4 
z5xT*_GZWklX+|CNPVycQ8UUK!mt~ygYUHvBmwO_W)PKiL*=6WdnKs;wY>z;;98;#_ zk-5*KOIxNhGr0PUWja0XuQT(gEnjeh$DN#^B|Hk_xRGDQbJyshz%8H}nu&%jTW*r# zURSp&H>VhM>TKGM&%XI5tq#DnPo=+*;pufSh^x=}vTM&C^e9dHF8HSomhyQRL@M5K zjoI3^NBV}40ylwDJ+`vMQ}M2)b!yI2A6g6pAwYm}TS=M`VCY~D7?auN(v>UK81>n} zzQcFan8>Yu`TXW#W6I4T9OMvdYe{Ei%n|7BU*pyV9?|{7Yw<*(kaxlZ*w0 zM3+ElTlIG|3e8zYH4vLIqQ0&ujL^T-BISP)w^Dogmp3w|*?W017K6-Cy^#8Gx+86rK(XyDqggT80Oh;XB+z1U$oREE}=4Skjyk0%l?6m z_u%xsFSbwSB$lo|y~!&%ia7P`M*moZCW^Q%L+!j9XHwc%qH~liMI>_i(`EoSyJ#&_ z5T!RHb&@H&2D4{UX^5(jBx~?|uNTl>^AKYN2M0?@h*jEpCUP@qpzHXMVLqSflbCq6 z5ypB6B~>)%k(#ZpRvTRB$cf2NNIbQ0_q*SPl9o}^$j&4)J-d(|pg_0Exhx`;as z97|>|GuK5M2mYy^F#Oq9W)W@V2R3!^x_&Su#GAxI_)l(igB!n@2|u$JSpcfe#0rcd@`k zQ`s{T6R9A4+n~znJuYTRck%8!F&Q4r2gYX_2bFMl!F6Wh=x`YipshrOh{LrNSgm*j zN+oM|B>5Le#T& zAAA>KnFN3@#l+MgZjz$l%G}OrTMv>UDWfmgTUvUR6u)2DclnpY35(#bWD2aPP~tN5fv8e9 z%c5>{$>w2gq{L6(w?<w?2 z1d3}f-2+0jk$Tz{K4E&^mvMjnR#%$xJN-a|AbMByAa)L?x%uO0!cACyjgI5N(bG)< zXS^9J*JgkT*p1y6#+Lp#eYu$zbC5uhHXO)JwxJONoVtks?@!idNs|qWPVxkf;IKcc zsYcFmw7(T;WKW-p2`MYsyaN~U(sV06u3cg1>^=d9k&S9`F8ls46YyJGR__#G_ z(+u{TP2K|>ehd-DK6FZ=oWUZ;aQhv^9dc!?Kv7R4XOQS%v2}6bXICeak$ma9qw!wr z(4hk@Kyn;I*NKz3PWSE&X=PdqH%1tdA?>JBO+K!jL>5QHs|GjFcixMtlADk>aIggt z@g&}(F_0mlu0baTU>_9EW|3gLk4zena46>!3&IlREaYGgAdu zHlv)7L6_3*!O(ijtw6R43QSP)$r}-lD}+I=O^%k$VIY#>T45Q5Xhq#WE6^9z6+@QeWAlG1I{* z0A1H*Pz>Z9t3{vCwQD_|8|BAZKo`=`b?ABnNNc4e9sjk1^d~~Cw}^t_N7Jl%vwgGPy{pGJC4twWXAzxR!pUgvn6%{~ z+`n1s` z^R(A88moD1?eauIM@QljwClEGUni-@-Z@WaTrK%XCQJxthuyWP7SXQ?-!P!%!bOYL z9X$B**RKboqBaAkPQl{f!C$}Ezv*wh6>&U(MHNJ5!-@8CScx%Q)bI0o+n*V-6&{mEAAm!U1?ddaji`022ns49h8uK@G z`8*E&1tx|P7q_n?a@F8BTy;E&#F5u-rC-38LCaMXSgD?^q}4=TR#zk)at&s&mENjqQilksGjh%L3Q?e4 zE7Kh$K%QL=FB?5HmEPlnFm(xW>`Ny{?2=uy8~fj0kNUv$!EgVPkvge0vB2SZ2arf9 zUJwVOhvzIdg)=Ds!UZ%Dw4aq8x6-lv!>ml_BPVe>Ur^Gr{N8Rb`i(tXUBOCj<2!$@ zsMCBnDyQ?=UpHv}M-DhNL?#8&Y|#kTNtyjNVZt^JJufIS?rG0bfvMFM`y!{g3_+^i zzI0g!`2-h~)K6=!DHvuOAArqVbr%tQLRp+<57$Uz8ZTV8~s5HS+xX`iu#^ 
zfSr;2QBZPF!TU`wl00nxL-6Y8|AGb1(CO24t5ZM6oZ@&tG4p=Ck{{oXR&QUrgm!W2 z)0$pML7y>v^5g^S%BC#qjKT>a!OvivKW3=j4yl51Kb0s9hg(wij zj}HbGQv&!Sn#;C7+4-NP95#{-pTQoX%wKc-cz303hYr=rzt^5V-G|D!SF3iVn+^%h zX8w95h31f5*BF(?OZwhf3Q9cv!S&!W>z1-!a`$b!qpb^MeJEa-(`7PW z!XlyNn%0!n&daJ=&nfYW(|5A4(nq6z4&2KL*4-iM)PL7kR;a8mQE`>6ug5#e*OwS8 zXJpIU>#yV6UPSMpNJnJ5A;zU2;IRUA`8-0G;>C1;V;_I=##(s4>^heEjW1|rX&E5# zH%w<4Y_OsI!-Ry&qBf5l_@97ibr!A36;WLAW~I%IQtFF-6(b|)x!Z1QEs*lzvbes%%%*acDtY7&ig%<@PHt}L-7SqVK|UdTwd_YH4V1PdC78UWNuaiE zh|5;fQn-?R+>DVacMtq$4Vbm3_*(@?@CUx3OT@rTMxjHoMqa!oH>2k*tZ5&7`TBLZ zr)O;SgJlQFDEQ?r(f}Gb00X|d!-mKO^Rk^MmN*Zd;elAX_I*?o<5WLd$pBK;-<$pS zLA02L^8%el7io(n3YV@H(KjFg$jltVH!5_&yCQ)(L!6XC_xGn1gcruiwYu2{ribwEv!iuKP%m`B2fXC?@13 zNP5tziV z36_(YXDD|r$cLc{BDvH|W-jT{Lofe(0d_S}!gLaphsgyd6V89Q=S-nXgY`V}?6Q;C z_J=?zfNF{fr5J~Ac=?(L8{kTn2XrI-)a1(HWk;x7h5iYu?t`q0(l+Y0%&xt}cd;@R zM4d=qNx>0jHHS*fop~o{kGV6vZHo zy5tH!x~~72+V$;ox%{%%GcmMo$e?v0^M9^UlEPlin#;HOCVp-i4DD0!`l^O$R#5qk zsd$48`k_vjei(mwH*j)pzx0xl1ySx>bkv{Pu4$-cg%D@ZmW?CB?fz*yZEyIT$8~0Z zYJIoOqKAKd9T~aXX~y2;;jMPqsQwt{qaN3=@%)-YZt6I=pNU;r;PL0{SH3gyU%SPL zHA(I=NB>*jKb@K5Z=8Jc+~mj!qcP^uO``fgfbOK}#HuD%l+`;I+#1*i*m#7x=y6=} zm5&IV0?B@`(1b=#ZZO%SlU3-3pchk_ACr=!dDKE!P znzWyJD;*PC|38$y2UL}J)9$_17!yl;6I*Pkaoam6ilUfe*>=T-1+al&2P}Y;#1=Iw zZYzjj#ex)3RFD=;1XMslz=EABB1#bvgzq<-BIfy1&c=-_e{<>bN@4R{$@>xM}TYHrDIe|0Io;^E- zRAA!cCCU?TjVR^W9ln!zX^Ps zT1oI3)$4MKni_x>bLvI}TRmEKIjxI9u}VF$tPNzttnUUra~|-(>YzzSjp)|7qBW0K zCquvwfP}`pu2w6Hp4hLTcJ^?#?^WMtl~a59ElDkq@3f26%Wzk!R3&Y1maKVVo1u(8zH68p zxwF&DE*eb+vTKGfe}0pEf4Mnmuy1uV1?lMeN)EOcG)%b{mk!%eR!sf zCV!?)zv=p1`={Q;rp>B;lk@n52}5%B4taB>BiYz-d08#ida{0h+)~VJt+npi@9sA7 zssVM@z9+|!vMRFe1BwWfU#)IfF=9=3@zpoMOORa%!_^JKvZ&N44BfM!q|Z0>{lh=9 zHNR!pwC?)?5P~|UBY)KGR`UA7MEU58*eg|l>EC})9n^`WQqL<5ofG+9D|9|jL9yDF ze=6Fegqnz=`ZOR$_tCk+?0s^xWwS4zD$SbLHhux4c^PEh)Yb>uOgCTkrqUlizux$(Oj&?$Q!k$@&zk5o z1_+cNQKxa!IrSpW(xo~7yCJXr-F|O~6oVd8Io1i|2PP&B->lqPdCy0lq{^lxlNfzN z@@|Uepepa4;lz`uWVF;?IGX81#w$B0B7)!XAJaneV{c)uX4oL5zcc^_+!I17<3M=n 
zq9=|e%oz}mZv4h`g*;xT2JkM_(! z+uWXs<5o#V=4nhl8>gn0NT2<@4P(yo9G(^GXmn<3_rzBfhpv7eQeP*?q`MAtu#1_+ z>slvHo3RMv*VDhD;>a56{PcmEF(J13Q$WctcMj=o@bT`h(zZyzC+)_QreX@>D9O1X zV*(00?i}=!Y!qI`*i6=n3&g>lBHN(Hxl4dMs-3MEt%fGm zh1iecc|+nea!z^JSvr=NJUs;&cfL)r{suY`d0PxhJ77>_xmB|-ggr1jM9WBT5r5i>+c%7mlSmMK2BCM0QEpH8H52 zrgC&MMT7GrerTg=@yLq0k?Qi*nk8>**tG|%PG5K&|3xl9_ zsN%ZiYVHv<(lgud=)6PCR%=$sd9{yN2G(T4;H(n`*~xt>AJRCXf>?e38GV(hZNF0PeJY_Io|A zN(&?q4yMoyx!Uq(O5xEapOTRJIs;9mJ5iFg-?~`ycW3 zaVWw<=n1;$2^JkRLFRDuK(_X7#$!^eLqvXc-VJ17?r`puEyPoG}%2dsFRWo{9|ps8EC zc+5QR)pl)4KcvS#anA9C>v3e>4vv5;x_?W~O}@^Rym+I$t2Zpsn^7sdoYFPNdSl7E zZZ*&Qtb`H)tPx(zC@r$3$snICNyi*@8(hzHgG`Ph+usx3@|IjYR0o?KE||8tUKl&rZG6j+sOO$=_n@nxO z0dJ5eF7q?4Udmo{m)1jyY|38?^uNncak~0 z&TTQraD^4%1DmyiL8HbsK&xWXqe`^)p?=Mkn7uq`QH4jJx25fVQojrFZ*^N}V2$QY z6oE(2P3^vK+fL1&?oaAD)4x5r!B|e3Ws*2K?Cv5EOb@(v_1DZ>oA$dyCf0`Z&+(uOzR4DIkQwnqO}EYkvw!3ZObUgiB*)abI=d zGdM%%A~f@!Dt^qg*N@YUZ8VWm#OAY@SX<2U6gtf=thwH3;0ag=!58A?s=gu~npRA_ ziD-pE?PUe(NW+T7?Vr8eRcT8+W_5qwmPMErqpSd6+1`>g)* zzFB!WW(EA)eD*?;D?yMc7t;36?yQn12Hwk@8IDH5HWptj#DSV8R(B2c@bf`_+8}?o zKp2OGaykCH5MdFY7ozTv+z{(G__fbR!d*`XPwhng_w1D#cUbX|Z*6NDyz$Y$HXgbc zE3t@@iUCW+jzW|M@Vw1y-uco!)%S3z<#&TKkLc$`b^N>%Fym>Uias?D><9DB6^E*+ zDV9q<)r#Y@(A(z{Q&Kqbn9+4)<@N%?n$OW#--_px5PymcD&Xr-0FuW2m=5kbc(rBB zED7n>yuMF(bGGJzy?^5KHW^;dj~!x;uw%=9^M4buYo#k5SK?~|Sg)E!;=WC#q4d{+ zc#ESJLz~`{*XJX(RBAj(AN~+6;RW%^shW1|e!-6w2G(Co?`=dK9qMv`0DfjUn_Dj* z4Xb0t>1$DQ?G9;Gct}N(4%MKxnCX2`MCR2CL9LExMFtS8{n6m_Q8G=KG`;)2F(XE4 zR^n~3G4OIKQ97zL-bO<*k8IFJ1nz$=MP~IJ`3glVF1x4W3NnwQ>xa;qvcq3}+}NsOB|Z5k?7 zCKYCae6?)a7eBG85oj%=+is%QH-i+mt1OFXmD-B!q?+OI(b){OKnZQ;{ zh>NK<0#y_Wlc$1bfm5j~oJwZLJp8#{+r1dflhW+CspdP)t9ESXJPxmU*o{qHL1)@- z;@VCsEs)x4%uLInb~fVHtG@i7*Yr{|bRnv7Wy;A8q?vL&dDNJ+`^zKIM1R^t~s4I(#yEznP zF<`hs?~3=9CvH1j*GB5KuJ_}>$;xu%Oc@?M{?A`1@DgH}PeK*I@oonVw> zr_WwCt>#;uORn}&!*eWwh%7@q<)vr~HSC#AF+IQYB0@0PW zStk#~12L(&Xiss}s+#7~zfAK=W{0dESJeW5Vw(v3H%DPWFa6*GwesYYZWRCB?y#ke z8nx&z?*7d1syn>ZfqT)9}c?lp~I{ekB~GAlg)Hfz>7l^q*TMeUL#q0w+4>2ioy&8x9nT9q`DX_I91 
z&*hR(h+!dWb*bRDn~W&;pvff_;o;sjFUhufl|$!LYGiWtChEWYO*1j0+rBX^QLG39 z7-vu@+51YB)H%O0DYU{T{;@;qe)FzA{p6b|ki)d2F1X+|(fA1Sh>=U#4cH-vZL!YduR#O~o0Gxk z_!&{5Fq@u*!<00BVJ)-&sdj2J4FZ=x9;{;?JFa@g_#ty=l&<)6%R?pzW&l1h;9dnY5kalDJx5KHk zJRqMZAxza*#H<;{l_Ne{Z(er!u`UsVy_g~hlrAbQ2;_^)9_eBPzF5(R=9N3@O=`O* zlDAPzmUHI@4OHcE3e2ed5T^18(e)R8Bd7AZJNx;74aNOol_l6yMvou(c&{uWEX9|< zs_O7~pR8D_+XNY|1aN?s-g6Ev*fmsFem@b_9EhpvuX8OcJ=@HV1Yb7Xb}Rg6XB7@y zc4I^1VS||5IQ5Hb^s(~7o9JwADFG5z& z0$Zp>VQ?jHtNCruZdKxs;XamxcgtA~akHo&jZsrHFc5lnG&YE}tZDh)7FRytktEK` zNytNuQ>R~`lZrVi*HN14E7l04Vf4jf?5}hj=X8W6rnc1;hVm2#OJxN-JH<%}0y))$ z%+C8NA24EYBKSls5$EOs52{xBg}$G)6KY#yoE-`#fANUlR-THk|4bg$b|0sr&DxYz zD)p2zduibEf`POxeHU5tHz}%qU=qxeBu5s3n=SaCdZAczu+9`CBUd0m%{MxvUG-B_ zp_#c!2jKrs5>TtuuU@^9b)YvOnrmLuftngFs=QR$n^rhv&lZxn`gXv43O68Ze9hb8 z->nk1I2VqjSs=ZET7-Kah?SqSQ7iQq;4*Gc8E?!{K}3?0~ajQSV9>AX&bhj%WV{{OHR z3(Tt4BD88PVrfO%d%!90?t57bEME%z%NL7rzG^WV{la2sumA9t&g^dN*<5uhWrA_v zc5!#P5IeXBN!xBU{FC#J9hC=g>7=lt(A-6|%jrGe*hJ%|1xpBwgXcL{^XBVzuDbbu z!`3ddf)164N^>%VXBkaE@c(l%TGy#u#QW{5?g*n8WNS4^GL$n8F4(EpS-7E^=XPmk z)!pB439Og_nRA;=pJ!g)#9{BlNW2Ma`@hzsL)FqZGOk(=4}Cq1lf(agJsRz-OI9j)=dau(kpKcp!^m4$UJ;5<}y% z=us~q(bf|9?x7>F`5aBgQe)8~KE_u49!oN2abziUZ1Ylu=vJu*K3UhXi^sb8k`F?= z^h8bTnKYwnpH+j1D}}skGEu*uYD)Kg+5nL@q_obQJ=?toba5`MUOw$hoQ)LDMi9hd z0tARM8UW-f?pZyycQnL*{Tc|`)M0+*p+Dp1O1JyY(qgeVIoxVTd4G-1-+cIlKB0E=)u+g66$wNh`EnbH9Ts z??&qtsVDYKxM^g5cps{uH8*Sc?y7~~v-Y3wN1fokyBxNeqVXhvbnzru)S4vEk*<}i zH(c#=`N?3W%UNqORtS@*@*t9j{-2IT@Fjqg!vFkfU*A~vYDVz~$-@58p!aFpVJy2W zLyxwGjDAH=j_Y?Rx>mh5E@XOF>?<-M-jGbAI4YpwgiD|h*(M7nIc8{ulanaTa4Iv8 z$rmW3$4q5|qWeIE{+Cm%#>V7>U5AkcHc8V}m?mgDmVW3-HvZJ%HM1DpI+;Y``jfIX zM>c$S6U6EvKi3W)BV>Uj^WpRtUBclFct|Ll8@Dzr|8U^c3gS+V;qYZ*p^`LC03W(< zm*_&mMcnonS~5270rRcJXi4_f#4bg)M64Baaj%&hFQL()Ebl(|7 zt7{1qrpv_2SpE(D>j5R{l+}+=r6i>gq%>#U^GyTds9$o2pw56CxJ3{dkJL@+o3-J$ zcqW6QQjpOA0R{9y$pw)yK|^vb8hVgbA}`aAxV@NfJ_N_v2gazp#d}8lb(QogHug&V zM*&%UMB&({ttnv>4i6^OgT$GHsN($+DEn<=|N6WIkm;_i;%!UNM2t&^U-J&Z$;x(MDhBLPe$o%(us|d7v_Ug 
zVCj+fnii8SjPcGvFaGRl95P5KS5Be_*}wJavp6bIz&2}!FFTrbz0P&sPXo^>b_I-Z z!!mS~!WmfcS384YsMAfvH6~EcLvgDPtMehF?9*3@Z8qFj0W8JQ6CKs~i{2`2D0m5- zC5sGEU9`_$@(b`;MjUjtWSWYL2 z_9%<_j&QRT-zc7yUm!zBu|_k!SEr8$kee|RU=17+_RT^uNy?ytKsVJMW>~RPb`7H% zR#3XVcW_rpky_+Tboyv00Cv`r8fP{};A3kSGWENcEJJe!y1mQC67?syhbzVaGB&B=Mh$s$AJ^J2IlXSj7-{*}#XN8TN=* za>sPrbM{Z$mNKl*r{E#&dP~Cm?_&dCC*E2Yk@*@LuOVp&!-_T~zoLasol0KG+vsYR z*P%S3`;%1gJ-|Ir&mFH&oI57`rLYiUIwjx`KfvU*?Xk6;Uk4@|OHe{ptGi$fv+xWL z`*^`zM07pcUS0R@h5M=-Esbb<$#_!;pDwv}kYC`FM%8yCB3XjOrNgDlz%3H+J2xwk z9`8i`<%wq#54^cRlCx8B>cGh?Xr}cpJ)G+W$>3qbHCbF`P`cX^_4>j7waA&+!k*{= zFogmYH$(7qVde8{$1W)DZ(N3xm8yp~7?{`o*T2J`KV7dQjtPa^J>07HDl0^uGDwyd z$A^+*+DjHT(N0sBQt_}Ro+hgrDFRKQnfaio16rCCJ&EGtStVtKeoj8Nr8z5{>`x{e zI~J$8MN%?T8t*__>#AF1pU{GM^RM-lp6i2BF~!kBB?ARhGmr0z6@$8zlP6iH@Vo|{ zNWBY5FA-ua8C)6&`T==Vv}p(*-}La;5uGy;-;NV5LLQjek>k}+!<~_HyV=<3!}Y)L zmiyl86{T2POVWZs=WNWJ>h)HHb;jr)X|7KY)sp@$USp4EY3hq&utB%USFD1{PaN-x}m(F&A` z2uZStofWbts^hM4l3k@P$}#Qf7W@IdqDH>#p3R#Sz4DKK`q&s#V;1w06s%)bV0rPawwS)>EwqXr=Q<7)#LM^jh7`n)RjH|5 zDKQBin0*5{I>n~8;#>fZB|OP*G6i|ua5EadvZ#RPllD^=md=h0oJ8rJ7xAp@q%dpZ zua|h1^Xdzc%?|qTV*8S#pSdgNM4>G1{Xxf$Pydvn?3w_fDv%i@7bjQIyJh8WPG$_L)MFC}_ff?khIT(p`M#$gw60(@^3s zjeQJyv8_iuPQ-o9LyI$~PmiV_W&4CWrcM00t?BTyCO$_SssvI<0oX0r*h_1!e_rgF zy#Qwsc3uS9EsWRAQ!k{Q+!oMomF;smouZW}fS24-$whhU`2pnz>^jiXDDPzQ2Nq?C z7L&44$L`9(&Tt*~CGDk+M7Y9p~6|s3CPgWGK&Fd{49^GpWHP_Tq;nS*}Ng%Z$Nh{LZts zAy|Swp*IY|%i=N|JV?!@V@zWqgxsmib7=OoZeR`;st~pb+E!8yCykESpK=k9ptvdlp9blYc#gz^YxbJt8)jqGRHb*zyoecK4lc? 
zw{_xf(-9o-z|6C3au^Srr0>W*4!05G*;^$WMeAfi4?m~B>$W&&rWbe;k%19i6cOF+ zxjdE*U9b1P`dUqcbE@D2(EK#--TT);)8Bv2Aet8L zZP6B!?j|4`Gp^zZK{IZH~7cuNJsYB;k2A4kIrQ+*uV?@;EoixUXV( zuSXmXp~D4^*p?M%=%BM(r=_CnZ3IlDQp+cCD2dtEB*#pB-c*h_=OjfWUwAO19e1ct ztbYFO()5Ah(X=V&F}rmz*h=!)mc|A=3K-LU>WmrYq#USADl&gh*ZsJdmyq>~4%_&h z!7Zd=?R;#9t$Y?i&iD0_oPLxmN#qpM-pwyFJ3Z42Af#;T!_TE}S2J>AHkzVB?_ZOeoE ze?53zT5>(@aNxYw`43(@UhSP+Xcua7Dl#Kq%i_JGqvF}O^t4Qdt4c*ykL>K};lJ1R z+WD2j86{F98P`lx)Xvx8jE{4G@3xat$%RhU>rwG9`Uiu~aJZb$funa#Ui@5U+x_<2 z#(wFksV77YjV5Q|?@4ubLaj2CQ0Em*9VlIjbX~LFb7rE}kzuG?OEKJ+iPJ}^s?UUb zpNbF|lX`k0HCLbPtJPtOkSr;i(X+e3O|l_sZ8P-qk*;`>md7O`w*8CQagf(tR5Fqi zxkGoeQi{^xn!3U%qV1N-NMqU&W zcGcIPLoBri?aVY^P|j;Z zD~e2;Ck~i~8=MM4ZdlZF>S>qV;aJ$lF_X^C?6<&?chbrK;eroqe8Whc`s|rY+EC;5 z??x<`yC&(Wy;G+%kkLjUPsPI9G|`e8Po~i1EZL14_eifr^=Ga%W#p46E1Sx+csfmF zx;(;6J-T(vI`2_P?d&UxdlShNwaX6s&r7cBd*L2E>xNq*5*8qlV)v;qxs!?$6<-d9 za2=#ce@EP-vf-HIHiM=ISuK*$- zg=gvHzlc--Bkj47ugySNSD^LSV z#*7=+|L6bpF&k=pj5flt%n(0LD&{nYEt4KJnH~-k+R+`l^iuNu~K|}){V>+iU z?fLVgc*a}kIU!vTK%3~#^A%BtrWhKoMuAI=5skx1ToPHrOr{XtMJr>6ZN2Z>xOjSc zk_q)8^Oi+kYA@p?zTNZ1ZbT&Ps@_QF{*1PidoMM%=x>bH5!+Ds`hg*bY#O~Ha%qq4 zRlK!9&qVtn|8c-WMg-o4vpvOed42jZa{=7M^susZQc1&)mA)%q&@%fjK^G-ZtI10rg7iF`qlZK;o_mNuYMDB2tQ%LcT z<_HDyO)&@i{mNO~FWniVq0y!>EOcu^^)ASRMVY-=ce8C&_m~O zpG$M(6K$IqSX2?w#S!dCB9csx!yIO-1^YUX_3S*Mt|CgcBCObp8{bP-ZaRgj&04jx zd@;4=GbvmyPpaMu4=d*83Hp}(-3~q{Khlb^;uWf_%OP#lgfmd>#$w(|N9#UzMZ^S;WGB0fBwnDT+Ns?$p!0s z0t^Zi8Y4ggkw_XD^CAl6&f@HC7>mZhcyIC$c9fsqJHKEQkoiNH`6Zm+ zs@9K^q38H3zeFu zT_A60c$!)EGRm*`-vKuFel5e68`A>9>ox`TB6@Z>vo3@q&AdokGnhhhLI+Z&# zVvZOlTs5#c6L?7rEFl8Ty205vMil&tqUyZ!sFzITWpLx? z$O%qF*liC+QF(@TK#lc#n=0al+!%=`r=m4m1zAx@ks!dsQvMEaLo(aL!;ZRx@$TO! 
zOvmsGTR9`X10w^_mrt9uM6`-FW~pO7dG$;0k*uK3GTH>!j)*($k9{ISQ{wM8ZA5I?_D zyV3D@JG2nUFO6qE4T-11KD=l!H^^!PqJX-jWOYVSlsg%fU#RFa5MUjyhc;=_q!1oY zr4}_){`Oq>WLtWpA2mg9RyB)K-_4u)@hW-8NgZ&mOfdddYu6q?h03nzjSoqRLxWo> z^rs;vYf%~=hLEyINR1sE)~TVQ_%V-(u8l8IUyR4l*-XA=r2NToZL91|#S&TdvNo)T z$6m_bj{JEu8`2GwMha&-p%08)9}B8bJ~AAofE>N|=-oeMC82DT(lx3tDlyAau1v<; zO~aQDV3feJ$HCJ8vO z5-b+5foEycG+y_-ZZG4InUtD$9`}u6;x0ntY+$qc1boeDg0apMzEVUrLM-wWoYiPG z(6c{!CB!{7&xv!2pkC9TL=J($Y|3bq@u|-}uwlcmo2_Xo^`)lJ>*_G%CRJ08{<5in z?1)!ua)jsCuV1fJfjN`0?g8!zKs)h2cTEu9cY6oe_hu4ug-<|P$l?ZKyF1#w@?mV2 zBqq}u+D=1d<8JSn{ztyu!}FwS;dkzFO?^lg<9J6RJ+nv2ta#KLN28e*ysbqaU%F6 z1P<|k>7kWbw*$JKLVEUpNx0L0Sk^lB5gZ#eeRIe&9Ks$s0!v_jZp;)z_ZMyGk|ij5 ze2W=-{?7fMo@czd6*L*X>i z731kf4b63>%dWcw+!ae=*^ z-IKM(V1XU)si(TIey0*0hE_RH7d9^?DyA*(B5Ib!B_-Zsex{uGQ{qWng-Q*1+;-@rlL%a^uYX(14VB7wgJo)aPwL`3W;mn z%0)cL@H={BEMd5}?b?;hbL|t~^AH_l%-{*$I18$%fBK61{y#%x zVMCufm|#yLHtgkA!@zl9%!Y2-yxEeQJu7T|5Y@J|TModi107m|S>AJx)VSkg?FMYed9iwS_yw4Pk)A7qwB-9m?lXsiUt(FE{l_P2U2w^ zLbw0kxY24;lQ(&Vg#+qq_g|heQ~WNLZ2-Nu5*A=rWD`(U&9zSl{2Zf5Yb;p@hOQq_ zTCigEzxn14ebo}I9RE>r$`{`D1|OuD5OE7KKRnD~7J_`lQrD**`|aBW`p9fUt=Z9> zJ?+v`qecwf1;%L4!yp_MofXc(Xf>Nu9x;C>Cf=Jq0n101e3V%Tt=?rW?!p(peQ{D< zhG-)9)EV5ZeKr~OOnUXYP!{=)Q~%c*sHWT@aY(!|yM>n3DG2-}Z5SgfahGm_RhzB( zJ$^u&#oTi@J!>y?)6WWZq)yGv^+P1lEc2A6eXBV!byjeKP10PMy}!G3tB3BCO?&t~WcV$o}qUx!99a>o!F>+djqN$MI@}h_nV3 zz4;KjM7yJ|?sO>=zyc^$gmg&&GRLRIVi7Su&46rhT4U5Qi9zbAv*wg$GvFHI`CITG z4b=-5Ejo$eyvbzQE0++jRH=alxb=Nr?Kda|ydr>dhO2%IAniaO{ztM!2|0720gulm z?c<7OHquliC;Iw6?KOP2o7-8KBS;!cL9+L*9srJck>H4a{CF-L;43I=ys-JhxA@tj=8LYBe47bNdV+YB`IOVw zuMd&HOisqV-CrEd_Q^+qvIip`Z;4b!$7|a=HSN-=Q_CFtb~odq9_y&p>b-7mZf|~e zP)T?af_rjAKLxkctaa-iSrg`MVYAMH>5GP*`fckEZVXzcc6i>LLqqK^VL`MD-CWM& zW}d8l9d2U44cn`Pdzc9Jyt#5bK6EOiFAJ1-u-pVCoRN>1D$D#1p|L2x-H~0?yy;fv zsEAm{XkkjDdW`Xd*EcfW^Ari)q46KFopLR9T^xnm3uHA$J?yBhowqv)ZyMdT_MW*M z{ApAqYFRXX6WNxg&Q%6w|9-l%q2%4Vrfe9I(G2(~x_k%tF!Kqb^B#6}ANsE<>G?wf z`ZOTp)~#D(h2$kU7GL`5YlVjyGIr8YqC7(pXw%b-xOPD;Q1SpmDq?g8Gk=u>z4U*f 
z<%&c4WXprx=0l^-V)a?*?fMH-xpg<({;ID%daYLfhTndxTKhOQb^>(69fpJh+7q{= zx6)qtC+Zk&Vb{-c<}9Ct6#ts6KY`n6h#Jl21(f^j-?AmLPVyGyNQpXI?u3WT6eW_> zzlbft==vnki{rxDvSu`Tr*H;zlv;^d4h!2mRNw=a4g zWS?9*gETV*!0lAQ`FFw)_933}!u;GyNzqRTVleuS;_1_;yBZf=5!GZg0@rU|z|Jq&T zth!G<{dRQr*u7ofMYS9w|J2jk=i`&p(VSs*4ykQYb`81G&Z0=OSaSS6P|`3?&Kq}j zH9*Sd<%n%fCxbMEp1?c*vVZ^j244*?dkZu9V-7aLwePoz_I_APue-ynQvVcF*~IG$ z8pb(gOP3!M5x=paCkY*2fzpG?Jn+){PZkypo3xgTQWCqJyTt`oM#seDgyjRUy7%eh z*K<+S4wT7VSU$P(cra~S}Gm9OAh0iQu(^3gq#N)^e<&lZ9mr(%Uf8r)x-&?bXSqWhcaa?I0K_RC2a{JAEAe>&bqnb!4Sw;(+16{Tzitxz zs1Q&@fAKMN1Bc;-l-**I@4aNEZs&mmFGmKSQsNKBva4@}mFn_l!STOwB~gowyyLRwfSm{;iGm%9TeXOWU~>>h zpGXzmew&P1R)mUON@~CRN1P$nJ_%eWih;z5XJG?01g!&#OTq>y&tNfLr3790Ho@!( z4!7A4Kg=(nQtfcs&#4YS*nVs)(F7sD5C<{{P zYKoLGP5-@i-ErpuScsAqSwvs%i3x~vcXw~vrp?lV*8LYBfq_W0 z`TdIxQVb9Kx5;-3mj+F{PaVy*N~BkRHLARG!`2&NB)J%S$Iv~}puzTfzAGT*$Y(cg z*6d_{MqH0!!;E|P?%jR%rPp z)dhF#KbW8^a0fI(;Cj&K!ccvElomP@=e^<#+%SejR`1)F=#VV@8a}x@!f0;UP)0Mj zB%GG{b9ziV;{2`<6?C7aX}2Z5Xl*X{e(e4tYEe$Js9lBEj?VjN?-V|I3WX6DLa<s%jEkIXp4hGLm-s>!X1E=0#d(ew$hJE*=a-TO4<2lm)xi;&@#BjlUf$zr zBg4#%r~1y(W6yifUcw}uc4Ie#=)^p@dX#fo&DIJ3IdN?acwl+U_P4XA$`fnCA z6@lZ2kTLTE;#ROQB(cYP4x1}%%ae64cl7jJ#Vp3qx7Qj7^QJt5E{m`T1dR*azlT4k z2xu{BCh>u^;H^e^TzaYrG}THrO)53i(#wYLYi*tq+FJ#l&eBG~`*GKhmCWk^XGoxT zc1}xE^ZuGcuQ=toY{LzpjluRAtRNPble6;HaFg%C2MNowL1g!<4DwsgM#Ew9!oYYDu zCnqxQ(K8KbtAPqC4bvlH>ng0+9&c}Nr3H+GbO(D0kyi7o@6hU#&wnnyE@fDRfXB<> zIZpM6TIok&!fAp@KIm3KOUmh!p#}abgAhz;w(!afjCLWihChCSr+vjuXlZu-V(H~1 zDK7Fjqc3m-z?k~_^*J(P3oSnvhHD-ns^5`%rG0aMvUrlCr%t3(TPLJuuRi@W+i4)FVFOH z6&8(t4)qBj(ivzs*PCBt8ZeJ7Zf%)Z?VZ>4C)qM4A^h?;*RD71F?}2}32ie&s87%t zU(QQY`4oGvB}+DvnXpD|tf3_0Vp7*s)LP zK;>!6=8G8lB11*Up>-m;ul?vhRd~)8pELK0)GHe|ZtU*>A6wpJldG$dSGojm<7Gk= z44mt*4CA`>n?}72CP_ix*ijACrVIgS&@brf)z;P&!3-ecRmIDBJN%#R`|@RC|Ngtf zcmpOJB}o2Gdq8e@-5)7NJO}x*@T2?fR{HDwC-41PJ*>RcI>uEeGIPbFOXFGfDahp~ zQnnoFIPA;AvCgVSQIJCa6J=ou1t?#*Wd%Yd-o6(CO7zM?CYU36ZXK1xL~eDCw7{ROh5;WM#Q}IpJT%`88_L;MTnm;uN!zhN{6U zpEv|+AhG?MqaxLCAWH{;>qp(vgUl@(H)c{t7klp^d8}FZ 
zC5`s(P@6DD!UKlQ3z%|pj{7aSJazwdZ=a3Js4Cg0+?=2mpI9Qj*|n~{q>iJKw)K2W2d?t!^9Igdf#mf8kbSM4KqfL8s$x`3*3(63BBTk`_R}# zRGUu`CU+xfyIxGW)g%^UHL9wrcsfX?ofy4)+~H|7#D&tHVDVmk0^5D_#*NMiu3zrx z@HdMrUR6I+`*OK_IP#`Vn<`ZTn+i!^oR$0#nrV!a@gZ|0(MbIU4H9@h1K|r9LE^3K zrmZ~%Z*;>EumNVB=B7h4^J!*^ON|p*FE^M_4pbtgwTZrSXG=)Uc=pkw|CCsT1Fee3 zufX~g^$ZoI?{!7y?7E! z?d;R}ScyHjO_HWMF~sv>W@>7|%=f^(bq|p_&m^NiLt-bg(5{2&;8^!peL@uRu-K?O zK=6Hg_e#g5=;TkS$th0J>ii;{fjE}27@NH~Gt4o8ZkgQ2uXvo^I; zUP?}e46afN9MI;6MgGbvmim6F8#+tElfIn?O@n+K>kqCYH&OKFl-hzW|sRN`3ObqaI>HpIHd~ASEE6pO26l_b#t6 z5rjr&$7?6|O=}McElMUWM8kxn$8kZKxTM|C$50qV6^2qVO>L=$&57dJUJr#ve`7xZ zTIV8D79`(-B+~{V-~ptN3B>;r7C(9N#FA{$C6ny{COX596Mw4W=>R&M6Iy-BHUBdF z&99Tce%1j;6Tkk|tdS6xN5mIVjKIH63Vugx+>iF6k4j;=<>nH1q_7SSTp0B~V%vYr zCm09zC{@t)FlWc=H}83H`7U-hONw-CGMc;uSsdCrW9zA(8OR|+-Bfn;L28$NT&JGk zp?`*Kk4gIgg9B~l}@337XPWSJ6)>-S;6t#wFD#s)H&m%jC-=nJoXb-I)tjOqtBk1%6MVY zcMqPlmXmgYlLjmrF#hMwN1$0uD^)zSoONjkg)-iR6s?BZU7WvQ zLGF+hfN@G@k_m;iu6T0l$b2hRfYs^ni}09^HfG0q#o)JHa**IAQ*kbKz|2wF4*~PQ3`;Br%CTe?29g}JbiDcIAz=8}tM%Z4v z_KQ@3QgUVey3Mo6C`OH?6pYyYgv3BEGafv6aE4%$B#oP-_gJP=Ee!~hqy>89 zRjcm8Lj>9tT5>p5>dFm~p~9fQ&e7i~g7$HAlss1SKUCbJ&<91)bN)%8$wFL6gjUlpPM>=MtYI-c-$Z&q&OGzsx zW?Wc13K}_LyeDLUzH*9_5PaU$D<8YYS^T0rt zBc?A&}680sE()0HZzmRpS|q@OUPu=6oa-V$GU1J%Pg5 zbh72saa>bb^nG0?|AHO8xiXh6Xqin|f;sKKKpL*c>ZIr7_)xk0WMIb2rcl-EXRX2v zctp9b<-{%q5J}wt=CzXwlkOsQ`6Lieb2*HqUbOrum3mz3u`w%Y3pDPEg)AQO_-^*og*V~i&Puc&Y)!m)jT5S8D0ln+juAN`9!E@JN z`dttFUN5z9*7m^n`H$_Lip&l2KgH_bcFVOt#cIK)s(K^tacp~%)7uU0POy=iHoZeN zVByZ8@9)3H02uk3{;W~QLmcrIG8ci5b2HY4m&|2&!$p1C!1dnWU!YN)MtclQE?w|< z{qkv-b*F&lS~ycb&iUukV(W9sk2whlgr%$r-?h-8>#u6OM~}!h*0;Y4O5ZHvO_~N_ z5u{qiMTrLTd%%M&ooqrcBj)NhG?OPRvCI#+Eut18 z_CuueICTJZbm<~PE}aI%d>7!`0OMUSoXdfWdvJHB@b;3G$@5v^u;jv4UVt&%27Tee}tWL$oUFW1?TW*P-P% zFNYQSFbK;2s)@CE0QqOpIT=f-TRyHTgWN?NO1OLI>P|g#%3}=4NfDG6xp7hY71=ZV zQ}fvJ1kbX)J^Vtnqd)p3a|kd250ysk{27{fN0j_zx<_wn+h{Lco?KEKyEbf(G?j~z z1AfI-=?YwxZd}n3NuFapo^q!{h_PJL^DHmT$Vby7?CnPpSzxZiL8WMX7+>)mFG|ST 
zD4h556}@XwztRmV-#rSwS&@PvgiCo*b~%aOk^}N&K%h904Wq12#<;{dm1Q`I)QPAR zY39`!eE7R@UQnFF@(UmJ(eMyGPo^1bgOqL&8V`H>L8PZi@@z_e`svNu{Loy7KB_#1 zH}p$On_aZvvb5TB+VVsQ8S+`BMpj5>jIwO>=$LSd*JrMQNZBtpp1ul#9+E$|xTx1*=fSU_4GdX`J+c)rQ? zNuY>Vrv3z>xS>1bs|L@GpCFI6DF?Wp#{T!2H$d^aK09r6|GjlJ&Nf}}OUol)vtv3U zbGJtcASpNN^WjNz5yYas$OA-^fvCRdgkLThu(q?P0%vfP zykr0_EFH`tS`yxyv>Ue8?TM0(;3&sJZ3&hdRGG0NBaLo*8W?azV;FTac|md;y10Fb{(BAc<5AE+dEU9GoD~~C z#o>D<`0~aXV!1?^3Mh~!ZTSY}#c`UMBUX$NO6y@#JVrD|MCS+xc@s|C;NnxPUq?dZ zXzH@_amg!kA9b~a!xphU(Lw&jbQHsu_NB4V(b&2CEzUArim-Q9* z2Z&9PzY)pqW%#IA(c#!}?c}8-8A3rG(j2KvQHoSA#GuI4TOOR{C0*nLtlB!y{rBoT zM^zs0Fr$IZSI+ZwOCM)k4L7OGHW9uI{iUU81G%WM1l>Z6DY4{G?x(O&blD$p-K=2} zX3QQG8Y&XBE#|EJnUHo5inXgQH(0dJX}CZMwGv6DNrCU-G7KQS>;nIN0>F>)EGsyr zj5eox-SYQ_tv~K6|FCNu>Te|2qy0DpwAA}|UUAe7p`6pR+F!qlL7#Hp5&m^a|Gm$f zcz*Z|=JwKukj8co-vUuZF2FH!)qtFTnoPWn!25ot6lIH^&2}woGzrs0l|uwPvEXJG zN|w{ArcUML`tF`}`5p})RwX%?SLBt9fek$y>Vb-`y3qOsAf>lmEJ#oWI+z1dgE+L? zHft{(6bVW@2sq8a5Bcgw>aO>L-8x9RXT^b4BUZKUz1(32#?y+Pr4@$ZK%m=VwS8pp z#iNiTtHC3#T+fj?kAmB;g=4gR`**#8mIhp}1VS{QH)~m1F?(T?OK?EpG25FsqI6}> ze@%QSW2{gUS$62713#sMe3s=hbiK@_?SI*;wfG)atntVkQXFNv4(YZ$d4uhz2HHHd zICT=VGyCz1`HmsCO4scwe&^9v&w#FhO`^&tl1fK&c;Q$M_`qVnQm(=U$0C~7nR@b-B- zyE(u^D@sIHOn#dCB$DfAh`t`q%IEB=B_d5n9Y)Hz1Rs9%bDsRm)rW2D*E)8}jX9sw&$sJx)S_Ak&b5l}^eE)?8u*UZz4{Jg^)B1q zEf#+&2&Q%5itJr%|Labuqx%*=!I5g9~M zF<5)=Kr(FW54AWvk;{Zt%o>?PE!0QdUV}ZEMRd1l9k7F+Pbr-lSA7h0FN!&4s9N+I zr&%QJLUX+4vgTdvIk82!Mvew{r- zg7q4N??Pit#M)^tk>jXidGBYFpdldY9zyH(hG9p^l)v@x}`f?TL{YX`)L1Fu~#SmhruI-d{QS6JF`^ zl12mOU3b|ba^?Q&!g*g49WucBU){p|HxB4aE&}9j>KAyt&6@l32o+<^G!j#EYITmT zw<|}f5woYre*Dgy7}o`@!&>iLgS;=&QA;!4<$s)yPdP2>Yh?)Pf@ zNM*TnY50)HOOaQO@$@&A-CKja{&MSZ=#pD_kI6pEt_Ot%;#2p4_Lf;2(#tBKtyOsEh}g4q3T%2C%S(K_NMX8%42#MAH&hya+oL*-ao_ga zMb676Vd-R;n-M*2vN0@gPr<|plh4Yz!oh2HX<);8>%toqVOZ8)u>6pcG3-E{#`=9> zWTFH*_6phHlgy!DW{<<6k5^C}Tyom2R|2gN*WTm9bZt{0E9!3#deZKP^FbZzx9+We z`asmVI8C3LdkX#6lY&K!Ej@+$5svo3;>k~ZUp=0*xz2Zkjwq9#s@wk=!`{d=1?7Nt 
z&H+PjH2XKyY1OLndTDzNFO3by&Y#|0_famX-u#?AW4ariu7*Z($RGCpl=RR1ocZ4k zEx6Ol$p4cg9Lj^`x#x?|dlbLZ%6~a@h@GQMCt%c+(Q-m3xluqm$y)ewie(1Gjcd_j z3CIdeZ;(c4GHp5PS!DYINBY+C@1aLhOZqN(gT4rZIZ;-dh*r_XdyBX;ws>!Af=8uF z+<+`qC*SIr$TUN_x0YoD+ohQt8QsP6wy62Z^e!Nm)4cM?Veijhdta|tya`$Wcddtw zra4R0@$lJ$VG$OD6`bs|?BBMpjfcJ4If3(-&pRT{ zgk;vd=OsJ+Pd0cc-+bcOvH9Em@_$}%0R^ji?bff91Vky*)ly!BtsRMxH&8Ft|9Z_x zF`DnmF!V%du1@3{fUtbLa)6V`qXlznDX#irMZE$7v}M{DsN;YP=7Maw#n=riI*!sA z6(jN|mx9Z=y(!E>MBH#}0R73zY=b@T8L`C)a6m1?&n>u_^V~6VWo2?44o=LEtg~nt zp6uPg`S5LOK@XFI?xLbvFXSiRdN~S3vyD;<&>u7WPqXSX{n~w__V7ksd-S+PymJVt z0&`{owA5WX6HqYi{!q` zl~GU{Wg~BsPu=N1>eFg*Ut)31^kwgyM_^64b8-(wr}A#LKKI(_^76Sc&C6>+kVqqg#UDPT}0Li~U2oqbf4Wg5m`thO)vGPUJ+klY3pOen2TQR`G3 z9aL;}g@{qGNZmjQ5L=25&mOU3da?$AOpsCu`4CA>G!pP+DH@ut$%L|g1Y$&DkOGRh zziVKS`m6u9`3Gk_@Xq_Z&vSoV_kCX%6RVI*y_UZ|u|tWjqyep$TLF-AKJ<*)E-0#_bnke zE{7ynfh-IseRI5gieJk1G$wjXY*~AxmW7yV6$v@vW^w}K=AxBNF_pDfMoN}&(c(^G z=ok_nx1h2!H&E)RX$28}M<`#1|C>k$?|1ONuyi!u=T^qP21MFIMROlBFJbbrC&>-?mT3}37nl& zAeQr@=eEYz`<#};EWS`pbnI^}2re%C%f;G_O)RJ%2g_;_ksAGX(Zn^ca))-tQIefk z8B|jp7jS82{)w+odtSqhn&LPe(ce72Uuq@o*PD{ESO#&+$3dod3|2^&CDS$PK*F9< z;JQ*-Db>FZ*ol z>W;=7*OH1pCxk0^dP3f%J@bfI1)BH;c^XB7)Yk;GmMXj!>sWF1$ZWjJ0kl;nUb^uF zL$Zh5b26*|8%i|IR=~76LY!Cla}%5fpGgpXBT{z5%y!jYZWxQdG;^;qL9m*wm)^v< zxfS$uPK4GE!wGz=KPiC^b2M3NJs6iqqYn^UGtXoqoHmE)F4iH)X6W%p3RH}ZO8sOg ziBqN0>iCv*r%#;=r>$u$14&zRgWC_BBKjuWJ$wEf))}-_z_+y!4V?vW#l1> z(&~CTp|>n>l|bI%F+Z#_Rlr=khI)Js#cby>uZP0Nvi{Bohf0dsr*S<<6#<)=SS;u%(A33H#b|b%ObW7Ip^oJv}P9ZmxvvSvV_5y#fB~hNx5HPgn>S# zH_wOe+b&b9vMf2qIKEuX(PE=$${1))qu~%VnHk2djj@_hAI1?Xaw_V{PgrCWFt}PMdkXoD4P~ zox{6uTN?IQEW*y-i!6!Pe+rq;c-;5WP2_GB{0YnRA9Hq@@Hjtwg!i~47~V&$+A?jz z(PCO^lV8DWT-h7xZ#|N5F-CSJ@t2h$l6VZFm6cmbF$raw_-n_9@^Kx&1c#ZIwNsS~ z6xz^ol6&=S-7~@N9)ahrVE{nE-%XPFZT;oq%#XaQl<29Rv+i<2{dI059aleElBkJf z*Yl{*#I;mMKXryIC zML=>`#8104t`zWG?$<+&_%)gVP)$j|43u7DIVeJ+R3vJ`%C$=5i z@Dq769@;x;ztqF2X1JLZdBy|jxkvdvn|*BHPqdNRPcbCt z`jWYa7-l+f?Mv6r_xK}pILvY1K~Ch(@bF?fgWjiFaXhf>>UYwPW8!1@tlTw|K{{jL 
z6?HQ!#z6{r!@dD~3pWBA+&{*~_at$7L=fqU{63+Rf+>;vkN;(#2r1Gxyd;dPpf{w%`zWeO*LAQ=FB0k3S*5m3CKwRhTCFm~fa-YUTjNXhp!WOVM zm*4C1Qg2^&&$G^k&9f9P?Rto1zX73+#;2rxo-uM%gsn)qFqyzX0&Gh|v|%h6q`oRd zSVU;d`7xQ&+g<|!tK>H}yld7qY<^j)Kj z*N$na2+QVS#=5kzh*tv-GG}Y)8O10soYXa3RlaBwZhAL#=+d7zRbW|KlD*=G+-Q0F z#>{gEjm>xJmsJ;_+&t8*D3HMC6FP0&3i~=9ZDbxsF0OLIi7A#LhHDtuu^xazV@!^lS*=&vfC@CRsVheGY$@AiPz1efm6`CD zEAEP6GIm%{*?~8Ji$p@+Liz0pP_B_t$wtIx+_CauO*sqlmOg@&AnV==71R1UkJ_|v zK+my>U)=rFAAWoAn+sG`mC>Tif))=vH|^Mo3@VrPtfntda@IKyrzMeGsgkM1|Bx(Y z#GVLJNz-Z~gRlp#bnAV>O}G`pDAM&hv(dME)Xsdut6>iVyDe2^olnw0GJMC8OdoH6 z=|lmCeO<;qK?{=S+*G3^i=*eMO5jU7Bi0gki9>sMqtB0@JTx<9IR)8clAjJ+iypYW#Z>x{HWRwH~D; z?0&Ugx>(%1Ru+zti`^&9aNP_ZubQ@KeOC?g_LO?sU`G2m1lwN!hVFu9yh2)$-_q5e z@FykMp#`8S=_P3pk67B8x?d#&99lYQ$dh;ph^K3FJLrLAO2x6S!Y{vEufirB->MtW zNWZJ-(Dx$rCrXfjIXc?UYY(dMO(mbYr$=m@;}S;6NXIokl`=S+VKSuhPgS=PvdyPE z#%=Rmp;aQ9ZNv&^d-fC_m;P-TU9`Mj*eJ3pVIQCOSPIU_Jp5wMMY{h{et7clWF8&4#Boe>j@u2StCK{sVtaSwH7;K^s;I^SGH8#WX7^-&-It6= z6(aJxSLpnI%xUP;wqUGM7ErNN`vyDnNCALuwy0+uucbB52NAc<%3bOB#PQQs(eY70 z?g+o{%?-W!IV|QV;}g<8u7+w?W9iE58j$l`_3~l R8}*-NU;pO|pAG%@{{Wg+^oIZd diff --git a/docs/configuration/optimization.md b/docs/configuration/optimization.md index fdd9c317b022..556d9f8b9420 100644 --- a/docs/configuration/optimization.md +++ b/docs/configuration/optimization.md @@ -7,7 +7,7 @@ This guide covers optimization strategies and performance tuning for vLLM V1. ## Preemption -Due to the auto-regressive nature of transformer architecture, there are times when KV cache space is insufficient to handle all batched requests. +Due to the autoregressive nature of transformer architecture, there are times when KV cache space is insufficient to handle all batched requests. In such cases, vLLM can preempt requests to free up KV cache space for other requests. Preempted requests are recomputed when sufficient KV cache space becomes available again. 
When this occurs, you may see the following warning: diff --git a/docs/deployment/integrations/production-stack.md b/docs/deployment/integrations/production-stack.md index 2f1894ccf002..624e98a08c98 100644 --- a/docs/deployment/integrations/production-stack.md +++ b/docs/deployment/integrations/production-stack.md @@ -4,7 +4,7 @@ Deploying vLLM on Kubernetes is a scalable and efficient way to serve machine le * **Upstream vLLM compatibility** – It wraps around upstream vLLM without modifying its code. * **Ease of use** – Simplified deployment via Helm charts and observability through Grafana dashboards. -* **High performance** – Optimized for LLM workloads with features like multi-model support, model-aware and prefix-aware routing, fast vLLM bootstrapping, and KV cache offloading with [LMCache](https://github.com/LMCache/LMCache), among others. +* **High performance** – Optimized for LLM workloads with features like multimodel support, model-aware and prefix-aware routing, fast vLLM bootstrapping, and KV cache offloading with [LMCache](https://github.com/LMCache/LMCache), among others. If you are new to Kubernetes, don't worry: in the vLLM production stack [repo](https://github.com/vllm-project/production-stack), we provide a step-by-step [guide](https://github.com/vllm-project/production-stack/blob/main/tutorials/00-install-kubernetes-env.md) and a [short video](https://www.youtube.com/watch?v=EsTJbQtzj0g) to set up everything and get started in **4 minutes**! diff --git a/docs/design/cuda_graphs.md b/docs/design/cuda_graphs.md index 7baadf8ba23c..19c02fc88641 100644 --- a/docs/design/cuda_graphs.md +++ b/docs/design/cuda_graphs.md @@ -41,7 +41,7 @@ These features allow the most flexibility for cudagraph capture and compilation * `NONE` — turn CUDA Graphs off. Good for debugging. * `PIECEWISE` — a single-mode strategy (and past default). 
It is the most flexible: attention or other CUDA Graphs-incompatible operations stay eager, everything else goes into CUDA Graphs. Requires piecewise compilation. * `FULL` — a single-mode strategy, which only captures full CUDA Graphs for non-uniform batches, then uniform-decode batches reuse the CUDA Graph of non-uniform batch of the same batch_size, since they are compatible; can be good for small models or workloads with small prompts. -* `FULL_DECODE_ONLY` — full CUDA Graph for uniform decode, no cudagraph for prefill/mixed etc; suitable for decode instances in a P/D setup where prefill is not as important, this way we can save the memory needed for `PIECEWISE` CUDA Graphs. +* `FULL_DECODE_ONLY` — full CUDA Graph for uniform decode, no cudagraph for prefill/mixed etc.; suitable for decode instances in a P/D setup where prefill is not as important, this way we can save the memory needed for `PIECEWISE` CUDA Graphs. * `FULL_AND_PIECEWISE` — (default mode) full CUDA Graph for uniform decode, piecewise CUDA Graphs for others; generally the most performant setting, especially for low latency with small models or MoEs, but also requires the most memory and takes the longest to capture. Defaults: If you’re on v1 with piecewise compilation, we default to `FULL_AND_PIECEWISE` for better performance, (for pooling models, it's still `PIECEWISE`). Otherwise, e.g. if piecewise compilation unavailable, we default to `NONE`. @@ -49,7 +49,7 @@ Defaults: If you’re on v1 with piecewise compilation, we default to `FULL_AND_ While `NONE` , `PIECEWISE`, and `FULL` are single-mode configurations and simply equivalent to past implementations of eager execution, piecewise CUDA Graphs, and full CUDA Graphs respectively, `FULL_DECODE_ONLY` and `FULL_AND_PIECEWISE` are newly appended dual-mode configurations, which require dispatching to switch between concrete runtime modes according to runtime batches dynamically. !!! 
note - Here, the single-modes `NONE`, `PIECEWISE`, and `FULL` are treated as the runtime modes for CUDA Graphs dispatching. If using a dual-mode, the dispatcher will always dispatch to one of its member modes (plus a potantial `NONE` if no suitable CUDA Graph available), depending on the batch composition. + Here, the single-modes `NONE`, `PIECEWISE`, and `FULL` are treated as the runtime modes for CUDA Graphs dispatching. If using a dual-mode, the dispatcher will always dispatch to one of its member modes (plus a potential `NONE` if no suitable CUDA Graph available), depending on the batch composition. While cascade attention is not cudagraph compatible, it is now compatible with all possible cudagraph mode configurations. If a batch uses cascade attention, it always gets dispatched to `PIECEWISE` mode if available (otherwise `NONE`). diff --git a/docs/design/optimization_levels.md b/docs/design/optimization_levels.md index 940286071ef3..4987c1820ad3 100644 --- a/docs/design/optimization_levels.md +++ b/docs/design/optimization_levels.md @@ -4,7 +4,7 @@ ## Overview -vLLM now supports optimization levels (`-O0`, `-O1`, `-O2`, `-O3`). Optimization levels provide an intuitive mechnaism for users to trade startup time for performance. Higher levels have better performance but worse startup time. These optimization levels have associated defaults to help users get desired out of the box performance. Importantly, defaults set by optimization levels are purely defaults; explicit user settings will not be overwritten. +vLLM now supports optimization levels (`-O0`, `-O1`, `-O2`, `-O3`). Optimization levels provide an intuitive mechanism for users to trade startup time for performance. Higher levels have better performance but worse startup time. These optimization levels have associated defaults to help users get desired out-of-the-box performance. Importantly, defaults set by optimization levels are purely defaults; explicit user settings will not be overwritten. 
## Level Summaries and Usage Examples ```bash diff --git a/docs/design/paged_attention.md b/docs/design/paged_attention.md index d87b2a639df1..5cc587842551 100644 --- a/docs/design/paged_attention.md +++ b/docs/design/paged_attention.md @@ -36,7 +36,7 @@ the input pointers `q`, `k_cache`, and `v_cache`, which point to query, key, and value data on global memory that need to be read and processed. The output pointer `out` points to global memory where the result should be written. These four pointers actually -refer to multi-dimensional arrays, but each thread only accesses the +refer to multidimensional arrays, but each thread only accesses the portion of data assigned to it. I have omitted all other runtime parameters here for simplicity. @@ -229,7 +229,7 @@ manner. ## QK -As shown the pseudo code below, before the entire for loop block, we +As shown the pseudocode below, before the entire for loop block, we fetch the query data for one token and store it in `q_vecs`. Then, in the outer for loop, we iterate through different `k_ptrs` that point to different tokens and prepare the `k_vecs` in the inner for @@ -403,7 +403,7 @@ for ... { // Iteration over different blocks. } ``` -As shown in the above pseudo code, in the outer loop, similar to +As shown in the above pseudocode, in the outer loop, similar to `k_ptr`, `logits_vec` iterates over different blocks and reads `V_VEC_SIZE` elements from `logits`. In the inner loop, each thread reads `V_VEC_SIZE` elements from the same tokens as a diff --git a/docs/features/tool_calling.md b/docs/features/tool_calling.md index c77fe4465979..70a11d6def56 100644 --- a/docs/features/tool_calling.md +++ b/docs/features/tool_calling.md @@ -420,7 +420,7 @@ Flags: `--tool-call-parser pythonic --chat-template {see_above}` ## How to Write a Tool Parser Plugin -A tool parser plugin is a Python file containing one or more ToolParser implementations. 
You can write a ToolParser similar to the `Hermes2ProToolParser` in [vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py](../../vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py). +A tool parser plugin is a Python file containing one or more ToolParser implementations. You can write a ToolParser similar to the `Hermes2ProToolParser` in [vllm/tool_parsers/hermes_tool_parser.py](../../vllm/tool_parsers/hermes_tool_parser.py). Here is a summary of a plugin file: @@ -468,7 +468,7 @@ Here is a summary of a plugin file: # register the tool parser to ToolParserManager ToolParserManager.register_lazy_module( name="example", - module_path="vllm.entrypoints.openai.tool_parsers.example", + module_path="vllm.tool_parsers.example", class_name="ExampleToolParser", ) diff --git a/docs/getting_started/installation/cpu.arm.inc.md b/docs/getting_started/installation/cpu.arm.inc.md index ad9c7d9ef21b..657bf2509db0 100644 --- a/docs/getting_started/installation/cpu.arm.inc.md +++ b/docs/getting_started/installation/cpu.arm.inc.md @@ -16,15 +16,15 @@ vLLM offers basic model inferencing and serving on Arm CPU platform, with suppor # --8<-- [start:pre-built-wheels] Pre-built vLLM wheels for Arm are available since version 0.11.2. These wheels contain pre-compiled C++ binaries. -Please replace `` in the commands below with a specific version string (e.g., `0.11.2`). ```bash -uv pip install --pre vllm==+cpu --extra-index-url https://wheels.vllm.ai/%2Bcpu/ +export VLLM_VERSION=$(curl -s https://api.github.com/repos/vllm-project/vllm/releases/latest | jq -r .tag_name | sed 's/^v//') +uv pip install vllm --extra-index-url https://wheels.vllm.ai/${VLLM_VERSION}/cpu ``` ??? console "pip" ```bash - pip install --pre vllm==+cpu --extra-index-url https://wheels.vllm.ai/%2Bcpu/ + pip install vllm==${VLLM_VERSION}+cpu --extra-index-url https://wheels.vllm.ai/${VLLM_VERSION}/cpu ``` The `uv` approach works for vLLM `v0.6.6` and later. 
A unique feature of `uv` is that packages in `--extra-index-url` have [higher priority than the default index](https://docs.astral.sh/uv/pip/compatibility/#packages-that-exist-on-multiple-indexes). If the latest public release is `v0.6.6.post1`, `uv`'s behavior allows installing a commit before `v0.6.6.post1` by specifying the `--extra-index-url`. In contrast, `pip` combines packages from `--extra-index-url` and the default index, choosing only the latest version, which makes it difficult to install a development version prior to the released version. @@ -35,20 +35,28 @@ LLM inference is a fast-evolving field, and the latest code may contain bug fixe * `https://wheels.vllm.ai/nightly/cpu/vllm` -To install from nightly index, copy the link address of the `*.whl` under this index to run, for example: - +To install from nightly index, run: ```bash -uv pip install -U https://wheels.vllm.ai/c756fb678184b867ed94e5613a529198f1aee423/vllm-0.13.0rc2.dev11%2Bgc756fb678.cpu-cp38-abi3-manylinux_2_31_aarch64.whl # current nightly build (the filename will change!) +uv pip install vllm --extra-index-url https://wheels.vllm.ai/nightly/cpu ``` +??? console "pip (there's a caveat)" + + Using `pip` to install from nightly indices is _not supported_, because `pip` combines packages from `--extra-index-url` and the default index, choosing only the latest version, which makes it difficult to install a development version prior to the released version. In contrast, `uv` gives the extra index [higher priority than the default index](https://docs.astral.sh/uv/pip/compatibility/#packages-that-exist-on-multiple-indexes). + + If you insist on using `pip`, you have to specify the full URL (link address) of the wheel file (which can be obtained from https://wheels.vllm.ai/nightly/cpu/vllm). 
+ + ```bash + pip install https://wheels.vllm.ai/4fa7ce46f31cbd97b4651694caf9991cc395a259/vllm-0.13.0rc2.dev104%2Bg4fa7ce46f.cpu-cp38-abi3-manylinux_2_35_aarch64.whl # current nightly build (the filename will change!) + ``` + **Install specific revisions** -If you want to access the wheels for previous commits (e.g. to bisect the behavior change, performance regression), specify the full commit hash in the index: -https://wheels.vllm.ai/${VLLM_COMMIT}/cpu/vllm . -Then, copy the link address of the `*.whl` under this index to run: +If you want to access the wheels for previous commits (e.g. to bisect the behavior change, performance regression), you can specify the commit hash in the URL: ```bash -uv pip install -U +export VLLM_COMMIT=730bd35378bf2a5b56b6d3a45be28b3092d26519 # use full commit hash from the main branch +uv pip install vllm --extra-index-url https://wheels.vllm.ai/${VLLM_COMMIT}/cpu ``` # --8<-- [end:pre-built-wheels] @@ -103,10 +111,10 @@ Testing has been conducted on AWS Graviton3 instances for compatibility. See [Using Docker](../../deployment/docker.md) for instructions on using the official Docker image. Stable vLLM Docker images are being pre-built for Arm from version 0.12.0. Available image tags are here: [https://gallery.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo](https://gallery.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo). -Please replace `` in the command below with a specific version string (e.g., `0.12.0`). ```bash -docker pull public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:v +export VLLM_VERSION=$(curl -s https://api.github.com/repos/vllm-project/vllm/releases/latest | jq -r .tag_name | sed 's/^v//') +docker pull public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:v${VLLM_VERSION} ``` You can also access the latest code with Docker images. These are not intended for production use and are meant for CI and testing only. They will expire after several days. 
diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 586d5d91634d..9ba0f4ca9096 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -659,7 +659,9 @@ These models primarily accept the [`LLM.generate`](./generative_models.md#llmgen | Architecture | Models | Inputs | Example HF Models | [LoRA](../features/lora.md) | [PP](../serving/parallelism_scaling.md) | |--------------|--------|--------|-------------------|----------------------|---------------------------| | `AriaForConditionalGeneration` | Aria | T + I+ | `rhymes-ai/Aria` | | | +| `AudioFlamingo3ForConditionalGeneration` | AudioFlamingo3 | T + A+ | `nvidia/audio-flamingo-3-hf`, `nvidia/music-flamingo-hf` | ✅︎ | ✅︎ | | `AyaVisionForConditionalGeneration` | Aya Vision | T + I+ | `CohereLabs/aya-vision-8b`, `CohereLabs/aya-vision-32b`, etc. | | ✅︎ | +| `BagelForConditionalGeneration` | BAGEL | T + I+ | `ByteDance-Seed/BAGEL-7B-MoT` | ✅︎ | ✅︎ | | `BeeForConditionalGeneration` | Bee-8B | T + IE+ | `Open-Bee/Bee-8B-RL`, `Open-Bee/Bee-8B-SFT` | | ✅︎ | | `Blip2ForConditionalGeneration` | BLIP-2 | T + IE | `Salesforce/blip2-opt-2.7b`, `Salesforce/blip2-opt-6.7b`, etc. | | ✅︎ | | `ChameleonForConditionalGeneration` | Chameleon | T + I | `facebook/chameleon-7b`, etc. | | ✅︎ | @@ -743,7 +745,7 @@ Some models are supported only via the [Transformers modeling backend](#transfor - There's no PLE caching or out-of-memory swapping support, as described in [Google's blog](https://developers.googleblog.com/en/introducing-gemma-3n/). These features might be too model-specific for vLLM, and swapping in particular may be better suited for constrained setups. !!! note - For `InternVLChatModel`, only InternVL2.5 with Qwen2.5 text backbone (`OpenGVLab/InternVL2.5-1B` etc), InternVL3 and InternVL3.5 have video inputs support currently. 
+ For `InternVLChatModel`, only InternVL2.5 with Qwen2.5 text backbone (`OpenGVLab/InternVL2.5-1B` etc.), InternVL3 and InternVL3.5 have video inputs support currently. !!! note To use `TIGER-Lab/Mantis-8B-siglip-llama3`, you have to pass `--hf_overrides '{"architectures": ["MantisForConditionalGeneration"]}'` when running vLLM. diff --git a/docs/serving/parallelism_scaling.md b/docs/serving/parallelism_scaling.md index 339a5b814021..ed93432701f3 100644 --- a/docs/serving/parallelism_scaling.md +++ b/docs/serving/parallelism_scaling.md @@ -154,7 +154,7 @@ vllm serve /path/to/the/model/in/the/container \ ## Optimizing network communication for tensor parallelism -Efficient tensor parallelism requires fast inter-node communication, preferably through high-speed network adapters such as InfiniBand. +Efficient tensor parallelism requires fast internode communication, preferably through high-speed network adapters such as InfiniBand. To set up the cluster to use InfiniBand, append additional arguments like `--privileged -e NCCL_IB_HCA=mlx5` to the [examples/online_serving/run_cluster.sh](../../examples/online_serving/run_cluster.sh) helper script. Contact your system administrator for more information about the required flags. diff --git a/docs/usage/security.md b/docs/usage/security.md index 74060d86f685..e619eec660ae 100644 --- a/docs/usage/security.md +++ b/docs/usage/security.md @@ -10,7 +10,7 @@ All communications between nodes in a multi-node vLLM deployment are **insecure ### Configuration Options for Inter-Node Communications -The following options control inter-node communications in vLLM: +The following options control internode communications in vLLM: #### 1. **Environment Variables:** @@ -28,7 +28,7 @@ The following options control inter-node communications in vLLM: ### Notes on PyTorch Distributed -vLLM uses PyTorch's distributed features for some inter-node communication. For +vLLM uses PyTorch's distributed features for some internode communication. 
For detailed information about PyTorch Distributed security considerations, please refer to the [PyTorch Security Guide](https://github.com/pytorch/pytorch/security/policy#using-distributed-features). diff --git a/examples/offline_inference/audio_language.py b/examples/offline_inference/audio_language.py index 40462c78ae8c..a6d0c5d12dd4 100755 --- a/examples/offline_inference/audio_language.py +++ b/examples/offline_inference/audio_language.py @@ -42,60 +42,31 @@ class ModelRequestData(NamedTuple): # Unless specified, these settings have been tested to work on a single L4. -# Voxtral -# Make sure to install mistral-common[audio]. -def run_voxtral(question: str, audio_count: int) -> ModelRequestData: - from mistral_common.audio import Audio - from mistral_common.protocol.instruct.chunk import ( - AudioChunk, - RawAudio, - TextChunk, - ) - from mistral_common.protocol.instruct.messages import ( - UserMessage, - ) - from mistral_common.protocol.instruct.request import ChatCompletionRequest - from mistral_common.tokens.tokenizers.mistral import MistralTokenizer - - model_name = "mistralai/Voxtral-Mini-3B-2507" - tokenizer = MistralTokenizer.from_hf_hub(model_name) - +# AudioFlamingo3 +def run_audioflamingo3(question: str, audio_count: int) -> ModelRequestData: + model_name = "nvidia/audio-flamingo-3-hf" engine_args = EngineArgs( model=model_name, - max_model_len=8192, + max_model_len=4096, max_num_seqs=2, limit_mm_per_prompt={"audio": audio_count}, - config_format="mistral", - load_format="mistral", - tokenizer_mode="mistral", enforce_eager=True, - enable_chunked_prefill=False, ) - text_chunk = TextChunk(text=question) - audios = [ - Audio.from_file(str(audio_assets[i].get_local_path()), strict=False) - for i in range(audio_count) - ] - audio_chunks = [ - AudioChunk(input_audio=RawAudio.from_audio(audio)) for audio in audios - ] - - messages = [UserMessage(content=[*audio_chunks, text_chunk])] - - req = ChatCompletionRequest(messages=messages, model=model_name) - - 
tokens = tokenizer.encode_chat_completion(req) - prompt_ids, audios = tokens.tokens, tokens.audios - - audios_and_sr = [(au.audio_array, au.sampling_rate) for au in audios] + # AudioFlamingo3 uses token for audio + audio_placeholder = "" * audio_count - multi_modal_data = {"audio": audios_and_sr} + prompt = ( + "<|im_start|>system\n" + "You are a helpful assistant.<|im_end|>\n" + "<|im_start|>user\n" + f"{audio_placeholder}{question}<|im_end|>\n" + "<|im_start|>assistant\n" + ) return ModelRequestData( engine_args=engine_args, - prompt_token_ids=prompt_ids, - multi_modal_data=multi_modal_data, + prompt=prompt, ) @@ -361,6 +332,63 @@ def run_ultravox(question: str, audio_count: int) -> ModelRequestData: ) +# Voxtral +# Make sure to install mistral-common[audio]. +def run_voxtral(question: str, audio_count: int) -> ModelRequestData: + from mistral_common.audio import Audio + from mistral_common.protocol.instruct.chunk import ( + AudioChunk, + RawAudio, + TextChunk, + ) + from mistral_common.protocol.instruct.messages import ( + UserMessage, + ) + from mistral_common.protocol.instruct.request import ChatCompletionRequest + from mistral_common.tokens.tokenizers.mistral import MistralTokenizer + + model_name = "mistralai/Voxtral-Mini-3B-2507" + tokenizer = MistralTokenizer.from_hf_hub(model_name) + + engine_args = EngineArgs( + model=model_name, + max_model_len=8192, + max_num_seqs=2, + limit_mm_per_prompt={"audio": audio_count}, + config_format="mistral", + load_format="mistral", + tokenizer_mode="mistral", + enforce_eager=True, + enable_chunked_prefill=False, + ) + + text_chunk = TextChunk(text=question) + audios = [ + Audio.from_file(str(audio_assets[i].get_local_path()), strict=False) + for i in range(audio_count) + ] + audio_chunks = [ + AudioChunk(input_audio=RawAudio.from_audio(audio)) for audio in audios + ] + + messages = [UserMessage(content=[*audio_chunks, text_chunk])] + + req = ChatCompletionRequest(messages=messages, model=model_name) + + tokens = 
tokenizer.encode_chat_completion(req) + prompt_ids, audios = tokens.tokens, tokens.audios + + audios_and_sr = [(au.audio_array, au.sampling_rate) for au in audios] + + multi_modal_data = {"audio": audios_and_sr} + + return ModelRequestData( + engine_args=engine_args, + prompt_token_ids=prompt_ids, + multi_modal_data=multi_modal_data, + ) + + # Whisper def run_whisper(question: str, audio_count: int) -> ModelRequestData: assert audio_count == 1, "Whisper only support single audio input per prompt" @@ -382,7 +410,7 @@ def run_whisper(question: str, audio_count: int) -> ModelRequestData: model_example_map = { - "voxtral": run_voxtral, + "audioflamingo3": run_audioflamingo3, "gemma3n": run_gemma3n, "granite_speech": run_granite_speech, "midashenglm": run_midashenglm, @@ -392,6 +420,7 @@ def run_whisper(question: str, audio_count: int) -> ModelRequestData: "qwen2_audio": run_qwen2_audio, "qwen2_5_omni": run_qwen2_5_omni, "ultravox": run_ultravox, + "voxtral": run_voxtral, "whisper": run_whisper, } diff --git a/examples/offline_inference/vision_language.py b/examples/offline_inference/vision_language.py index 9142279140e5..dd5b22ae9b0f 100755 --- a/examples/offline_inference/vision_language.py +++ b/examples/offline_inference/vision_language.py @@ -118,6 +118,32 @@ def run_bee(questions: list[str], modality: str) -> ModelRequestData: ) +def run_bagel(questions: list[str], modality: str) -> ModelRequestData: + assert modality == "image" + model_name = "ByteDance-Seed/BAGEL-7B-MoT" + + engine_args = EngineArgs( + model=model_name, + trust_remote_code=True, + max_model_len=8192, + max_num_seqs=2, + limit_mm_per_prompt={modality: 1}, + ) + + prompts = [ + ( + f"<|im_start|>user\n<|image_pad|>\n{question}<|im_end|>\n" + f"<|im_start|>assistant\n" + ) + for question in questions + ] + + return ModelRequestData( + engine_args=engine_args, + prompts=prompts, + ) + + # BLIP-2 def run_blip2(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" @@ 
-1832,6 +1858,7 @@ def run_tarsier2(questions: list[str], modality: str) -> ModelRequestData: model_example_map = { "aria": run_aria, "aya_vision": run_aya_vision, + "bagel": run_bagel, "bee": run_bee, "blip-2": run_blip2, "chameleon": run_chameleon, diff --git a/examples/online_serving/structured_outputs/structured_outputs.py b/examples/online_serving/structured_outputs/structured_outputs.py index ff473d044e32..2599c951ef8a 100644 --- a/examples/online_serving/structured_outputs/structured_outputs.py +++ b/examples/online_serving/structured_outputs/structured_outputs.py @@ -112,7 +112,7 @@ class CarDescription(pydantic.BaseModel): "messages": [ { "role": "user", - "content": "Generate an SQL query to show the 'username' and 'email'from the 'users' table.", + "content": "Generate an SQL query to show the 'username' and 'email' from the 'users' table.", } ], "extra_body": { diff --git a/tests/benchmarks/test_param_sweep.py b/tests/benchmarks/test_param_sweep.py index 0d47cfd9d623..467797d9915c 100644 --- a/tests/benchmarks/test_param_sweep.py +++ b/tests/benchmarks/test_param_sweep.py @@ -23,14 +23,6 @@ class TestParameterSweepItem: {"compilation_config.use_inductor_graph_partition": True}, "--compilation-config.use_inductor_graph_partition=true", ), - ( - {"compilation_config.use_inductor": False}, - "--compilation-config.use_inductor=false", - ), - ( - {"compilation_config.use_inductor": True}, - "--compilation-config.use_inductor=true", - ), ], ) def test_nested_boolean_params(self, input_dict, expected): diff --git a/tests/compile/distributed/test_fusions_e2e.py b/tests/compile/distributed/test_fusions_e2e.py index 1fcafe1840cd..bd326f1157d8 100644 --- a/tests/compile/distributed/test_fusions_e2e.py +++ b/tests/compile/distributed/test_fusions_e2e.py @@ -27,6 +27,7 @@ class Matches(NamedTuple): attention_fusion: int = 0 allreduce_fusion: int = 0 + rms_quant_norm_fusion: int = 0 sequence_parallel: int = 0 async_tp: int = 0 @@ -40,6 +41,7 @@ class 
ModelBackendTestCase(NamedTuple): MODELS_FP8: list[ModelBackendTestCase] = [] MODELS_FP4: list[ModelBackendTestCase] = [] +MODELS_GROUP_FP8: list[ModelBackendTestCase] = [] MODELS: list[ModelBackendTestCase] = [] # tp-only if current_platform.is_cuda(): @@ -498,3 +500,79 @@ def run_model(compile_config: int | CompilationConfig, model: str, **model_kwarg compilation_config.compile_ranges_split_points = ( llm.llm_engine.vllm_config.compilation_config.compile_ranges_split_points ) + + +if current_platform.is_cuda(): + MODELS_GROUP_FP8 = [ + ModelBackendTestCase( + model_name="Qwen/Qwen3-30B-A3B-FP8", + model_kwargs=dict(max_model_len=1024, kv_cache_dtype="fp8"), + backend=AttentionBackendEnum.TRITON_ATTN, + matches=Matches( + rms_quant_norm_fusion=48, + ), + ), + ] + +CUSTOM_OPS_QUANT_RMS_NORM = ["+quant_fp8,+rms_norm"] + + +@pytest.mark.parametrize( + "model_name, model_kwargs, backend, matches, custom_ops", + # Test rms norm+group quant_fp8 fusion + list[tuple[Any, ...]](flat_product(MODELS_GROUP_FP8, CUSTOM_OPS_QUANT_RMS_NORM)), +) +@pytest.mark.parametrize("inductor_graph_partition", [True, False]) +def test_rms_group_quant( + model_name: str, + model_kwargs: dict[str, Any], + backend: AttentionBackendEnum, + matches: Matches, + custom_ops: str, + inductor_graph_partition: bool, + caplog_mp_spawn, + monkeypatch, +): + if inductor_graph_partition and not is_torch_equal_or_newer("2.9.0.dev"): + pytest.skip("Inductor graph partition requires torch>=2.9") + + custom_ops_list = custom_ops.split(",") if custom_ops else [] + + if inductor_graph_partition: + mode = CUDAGraphMode.FULL_AND_PIECEWISE + splitting_ops: list[str] | None = None + else: + mode = CUDAGraphMode.FULL_DECODE_ONLY + splitting_ops = [] + + # Disable, compile cache to make sure custom passes run. + # Otherwise, we can't verify fusion happened through the logs. + monkeypatch.setenv("VLLM_DISABLE_COMPILE_CACHE", "1") + + # To capture subprocess logs, we need to know whether spawn or fork is used. 
+ # Force spawn as it is more general. + monkeypatch.setenv("VLLM_WORKER_MULTIPROC_METHOD", "spawn") + monkeypatch.setenv("VLLM_ATTENTION_BACKEND", backend.name) + + compilation_config = CompilationConfig( + # Testing properties + custom_ops=custom_ops_list, + use_inductor_graph_partition=inductor_graph_partition, + cudagraph_mode=mode, + splitting_ops=splitting_ops, + # Common + mode=CompilationMode.VLLM_COMPILE, + pass_config=PassConfig(eliminate_noops=True, enable_fusion=True), + # Inductor caches custom passes by default as well via uuid + inductor_compile_config={"force_disable_caches": True}, + ) + + with caplog_mp_spawn(logging.DEBUG) as log_holder: + run_model(compilation_config, model_name, **model_kwargs) + + log_matches = re.findall( + r"\[fusion.py:\d+] Replaced (\d+) patterns", + log_holder.text, + ) + assert len(log_matches) == 1, log_holder.text + assert int(log_matches[0]) == matches.rms_quant_norm_fusion diff --git a/tests/compile/test_dynamic_shapes_compilation.py b/tests/compile/test_dynamic_shapes_compilation.py index bc3dbf553331..9ccb363b088f 100644 --- a/tests/compile/test_dynamic_shapes_compilation.py +++ b/tests/compile/test_dynamic_shapes_compilation.py @@ -36,7 +36,7 @@ def get_test_models(): DynamicShapesType.BACKED_SIZE_OBLIVIOUS, ], ) -@pytest.mark.parametrize("use_aot_compile", ["0"]) +@pytest.mark.parametrize("use_aot_compile", ["0", "1"]) @pytest.mark.parametrize("use_bytecode_hook", [True, False]) @pytest.mark.parametrize("evaluate_guards", [False, True]) @pytest.mark.skipif( @@ -54,6 +54,12 @@ def test_dynamic_shapes_compilation( if use_bytecode_hook and shapes_type == DynamicShapesType.UNBACKED: pytest.skip("UNBACKED dynamic shapes require VLLM_USE_BYTECODE_HOOK=0") + if evaluate_guards and shapes_type == DynamicShapesType.UNBACKED: + pytest.skip("unbacked dynamic shapes do not add guards") + + if evaluate_guards and use_aot_compile: + pytest.skip("evaluate_guards requires use_aot_compile=0") + 
monkeypatch.setenv("VLLM_USE_AOT_COMPILE", use_aot_compile) monkeypatch.setenv("VLLM_USE_BYTECODE_HOOK", "1" if use_bytecode_hook else "0") @@ -120,7 +126,7 @@ def test_model_specialization_with_evaluate_guards( and dynamic_shapes_type == DynamicShapesType.BACKED and evaluate_guards ): - pytest.skip("evaluate_guards for backed does not work with aot_compile =1") + pytest.skip("evaluate_guards for backed does not work with aot_compile=1") @support_torch_compile class ModelWithSizeCheck(torch.nn.Module): diff --git a/tests/entrypoints/openai/test_chat_error.py b/tests/entrypoints/openai/test_chat_error.py index 102eeaf61441..b194e9b74d87 100644 --- a/tests/entrypoints/openai/test_chat_error.py +++ b/tests/entrypoints/openai/test_chat_error.py @@ -80,10 +80,9 @@ async def _fake_process_inputs( return dict(engine_prompt), {} async def _fake_preprocess_chat(*args, **kwargs): - # return conversation, request_prompts, engine_prompts + # return conversation, engine_prompts return ( [{"role": "user", "content": "Test"}], - [[1, 2, 3]], [{"prompt_token_ids": [1, 2, 3]}], ) diff --git a/tests/entrypoints/openai/test_response_api_parsable_context.py b/tests/entrypoints/openai/test_response_api_parsable_context.py index 1899c5f04fe3..6d97602f3247 100644 --- a/tests/entrypoints/openai/test_response_api_parsable_context.py +++ b/tests/entrypoints/openai/test_response_api_parsable_context.py @@ -165,6 +165,7 @@ async def test_mcp_tool_call(client: OpenAI, model_name: str): model=model_name, input="What is 13 * 24? 
Use python to calculate the result.", tools=[{"type": "code_interpreter", "container": {"type": "auto"}}], + extra_body={"enable_response_messages": True}, temperature=0.0, ) @@ -178,3 +179,8 @@ async def test_mcp_tool_call(client: OpenAI, model_name: str): # make sure the correct math is in the final output assert response.output[3].type == "message" assert "312" in response.output[3].content[0].text + + # test raw input_messages / output_messages + assert len(response.input_messages) == 1 + assert len(response.output_messages) == 3 + assert "312" in response.output_messages[2]["message"] diff --git a/tests/entrypoints/openai/test_response_api_simple.py b/tests/entrypoints/openai/test_response_api_simple.py index aee03199bc6f..02e06297f398 100644 --- a/tests/entrypoints/openai/test_response_api_simple.py +++ b/tests/entrypoints/openai/test_response_api_simple.py @@ -87,3 +87,48 @@ async def test_reasoning_item(client: OpenAI, model_name: str): assert response.output[0].type == "reasoning" assert response.output[1].type == "message" assert type(response.output[1].content[0].text) is str + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_streaming_output_consistency(client: OpenAI, model_name: str): + """Test that streaming delta text matches the final response output_text. + + This test verifies that when using streaming mode: + 1. The concatenated text from all 'response.output_text.delta' events + 2. 
Matches the 'output_text' in the final 'response.completed' event + """ + response = await client.responses.create( + model=model_name, + input="Say hello in one sentence.", + stream=True, + ) + + events = [] + async for event in response: + events.append(event) + + assert len(events) > 0 + + # Concatenate all delta text from streaming events + streaming_text = "".join( + event.delta for event in events if event.type == "response.output_text.delta" + ) + + # Get the final response from the last event + response_completed_event = events[-1] + assert response_completed_event.type == "response.completed" + assert response_completed_event.response.status == "completed" + + # Get output_text from the final response + final_output_text = response_completed_event.response.output_text + + # Verify final response has output + assert len(response_completed_event.response.output) > 0 + + # Verify streaming text matches final output_text + assert streaming_text == final_output_text, ( + f"Streaming text does not match final output_text.\n" + f"Streaming: {streaming_text!r}\n" + f"Final: {final_output_text!r}" + ) diff --git a/tests/entrypoints/openai/test_serving_chat.py b/tests/entrypoints/openai/test_serving_chat.py index 5a9293f1b9ae..444275e061c6 100644 --- a/tests/entrypoints/openai/test_serving_chat.py +++ b/tests/entrypoints/openai/test_serving_chat.py @@ -19,9 +19,9 @@ ) from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_models import BaseModelPath, OpenAIServingModels -from vllm.entrypoints.openai.tool_parsers import ToolParserManager from vllm.outputs import CompletionOutput, RequestOutput from vllm.tokenizers import get_tokenizer +from vllm.tool_parsers import ToolParserManager from vllm.v1.engine.async_llm import AsyncLLM from ...utils import RemoteOpenAIServer @@ -877,7 +877,7 @@ async def test_simple_chat(self, serving_chat, stream): # Test the Harmony messages for the first turn's input req = 
ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages, _, _ = serving_chat._make_request_with_harmony(req) + input_messages, _ = serving_chat._make_request_with_harmony(req) verify_harmony_messages( input_messages, [ @@ -905,7 +905,7 @@ async def test_simple_chat(self, serving_chat, stream): # Test the Harmony messages for the second turn's input req_2 = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages_2, _, _ = serving_chat._make_request_with_harmony(req_2) + input_messages_2, _ = serving_chat._make_request_with_harmony(req_2) verify_harmony_messages( input_messages_2, [ @@ -927,7 +927,7 @@ async def test_tool_call_response_with_content( # Test the Harmony messages for the first turn's input req = ChatCompletionRequest(model=MODEL_NAME, messages=messages, tools=tools) - input_messages, _, _ = serving_chat._make_request_with_harmony(req) + input_messages, _ = serving_chat._make_request_with_harmony(req) verify_harmony_messages( input_messages, [ @@ -971,7 +971,7 @@ async def test_tool_call_response_with_content( # Test the Harmony messages for the second turn's input req_2 = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages_2, _, _ = serving_chat._make_request_with_harmony(req_2) + input_messages_2, _ = serving_chat._make_request_with_harmony(req_2) verify_harmony_messages( input_messages_2, [ @@ -1008,7 +1008,7 @@ async def test_tools_and_reasoning( # Test the Harmony messages for the first turn's input req = ChatCompletionRequest(model=MODEL_NAME, messages=messages, tools=tools) - input_messages, _, _ = serving_chat._make_request_with_harmony(req) + input_messages, _ = serving_chat._make_request_with_harmony(req) verify_harmony_messages( input_messages, [ @@ -1052,7 +1052,7 @@ async def test_tools_and_reasoning( # Test the Harmony messages for the second turn's input req_2 = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages_2, _, _ = 
serving_chat._make_request_with_harmony(req_2) + input_messages_2, _ = serving_chat._make_request_with_harmony(req_2) verify_harmony_messages( input_messages_2, [ @@ -1089,7 +1089,7 @@ async def test_multi_turn_tools_and_reasoning( # Test the Harmony messages for the first turn's input req = ChatCompletionRequest(model=MODEL_NAME, messages=messages, tools=tools) - input_messages, _, _ = serving_chat._make_request_with_harmony(req) + input_messages, _ = serving_chat._make_request_with_harmony(req) verify_harmony_messages( input_messages, [ @@ -1133,7 +1133,7 @@ async def test_multi_turn_tools_and_reasoning( # Test the Harmony messages for the second turn's input req_2 = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages_2, _, _ = serving_chat._make_request_with_harmony(req_2) + input_messages_2, _ = serving_chat._make_request_with_harmony(req_2) verify_harmony_messages( input_messages_2, [ @@ -1183,7 +1183,7 @@ async def test_multi_turn_tools_and_reasoning( # Test the Harmony messages for the third turn's input req_3 = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages_3, _, _ = serving_chat._make_request_with_harmony(req_3) + input_messages_3, _ = serving_chat._make_request_with_harmony(req_3) verify_harmony_messages( input_messages_3, [ @@ -1246,7 +1246,7 @@ async def test_multi_turn_tools_and_reasoning( # Test the Harmony messages for the fourth turn's input req_4 = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages_4, _, _ = serving_chat._make_request_with_harmony(req_4) + input_messages_4, _ = serving_chat._make_request_with_harmony(req_4) verify_harmony_messages( input_messages_4, [ @@ -1295,7 +1295,7 @@ async def test_non_tool_reasoning(self, serving_chat): }, ] req = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages, _, _ = serving_chat._make_request_with_harmony(req) + input_messages, _ = serving_chat._make_request_with_harmony(req) verify_harmony_messages( 
input_messages, @@ -1327,7 +1327,7 @@ async def test_non_tool_reasoning_empty_content(self, serving_chat): }, ] req = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages, _, _ = serving_chat._make_request_with_harmony(req) + input_messages, _ = serving_chat._make_request_with_harmony(req) verify_harmony_messages( input_messages, @@ -1357,7 +1357,7 @@ async def test_non_tool_reasoning_empty_content_list(self, serving_chat): }, ] req = ChatCompletionRequest(model=MODEL_NAME, messages=messages) - input_messages, _, _ = serving_chat._make_request_with_harmony(req) + input_messages, _ = serving_chat._make_request_with_harmony(req) verify_harmony_messages( input_messages, diff --git a/tests/entrypoints/openai/test_serving_responses.py b/tests/entrypoints/openai/test_serving_responses.py index cf00f0a04224..7d03dccec30d 100644 --- a/tests/entrypoints/openai/test_serving_responses.py +++ b/tests/entrypoints/openai/test_serving_responses.py @@ -21,7 +21,7 @@ extract_tool_types, ) from vllm.entrypoints.tool_server import ToolServer -from vllm.inputs.data import TokensPrompt as EngineTokensPrompt +from vllm.inputs.data import TokensPrompt class MockConversationContext(ConversationContext): @@ -237,7 +237,7 @@ def test_validate_generator_input(self, serving_responses_instance): """Test _validate_generator_input with valid prompt length""" # Create an engine prompt with valid length (less than max_model_len) valid_prompt_token_ids = list(range(5)) # 5 tokens < 100 max_model_len - engine_prompt = EngineTokensPrompt(prompt_token_ids=valid_prompt_token_ids) + engine_prompt = TokensPrompt(prompt_token_ids=valid_prompt_token_ids) # Call the method result = serving_responses_instance._validate_generator_input(engine_prompt) @@ -247,7 +247,7 @@ def test_validate_generator_input(self, serving_responses_instance): # create an invalid engine prompt invalid_prompt_token_ids = list(range(200)) # 100 tokens >= 100 max_model_len - engine_prompt = 
EngineTokensPrompt(prompt_token_ids=invalid_prompt_token_ids) + engine_prompt = TokensPrompt(prompt_token_ids=invalid_prompt_token_ids) # Call the method result = serving_responses_instance._validate_generator_input(engine_prompt) diff --git a/tests/entrypoints/openai/test_sparse_tensor_validation.py b/tests/entrypoints/openai/test_sparse_tensor_validation.py new file mode 100644 index 000000000000..907c82b57dea --- /dev/null +++ b/tests/entrypoints/openai/test_sparse_tensor_validation.py @@ -0,0 +1,342 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +Sparse tensor validation in embedding APIs. + +Tests verify that malicious sparse tensors are rejected before they can trigger +out-of-bounds memory writes during to_dense() operations. +""" + +import base64 +import io + +import pytest +import torch + +from vllm.entrypoints.renderer import CompletionRenderer +from vllm.multimodal.audio import AudioEmbeddingMediaIO +from vllm.multimodal.image import ImageEmbeddingMediaIO + + +def _encode_tensor(tensor: torch.Tensor) -> bytes: + """Helper to encode a tensor as base64 bytes.""" + buffer = io.BytesIO() + torch.save(tensor, buffer) + buffer.seek(0) + return base64.b64encode(buffer.read()) + + +def _create_malicious_sparse_tensor() -> torch.Tensor: + """ + Create a malicious sparse COO tensor with out-of-bounds indices. + + This tensor has indices that point beyond the declared shape, which would + cause an out-of-bounds write when converted to dense format without + validation. 
+ """ + # Create a 3x3 sparse tensor but with indices pointing to (10, 10) + indices = torch.tensor([[10], [10]]) # Out of bounds for 3x3 shape + values = torch.tensor([1.0]) + shape = (3, 3) + + # Create sparse tensor (this will be invalid) + sparse_tensor = torch.sparse_coo_tensor(indices, values, shape, dtype=torch.float32) + return sparse_tensor + + +def _create_valid_sparse_tensor() -> torch.Tensor: + """Create a valid sparse COO tensor for baseline testing.""" + indices = torch.tensor([[0, 1, 2], [0, 1, 2]]) + values = torch.tensor([1.0, 2.0, 3.0]) + shape = (3, 3) + + sparse_tensor = torch.sparse_coo_tensor(indices, values, shape, dtype=torch.float32) + return sparse_tensor + + +def _create_valid_dense_tensor() -> torch.Tensor: + """Create a valid dense tensor for baseline testing.""" + return torch.randn(10, 768, dtype=torch.float32) # (seq_len, hidden_size) + + +class TestPromptEmbedsValidation: + """Test sparse tensor validation in prompt embeddings (Completions API).""" + + def test_valid_dense_tensor_accepted(self, model_config): + """Baseline: Valid dense tensors should work normally.""" + renderer = CompletionRenderer(model_config) + + valid_tensor = _create_valid_dense_tensor() + encoded = _encode_tensor(valid_tensor) + + # Should not raise any exception + result = renderer.load_prompt_embeds(encoded) + assert len(result) == 1 + assert result[0]["prompt_embeds"].shape == valid_tensor.shape + + def test_valid_sparse_tensor_accepted(self): + """Baseline: Valid sparse tensors should load successfully.""" + io_handler = ImageEmbeddingMediaIO() + + valid_sparse = _create_valid_sparse_tensor() + encoded = _encode_tensor(valid_sparse) + + # Should not raise any exception (sparse tensors remain sparse) + result = io_handler.load_base64("", encoded.decode("utf-8")) + assert result.shape == valid_sparse.shape + + def test_malicious_sparse_tensor_rejected(self, model_config): + """Security: Malicious sparse tensors should be rejected.""" + renderer = 
CompletionRenderer(model_config) + + malicious_tensor = _create_malicious_sparse_tensor() + encoded = _encode_tensor(malicious_tensor) + + # Should raise RuntimeError due to invalid sparse tensor + with pytest.raises((RuntimeError, ValueError)) as exc_info: + renderer.load_prompt_embeds(encoded) + + # Error should indicate sparse tensor validation failure + error_msg = str(exc_info.value).lower() + assert "sparse" in error_msg or "index" in error_msg or "bounds" in error_msg + + def test_extremely_large_indices_rejected(self, model_config): + """Security: Sparse tensors with extremely large indices should be rejected.""" + renderer = CompletionRenderer(model_config) + + # Create tensor with indices far beyond reasonable bounds + indices = torch.tensor([[999999], [999999]]) + values = torch.tensor([1.0]) + shape = (10, 10) + + malicious_tensor = torch.sparse_coo_tensor( + indices, values, shape, dtype=torch.float32 + ) + encoded = _encode_tensor(malicious_tensor) + + with pytest.raises((RuntimeError, ValueError)): + renderer.load_prompt_embeds(encoded) + + def test_negative_indices_rejected(self, model_config): + """Security: Sparse tensors with negative indices should be rejected.""" + renderer = CompletionRenderer(model_config) + + # Create tensor with negative indices + indices = torch.tensor([[-1], [-1]]) + values = torch.tensor([1.0]) + shape = (10, 10) + + malicious_tensor = torch.sparse_coo_tensor( + indices, values, shape, dtype=torch.float32 + ) + encoded = _encode_tensor(malicious_tensor) + + with pytest.raises((RuntimeError, ValueError)): + renderer.load_prompt_embeds(encoded) + + +class TestImageEmbedsValidation: + """Test sparse tensor validation in image embeddings (Chat API).""" + + def test_valid_dense_tensor_accepted(self): + """Baseline: Valid dense tensors should work normally.""" + io_handler = ImageEmbeddingMediaIO() + + valid_tensor = _create_valid_dense_tensor() + encoded = _encode_tensor(valid_tensor) + + # Should not raise any exception + 
result = io_handler.load_base64("", encoded.decode("utf-8")) + assert result.shape == valid_tensor.shape + + def test_valid_sparse_tensor_accepted(self): + """Baseline: Valid sparse tensors should load successfully.""" + io_handler = AudioEmbeddingMediaIO() + + valid_sparse = _create_valid_sparse_tensor() + encoded = _encode_tensor(valid_sparse) + + # Should not raise any exception (sparse tensors remain sparse) + result = io_handler.load_base64("", encoded.decode("utf-8")) + assert result.shape == valid_sparse.shape + + def test_malicious_sparse_tensor_rejected(self): + """Security: Malicious sparse tensors should be rejected.""" + io_handler = ImageEmbeddingMediaIO() + + malicious_tensor = _create_malicious_sparse_tensor() + encoded = _encode_tensor(malicious_tensor) + + # Should raise RuntimeError due to invalid sparse tensor + with pytest.raises((RuntimeError, ValueError)) as exc_info: + io_handler.load_base64("", encoded.decode("utf-8")) + + error_msg = str(exc_info.value).lower() + assert "sparse" in error_msg or "index" in error_msg or "bounds" in error_msg + + def test_load_bytes_validates(self): + """Security: Validation should also work for load_bytes method.""" + io_handler = ImageEmbeddingMediaIO() + + malicious_tensor = _create_malicious_sparse_tensor() + buffer = io.BytesIO() + torch.save(malicious_tensor, buffer) + buffer.seek(0) + + with pytest.raises((RuntimeError, ValueError)): + io_handler.load_bytes(buffer.read()) + + +class TestAudioEmbedsValidation: + """Test sparse tensor validation in audio embeddings (Chat API).""" + + def test_valid_dense_tensor_accepted(self): + """Baseline: Valid dense tensors should work normally.""" + io_handler = AudioEmbeddingMediaIO() + + valid_tensor = _create_valid_dense_tensor() + encoded = _encode_tensor(valid_tensor) + + # Should not raise any exception + result = io_handler.load_base64("", encoded.decode("utf-8")) + assert result.shape == valid_tensor.shape + + def test_valid_sparse_tensor_accepted(self): + 
"""Baseline: Valid sparse tensors should be converted successfully.""" + io_handler = AudioEmbeddingMediaIO() + + valid_sparse = _create_valid_sparse_tensor() + encoded = _encode_tensor(valid_sparse) + + # Should not raise any exception + result = io_handler.load_base64("", encoded.decode("utf-8")) + assert result.is_sparse is False + + def test_malicious_sparse_tensor_rejected(self): + """Security: Malicious sparse tensors should be rejected.""" + io_handler = AudioEmbeddingMediaIO() + + malicious_tensor = _create_malicious_sparse_tensor() + encoded = _encode_tensor(malicious_tensor) + + # Should raise RuntimeError due to invalid sparse tensor + with pytest.raises((RuntimeError, ValueError)) as exc_info: + io_handler.load_base64("", encoded.decode("utf-8")) + + error_msg = str(exc_info.value).lower() + assert "sparse" in error_msg or "index" in error_msg or "bounds" in error_msg + + def test_load_bytes_validates(self): + """Security: Validation should also work for load_bytes method.""" + io_handler = AudioEmbeddingMediaIO() + + malicious_tensor = _create_malicious_sparse_tensor() + buffer = io.BytesIO() + torch.save(malicious_tensor, buffer) + buffer.seek(0) + + with pytest.raises((RuntimeError, ValueError)): + io_handler.load_bytes(buffer.read()) + + +class TestSparseTensorValidationIntegration: + """ + These tests verify the complete attack chain is blocked at all entry points. + """ + + def test_attack_scenario_completions_api(self, model_config): + """ + Simulate a complete attack through the Completions API. + + Attack scenario: + 1. Attacker crafts malicious sparse tensor + 2. Encodes it as base64 + 3. Sends to /v1/completions with prompt_embeds parameter + 4. 
Server should reject before memory corruption occurs + """ + renderer = CompletionRenderer(model_config) + + # Step 1-2: Attacker creates malicious payload + attack_payload = _encode_tensor(_create_malicious_sparse_tensor()) + + # Step 3-4: Server processes and should reject + with pytest.raises((RuntimeError, ValueError)): + renderer.load_prompt_embeds(attack_payload) + + def test_attack_scenario_chat_api_image(self): + """ + Simulate attack through Chat API with image_embeds. + + Verifies the image embeddings path is protected. + """ + io_handler = ImageEmbeddingMediaIO() + attack_payload = _encode_tensor(_create_malicious_sparse_tensor()) + + with pytest.raises((RuntimeError, ValueError)): + io_handler.load_base64("", attack_payload.decode("utf-8")) + + def test_attack_scenario_chat_api_audio(self): + """ + Simulate attack through Chat API with audio_embeds. + + Verifies the audio embeddings path is protected. + """ + io_handler = AudioEmbeddingMediaIO() + attack_payload = _encode_tensor(_create_malicious_sparse_tensor()) + + with pytest.raises((RuntimeError, ValueError)): + io_handler.load_base64("", attack_payload.decode("utf-8")) + + def test_multiple_valid_embeddings_in_batch(self, model_config): + """ + Regression test: Multiple valid embeddings should still work. + + Ensures the fix doesn't break legitimate batch processing. + """ + renderer = CompletionRenderer(model_config) + + valid_tensors = [ + _encode_tensor(_create_valid_dense_tensor()), + _encode_tensor(_create_valid_dense_tensor()), + _encode_tensor(_create_valid_dense_tensor()), + ] + + # Should process all without error + result = renderer.load_prompt_embeds(valid_tensors) + assert len(result) == 3 + + def test_mixed_valid_and_malicious_rejected(self, model_config): + """ + Security: Batch with one malicious tensor should be rejected. + + Even if most tensors are valid, a single malicious one should + cause rejection of the entire batch. 
+ """ + renderer = CompletionRenderer(model_config) + + mixed_batch = [ + _encode_tensor(_create_valid_dense_tensor()), + _encode_tensor(_create_malicious_sparse_tensor()), # Malicious + _encode_tensor(_create_valid_dense_tensor()), + ] + + # Should fail on the malicious tensor + with pytest.raises((RuntimeError, ValueError)): + renderer.load_prompt_embeds(mixed_batch) + + +# Pytest fixtures +@pytest.fixture +def model_config(): + """Mock ModelConfig for testing.""" + from vllm.config import ModelConfig + + return ModelConfig( + model="facebook/opt-125m", + tokenizer="facebook/opt-125m", + tokenizer_mode="auto", + trust_remote_code=False, + dtype="float32", + seed=0, + enable_prompt_embeds=True, # Required for prompt embeds tests + ) diff --git a/tests/entrypoints/openai/tool_parsers/test_gigachat3_tool_parser.py b/tests/entrypoints/openai/tool_parsers/test_gigachat3_tool_parser.py index 02c5189d0f6c..6ac48317e8bc 100644 --- a/tests/entrypoints/openai/tool_parsers/test_gigachat3_tool_parser.py +++ b/tests/entrypoints/openai/tool_parsers/test_gigachat3_tool_parser.py @@ -10,8 +10,8 @@ run_tool_extraction_streaming, ) from vllm.entrypoints.openai.protocol import FunctionCall -from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers import ToolParser, ToolParserManager SIMPLE_ARGS_DICT = { "action": "create", diff --git a/tests/entrypoints/openai/tool_parsers/test_hermes_tool_parser.py b/tests/entrypoints/openai/tool_parsers/test_hermes_tool_parser.py index ce6727bb04f6..8600aaf63943 100644 --- a/tests/entrypoints/openai/tool_parsers/test_hermes_tool_parser.py +++ b/tests/entrypoints/openai/tool_parsers/test_hermes_tool_parser.py @@ -6,8 +6,8 @@ import pytest from vllm.entrypoints.openai.protocol import ChatCompletionRequest -from vllm.entrypoints.openai.tool_parsers.hermes_tool_parser import Hermes2ProToolParser from vllm.tokenizers import TokenizerLike +from 
vllm.tool_parsers.hermes_tool_parser import Hermes2ProToolParser from ....utils import RemoteOpenAIServer diff --git a/tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py b/tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py index bdd5344652c4..394457532139 100644 --- a/tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py +++ b/tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py @@ -12,7 +12,7 @@ run_tool_extraction_streaming, ) from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall -from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager +from vllm.tool_parsers import ToolParser, ToolParserManager def make_tool_call(name, arguments): diff --git a/tests/entrypoints/openai/tool_parsers/test_llama3_json_tool_parser.py b/tests/entrypoints/openai/tool_parsers/test_llama3_json_tool_parser.py index 6c286ca90ce4..3ce7801b4597 100644 --- a/tests/entrypoints/openai/tool_parsers/test_llama3_json_tool_parser.py +++ b/tests/entrypoints/openai/tool_parsers/test_llama3_json_tool_parser.py @@ -6,8 +6,8 @@ import pytest from vllm.entrypoints.openai.protocol import ExtractedToolCallInformation -from vllm.entrypoints.openai.tool_parsers.llama_tool_parser import Llama3JsonToolParser from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.llama_tool_parser import Llama3JsonToolParser @pytest.fixture diff --git a/tests/entrypoints/openai/tool_parsers/test_llama4_pythonic_tool_parser.py b/tests/entrypoints/openai/tool_parsers/test_llama4_pythonic_tool_parser.py index 8aa88a007188..3bd1ca7f528d 100644 --- a/tests/entrypoints/openai/tool_parsers/test_llama4_pythonic_tool_parser.py +++ b/tests/entrypoints/openai/tool_parsers/test_llama4_pythonic_tool_parser.py @@ -10,8 +10,8 @@ run_tool_extraction_streaming, ) from vllm.entrypoints.openai.protocol import FunctionCall -from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager from vllm.tokenizers 
import TokenizerLike +from vllm.tool_parsers import ToolParser, ToolParserManager # Test cases similar to pythonic parser but with Llama4 specific format SIMPLE_FUNCTION_OUTPUT = "[get_weather(city='LA', metric='C')]" diff --git a/tests/entrypoints/openai/tool_parsers/test_olmo3_tool_parser.py b/tests/entrypoints/openai/tool_parsers/test_olmo3_tool_parser.py index a0b9a3c563bc..3774b3d1833e 100644 --- a/tests/entrypoints/openai/tool_parsers/test_olmo3_tool_parser.py +++ b/tests/entrypoints/openai/tool_parsers/test_olmo3_tool_parser.py @@ -10,8 +10,8 @@ run_tool_extraction_streaming, ) from vllm.entrypoints.openai.protocol import FunctionCall -from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers import ToolParser, ToolParserManager # https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#model-response-format-1 SIMPLE_FUNCTION_OUTPUT = "get_weather(city='San Francisco', metric='celsius')" diff --git a/tests/entrypoints/openai/tool_parsers/test_pythonic_tool_parser.py b/tests/entrypoints/openai/tool_parsers/test_pythonic_tool_parser.py index 52202c55e840..c4cad17fd2d0 100644 --- a/tests/entrypoints/openai/tool_parsers/test_pythonic_tool_parser.py +++ b/tests/entrypoints/openai/tool_parsers/test_pythonic_tool_parser.py @@ -10,8 +10,8 @@ run_tool_extraction_streaming, ) from vllm.entrypoints.openai.protocol import FunctionCall -from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers import ToolParser, ToolParserManager # https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#model-response-format-1 SIMPLE_FUNCTION_OUTPUT = "get_weather(city='San Francisco', metric='celsius')" diff --git a/tests/entrypoints/openai/tool_parsers/utils.py b/tests/entrypoints/openai/tool_parsers/utils.py index 2d4f5f173410..0b32e5f899ff 
100644 --- a/tests/entrypoints/openai/tool_parsers/utils.py +++ b/tests/entrypoints/openai/tool_parsers/utils.py @@ -10,8 +10,8 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers import ToolParser from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers import ToolParser class StreamingToolReconstructor: diff --git a/tests/kernels/core/test_apply_rotary_emb.py b/tests/kernels/core/test_apply_rotary_emb.py new file mode 100644 index 000000000000..23c722fa5e63 --- /dev/null +++ b/tests/kernels/core/test_apply_rotary_emb.py @@ -0,0 +1,203 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +Tests for ApplyRotaryEmb CustomOp dispatch behavior. + +This test ensures that RotaryEmbedding classes correctly call the appropriate +ApplyRotaryEmb methods based on the calling context: + +1. RotaryEmbedding.forward_native() -> ApplyRotaryEmb.forward_native() +2. RotaryEmbedding.forward_cuda() -> ApplyRotaryEmb.forward() (auto-dispatch) +3. 
RotaryEmbedding.forward_hip() -> ApplyRotaryEmb.forward() (auto-dispatch) +""" + +from dataclasses import dataclass + +import pytest +import torch + +from vllm.config import ( + CompilationConfig, + VllmConfig, + get_cached_compilation_config, + set_current_vllm_config, +) +from vllm.platforms import current_platform + +CUDA_DEVICES = ["cuda:0"] + + +@dataclass +class RotaryEmbeddingTestCase: + """Test case configuration for RotaryEmbedding dispatch tests.""" + + name: str + rope_class: type + rope_kwargs: dict + method_name: str # forward_native, forward_cuda, forward + positions_shape: tuple # (num_tokens,) or (3, num_tokens) or (4, num_tokens) + expect_forward_native: bool # Should call ApplyRotaryEmb.forward_native() + expect_forward: bool # Should call ApplyRotaryEmb.forward() + + +def get_test_cases() -> list[RotaryEmbeddingTestCase]: + """Generate test cases for all RotaryEmbedding classes.""" + from vllm.model_executor.layers.rotary_embedding.ernie45_vl_rope import ( + Ernie4_5_VLRotaryEmbedding, + ) + from vllm.model_executor.layers.rotary_embedding.mrope import MRotaryEmbedding + from vllm.model_executor.layers.rotary_embedding.xdrope import XDRotaryEmbedding + + common_kwargs = { + "head_size": 128, + "rotary_dim": 128, + "max_position_embeddings": 4096, + "base": 10000, + "is_neox_style": True, + "dtype": torch.bfloat16, + } + + return [ + # MRotaryEmbedding tests + RotaryEmbeddingTestCase( + name="MRotaryEmbedding.forward_native", + rope_class=MRotaryEmbedding, + rope_kwargs={**common_kwargs, "mrope_section": [16, 24, 24]}, + method_name="forward_native", + positions_shape=(3, 32), # 2D for multimodal + expect_forward_native=True, + expect_forward=False, + ), + RotaryEmbeddingTestCase( + name="MRotaryEmbedding.forward_cuda_1d", + rope_class=MRotaryEmbedding, + rope_kwargs={**common_kwargs, "mrope_section": [16, 24, 24]}, + method_name="forward_cuda", + positions_shape=(32,), # 1D triggers apply_rotary_emb path + expect_forward_native=False, + 
expect_forward=True, + ), + # XDRotaryEmbedding tests + RotaryEmbeddingTestCase( + name="XDRotaryEmbedding.forward", + rope_class=XDRotaryEmbedding, + rope_kwargs={ + **common_kwargs, + "scaling_alpha": 1.0, + "xdrope_section": [16, 16, 16, 16], + }, + method_name="forward", + positions_shape=(4, 32), # 4D for P/W/H/T + expect_forward_native=False, + expect_forward=True, + ), + # Ernie4_5_VLRotaryEmbedding tests + RotaryEmbeddingTestCase( + name="Ernie4_5_VLRotaryEmbedding.forward_native", + rope_class=Ernie4_5_VLRotaryEmbedding, + rope_kwargs={**common_kwargs, "mrope_section": [22, 22, 20]}, + method_name="forward_native", + positions_shape=(3, 32), # 2D for multimodal + expect_forward_native=True, + expect_forward=False, + ), + ] + + +def run_dispatch_test( + test_case: RotaryEmbeddingTestCase, + device: str, +): + """Run a dispatch test for a RotaryEmbedding class.""" + vllm_config = VllmConfig( + compilation_config=CompilationConfig(custom_ops=["all", "+apply_rotary_emb"]) + ) + get_cached_compilation_config.cache_clear() + + with set_current_vllm_config(vllm_config): + rope = test_case.rope_class(**test_case.rope_kwargs).to(device=device) + + apply_rotary_emb = rope.apply_rotary_emb + + # Verify custom op is enabled + if test_case.expect_forward_native: + assert ( + apply_rotary_emb._forward_method != apply_rotary_emb.forward_native + ), "Test setup error: ApplyRotaryEmb custom op should be enabled" + + # Setup call tracking + call_tracker = {"forward_native_called": False, "forward_called": False} + original_forward_native = apply_rotary_emb.forward_native + original_forward = apply_rotary_emb.forward + + def tracked_forward_native(*args, **kwargs): + call_tracker["forward_native_called"] = True + return original_forward_native(*args, **kwargs) + + def tracked_forward(*args, **kwargs): + call_tracker["forward_called"] = True + return original_forward(*args, **kwargs) + + apply_rotary_emb.forward_native = tracked_forward_native + apply_rotary_emb.forward = 
tracked_forward + + try: + num_tokens = test_case.positions_shape[-1] + num_q_heads = 8 + num_kv_heads = 2 + head_size = test_case.rope_kwargs["head_size"] + max_position = test_case.rope_kwargs["max_position_embeddings"] + + positions = torch.randint( + 0, max_position // 4, test_case.positions_shape, device=device + ) + query = torch.randn( + num_tokens, num_q_heads * head_size, dtype=torch.bfloat16, device=device + ) + key = torch.randn( + num_tokens, + num_kv_heads * head_size, + dtype=torch.bfloat16, + device=device, + ) + + # Call the method under test + method = getattr(rope, test_case.method_name) + method(positions, query.clone(), key.clone()) + + # Verify expectations + if test_case.expect_forward_native: + assert call_tracker["forward_native_called"], ( + f"{test_case.name} should call ApplyRotaryEmb.forward_native()" + ) + if not test_case.expect_forward: + assert not call_tracker["forward_called"], ( + f"{test_case.name} should NOT call ApplyRotaryEmb.forward(). " + "Bug: when +apply_rotary_emb is enabled, forward_native() " + "incorrectly dispatches to CUDA/HIP kernels." + ) + if test_case.expect_forward: + assert call_tracker["forward_called"], ( + f"{test_case.name} should call ApplyRotaryEmb.forward()" + ) + finally: + apply_rotary_emb.forward_native = original_forward_native + apply_rotary_emb.forward = original_forward + + +@pytest.mark.skipif( + not current_platform.is_cuda_alike(), reason="Skipping CUDA/ROCm only tests." +) +@pytest.mark.parametrize("test_case", get_test_cases(), ids=lambda tc: tc.name) +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_rotary_embedding_dispatch( + test_case: RotaryEmbeddingTestCase, + device: str, +): + """ + Test that RotaryEmbedding classes dispatch to the correct ApplyRotaryEmb method. 
+ + - forward_native methods should call ApplyRotaryEmb.forward_native() + - forward_cuda/forward methods should call ApplyRotaryEmb.forward() + """ + run_dispatch_test(test_case, device) diff --git a/tests/kernels/moe/modular_kernel_tools/common.py b/tests/kernels/moe/modular_kernel_tools/common.py index d95c22fdf0a5..6078ce44cee9 100644 --- a/tests/kernels/moe/modular_kernel_tools/common.py +++ b/tests/kernels/moe/modular_kernel_tools/common.py @@ -594,7 +594,8 @@ def next_power_of_2(x): ) modular_kernel = mk.FusedMoEModularKernel( - prepare_finalize=prepare_finalize, fused_experts=fused_experts + prepare_finalize=prepare_finalize, + fused_experts=fused_experts, ) return modular_kernel diff --git a/tests/kernels/moe/test_flashinfer.py b/tests/kernels/moe/test_flashinfer.py index d553e2820e5f..bf4ef2d30466 100644 --- a/tests/kernels/moe/test_flashinfer.py +++ b/tests/kernels/moe/test_flashinfer.py @@ -5,6 +5,7 @@ import pytest import torch +import vllm.model_executor.layers.fused_moe.modular_kernel as mk from vllm.config import ParallelConfig, VllmConfig, set_current_vllm_config from vllm.model_executor.layers.fused_moe.config import ( FusedMoEQuantConfig, @@ -107,6 +108,19 @@ def make_moe_tensors_8bit( layer.w2_input_scale = a2_scale layer.w13_weight_scale = w13_weight_scale layer.w2_weight_scale = w2_weight_scale + # Setup dummy config. 
+ layer.moe_parallel_config = mk.FusedMoEParallelConfig( + tp_size=1, + pcp_size=1, + dp_size=1, + ep_size=1, + tp_rank=1, + pcp_rank=1, + dp_rank=1, + ep_rank=1, + use_ep=False, + all2all_backend="naive", + ) register_moe_scaling_factors(layer) diff --git a/tests/kernels/quantization/test_awq.py b/tests/kernels/quantization/test_awq.py index efb62ca3799a..3bf59dea3097 100644 --- a/tests/kernels/quantization/test_awq.py +++ b/tests/kernels/quantization/test_awq.py @@ -41,9 +41,9 @@ def test_awq_gemm_opcheck(monkeypatch: pytest.MonkeyPatch): qweight = torch.randint( -2000000000, 2000000000, (8192, 256), device="cuda", dtype=torch.int32 ) - scales = torch.randint( + scales = torch.empty((64, 2048), device="cuda", dtype=torch.float16) + qzeros = torch.randint( -2000000000, 2000000000, (64, 256), device="cuda", dtype=torch.int32 ) - qzeros = torch.empty((64, 2048), device="cuda", dtype=torch.float16) split_k_iters = 8 - opcheck(torch.ops._C.awq_gemm, (input, qweight, qzeros, scales, split_k_iters)) + opcheck(torch.ops._C.awq_gemm, (input, qweight, scales, qzeros, split_k_iters)) diff --git a/tests/models/fixtures/audioflamingo3/expected_results_batched.json b/tests/models/fixtures/audioflamingo3/expected_results_batched.json new file mode 100644 index 000000000000..4dbb107edccb --- /dev/null +++ b/tests/models/fixtures/audioflamingo3/expected_results_batched.json @@ -0,0 +1 @@ +{"transcriptions": ["There is no clear relationship between the barking and the music, as they seem to be independent of each other.", "(B) To indicate that language cannot express clearly, satirizing the inversion of black and white in the world"], "token_ids": [[3862, 374, 902, 2797, 5025, 1948, 279, 293, 33452, 323, 279, 4627, 11, 438, 807, 2803, 311, 387, 9489, 315, 1817, 1008, 13, 151645], [5349, 8, 2014, 13216, 429, 4128, 4157, 3158, 9355, 11, 7578, 404, 4849, 279, 46488, 315, 3691, 323, 4158, 304, 279, 1879, 151645, 151671]]} \ No newline at end of file diff --git 
a/tests/models/fixtures/audioflamingo3/expected_results_single.json b/tests/models/fixtures/audioflamingo3/expected_results_single.json new file mode 100644 index 000000000000..be9233467a20 --- /dev/null +++ b/tests/models/fixtures/audioflamingo3/expected_results_single.json @@ -0,0 +1 @@ +{"transcriptions": ["The content of the input audio is 'you can ask why over and over and over again forever even if one day we explain every physical interaction and scientific law and hope and dream and regret with a single elegant equation'."], "token_ids": [[785, 2213, 315, 279, 1946, 7699, 374, 364, 9330, 646, 2548, 3170, 916, 323, 916, 323, 916, 1549, 15683, 1496, 421, 825, 1899, 582, 10339, 1449, 6961, 16230, 323, 12344, 2329, 323, 3900, 323, 7904, 323, 22231, 448, 264, 3175, 25777, 23606, 4427, 151645]]} \ No newline at end of file diff --git a/tests/models/language/generation/test_mistral.py b/tests/models/language/generation/test_mistral.py index bc8bb05c284e..0ef4ba257772 100644 --- a/tests/models/language/generation/test_mistral.py +++ b/tests/models/language/generation/test_mistral.py @@ -5,12 +5,12 @@ import pytest -from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import ( +from vllm.sampling_params import SamplingParams +from vllm.tokenizers.mistral import MistralTokenizer +from vllm.tool_parsers.mistral_tool_parser import ( MistralToolCall, MistralToolParser, ) -from vllm.sampling_params import SamplingParams -from vllm.tokenizers.mistral import MistralTokenizer from ...utils import check_logprobs_close diff --git a/tests/models/language/pooling/test_token_classification.py b/tests/models/language/pooling/test_token_classification.py index 2dfc0072126b..64d42432c74b 100644 --- a/tests/models/language/pooling/test_token_classification.py +++ b/tests/models/language/pooling/test_token_classification.py @@ -68,3 +68,34 @@ def test_modernbert_models( hf_output = torch.tensor(hf_output).cpu().float() vllm_output = torch.tensor(vllm_output).cpu().float() 
assert torch.allclose(hf_output, vllm_output, atol=1e-2) + + +@pytest.mark.parametrize("model", ["bd2lcco/Qwen3-0.6B-finetuned"]) +@pytest.mark.parametrize("dtype", ["float"]) +@torch.inference_mode +def test_auto_conversion( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, +) -> None: + with vllm_runner(model, max_model_len=1024, dtype=dtype) as vllm_model: + vllm_outputs = vllm_model.token_classify(example_prompts) + + with hf_runner( + model, dtype=dtype, auto_cls=AutoModelForTokenClassification + ) as hf_model: + tokenizer = hf_model.tokenizer + hf_outputs = [] + for prompt in example_prompts: + inputs = tokenizer([prompt], return_tensors="pt") + inputs = hf_model.wrap_device(inputs) + output = hf_model.model(**inputs) + hf_outputs.append(softmax(output.logits[0])) + + # check logits difference + for hf_output, vllm_output in zip(hf_outputs, vllm_outputs): + hf_output = torch.tensor(hf_output).cpu().float() + vllm_output = torch.tensor(vllm_output).cpu().float() + assert torch.allclose(hf_output, vllm_output, atol=1e-2) diff --git a/tests/models/multimodal/generation/test_audioflamingo3.py b/tests/models/multimodal/generation/test_audioflamingo3.py new file mode 100644 index 000000000000..d14291a62c34 --- /dev/null +++ b/tests/models/multimodal/generation/test_audioflamingo3.py @@ -0,0 +1,142 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +# Copyright 2025 The vLLM team. +# Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights +# reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +import pytest + +from tests.models.registry import HF_EXAMPLE_MODELS +from vllm import LLM, SamplingParams + +MODEL_NAME = "nvidia/audio-flamingo-3-hf" + + +def get_fixture_path(filename): + return os.path.join( + os.path.dirname(__file__), "../../fixtures/audioflamingo3", filename + ) + + +@pytest.fixture(scope="module") +def llm(): + # Check if the model is supported by the current transformers version + model_info = HF_EXAMPLE_MODELS.get_hf_info("AudioFlamingo3ForConditionalGeneration") + model_info.check_transformers_version(on_fail="skip") + + try: + llm = LLM( + model=MODEL_NAME, + trust_remote_code=True, + dtype="bfloat16", + enforce_eager=True, + limit_mm_per_prompt={"audio": 1}, + ) + return llm + except Exception as e: + pytest.skip(f"Failed to load model {MODEL_NAME}: {e}") + + +def test_single_generation(llm): + fixture_path = get_fixture_path("expected_results_single.json") + if not os.path.exists(fixture_path): + pytest.skip(f"Fixture not found: {fixture_path}") + + with open(fixture_path) as f: + expected = json.load(f) + + audio_url = "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Why_do_we_ask_questions_converted.wav" + + messages = [ + { + "role": "user", + "content": [ + {"type": "audio_url", "audio_url": {"url": audio_url}}, + {"type": "text", "text": "Transcribe the input speech."}, + ], + } + ] + + sampling_params = SamplingParams(temperature=0.0, max_tokens=128) + + outputs = llm.chat( + messages=messages, + sampling_params=sampling_params, + ) + generated_text = 
outputs[0].outputs[0].text.strip() + + expected_text = expected["transcriptions"][0] + + assert expected_text in generated_text or generated_text in expected_text + + +def test_batched_generation(llm): + fixture_path = get_fixture_path("expected_results_batched.json") + if not os.path.exists(fixture_path): + pytest.skip(f"Fixture not found: {fixture_path}") + + with open(fixture_path) as f: + expected = json.load(f) + + items = [ + { + "audio_url": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/dogs_barking_in_sync_with_the_music.wav", + "question": "What is surprising about the relationship " + "between the barking and the music?", + "expected_idx": 0, + }, + { + "audio_url": "https://huggingface.co/datasets/nvidia/AudioSkills/resolve/main/assets/Ch6Ae9DT6Ko_00-04-03_00-04-31.wav", + "question": ( + "Why is the philosopher's name mentioned in the lyrics? " + "(A) To express a sense of nostalgia " + "(B) To indicate that language cannot express clearly, " + "satirizing the inversion of black and white in the world " + "(C) To add depth and complexity to the lyrics " + "(D) To showcase the wisdom and influence of the philosopher" + ), + "expected_idx": 1, + }, + ] + + conversations = [] + for item in items: + messages = [ + { + "role": "user", + "content": [ + {"type": "audio_url", "audio_url": {"url": item["audio_url"]}}, + {"type": "text", "text": item["question"]}, + ], + } + ] + conversations.append(messages) + + sampling_params = SamplingParams(temperature=0.0, max_tokens=128) + + outputs = llm.chat( + messages=conversations, + sampling_params=sampling_params, + ) + + for i, output in enumerate(outputs): + generated_text = output.outputs[0].text.strip() + expected_text = expected["transcriptions"][i] + + assert expected_text in generated_text or generated_text in expected_text diff --git a/tests/models/multimodal/generation/test_vit_backend_functionality.py b/tests/models/multimodal/generation/test_vit_backend_functionality.py new file 
mode 100644 index 000000000000..78797ff7c197 --- /dev/null +++ b/tests/models/multimodal/generation/test_vit_backend_functionality.py @@ -0,0 +1,434 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +Consolidated test for ViT attention backend functionality across multiple models. + +This test validates that each multimodal model can successfully generate outputs +using different ViT attention backends. Tests are parametrized by model and backend. +""" + +from dataclasses import asdict +from typing import Any + +import pytest +from transformers import AutoProcessor + +from vllm import LLM, EngineArgs, SamplingParams +from vllm.attention.backends.registry import AttentionBackendEnum +from vllm.multimodal.utils import encode_image_base64 +from vllm.multimodal.video import sample_frames_from_video +from vllm.platforms import current_platform + +from ....utils import create_new_process_for_each_test +from ...utils import dummy_hf_overrides + +# Dots.OCR prompt from official repository +# https://github.com/rednote-hilab/dots.ocr/blob/d72d1d8c5bdd0362eb264f714cdbd1e5daa7cdff/dots_ocr/utils/prompts.py#L3 +# ruff: noqa: E501 +DOTS_OCR_PROMPT = """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox. + +1. Bbox format: [x1, y1, x2, y2] + +2. Layout Categories: The possible categories are ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title']. + +3. Text Extraction & Formatting Rules: + - Picture: For the 'Picture' category, the text field should be omitted. + - Formula: Format its text as LaTeX. + - Table: Format its text as HTML. + - All Others (Text, Title, etc.): Format their text as Markdown. + +4. Constraints: + - The output text must be the original text from the image, with no translation. 
+ - All layout elements must be sorted according to human reading order. + +5. Final Output: The entire output must be a single JSON object. +""" + +VIDEO_PLACEHOLDER = "<|vision_start|><|video_pad|><|vision_end|>" + + +# Model configurations +MODEL_CONFIGS: dict[str, dict[str, Any]] = { + "dots_ocr": { + "model_name": "rednote-hilab/dots.ocr", + "interface": "llm_chat", + "max_model_len": 32768, + "max_num_seqs": 1, + "limit_mm_per_prompt": {"image": 1}, + "sampling_params": { + "temperature": 0.1, + "max_tokens": 16384, + "top_p": 0.9, + "stop_token_ids": None, + }, + "use_specific_image": "stop_sign", + "prompt_builder": "build_dots_ocr_prompt", + "output_validator": lambda x: len(x) > 10 and "stop" in x.lower(), + }, + "ernie45_vl": { + "model_name": "baidu/ERNIE-4.5-VL-28B-A3B-PT", + "interface": "llm_generate", + "max_model_len": 16384, + "max_num_seqs": 2, + "sampling_params": { + "temperature": 0.0, + "max_tokens": 256, + "stop_token_ids": None, + }, + "use_processor": True, + "question": "What is the content of each image?", + }, + "glm4_1v": { + "model_name": "zai-org/GLM-4.1V-9B-Thinking", + "interface": "llm_generate", + "max_model_len": 32768, + "max_num_seqs": 2, + "sampling_params": { + "temperature": 0.0, + "max_tokens": 256, + "stop_token_ids": None, + }, + "use_processor": True, + "question": "What is the content of each image?", + }, + "keye_vl": { + "model_name": "Kwai-Keye/Keye-VL-8B-Preview", + "interface": "llm_generate", + "max_model_len": 8192, + "max_num_seqs": 5, + "sampling_params": { + "temperature": 0.0, + "max_tokens": 256, + "stop_token_ids": None, + }, + "supported_backends": { + AttentionBackendEnum.FLASH_ATTN, + AttentionBackendEnum.ROCM_AITER_FA, + }, + "use_processor": True, + "question": "What is the content of each image?", + }, + "ovis2_5": { + "model_name": "AIDC-AI/Ovis2.5-2B", + "interface": "llm_generate", + "max_model_len": 8192, + "max_num_seqs": 2, + "sampling_params": { + "temperature": 0.0, + "max_tokens": 256, + 
"stop_token_ids": None, + }, + "prompt_builder": "build_ovis_prompt", + "question": "What is the content of each image?", + }, + "qwen2_5_vl": { + "model_name": "Qwen/Qwen2.5-VL-3B-Instruct", + "interface": "vllm_runner", + "media_type": "video", + "max_model_len": 4000, + "max_num_seqs": 1, + "limit_mm_per_prompt": {"video": 1}, + "sampling_params": { + "max_tokens": 128, + }, + "runner_kwargs": { + "runner": "generate", + "dtype": "bfloat16", + }, + "video_params": { + "num_frames": 16, + "pruning_rates": [0.0, 0.75], + }, + }, + "qwen2_5_omni": { + "model_name": "Qwen/Qwen2.5-Omni-3B", + "interface": "llm_generate", + "max_model_len": 32768, + "max_num_seqs": 2, + "limit_mm_per_prompt": {"image": 3, "video": 3, "audio": 3}, + "sampling_params": { + "temperature": 0.6, + "top_p": 0.95, + "top_k": 20, + "max_tokens": 16384, + }, + "use_processor": True, + "question": "What is the content of each image?", + }, + "qwen3_omni": { + "model_name": "Qwen/Qwen3-Omni-30B-A3B-Instruct", + "interface": "llm_generate", + "max_model_len": 32768, + "max_num_seqs": 2, + "limit_mm_per_prompt": {"image": 3, "video": 3, "audio": 3}, + "sampling_params": { + "temperature": 0.6, + "top_p": 0.95, + "top_k": 20, + "max_tokens": 16384, + }, + "use_processor": True, + "question": "What is the content of each image?", + }, +} + + +# Prompt builder functions +def build_dots_ocr_prompt(images, config): + """Build Dots.OCR specific prompt with OCR instructions.""" + # Use only stop_sign image for Dots.OCR + image = images[0] # Already filtered to stop_sign + + image_url = f"data:image/jpeg;base64,{encode_image_base64(image)}" + + placeholders = [{"type": "image_url", "image_url": {"url": image_url}}] + messages = [ + { + "role": "user", + "content": [ + *placeholders, + { + "type": "text", + "text": f"<|img|><|imgpad|><|endofimg|>{DOTS_OCR_PROMPT}", + }, + ], + }, + ] + + return messages + + +def build_processor_prompt(images, config): + """Build prompt using 
AutoProcessor.apply_chat_template().""" + processor = AutoProcessor.from_pretrained( + config["model_name"], trust_remote_code=True + ) + + image_urls = [ + f"data:image/jpeg;base64,{encode_image_base64(img)}" for img in images + ] + placeholders = [{"type": "image", "image": url} for url in image_urls] + messages = [ + { + "role": "user", + "content": [ + *placeholders, + {"type": "text", "text": config["question"]}, + ], + }, + ] + + return processor.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True + ) + + +def build_ovis_prompt(images, config): + """Build Ovis2.5 specific prompt with custom format.""" + image_urls = [ + f"data:image/jpeg;base64,{encode_image_base64(img)}" for img in images + ] + + placeholders = "\n".join( + f"Image-{i}: \n" for i, _ in enumerate(image_urls, start=1) + ) + + return ( + f"<|im_start|>user\n\n{placeholders}\n{config['question']}<|im_end|>\n" + "<|im_start|>assistant\n" + ) + + +def build_qwen2_5_video_prompt(): + """Build Qwen2.5-VL video prompt with EVS placeholder.""" + return ( + f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + f"<|im_start|>user\n{VIDEO_PLACEHOLDER}" + "Describe this video with a short sentence (no more than 20 words)" + "<|im_end|><|im_start|>assistant\n" + ) + + +# Handler functions +def run_llm_generate_test(config, mm_encoder_attn_backend, image_assets): + """Standard LLM.generate() interface handler.""" + images = [asset.pil_image for asset in image_assets] + + # Build prompt + if config.get("use_processor"): + prompt = build_processor_prompt(images, config) + else: + prompt_builder_name = config.get("prompt_builder", "build_ovis_prompt") + prompt_builder = globals()[prompt_builder_name] + prompt = prompt_builder(images, config) + + # Determine limit_mm_per_prompt + limit_mm_per_prompt = config.get("limit_mm_per_prompt", {"image": len(images)}) + + # Create engine + engine_args = EngineArgs( + model=config["model_name"], + trust_remote_code=True, + 
max_model_len=config["max_model_len"], + max_num_seqs=config["max_num_seqs"], + limit_mm_per_prompt=limit_mm_per_prompt, + mm_encoder_attn_backend=mm_encoder_attn_backend, + hf_overrides=dummy_hf_overrides, + load_format="dummy", + ) + + engine_dict = asdict(engine_args) | {"seed": 42} + llm = LLM(**engine_dict) + + # Generate + sampling_params = SamplingParams(**config["sampling_params"]) + outputs = llm.generate( + { + "prompt": prompt, + "multi_modal_data": {"image": images}, + }, + sampling_params=sampling_params, + ) + + # Validate + for o in outputs: + generated_text = o.outputs[0].text + validator = config.get("output_validator", lambda x: len(x) > 10) + assert validator(generated_text), ( + f"Validation failed for {config['model_name']}: {generated_text}" + ) + + +def run_llm_chat_test(config, mm_encoder_attn_backend, image_assets): + """LLM.chat() interface handler for Dots.OCR.""" + # Filter to stop_sign image only + stop_sign_image = [ + asset.pil_image for asset in image_assets if asset.name == "stop_sign" + ][0] + + # Build messages + messages = build_dots_ocr_prompt([stop_sign_image], config) + + # Create engine + engine_args = EngineArgs( + model=config["model_name"], + trust_remote_code=True, + max_model_len=config["max_model_len"], + max_num_seqs=config["max_num_seqs"], + limit_mm_per_prompt=config["limit_mm_per_prompt"], + mm_encoder_attn_backend=mm_encoder_attn_backend, + hf_overrides=dummy_hf_overrides, + load_format="dummy", + ) + + engine_dict = asdict(engine_args) | {"seed": 42} + llm = LLM(**engine_dict) + + # Generate using chat + sampling_params = SamplingParams(**config["sampling_params"]) + outputs = llm.chat(messages=messages, sampling_params=sampling_params) + + # Validate + for o in outputs: + generated_text = o.outputs[0].text + validator = config.get("output_validator", lambda x: len(x) > 10) + assert validator(generated_text), ( + f"Validation failed for {config['model_name']}: {generated_text}" + ) + + +def run_video_test(config, 
mm_encoder_attn_backend, video_assets, vllm_runner): + """Video test with EVS (Efficient Video Sampling) handler.""" + for pruning_rate in config["video_params"]["pruning_rates"]: + num_frames = config["video_params"]["num_frames"] + + # Sample frames from video + sampled_vids = [ + sample_frames_from_video(asset.np_ndarrays, num_frames) + for asset in video_assets + ] + + # Build prompt and prepare video + prompt = build_qwen2_5_video_prompt() + prompts = [prompt] + videos = [sampled_vids[0]] + + # Run with vllm_runner context manager + with vllm_runner( + config["model_name"], + max_model_len=config["max_model_len"], + max_num_seqs=config["max_num_seqs"], + limit_mm_per_prompt=config["limit_mm_per_prompt"], + tensor_parallel_size=1, + video_pruning_rate=pruning_rate, + mm_encoder_attn_backend=mm_encoder_attn_backend, + hf_overrides=dummy_hf_overrides, + load_format="dummy", + **config["runner_kwargs"], + ) as vllm_model: + outputs = vllm_model.generate_greedy( + prompts, + config["sampling_params"]["max_tokens"], + videos=videos, + ) + + # Validate output + assert len(outputs) == 1, f"Expected 1 output, got {len(outputs)}" + output_ids, output_text = outputs[0] + assert len(output_ids) > 0, "Generated no output IDs" + assert len(output_text) > 0, "Generated empty text" + assert isinstance(output_text, str), ( + f"Output is not string: {type(output_text)}" + ) + + +# Main test function +@pytest.mark.parametrize("model_key", list(MODEL_CONFIGS.keys())) +@pytest.mark.parametrize( + "mm_encoder_attn_backend", + [None] + current_platform.get_supported_vit_attn_backends(), +) +@create_new_process_for_each_test() +def test_vit_backend_functionality( + model_key: str, + mm_encoder_attn_backend: AttentionBackendEnum | None, + image_assets, + video_assets, + vllm_runner, + request, +): + """Test ViT attention backend functionality for multimodal models. + + This test validates that each model can successfully generate outputs + using different ViT attention backends. 
The test: + 1. Filters unsupported backends per model + 2. Applies appropriate GPU marks + 3. Routes to the correct test handler based on interface + 4. Validates output meets minimum requirements + """ + config = MODEL_CONFIGS[model_key] + + # Step 1: Backend filtering + if ( + "supported_backends" in config + and mm_encoder_attn_backend is not None + and mm_encoder_attn_backend not in config["supported_backends"] + ): + pytest.skip( + f"{model_key} does not support {mm_encoder_attn_backend} backend now." + ) + + # Step 2: Apply GPU marks dynamically + if "gpu_marks" in config: + for mark in config["gpu_marks"]: + request.applymarker(mark) + + # Step 3: Route to appropriate handler + if config.get("media_type") == "video": + run_video_test(config, mm_encoder_attn_backend, video_assets, vllm_runner) + elif config["interface"] == "llm_chat": + run_llm_chat_test(config, mm_encoder_attn_backend, image_assets) + elif config["interface"] == "llm_generate": + run_llm_generate_test(config, mm_encoder_attn_backend, image_assets) + else: + raise ValueError(f"Unknown interface: {config['interface']}") diff --git a/tests/models/multimodal/processing/test_audioflamingo3.py b/tests/models/multimodal/processing/test_audioflamingo3.py new file mode 100644 index 000000000000..d7c00516ffea --- /dev/null +++ b/tests/models/multimodal/processing/test_audioflamingo3.py @@ -0,0 +1,125 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +# Copyright 2025 The vLLM team. +# Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights +# reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest.mock import MagicMock + +import numpy as np +import pytest +import torch +from transformers import PretrainedConfig + +from tests.models.registry import HF_EXAMPLE_MODELS + + +class MockAudioFlamingo3Config(PretrainedConfig): + model_type = "audioflamingo3" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.audio_config = PretrainedConfig() + self.text_config = PretrainedConfig() + + +class MockAudioFlamingo3Processor: + def __init__(self): + self.audio_token = "" + self.audio_token_id = 12345 + self.feature_extractor = MockFeatureExtractor() + + def __call__(self, text=None, audios=None, **kwargs): + return {"input_ids": [1, 2, 3], "input_features": [np.zeros((3000, 80))]} + + +class MockFeatureExtractor: + def __init__(self): + self.sampling_rate = 16000 + self.chunk_length = 30 + + +@pytest.fixture +def mock_ctx(): + config = MockAudioFlamingo3Config() + + ctx = MagicMock() + ctx.get_hf_config.return_value = config + ctx.get_hf_processor.return_value = MockAudioFlamingo3Processor() + ctx.model_config.hf_config = config + return ctx + + +@pytest.fixture(autouse=True) +def check_transformers_version(): + # Check if the model is supported by the current transformers version + model_info = HF_EXAMPLE_MODELS.get_hf_info("AudioFlamingo3ForConditionalGeneration") + model_info.check_transformers_version(on_fail="skip") + + +def test_audio_chunk_counting(mock_ctx): + from vllm.model_executor.models.audioflamingo3 import ( + AudioFlamingo3DummyInputsBuilder, + AudioFlamingo3MultiModalProcessor, + AudioFlamingo3ProcessingInfo, + 
) + + info = AudioFlamingo3ProcessingInfo(mock_ctx) + processor = AudioFlamingo3MultiModalProcessor( + info, AudioFlamingo3DummyInputsBuilder(info) + ) + + sr = 16000 + audio_1 = np.zeros(30 * sr) + audio_2 = np.zeros(45 * sr) + + mm_data = {"audio": [audio_1, audio_2]} + prompt = "<|user|>Listen.<|end|>" + + from vllm.multimodal.processing import BaseMultiModalProcessor + + def mock_base_call(self, prompt, mm_data, mm_kwargs, tok_kwargs): + return {"input_ids": [1, 2, 3], "input_features": torch.randn(1, 80, 3000)} + + with pytest.MonkeyPatch.context() as mp: + mp.setattr(BaseMultiModalProcessor, "_call_hf_processor", mock_base_call) + + processed = processor._call_hf_processor(prompt, mm_data, {}, {}) + + chunk_counts = processed["chunk_counts"] + + assert chunk_counts[0].item() == 1 + assert chunk_counts[1].item() == 2 + assert len(chunk_counts) == 2 + + +def test_dummy_data_generation(mock_ctx): + from vllm.model_executor.models.audioflamingo3 import ( + AudioFlamingo3DummyInputsBuilder, + AudioFlamingo3ProcessingInfo, + ) + + info = AudioFlamingo3ProcessingInfo(mock_ctx) + builder = AudioFlamingo3DummyInputsBuilder(info) + + mm_counts = {"audio": 2} + dummy_data = builder.get_dummy_mm_data(100, mm_counts, None) + + assert "audio" in dummy_data + assert len(dummy_data["audio"]) == 2 + + expected_len = 600 * 16000 + assert len(dummy_data["audio"][0]) == expected_len diff --git a/tests/models/registry.py b/tests/models/registry.py index ca50785b46a1..c5d72b5d581b 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -573,12 +573,17 @@ def check_available_online( "Qwen3ForSequenceClassification": _HfExamplesInfo( "tomaarsen/Qwen3-Reranker-0.6B-seq-cls" ), + "Qwen3ForTokenClassification": _HfExamplesInfo("bd2lcco/Qwen3-0.6B-finetuned"), } _MULTIMODAL_EXAMPLE_MODELS = { # [Decoder-only] "AriaForConditionalGeneration": _HfExamplesInfo("rhymes-ai/Aria"), + "AudioFlamingo3ForConditionalGeneration": _HfExamplesInfo( + "nvidia/audio-flamingo-3-hf", 
min_transformers_version="5.0.0.dev" + ), "AyaVisionForConditionalGeneration": _HfExamplesInfo("CohereLabs/aya-vision-8b"), + "BagelForConditionalGeneration": _HfExamplesInfo("ByteDance-Seed/BAGEL-7B-MoT"), "BeeForConditionalGeneration": _HfExamplesInfo( "Open-Bee/Bee-8B-RL", trust_remote_code=True, diff --git a/tests/multimodal/test_sparse_tensor_validation_unit.py b/tests/multimodal/test_sparse_tensor_validation_unit.py new file mode 100644 index 000000000000..2eec8ea8283a --- /dev/null +++ b/tests/multimodal/test_sparse_tensor_validation_unit.py @@ -0,0 +1,134 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +Unit tests for sparse tensor validation. + +Simple, fast unit tests that can run without server fixtures. +Run with: pytest tests/multimodal/test_sparse_tensor_validation_unit.py -v +""" + +import io + +import pytest +import torch + + +class TestSparseTensorValidationContextManager: + """Test that torch.sparse.check_sparse_tensor_invariants() works as expected.""" + + def test_valid_sparse_tensor_passes(self): + """Valid sparse tensors should pass validation.""" + indices = torch.tensor([[0, 1], [0, 1]]) + values = torch.tensor([1.0, 2.0]) + shape = (2, 2) + + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.sparse_coo_tensor(indices, values, shape) + dense = tensor.to_dense() + + assert dense.shape == shape + + def test_out_of_bounds_indices_rejected(self): + """Sparse tensors with out-of-bounds indices should be rejected.""" + indices = torch.tensor([[5], [5]]) # Out of bounds for 2x2 + values = torch.tensor([1.0]) + shape = (2, 2) + + with pytest.raises(RuntimeError) as exc_info: # noqa: SIM117 + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.sparse_coo_tensor(indices, values, shape) + tensor.to_dense() + + assert ( + "index" in str(exc_info.value).lower() + or "bound" in str(exc_info.value).lower() + ) + + def 
test_negative_indices_rejected(self): + """Sparse tensors with negative indices should be rejected.""" + indices = torch.tensor([[-1], [0]]) + values = torch.tensor([1.0]) + shape = (2, 2) + + with pytest.raises(RuntimeError): # noqa: SIM117 + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.sparse_coo_tensor(indices, values, shape) + tensor.to_dense() + + def test_without_context_manager_allows_invalid(self): + """ + WITHOUT validation, invalid tensors may not immediately error. + + This demonstrates the vulnerability: PyTorch 2.8.0+ doesn't validate + by default, which can lead to memory corruption. + """ + indices = torch.tensor([[100], [100]]) # Way out of bounds + values = torch.tensor([1.0]) + shape = (2, 2) + + # Without validation context, this might create an invalid tensor + # (actual behavior depends on PyTorch version) + tensor = torch.sparse_coo_tensor(indices, values, shape) + + # The tensor object is created, but it's invalid + assert tensor.is_sparse + + +class TestTorchLoadWithValidation: + """Test torch.load() with sparse tensor validation.""" + + def test_load_valid_sparse_tensor_with_validation(self): + """Valid sparse tensors should load successfully with validation.""" + # Create and save a valid sparse tensor + indices = torch.tensor([[0, 1], [0, 1]]) + values = torch.tensor([1.0, 2.0]) + tensor = torch.sparse_coo_tensor(indices, values, (2, 2)) + + buffer = io.BytesIO() + torch.save(tensor, buffer) + buffer.seek(0) + + # Load with validation + with torch.sparse.check_sparse_tensor_invariants(): + loaded = torch.load(buffer, weights_only=True) + dense = loaded.to_dense() + + assert dense.shape == (2, 2) + + def test_load_invalid_sparse_tensor_rejected(self): + """Invalid sparse tensors should be caught when loaded with validation.""" + # Create an invalid sparse tensor (out of bounds) + indices = torch.tensor([[10], [10]]) + values = torch.tensor([1.0]) + tensor = torch.sparse_coo_tensor(indices, values, (2, 2)) + + buffer 
= io.BytesIO() + torch.save(tensor, buffer) + buffer.seek(0) + + # Load with validation - should fail on to_dense() + with pytest.raises(RuntimeError): # noqa: SIM117 + with torch.sparse.check_sparse_tensor_invariants(): + loaded = torch.load(buffer, weights_only=True) + loaded.to_dense() + + def test_load_dense_tensor_unaffected(self): + """Dense tensors should work normally with the validation context.""" + # Create and save a dense tensor + tensor = torch.randn(10, 20) + + buffer = io.BytesIO() + torch.save(tensor, buffer) + buffer.seek(0) + + # Load with validation (should have no effect on dense tensors) + with torch.sparse.check_sparse_tensor_invariants(): + loaded = torch.load(buffer, weights_only=True) + + assert loaded.shape == (10, 20) + assert not loaded.is_sparse + + +if __name__ == "__main__": + # Allow running directly for quick testing + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/tests/standalone_tests/lazy_imports.py b/tests/standalone_tests/lazy_imports.py index ddcdd2a51ab9..fff5c54f276d 100644 --- a/tests/standalone_tests/lazy_imports.py +++ b/tests/standalone_tests/lazy_imports.py @@ -5,9 +5,6 @@ # The utility function cannot be placed in `vllm.utils` # this needs to be a standalone script import sys -from contextlib import nullcontext - -from vllm_test_utils import BlameResult, blame # List of modules that should not be imported too early. # Lazy import `torch._inductor.async_compile` to avoid creating @@ -16,26 +13,10 @@ # `cv2` can easily mess up the environment. module_names = ["torch._inductor.async_compile", "cv2"] +# set all modules in `module_names` to be None. +# if we import any modules during `import vllm`, there would be a +# hard error and nice stacktrace on the first import. 
+for module_name in module_names: + sys.modules[module_name] = None # type: ignore[assignment] -def any_module_imported(): - return any(module_name in sys.modules for module_name in module_names) - - -# In CI, we only check finally if the module is imported. -# If it is indeed imported, we can rerun the test with `use_blame=True`, -# which will trace every function call to find the first import location, -# and help find the root cause. -# We don't run it in CI by default because it is slow. -use_blame = False -context = blame(any_module_imported) if use_blame else nullcontext() -with context as result: - import vllm # noqa - -if use_blame: - assert isinstance(result, BlameResult) - print(f"the first import location is:\n{result.trace_stack}") - -assert not any_module_imported(), ( - f"Some the modules in {module_names} are imported. To see the first" - f" import location, run the test with `use_blame=True`." -) +import vllm # noqa diff --git a/tests/test_inputs.py b/tests/test_inputs.py index 8351af2528e4..073be24a4a07 100644 --- a/tests/test_inputs.py +++ b/tests/test_inputs.py @@ -34,6 +34,13 @@ ] +# Test that a nested mixed-type list of lists raises a TypeError. 
+@pytest.mark.parametrize("invalid_input", [[[1, 2], ["foo", "bar"]]]) +def test_invalid_input_raise_type_error(invalid_input): + with pytest.raises(TypeError): + parse_raw_prompts(invalid_input) + + def test_parse_raw_single_batch_empty(): with pytest.raises(ValueError, match="at least one prompt"): parse_raw_prompts([]) diff --git a/tests/tool_parsers/__init__.py b/tests/tool_parsers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/tool_use/test_deepseekv31_tool_parser.py b/tests/tool_parsers/test_deepseekv31_tool_parser.py similarity index 96% rename from tests/tool_use/test_deepseekv31_tool_parser.py rename to tests/tool_parsers/test_deepseekv31_tool_parser.py index 8beb7739b608..69a4cc8b989c 100644 --- a/tests/tool_use/test_deepseekv31_tool_parser.py +++ b/tests/tool_parsers/test_deepseekv31_tool_parser.py @@ -3,10 +3,10 @@ import pytest -from vllm.entrypoints.openai.tool_parsers.deepseekv31_tool_parser import ( +from vllm.tokenizers import get_tokenizer +from vllm.tool_parsers.deepseekv31_tool_parser import ( DeepSeekV31ToolParser, ) -from vllm.tokenizers import get_tokenizer MODEL = "deepseek-ai/DeepSeek-V3.1" diff --git a/tests/tool_use/test_ernie45_moe_tool_parser.py b/tests/tool_parsers/test_ernie45_moe_tool_parser.py similarity index 99% rename from tests/tool_use/test_ernie45_moe_tool_parser.py rename to tests/tool_parsers/test_ernie45_moe_tool_parser.py index 92f86de23267..533bd1ec3dff 100644 --- a/tests/tool_use/test_ernie45_moe_tool_parser.py +++ b/tests/tool_parsers/test_ernie45_moe_tool_parser.py @@ -13,9 +13,9 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.ernie45_tool_parser import Ernie45ToolParser from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally +from vllm.tool_parsers.ernie45_tool_parser import Ernie45ToolParser # Use a common model that is likely to be available MODEL = "baidu/ERNIE-4.5-21B-A3B-Thinking" 
diff --git a/tests/tool_use/test_glm4_moe_tool_parser.py b/tests/tool_parsers/test_glm4_moe_tool_parser.py similarity index 99% rename from tests/tool_use/test_glm4_moe_tool_parser.py rename to tests/tool_parsers/test_glm4_moe_tool_parser.py index 753b3f1c23ad..52f5a9198e9b 100644 --- a/tests/tool_use/test_glm4_moe_tool_parser.py +++ b/tests/tool_parsers/test_glm4_moe_tool_parser.py @@ -7,12 +7,10 @@ import pytest from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall -from vllm.entrypoints.openai.tool_parsers.glm4_moe_tool_parser import ( +from vllm.tokenizers import get_tokenizer +from vllm.tool_parsers.glm4_moe_tool_parser import ( Glm4MoeModelToolParser, ) -from vllm.tokenizers import get_tokenizer - -pytestmark = pytest.mark.cpu_test pytest.skip("skip glm4_moe parser test", allow_module_level=True) # Use a common model that is likely to be available diff --git a/tests/tool_use/test_jamba_tool_parser.py b/tests/tool_parsers/test_jamba_tool_parser.py similarity index 98% rename from tests/tool_use/test_jamba_tool_parser.py rename to tests/tool_parsers/test_jamba_tool_parser.py index 9036bd32dd70..ccad16ae2f6b 100644 --- a/tests/tool_use/test_jamba_tool_parser.py +++ b/tests/tool_parsers/test_jamba_tool_parser.py @@ -9,11 +9,9 @@ from partial_json_parser.core.options import Allow from vllm.entrypoints.openai.protocol import DeltaMessage, FunctionCall, ToolCall -from vllm.entrypoints.openai.tool_parsers.jamba_tool_parser import JambaToolParser from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally - -pytestmark = pytest.mark.cpu_test +from vllm.tool_parsers.jamba_tool_parser import JambaToolParser MODEL = "ai21labs/Jamba-tiny-dev" diff --git a/tests/tool_use/test_kimi_k2_tool_parser.py b/tests/tool_parsers/test_kimi_k2_tool_parser.py similarity index 99% rename from tests/tool_use/test_kimi_k2_tool_parser.py rename to tests/tool_parsers/test_kimi_k2_tool_parser.py index 
1558a9c3e01f..d02f53c34b45 100644 --- a/tests/tool_use/test_kimi_k2_tool_parser.py +++ b/tests/tool_parsers/test_kimi_k2_tool_parser.py @@ -7,10 +7,8 @@ import pytest from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall -from vllm.entrypoints.openai.tool_parsers.kimi_k2_tool_parser import KimiK2ToolParser from vllm.tokenizers import get_tokenizer - -pytestmark = pytest.mark.cpu_test +from vllm.tool_parsers.kimi_k2_tool_parser import KimiK2ToolParser # Use a common model that is likely to be available MODEL = "moonshotai/Kimi-K2-Instruct" diff --git a/tests/tool_use/test_minimax_tool_parser.py b/tests/tool_parsers/test_minimax_tool_parser.py similarity index 99% rename from tests/tool_use/test_minimax_tool_parser.py rename to tests/tool_parsers/test_minimax_tool_parser.py index dda63f984a83..28cfc4ea7a17 100644 --- a/tests/tool_use/test_minimax_tool_parser.py +++ b/tests/tool_parsers/test_minimax_tool_parser.py @@ -12,10 +12,8 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.minimax_tool_parser import MinimaxToolParser from vllm.tokenizers import get_tokenizer - -pytestmark = pytest.mark.cpu_test +from vllm.tool_parsers.minimax_tool_parser import MinimaxToolParser # Use a common model that is likely to be available MODEL = "MiniMaxAi/MiniMax-M1-40k" diff --git a/tests/tool_use/test_mistral_tool_parser.py b/tests/tool_parsers/test_mistral_tool_parser.py similarity index 99% rename from tests/tool_use/test_mistral_tool_parser.py rename to tests/tool_parsers/test_mistral_tool_parser.py index d498863317e8..9400a67267f4 100644 --- a/tests/tool_use/test_mistral_tool_parser.py +++ b/tests/tool_parsers/test_mistral_tool_parser.py @@ -12,10 +12,10 @@ from partial_json_parser.core.options import Allow from vllm.entrypoints.openai.protocol import DeltaMessage, DeltaToolCall -from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import MistralToolParser from vllm.tokenizers import TokenizerLike, get_tokenizer from 
vllm.tokenizers.detokenizer_utils import detokenize_incrementally from vllm.tokenizers.mistral import MistralTokenizer +from vllm.tool_parsers.mistral_tool_parser import MistralToolParser @pytest.fixture(scope="module") diff --git a/tests/tool_use/test_openai_tool_parser.py b/tests/tool_parsers/test_openai_tool_parser.py similarity index 99% rename from tests/tool_use/test_openai_tool_parser.py rename to tests/tool_parsers/test_openai_tool_parser.py index 6537f281c0e1..44b8c92745e9 100644 --- a/tests/tool_use/test_openai_tool_parser.py +++ b/tests/tool_parsers/test_openai_tool_parser.py @@ -15,8 +15,8 @@ ) from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall -from vllm.entrypoints.openai.tool_parsers.openai_tool_parser import OpenAIToolParser from vllm.tokenizers import get_tokenizer +from vllm.tool_parsers.openai_tool_parser import OpenAIToolParser MODEL = "gpt2" diff --git a/tests/tool_use/test_qwen3coder_tool_parser.py b/tests/tool_parsers/test_qwen3coder_tool_parser.py similarity index 99% rename from tests/tool_use/test_qwen3coder_tool_parser.py rename to tests/tool_parsers/test_qwen3coder_tool_parser.py index 5a56768805fd..3a0a612d7fbf 100644 --- a/tests/tool_use/test_qwen3coder_tool_parser.py +++ b/tests/tool_parsers/test_qwen3coder_tool_parser.py @@ -13,14 +13,12 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.qwen3coder_tool_parser import ( - Qwen3CoderToolParser, -) -from vllm.entrypoints.openai.tool_parsers.qwen3xml_tool_parser import Qwen3XMLToolParser from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally - -pytestmark = pytest.mark.cpu_test +from vllm.tool_parsers.qwen3coder_tool_parser import ( + Qwen3CoderToolParser, +) +from vllm.tool_parsers.qwen3xml_tool_parser import Qwen3XMLToolParser MODEL = "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8" diff --git a/tests/tool_use/test_seed_oss_tool_parser.py 
b/tests/tool_parsers/test_seed_oss_tool_parser.py similarity index 99% rename from tests/tool_use/test_seed_oss_tool_parser.py rename to tests/tool_parsers/test_seed_oss_tool_parser.py index 8795c35a1347..c7f595830f34 100644 --- a/tests/tool_use/test_seed_oss_tool_parser.py +++ b/tests/tool_parsers/test_seed_oss_tool_parser.py @@ -14,11 +14,9 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.seed_oss_tool_parser import SeedOssToolParser from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally - -pytestmark = pytest.mark.cpu_test +from vllm.tool_parsers.seed_oss_tool_parser import SeedOssToolParser # Use a common model that is likely to be available MODEL = "ByteDance-Seed/Seed-OSS-36B-Instruct" diff --git a/tests/tool_use/test_xlam_tool_parser.py b/tests/tool_parsers/test_xlam_tool_parser.py similarity index 99% rename from tests/tool_use/test_xlam_tool_parser.py rename to tests/tool_parsers/test_xlam_tool_parser.py index 3098fda036a8..380792a9926a 100644 --- a/tests/tool_use/test_xlam_tool_parser.py +++ b/tests/tool_parsers/test_xlam_tool_parser.py @@ -12,11 +12,9 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.xlam_tool_parser import xLAMToolParser from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally - -pytestmark = pytest.mark.cpu_test +from vllm.tool_parsers.xlam_tool_parser import xLAMToolParser # Use a common model that is likely to be available MODEL = "Salesforce/Llama-xLAM-2-8B-fc-r" diff --git a/tests/tool_use/test_tool_choice_required.py b/tests/tool_use/test_tool_choice_required.py index d5572cfbebe3..35ed8d215f73 100644 --- a/tests/tool_use/test_tool_choice_required.py +++ b/tests/tool_use/test_tool_choice_required.py @@ -12,7 +12,7 @@ ChatCompletionToolsParam, ) from vllm.entrypoints.openai.serving_chat import OpenAIServingChat -from 
vllm.entrypoints.openai.tool_parsers.utils import get_json_schema_from_tools +from vllm.tool_parsers.utils import get_json_schema_from_tools pytestmark = pytest.mark.cpu_test diff --git a/tests/v1/attention/test_attention_splitting.py b/tests/v1/attention/test_attention_splitting.py index f08e2f480e30..734819fcdca8 100644 --- a/tests/v1/attention/test_attention_splitting.py +++ b/tests/v1/attention/test_attention_splitting.py @@ -323,6 +323,7 @@ def test_prefill_split_across_ubatches( num_tokens, batch_spec.batch_size, split_point=split_point, + num_ubatches=2, ) assert ubatch_slices is not None and len(ubatch_slices) == 2 diff --git a/tests/v1/entrypoints/conftest.py b/tests/v1/entrypoints/conftest.py index 40b9d1fe850c..bc9674ee86cf 100644 --- a/tests/v1/entrypoints/conftest.py +++ b/tests/v1/entrypoints/conftest.py @@ -76,6 +76,8 @@ def sample_json_schema(): }, "required": ["name", "age", "skills", "grade", "email", "work_history"], "additionalProperties": False, + "minProperties": 1, + "maxProperties": 10, } @@ -96,6 +98,9 @@ def unsupported_json_schema(): }, "required": ["score", "tags"], "additionalProperties": False, + "patternProperties": { + "^score$": {"type": "integer"}, + }, } diff --git a/tests/v1/kv_connector/unit/test_nixl_connector.py b/tests/v1/kv_connector/unit/test_nixl_connector.py index 53da09cfbc21..66804fa671c7 100644 --- a/tests/v1/kv_connector/unit/test_nixl_connector.py +++ b/tests/v1/kv_connector/unit/test_nixl_connector.py @@ -461,7 +461,7 @@ def test_multi_xfer_one_engine( metadata = NixlConnectorMetadata() if num_xfers > 0: num_xfers -= 1 - metadata.add_new_req( + metadata.add_new_req_to_recv( request_id=request_id, local_block_ids=[num_xfers + 1, num_xfers + 2, num_xfers + 3], kv_transfer_params={ @@ -532,7 +532,7 @@ def test_async_load_kv( vllm_config, connector.engine_id ) metadata = NixlConnectorMetadata() - metadata.add_new_req( + metadata.add_new_req_to_recv( request_id="id", local_block_ids=[1, 2, 3], kv_transfer_params={ @@ 
-588,7 +588,7 @@ def test_concurrent_load_kv( metadata = NixlConnectorMetadata() total_reqs = 5 for i in range(total_reqs): - metadata.add_new_req( + metadata.add_new_req_to_recv( request_id=f"id_{i}", local_block_ids=[1, 2, 3], kv_transfer_params={ @@ -752,7 +752,7 @@ def test_kv_connector_stats(dist_init): # Create transfer metadata request_id = "test_req_for_stats" metadata = NixlConnectorMetadata() - metadata.add_new_req( + metadata.add_new_req_to_recv( request_id=request_id, local_block_ids=[1, 2, 3], kv_transfer_params={ @@ -1515,7 +1515,7 @@ def test_handshake_failure_returns_finished(dist_init): request_id = "test_handshake_fail" metadata = NixlConnectorMetadata() - metadata.add_new_req( + metadata.add_new_req_to_recv( request_id=request_id, local_block_ids=[1, 2, 3], kv_transfer_params={ @@ -1565,7 +1565,7 @@ def test_transfer_setup_failure_returns_finished(dist_init): request_id = "test_transfer_fail" metadata = NixlConnectorMetadata() - metadata.add_new_req( + metadata.add_new_req_to_recv( request_id=request_id, local_block_ids=[7, 8, 9], kv_transfer_params={ diff --git a/tests/v1/kv_offload/test_cpu_gpu.py b/tests/v1/kv_offload/test_cpu_gpu.py index a248104e16d2..3516c0013879 100644 --- a/tests/v1/kv_offload/test_cpu_gpu.py +++ b/tests/v1/kv_offload/test_cpu_gpu.py @@ -9,7 +9,7 @@ from vllm.platforms import current_platform from vllm.v1.attention.backends.flash_attn import FlashAttentionBackend from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec -from vllm.v1.kv_offload.worker.cpu_gpu import CpuGpuOffloadingHandler +from vllm.v1.kv_offload.worker.cpu_gpu import CpuGpuOffloadingHandlers BACKENDS_TO_TEST = [FlashAttentionBackend] @@ -82,7 +82,7 @@ def test_transfer( # create handler cpu_block_size = gpu_blocks_per_cpu_block * gpu_block_size - handler = CpuGpuOffloadingHandler( + handlers = CpuGpuOffloadingHandlers( attn_backends=attn_backends, gpu_block_size=gpu_block_size, cpu_block_size=cpu_block_size, @@ -112,8 +112,7 @@ def 
test_transfer( # set transfer direction if gpu_to_cpu: - src_kv_caches = handler.gpu_tensors - dst_kv_caches = handler.cpu_tensors + handler = handlers.gpu_to_cpu_handler src_spec_class = GPULoadStoreSpec dst_spec_class = CPULoadStoreSpec src_blocks = gpu_blocks @@ -122,8 +121,7 @@ def test_transfer( dst_blocks_in_gpu_block_size = cpu_blocks_in_gpu_block_size dst_size_in_gpu_blocks = num_cpu_blocks * gpu_blocks_per_cpu_block else: - src_kv_caches = handler.cpu_tensors - dst_kv_caches = handler.gpu_tensors + handler = handlers.cpu_to_gpu_handler src_spec_class = CPULoadStoreSpec dst_spec_class = GPULoadStoreSpec src_blocks = cpu_blocks @@ -144,12 +142,12 @@ def test_transfer( dst_spec = dst_spec_class(dst_blocks) # clone src and dst tensors before transfer - orig_src_caches = [x.clone() for x in src_kv_caches] - orig_dst_caches = [x.clone() for x in dst_kv_caches] + orig_src_caches = [x.clone() for x in handler.src_tensors] + orig_dst_caches = [x.clone() for x in handler.dst_tensors] # call transfer function assert handler.transfer_async(1, (src_spec, dst_spec)) - assert set(handler.transfer_events.keys()) == {1} + assert set({x[0] for x in handler._transfers}) == {1} # wait for transfer to complete end_time = time.time() + 10 @@ -161,15 +159,15 @@ def test_transfer( time.sleep(0.1) # verify src tensors did not change - for orig_tensor, tensor in zip(orig_src_caches, src_kv_caches): + for orig_tensor, tensor in zip(orig_src_caches, handler.src_tensors): assert torch.equal(orig_tensor, tensor) # verify dst tensors for dst_block in range(dst_size_in_gpu_blocks): src_block_candidate = dst_to_src.get(dst_block) for src_cache, dst_cache, orig_dst_cache, kv_dim in zip( - src_kv_caches, - dst_kv_caches, + handler.src_tensors, + handler.dst_tensors, orig_dst_caches, handler.kv_dim_before_num_blocks, ): diff --git a/tests/v1/structured_output/test_utils.py b/tests/v1/structured_output/test_utils.py index 513a21dd6bb3..c026ab0e4e78 100644 --- 
a/tests/v1/structured_output/test_utils.py +++ b/tests/v1/structured_output/test_utils.py @@ -44,8 +44,6 @@ def unsupported_array_schemas(): @pytest.fixture def unsupported_object_schemas(): return [ - {"type": "object", "minProperties": 1}, - {"type": "object", "maxProperties": 5}, {"type": "object", "propertyNames": {"pattern": "^[a-z]+$"}}, {"type": "object", "patternProperties": {"^S": {"type": "string"}}}, ] @@ -79,6 +77,8 @@ def supported_schema(): }, }, }, + "minProperties": 1, + "maxProperties": 100, } diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 52a58a082683..2319655008c5 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -498,15 +498,15 @@ def awq_dequantize( def awq_gemm( input: torch.Tensor, qweight: torch.Tensor, - qzeros: torch.Tensor, scales: torch.Tensor, + qzeros: torch.Tensor, split_k_iters: int, ) -> torch.Tensor: if envs.VLLM_USE_TRITON_AWQ: from vllm.model_executor.layers.quantization.awq_triton import awq_gemm_triton - return awq_gemm_triton(input, qweight, qzeros, scales, split_k_iters) - return torch.ops._C.awq_gemm(input, qweight, qzeros, scales, split_k_iters) + return awq_gemm_triton(input, qweight, scales, qzeros, split_k_iters) + return torch.ops._C.awq_gemm(input, qweight, scales, qzeros, split_k_iters) # gptq @@ -632,8 +632,8 @@ def _awq_dequantize_fake( def _awq_gemm_fake( input: torch.Tensor, qweight: torch.Tensor, - qzeros: torch.Tensor, scales: torch.Tensor, + qzeros: torch.Tensor, split_k_iters: torch.SymInt, ) -> torch.Tensor: num_in_feats = input.size(0) diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index c095b9451814..7ef77db8fbb5 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -3,7 +3,6 @@ """Attention layer.""" import functools -from collections.abc import Callable from typing import cast import torch @@ -17,6 +16,7 @@ MLAAttentionImpl, ) from vllm.attention.backends.registry import AttentionBackendEnum +from vllm.attention.layers.mm_encoder_attention import 
maybe_get_vit_flash_attn_backend from vllm.attention.selector import get_attn_backend from vllm.attention.utils.fa_utils import get_flash_attn_version from vllm.attention.utils.kv_sharing_utils import validate_kv_sharing_target @@ -49,58 +49,9 @@ SlidingWindowSpec, ) -if current_platform.is_rocm(): - from vllm.platforms.rocm import on_gfx9 -else: - on_gfx9 = lambda *args, **kwargs: False - - -FP8_DTYPE = current_platform.fp8_dtype() logger = init_logger(__name__) -def maybe_get_vit_flash_attn_backend( - attn_backend: AttentionBackendEnum, - attn_backend_override: AttentionBackendEnum | None = None, -) -> tuple[AttentionBackendEnum, Callable | None]: - if current_platform.is_rocm(): - if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA and on_gfx9(): - attn_backend = AttentionBackendEnum.ROCM_AITER_FA - elif ( - attn_backend_override is None - and on_gfx9() - and attn_backend == AttentionBackendEnum.FLASH_ATTN - ): - pass - else: - return AttentionBackendEnum.TORCH_SDPA, None - elif current_platform.is_cuda(): - pass - elif current_platform.is_xpu(): - assert attn_backend == AttentionBackendEnum.FLASH_ATTN, ( - "XPU platform only supports FLASH_ATTN as vision attention backend." 
- ) - pass - else: - return AttentionBackendEnum.TORCH_SDPA, None - - if attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - }: - if attn_backend == AttentionBackendEnum.ROCM_AITER_FA: - from aiter import flash_attn_varlen_func - else: - try: - from vllm.attention.utils.fa_utils import flash_attn_varlen_func - except ImportError: - flash_attn_varlen_func = None - else: - flash_attn_varlen_func = None - - return attn_backend, flash_attn_varlen_func - - def _init_kv_cache_quant( layer: nn.Module, quant_config: QuantizationConfig | None, @@ -496,29 +447,15 @@ def __init__( attn_backend_override = None if multimodal_config is not None: attn_backend_override = multimodal_config.mm_encoder_attn_backend - backend = get_vit_attn_backend( + + self.attn_backend = get_vit_attn_backend( head_size=head_size, dtype=dtype, attn_backend_override=attn_backend_override, ) - self.attn_backend = ( - backend - if backend - in { - AttentionBackendEnum.TORCH_SDPA, - AttentionBackendEnum.PALLAS, - AttentionBackendEnum.ROCM_AITER_FA, - AttentionBackendEnum.FLASH_ATTN, - } - else AttentionBackendEnum.TORCH_SDPA - ) - - self.attn_backend, self._flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) + self._flash_attn_varlen_func = maybe_get_vit_flash_attn_backend( + self.attn_backend, ) self.is_flash_attn_backend = self.attn_backend in { @@ -527,7 +464,10 @@ def __init__( } self.fa_version = None - if self.attn_backend == AttentionBackendEnum.FLASH_ATTN: + if ( + self.attn_backend == AttentionBackendEnum.FLASH_ATTN + and current_platform.is_cuda() + ): self.fa_version = get_flash_attn_version() assert self._flash_attn_varlen_func is not None self._flash_attn_varlen_func = functools.partial( diff --git a/vllm/attention/layers/mm_encoder_attention.py b/vllm/attention/layers/mm_encoder_attention.py new file mode 100644 index 000000000000..c9107ebcab85 --- /dev/null +++ 
b/vllm/attention/layers/mm_encoder_attention.py @@ -0,0 +1,284 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +from collections.abc import Callable + +import torch + +from vllm.attention.backends.registry import AttentionBackendEnum +from vllm.attention.ops.vit_attn_wrappers import ( + vit_flash_attn_wrapper, + vit_torch_sdpa_wrapper, +) +from vllm.config import MultiModalConfig +from vllm.logger import init_logger +from vllm.model_executor.custom_op import CustomOp +from vllm.model_executor.models.vision import get_vit_attn_backend + +logger = init_logger(__name__) + + +def maybe_get_vit_flash_attn_backend( + attn_backend: AttentionBackendEnum | None, +) -> Callable | None: + # At this point, + # we already have the attn_backend, + # overriding logic is done in the platform-specific implementation. + # so we don't need to override backend here. + # Just return the attn_backend and flash_attn_varlen_func. + + if attn_backend == AttentionBackendEnum.FLASH_ATTN: + from vllm.attention.utils.fa_utils import flash_attn_varlen_func + elif attn_backend == AttentionBackendEnum.ROCM_AITER_FA: + from aiter import flash_attn_varlen_func + else: + flash_attn_varlen_func = None + + # if attn_backend is TORCH_SDPA, + # it will reach here and the flash_attn_varlen_func will be None. + return flash_attn_varlen_func + + +@CustomOp.register("mm_encoder_attn") +class MMEncoderAttention(CustomOp): + """Multi-headed attention without any cache, used for multimodal encoder.""" + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float | None = None, + num_kv_heads: int | None = None, + prefix: str = "", + multimodal_config: MultiModalConfig | None = None, + ) -> None: + """ + Args: + num_heads: number of attention heads per partition. + head_size: hidden_size per attention head. + scale: scale factor. + num_kv_heads: number of kv heads. 
+ prefix: This has no effect, it is only here to make it easier to + swap between Attention and MultiHeadAttention + multimodal_config: configs for multi-modal. + """ + super().__init__() + + self.num_heads = num_heads + self.head_size = head_size + self.scale = scale + self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads + self.layer_name = prefix + + assert self.num_heads % self.num_kv_heads == 0, ( + f"num_heads ({self.num_heads}) is not " + f"divisible by num_kv_heads ({self.num_kv_heads})" + ) + self.num_queries_per_kv = self.num_heads // self.num_kv_heads + + # During model initialization, the default dtype is set as the model + # weight and activation dtype. + dtype = torch.get_default_dtype() + + # Try to get vision attention backend from multimodal_config. + attn_backend_override = None + if multimodal_config is not None: + attn_backend_override = multimodal_config.mm_encoder_attn_backend + + # Get device-specific vision attention backend. + self.attn_backend = get_vit_attn_backend( + head_size=head_size, + dtype=dtype, + attn_backend_override=attn_backend_override, + ) + + self.is_flash_attn_backend = self.attn_backend in { + AttentionBackendEnum.FLASH_ATTN, + AttentionBackendEnum.ROCM_AITER_FA, + } + + self.flash_attn_varlen_func = maybe_get_vit_flash_attn_backend( + self.attn_backend, + ) + + logger.info_once(f"Using {self.attn_backend} for MMEncoderAttention.") + + @classmethod + def enabled(cls) -> bool: + return True + + def reshape_qkv_to_4d( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + bsz: int, + q_len: int, + kv_len: int, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Reshape query, key, value to 4D tensors: + (batch_size, seq_len, num_heads, head_size) + """ + query = query.view(bsz, q_len, self.num_heads, self.head_size) + key = key.view(bsz, kv_len, self.num_kv_heads, self.head_size) + value = value.view(bsz, kv_len, self.num_kv_heads, self.head_size) + + if (num_repeat := 
self.num_queries_per_kv) > 1: + # Handle MQA and GQA + key = torch.repeat_interleave(key, num_repeat, dim=2) + value = torch.repeat_interleave(value, num_repeat, dim=2) + + return query, key, value + + def reshape_qkv_to_3d( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + bsz: int, + q_len: int, + kv_len: int, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Reshape query, key, value to 3D tensors: + (batch_size * seq_len, num_heads, head_size) + """ + query = query.view(bsz * q_len, self.num_heads, self.head_size) + key = key.view(bsz * kv_len, self.num_kv_heads, self.head_size) + value = value.view(bsz * kv_len, self.num_kv_heads, self.head_size) + + if (num_repeat := self.num_queries_per_kv) > 1: + # Handle MQA and GQA + key = torch.repeat_interleave(key, num_repeat, dim=1) + value = torch.repeat_interleave(value, num_repeat, dim=1) + + return query, key, value + + def _forward_sdpa( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + cu_seqlens: torch.Tensor | None = None, + ) -> torch.Tensor: + # TODO(Isotr0py): Migrate MultiHeadAttention + assert cu_seqlens is not None + + bsz, q_len = query.size()[:2] + kv_len = key.size(1) + + query, key, value = self.reshape_qkv_to_4d( + query, key, value, bsz, q_len, kv_len + ) + + output = vit_torch_sdpa_wrapper( + q=query, + k=key, + v=value, + cu_seqlens=cu_seqlens, + ) + return output + + def _forward_fa( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + cu_seqlens: torch.Tensor | None = None, + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention + ) -> torch.Tensor: + assert self.flash_attn_varlen_func is not None, ( + "Flash attention function is not set." 
+ ) + # # TODO(Isotr0py): Migrate MultiHeadAttention + assert cu_seqlens is not None and max_seqlen is not None + + bsz = query.shape[0] + + output = vit_flash_attn_wrapper( + q=query, + k=key, + v=value, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + batch_size=bsz, + is_rocm_aiter=(self.attn_backend == AttentionBackendEnum.ROCM_AITER_FA), + ) + return output + + def forward_native( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + cu_seqlens: torch.Tensor | None = None, + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention + ) -> torch.Tensor: + return self._forward_sdpa(query, key, value, cu_seqlens) + + def forward_cuda( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + cu_seqlens: torch.Tensor | None = None, + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention + ) -> torch.Tensor: + if self.is_flash_attn_backend: + return self._forward_fa(query, key, value, cu_seqlens, max_seqlen) + elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: + return self._forward_sdpa(query, key, value, cu_seqlens) + else: + raise ValueError( + f"Unsupported multi-modal encoder attention backend for CUDA: " + f"{self.attn_backend}." + ) + + def forward_cpu( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + cu_seqlens: torch.Tensor | None = None, + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention + ) -> torch.Tensor: + return self._forward_sdpa(query, key, value, cu_seqlens) + + def forward_xpu( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + cu_seqlens: torch.Tensor | None = None, + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention + ) -> torch.Tensor: + assert self.is_flash_attn_backend, ( + "XPU only supports FLASH_ATTN for vision attention." 
+ ) + return self._forward_fa(query, key, value, cu_seqlens, max_seqlen) + + def forward_tpu( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + cu_seqlens: torch.Tensor | None = None, + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention + ) -> torch.Tensor: + assert self.attn_backend == AttentionBackendEnum.PALLAS, ( + f"MMEncoderAttention on TPU only supports PALLAS backend, " + f"but got {self.attn_backend}." + ) + if cu_seqlens is None: + query, key, value = (x.transpose(1, 2) for x in (query, key, value)) + from torch_xla.experimental.custom_kernel import flash_attention + + out = flash_attention(query, key, value, sm_scale=self.scale) + out = out.transpose(1, 2) + return out + logger.warning_once( + "PALLAS backend with cu_seqlens is not supported for ViT yet. ", + "Falling back to SDPA implementation.", + ) + return self._forward_sdpa(query, key, value, cu_seqlens) diff --git a/vllm/attention/ops/vit_attn_wrappers.py b/vllm/attention/ops/vit_attn_wrappers.py index 9036c2b80194..46c7d83dfa5c 100644 --- a/vllm/attention/ops/vit_attn_wrappers.py +++ b/vllm/attention/ops/vit_attn_wrappers.py @@ -44,9 +44,7 @@ def flash_attn_maxseqlen_wrapper( dropout_p=0.0, causal=False, ) - context_layer = einops.rearrange( - output, "(b s) h d -> s b (h d)", b=batch_size - ).contiguous() + context_layer = einops.rearrange(output, "(b s) h d -> b s h d", b=batch_size) return context_layer @@ -59,8 +57,7 @@ def flash_attn_maxseqlen_wrapper_fake( batch_size: int, is_rocm_aiter: bool, ) -> torch.Tensor: - b, s, h, d = q.shape - return torch.empty((s, b, h * d), dtype=q.dtype, device=q.device) + return torch.empty_like(q) direct_register_custom_op( @@ -106,7 +103,6 @@ def torch_sdpa_wrapper( output_i = einops.rearrange(output_i, "b h s d -> b s h d ") outputs.append(output_i) context_layer = torch.cat(outputs, dim=1) - context_layer = einops.rearrange(context_layer, "b s h d -> s b (h d)").contiguous() return context_layer @@ -116,8 
+112,7 @@ def torch_sdpa_wrapper_fake( v: torch.Tensor, cu_seqlens: torch.Tensor, ) -> torch.Tensor: - b, s, h, d = q.shape - return torch.empty((s, b, h * d), dtype=q.dtype, device=q.device) + return torch.empty_like(q) direct_register_custom_op( diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py index bbf95ff00900..e66f698add99 100644 --- a/vllm/attention/selector.py +++ b/vllm/attention/selector.py @@ -2,11 +2,11 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from functools import cache -from typing import cast, get_args +from typing import NamedTuple, cast, get_args import torch -from vllm.attention.backends.abstract import AttentionBackend +from vllm.attention.backends.abstract import AttentionBackend, AttentionType from vllm.attention.backends.registry import ( MAMBA_TYPE_TO_BACKEND_MAP, MambaAttentionBackendEnum, @@ -18,6 +18,31 @@ logger = init_logger(__name__) +class AttentionSelectorConfig(NamedTuple): + head_size: int + dtype: torch.dtype + kv_cache_dtype: CacheDType | None + block_size: int | None + use_mla: bool = False + has_sink: bool = False + use_sparse: bool = False + use_mm_prefix: bool = False + attn_type: str = AttentionType.DECODER + + def __repr__(self): + return ( + f"AttentionSelectorConfig(head_size={self.head_size}, " + f"dtype={self.dtype}, " + f"kv_cache_dtype={self.kv_cache_dtype}, " + f"block_size={self.block_size}, " + f"use_mla={self.use_mla}, " + f"has_sink={self.has_sink}, " + f"use_sparse={self.use_sparse}, " + f"use_mm_prefix={self.use_mm_prefix}, " + f"attn_type={self.attn_type})" + ) + + def get_attn_backend( head_size: int, dtype: torch.dtype, @@ -43,8 +68,7 @@ def get_attn_backend( vllm_config = get_current_vllm_config() backend_enum = vllm_config.attention_config.backend - return _cached_get_attn_backend( - backend=backend_enum, + attn_selector_config = AttentionSelectorConfig( head_size=head_size, dtype=dtype, kv_cache_dtype=cast(CacheDType | None, kv_cache_dtype), @@ -53,36 
+77,25 @@ def get_attn_backend( has_sink=has_sink, use_sparse=use_sparse, use_mm_prefix=use_mm_prefix, - attn_type=attn_type, + attn_type=attn_type or AttentionType.DECODER, + ) + + return _cached_get_attn_backend( + backend=backend_enum, + attn_selector_config=attn_selector_config, ) @cache def _cached_get_attn_backend( backend, - head_size: int, - dtype: torch.dtype, - kv_cache_dtype: CacheDType | None, - block_size: int | None, - use_mla: bool = False, - has_sink: bool = False, - use_sparse: bool = False, - use_mm_prefix: bool = False, - attn_type: str | None = None, + attn_selector_config: AttentionSelectorConfig, ) -> type[AttentionBackend]: from vllm.platforms import current_platform attention_cls = current_platform.get_attn_backend_cls( backend, - head_size, - dtype, - kv_cache_dtype, - block_size, - use_mla, - has_sink, - use_sparse, - use_mm_prefix, - attn_type, + attn_selector_config=attn_selector_config, ) if not attention_cls: raise ValueError( diff --git a/vllm/benchmarks/serve.py b/vllm/benchmarks/serve.py index 254e4d35e535..f5d8ea5a975a 100644 --- a/vllm/benchmarks/serve.py +++ b/vllm/benchmarks/serve.py @@ -235,7 +235,9 @@ async def get_request( def calculate_metrics_for_embeddings( - outputs: list[RequestFuncOutput], dur_s: float, selected_percentiles: list[float] + outputs: list[RequestFuncOutput], + dur_s: float, + selected_percentiles: list[float], ) -> EmbedBenchmarkMetrics: """Calculate the metrics for the embedding requests. diff --git a/vllm/benchmarks/startup.py b/vllm/benchmarks/startup.py new file mode 100644 index 000000000000..086f7bf62f83 --- /dev/null +++ b/vllm/benchmarks/startup.py @@ -0,0 +1,326 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +"""Benchmark the cold and warm startup time of vLLM models. 
+ +This script measures total startup time (including model loading, compilation, +and cache operations) for both cold and warm scenarios: +- Cold startup: Fresh start with no caches (temporary cache directories) +- Warm startup: Using cached compilation and model info +""" + +import argparse +import dataclasses +import json +import multiprocessing +import os +import shutil +import tempfile +import time +from contextlib import contextmanager +from typing import Any + +import numpy as np +from tqdm import tqdm + +from vllm.benchmarks.lib.utils import ( + convert_to_pytorch_benchmark_format, + write_to_json, +) +from vllm.engine.arg_utils import EngineArgs + + +@contextmanager +def cold_startup(): + """ + Context manager to measure cold startup time: + 1. Uses a temporary directory for vLLM cache to avoid any pollution + between cold startup iterations. + 2. Uses inductor's fresh_cache to clear torch.compile caches. + """ + from torch._inductor.utils import fresh_cache + + # Use temporary directory for caching to avoid any pollution between cold startups + original_cache_root = os.environ.get("VLLM_CACHE_ROOT") + temp_cache_dir = tempfile.mkdtemp(prefix="vllm_startup_bench_cold_") + try: + os.environ["VLLM_CACHE_ROOT"] = temp_cache_dir + with fresh_cache(): + yield + finally: + # Clean up temporary cache directory + shutil.rmtree(temp_cache_dir, ignore_errors=True) + if original_cache_root: + os.environ["VLLM_CACHE_ROOT"] = original_cache_root + else: + os.environ.pop("VLLM_CACHE_ROOT", None) + + +def run_startup_in_subprocess(engine_args_dict, result_queue): + """ + Run LLM startup in a subprocess and return timing metrics via a queue. + This ensures complete isolation between iterations. 
+ """ + try: + # Import inside the subprocess to avoid issues with forking + from vllm import LLM + from vllm.engine.arg_utils import EngineArgs + + engine_args = EngineArgs(**engine_args_dict) + + # Measure total startup time + start_time = time.perf_counter() + + llm = LLM(**dataclasses.asdict(engine_args)) + + total_startup_time = time.perf_counter() - start_time + + # Extract compilation time if available + compilation_time = 0.0 + if hasattr(llm.llm_engine, "vllm_config"): + vllm_config = llm.llm_engine.vllm_config + if ( + hasattr(vllm_config, "compilation_config") + and vllm_config.compilation_config is not None + ): + compilation_time = vllm_config.compilation_config.compilation_time + + result_queue.put( + { + "total_startup_time": total_startup_time, + "compilation_time": compilation_time, + } + ) + + except Exception as e: + result_queue.put(None) + result_queue.put(str(e)) + + +def save_to_pytorch_benchmark_format( + args: argparse.Namespace, results: dict[str, Any] +) -> None: + base_name = os.path.splitext(args.output_json)[0] + + cold_startup_records = convert_to_pytorch_benchmark_format( + args=args, + metrics={ + "avg_cold_startup_time": results["avg_cold_startup_time"], + }, + extra_info={ + "cold_startup_times": results["cold_startup_times"], + "cold_startup_percentiles": results["cold_startup_percentiles"], + }, + ) + if cold_startup_records: + write_to_json(f"{base_name}.cold_startup.pytorch.json", cold_startup_records) + + cold_compilation_records = convert_to_pytorch_benchmark_format( + args=args, + metrics={ + "avg_cold_compilation_time": results["avg_cold_compilation_time"], + }, + extra_info={ + "cold_compilation_times": results["cold_compilation_times"], + "cold_compilation_percentiles": results["cold_compilation_percentiles"], + }, + ) + if cold_compilation_records: + write_to_json( + f"{base_name}.cold_compilation.pytorch.json", cold_compilation_records + ) + + warm_startup_records = convert_to_pytorch_benchmark_format( + args=args, + 
metrics={ + "avg_warm_startup_time": results["avg_warm_startup_time"], + }, + extra_info={ + "warm_startup_times": results["warm_startup_times"], + "warm_startup_percentiles": results["warm_startup_percentiles"], + }, + ) + if warm_startup_records: + write_to_json(f"{base_name}.warm_startup.pytorch.json", warm_startup_records) + + warm_compilation_records = convert_to_pytorch_benchmark_format( + args=args, + metrics={ + "avg_warm_compilation_time": results["avg_warm_compilation_time"], + }, + extra_info={ + "warm_compilation_times": results["warm_compilation_times"], + "warm_compilation_percentiles": results["warm_compilation_percentiles"], + }, + ) + if warm_compilation_records: + write_to_json( + f"{base_name}.warm_compilation.pytorch.json", warm_compilation_records + ) + + +def add_cli_args(parser: argparse.ArgumentParser): + parser.add_argument( + "--num-iters-cold", + type=int, + default=5, + help="Number of cold startup iterations.", + ) + parser.add_argument( + "--num-iters-warmup", + type=int, + default=3, + help="Number of warmup iterations before benchmarking warm startups.", + ) + parser.add_argument( + "--num-iters-warm", + type=int, + default=5, + help="Number of warm startup iterations.", + ) + parser.add_argument( + "--output-json", + type=str, + default=None, + help="Path to save the startup time results in JSON format.", + ) + + parser = EngineArgs.add_cli_args(parser) + return parser + + +def main(args: argparse.Namespace): + # Set multiprocessing start method to 'spawn' for clean process isolation + # This ensures each subprocess starts fresh without inheriting state + multiprocessing.set_start_method("spawn", force=True) + + engine_args = EngineArgs.from_cli_args(args) + + def create_llm_and_measure_startup(): + """ + Create LLM instance in a subprocess and measure startup time. + Returns timing metrics, using subprocess for complete isolation. 
+ """ + # Convert engine_args to dictionary for pickling + engine_args_dict = dataclasses.asdict(engine_args) + + # Create a queue for inter-process communication + result_queue = multiprocessing.Queue() + process = multiprocessing.Process( + target=run_startup_in_subprocess, + args=( + engine_args_dict, + result_queue, + ), + ) + process.start() + process.join() + + if not result_queue.empty(): + result = result_queue.get() + if result is None: + if not result_queue.empty(): + error_msg = result_queue.get() + raise RuntimeError(f"Subprocess failed: {error_msg}") + else: + raise RuntimeError("Subprocess failed with unknown error") + return result + else: + raise RuntimeError("Subprocess did not return a result") + + os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" + print("Setting VLLM_ENABLE_V1_MULTIPROCESSING=0 to collect startup metrics.\n") + + print("Measuring cold startup time...\n") + cold_startup_times = [] + cold_compilation_times = [] + for i in tqdm(range(args.num_iters_cold), desc="Cold startup iterations"): + with cold_startup(): + metrics = create_llm_and_measure_startup() + cold_startup_times.append(metrics["total_startup_time"]) + cold_compilation_times.append(metrics["compilation_time"]) + + # Warmup for warm startup + print("\nWarming up for warm startup measurement...\n") + for _ in tqdm(range(args.num_iters_warmup), desc="Warmup iterations"): + create_llm_and_measure_startup() + + print("\nMeasuring warm startup time...\n") + warm_startup_times = [] + warm_compilation_times = [] + for i in tqdm(range(args.num_iters_warm), desc="Warm startup iterations"): + metrics = create_llm_and_measure_startup() + warm_startup_times.append(metrics["total_startup_time"]) + warm_compilation_times.append(metrics["compilation_time"]) + + # Calculate statistics + cold_startup_array = np.array(cold_startup_times) + cold_compilation_array = np.array(cold_compilation_times) + warm_startup_array = np.array(warm_startup_times) + warm_compilation_array = 
np.array(warm_compilation_times) + + avg_cold_startup = np.mean(cold_startup_array) + avg_cold_compilation = np.mean(cold_compilation_array) + avg_warm_startup = np.mean(warm_startup_array) + avg_warm_compilation = np.mean(warm_compilation_array) + + percentages = [10, 25, 50, 75, 90, 99] + cold_startup_percentiles = np.percentile(cold_startup_array, percentages) + cold_compilation_percentiles = np.percentile(cold_compilation_array, percentages) + warm_startup_percentiles = np.percentile(warm_startup_array, percentages) + warm_compilation_percentiles = np.percentile(warm_compilation_array, percentages) + + print("\n" + "=" * 60) + print("STARTUP TIME BENCHMARK RESULTS") + print("=" * 60) + + # Cold startup statistics + print("\nCOLD STARTUP:") + print(f"Avg total startup time: {avg_cold_startup:.2f} seconds") + print(f"Avg compilation time: {avg_cold_compilation:.2f} seconds") + print("Startup time percentiles:") + for percentage, percentile in zip(percentages, cold_startup_percentiles): + print(f" {percentage}%: {percentile:.2f} seconds") + print("Compilation time percentiles:") + for percentage, percentile in zip(percentages, cold_compilation_percentiles): + print(f" {percentage}%: {percentile:.2f} seconds") + + # Warm startup statistics + print("\nWARM STARTUP:") + print(f"Avg total startup time: {avg_warm_startup:.2f} seconds") + print(f"Avg compilation time: {avg_warm_compilation:.2f} seconds") + print("Startup time percentiles:") + for percentage, percentile in zip(percentages, warm_startup_percentiles): + print(f" {percentage}%: {percentile:.2f} seconds") + print("Compilation time percentiles:") + for percentage, percentile in zip(percentages, warm_compilation_percentiles): + print(f" {percentage}%: {percentile:.2f} seconds") + + print("=" * 60) + + # Output JSON results if specified + if args.output_json: + results = { + "avg_cold_startup_time": float(avg_cold_startup), + "avg_cold_compilation_time": float(avg_cold_compilation), + "cold_startup_times": 
cold_startup_times, + "cold_compilation_times": cold_compilation_times, + "cold_startup_percentiles": dict( + zip(percentages, cold_startup_percentiles.tolist()) + ), + "cold_compilation_percentiles": dict( + zip(percentages, cold_compilation_percentiles.tolist()) + ), + "avg_warm_startup_time": float(avg_warm_startup), + "avg_warm_compilation_time": float(avg_warm_compilation), + "warm_startup_times": warm_startup_times, + "warm_compilation_times": warm_compilation_times, + "warm_startup_percentiles": dict( + zip(percentages, warm_startup_percentiles.tolist()) + ), + "warm_compilation_percentiles": dict( + zip(percentages, warm_compilation_percentiles.tolist()) + ), + } + with open(args.output_json, "w") as f: + json.dump(results, f, indent=4) + save_to_pytorch_benchmark_format(args, results) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 8fcd2b42e13b..a1eec7d74483 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -463,21 +463,27 @@ def call_module( # the tag for the part of model being compiled, # e.g. backbone/eagle_head model_tag: str = "backbone" +model_is_encoder: bool = False @contextmanager -def set_model_tag(tag: str): +def set_model_tag(tag: str, is_encoder: bool = False): """Context manager to set the model tag.""" global model_tag + global model_is_encoder assert tag != model_tag, ( f"Model tag {tag} is the same as the current tag {model_tag}." ) old_tag = model_tag + old_is_encoder = model_is_encoder + model_tag = tag + model_is_encoder = is_encoder try: yield finally: model_tag = old_tag + model_is_encoder = old_is_encoder class VllmBackend: @@ -523,6 +529,9 @@ def __init__( # them, e.g. backbone (default), eagle_head, etc. self.prefix = prefix or model_tag + # Mark compilation for encoder. + self.is_encoder = model_is_encoder + # Passes to run on the graph post-grad. 
self.pass_manager = resolve_obj_by_qualname( current_platform.get_pass_manager_cls() diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index f07061bdb7b2..d1ee995ee895 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -390,14 +390,6 @@ def __call__(self, *args, **kwargs): serialized backend artifacts), then we need to generate a new AOT compile artifact from scratch. """ - # Validate that AOT compile is not used with unbacked dynamic - # shapes. aot_compile re-allocates backed symbols post dynamo! - if ds_type == DynamicShapesType.UNBACKED: - raise ValueError( - "AOT compilation is not compatible with UNBACKED dynamic shapes. " - "Please use BACKED or BACKED_SIZE_OBLIVIOUS dynamic shapes type " - "when VLLM_USE_AOT_COMPILE is enabled." - ) from .caching import compilation_config_hash_factors factors: list[str] = compilation_config_hash_factors(self.vllm_config) diff --git a/vllm/compilation/fusion.py b/vllm/compilation/fusion.py index a7e6a69e64c9..d121106334cb 100644 --- a/vllm/compilation/fusion.py +++ b/vllm/compilation/fusion.py @@ -23,17 +23,14 @@ kNvfp4Quant, kStaticTensorScale, ) -from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( - cutlass_block_fp8_supported, -) from vllm.platforms import current_platform -from vllm.utils.deep_gemm import ( - is_deep_gemm_e8m0_used, - should_use_deepgemm_for_fp8_linear_for_nk, -) from .inductor_pass import enable_fake_mode -from .matcher_utils import MatcherFusedAddRMSNorm, MatcherQuantFP8, MatcherRMSNorm +from .matcher_utils import ( + MatcherFusedAddRMSNorm, + MatcherQuantFP8, + MatcherRMSNorm, +) from .vllm_inductor_pass import VllmInductorPass, VllmPatternMatcherPass logger = init_logger(__name__) @@ -118,21 +115,18 @@ def __str__(self): class RMSNormQuantPattern: - def __init__(self, epsilon: float, key: FusedRMSQuantKey): + def __init__( + self, + epsilon: float, + key: FusedRMSQuantKey, + has_col_major_scales: bool = False, + is_e8m0: 
bool = False, + ): self.epsilon = epsilon self.quant_dtype = key.quant.dtype config = get_current_vllm_config() self.model_dtype = config.model_config.dtype if config.model_config else None - # groupwise FP8 linear uses col major scales if deepgemm and cutlass - using_deepgemm = should_use_deepgemm_for_fp8_linear_for_nk( - self.model_dtype, - config.model_config.hf_config.intermediate_size, - config.model_config.hf_config.hidden_size, - ) - use_col_major_scales = using_deepgemm or cutlass_block_fp8_supported() - use_e8m0 = is_deep_gemm_e8m0_used() if using_deepgemm else False - assert key in FUSED_OPS, f"unsupported fused rmsnorm+quant op for {key}" self.FUSED_OP = FUSED_OPS[key] @@ -142,7 +136,7 @@ def __init__(self, epsilon: float, key: FusedRMSQuantKey): else MatcherFusedAddRMSNorm(epsilon) ) self.quant_matcher = MatcherQuantFP8( - key.quant, use_col_major_scales=use_col_major_scales, use_e8m0=use_e8m0 + key.quant, has_col_major_scales=has_col_major_scales, is_e8m0=is_e8m0 ) @@ -260,6 +254,8 @@ def __init__( quant_dtype: torch.dtype, group_shape: GroupShape, symmetric=True, + has_col_major_scales: bool = False, + is_e8m0: bool = False, ): scale = ScaleDesc(torch.float32, False, group_shape) key = FusedRMSQuantKey( @@ -267,7 +263,11 @@ def __init__( quant=QuantKey(dtype=quant_dtype, scale=scale, symmetric=symmetric), ) self.group_shape = group_shape - super().__init__(epsilon, key) + self.has_col_major_scales = has_col_major_scales + self.is_e8m0 = is_e8m0 + super().__init__( + epsilon, key, has_col_major_scales=has_col_major_scales, is_e8m0=is_e8m0 + ) def register(self, pm_pass: PatternMatcherPass): def pattern(input: torch.Tensor, weight: torch.Tensor, residual: torch.Tensor): @@ -283,9 +283,7 @@ def replacement( input = input.to(dtype=self.model_dtype) result = torch.empty_like(input, dtype=self.quant_dtype) - scale = self.quant_matcher.make_scale( - input, transposed=self.quant_matcher.use_col_major_scales - ) + scale = self.quant_matcher.make_scale(input, 
self.has_col_major_scales) at = auto_functionalized( self.FUSED_OP, result=result, @@ -296,7 +294,7 @@ def replacement( scale_ub=None, residual=residual, group_size=self.group_shape[1], - is_scale_transposed=self.quant_matcher.use_col_major_scales, + is_scale_transposed=self.has_col_major_scales, ) # result, residual, scale @@ -318,6 +316,8 @@ def __init__( quant_dtype: torch.dtype, group_shape: GroupShape, symmetric=True, + has_col_major_scales: bool = False, + is_e8m0: bool = False, ): scale = ScaleDesc(torch.float32, False, group_shape) key = FusedRMSQuantKey( @@ -325,7 +325,9 @@ def __init__( quant=QuantKey(dtype=quant_dtype, scale=scale, symmetric=symmetric), ) self.group_shape = group_shape - super().__init__(epsilon, key) + super().__init__( + epsilon, key, has_col_major_scales=has_col_major_scales, is_e8m0=is_e8m0 + ) def register(self, pm_pass: PatternMatcherPass): def pattern(input: torch.Tensor, weight: torch.Tensor): @@ -340,7 +342,7 @@ def replacement(input: torch.Tensor, weight: torch.Tensor): result = torch.empty_like(input, dtype=self.quant_dtype) scale = self.quant_matcher.make_scale( - input, transposed=self.quant_matcher.use_col_major_scales + input, transposed=self.quant_matcher.has_col_major_scales ) at = auto_functionalized( self.FUSED_OP, @@ -352,7 +354,7 @@ def replacement(input: torch.Tensor, weight: torch.Tensor): scale_ub=None, residual=None, group_size=self.group_shape[1], - is_scale_transposed=self.quant_matcher.use_col_major_scales, + is_scale_transposed=self.quant_matcher.has_col_major_scales, ) # result, scale @@ -489,27 +491,6 @@ def __init__(self, config: VllmConfig): # Make sure fused add patterns are before simple rms norm, # as the latter is a subset of the former in torch ops for epsilon in [1e-5, 1e-6]: - # Fuse fused_add_rms_norm + fp8 group quant - # Only register group quant patterns on CUDA where the C++ op exists - if current_platform.is_cuda(): - FusedAddRMSNormGroupQuantPattern( - epsilon, FP8_DTYPE, 
group_shape=GroupShape(1, 128) - ).register(self.patterns) - - # Fuse rms_norm + fp8 group quant - RMSNormGroupQuantPattern( - epsilon, FP8_DTYPE, group_shape=GroupShape(1, 128) - ).register(self.patterns) - - FusedAddRMSNormGroupQuantPattern( - epsilon, FP8_DTYPE, group_shape=GroupShape(1, 64) - ).register(self.patterns) - - # Fuse rms_norm + fp8 group quant - RMSNormGroupQuantPattern( - epsilon, FP8_DTYPE, group_shape=GroupShape(1, 64) - ).register(self.patterns) - # Fuse fused_add_rms_norm + static fp8 quant FusedAddRMSNormStaticQuantPattern(epsilon, FP8_DTYPE).register( self.patterns @@ -526,6 +507,29 @@ def __init__(self, config: VllmConfig): # Fuse rms_norm + dynamic per-token fp8 quant RMSNormDynamicQuantPattern(epsilon, FP8_DTYPE).register(self.patterns) + # Only register group quant patterns on CUDA where the C++ op exists + if current_platform.is_cuda(): + for group_shape in [GroupShape(1, 128), GroupShape(1, 64)]: + for has_col_major_scales in [True, False]: + for is_e8m0 in [True, False]: + # Fuse fused_add_rms_norm + fp8 group quant + FusedAddRMSNormGroupQuantPattern( + epsilon, + FP8_DTYPE, + group_shape=group_shape, + has_col_major_scales=has_col_major_scales, + is_e8m0=is_e8m0, + ).register(self.patterns) + + # Fuse rms_norm + fp8 group quant + RMSNormGroupQuantPattern( + epsilon, + FP8_DTYPE, + group_shape=group_shape, + has_col_major_scales=has_col_major_scales, + is_e8m0=is_e8m0, + ).register(self.patterns) + self.dump_patterns(config, self.patterns) @VllmInductorPass.time_and_log diff --git a/vllm/compilation/matcher_utils.py b/vllm/compilation/matcher_utils.py index 0c0bece9b3fd..ec9ed34f561b 100644 --- a/vllm/compilation/matcher_utils.py +++ b/vllm/compilation/matcher_utils.py @@ -234,24 +234,30 @@ def __init__( self, quant_key: QuantKey, enabled: bool | None = None, - use_col_major_scales: bool = False, - use_e8m0: bool = False, + has_col_major_scales: bool = False, + is_e8m0: bool = False, ): if enabled is None: enabled = QuantFP8.enabled() 
super().__init__(enabled) self.quant_key = quant_key - self.use_col_major_scales = use_col_major_scales - self.use_e8m0 = use_e8m0 assert quant_key in QUANT_OPS, f"unsupported quantization scheme {quant_key}" self.QUANT_OP = QUANT_OPS[quant_key] + self.has_col_major_scales = has_col_major_scales + self.is_e8m0 = is_e8m0 + assert quant_key.dtype == current_platform.fp8_dtype(), ( "Only QuantFP8 supported by" ) assert quant_key.scale2 is None - self.quant_fp8 = QuantFP8(quant_key.scale.static, quant_key.scale.group_shape) + self.quant_fp8 = QuantFP8( + quant_key.scale.static, + quant_key.scale.group_shape, + column_major_scales=has_col_major_scales, + use_ue8m0=is_e8m0, + ) def forward_custom( self, @@ -264,7 +270,7 @@ def forward_custom( if self.quant_key.scale.group_shape.is_per_group(): assert scale is None - scale = self.make_scale(input, transposed=self.use_col_major_scales) + scale = self.make_scale(input, transposed=self.has_col_major_scales) finfo = torch.finfo(self.quant_key.dtype) fp8_min = finfo.min @@ -279,7 +285,7 @@ def forward_custom( eps=1e-10, fp8_min=fp8_min, fp8_max=fp8_max, - scale_ue8m0=self.use_e8m0, + scale_ue8m0=self.is_e8m0, ) return result, scale diff --git a/vllm/compilation/piecewise_backend.py b/vllm/compilation/piecewise_backend.py index a15c693767a5..58d3e2a14b22 100644 --- a/vllm/compilation/piecewise_backend.py +++ b/vllm/compilation/piecewise_backend.py @@ -53,12 +53,7 @@ def __init__( self.is_last_graph = piecewise_compile_index == total_piecewise_compiles - 1 self.is_full_graph = total_piecewise_compiles == 1 - # TODO: we need to generalize encoder compilation to other models - self.is_encoder_compilation = vllm_backend.prefix in [ - "Qwen2_5_VisionPatchEmbed", - "Qwen2_5_VisionPatchMerger", - "Qwen2_5_VisionBlock", - ] + self.is_encoder_compilation = vllm_backend.is_encoder self.compile_ranges = self.compilation_config.get_compile_ranges() if self.is_encoder_compilation: diff --git a/vllm/config/compilation.py 
b/vllm/config/compilation.py index 3b6cb8a34360..1fdb843e1a7c 100644 --- a/vllm/config/compilation.py +++ b/vllm/config/compilation.py @@ -8,7 +8,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, ClassVar, Literal -from pydantic import Field, TypeAdapter, field_validator +from pydantic import ConfigDict, Field, TypeAdapter, field_validator from pydantic.dataclasses import dataclass import vllm.envs as envs @@ -96,7 +96,7 @@ def __str__(self) -> str: @config -@dataclass +@dataclass(config=ConfigDict(extra="forbid")) class PassConfig: """Configuration for custom Inductor passes. @@ -251,7 +251,7 @@ class DynamicShapesType(str, enum.Enum): @config -@dataclass +@dataclass(config=ConfigDict(extra="forbid")) class DynamicShapesConfig: """Configuration to control/debug torch compile dynamic shapes.""" @@ -290,7 +290,7 @@ def compute_hash(self) -> str: @config -@dataclass +@dataclass(config=ConfigDict(extra="forbid")) class CompilationConfig: """Configuration for compilation. 
@@ -932,9 +932,13 @@ def set_splitting_ops_for_v1( self.splitting_ops = list(self._attention_ops) added_default_splitting_ops = True elif len(self.splitting_ops) == 0: - logger.warning_once( - "Using piecewise compilation with empty splitting_ops" - ) + if ( + self.cudagraph_mode == CUDAGraphMode.PIECEWISE + or self.cudagraph_mode == CUDAGraphMode.FULL_AND_PIECEWISE + ): + logger.warning_once( + "Using piecewise compilation with empty splitting_ops" + ) if self.cudagraph_mode == CUDAGraphMode.PIECEWISE: logger.warning_once( "Piecewise compilation with empty splitting_ops do not" diff --git a/vllm/config/model.py b/vllm/config/model.py index fe98bbca2429..b5b6dcb480e8 100644 --- a/vllm/config/model.py +++ b/vllm/config/model.py @@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Any, Literal, cast, get_args import torch -from pydantic import ConfigDict, SkipValidation, field_validator, model_validator +from pydantic import ConfigDict, Field, field_validator, model_validator from pydantic.dataclasses import dataclass from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE from transformers.configuration_utils import ALLOWED_LAYER_TYPES @@ -109,7 +109,7 @@ class ModelConfig: """Convert the model using adapters defined in [vllm.model_executor.models.adapters][]. The most common use case is to adapt a text generation model to be used for pooling tasks.""" - tokenizer: SkipValidation[str] = None # type: ignore + tokenizer: str = Field(default=None) """Name or path of the Hugging Face tokenizer to use. If unspecified, model name or path will be used.""" tokenizer_mode: TokenizerMode | str = "auto" @@ -164,7 +164,7 @@ class ModelConfig: """The specific revision to use for the tokenizer on the Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.""" - max_model_len: SkipValidation[int] = None # type: ignore + max_model_len: int = Field(default=None, gt=0) """Model context length (prompt and output). 
If unspecified, will be automatically derived from the model config. @@ -175,7 +175,7 @@ class ModelConfig: - 25.6k -> 25,600""" spec_target_max_model_len: int | None = None """Specify the maximum length for spec decoding draft models.""" - quantization: SkipValidation[QuantizationMethods | None] = None + quantization: QuantizationMethods | str | None = None """Method used to quantize the weights. If `None`, we first check the `quantization_config` attribute in the model config file. If that is `None`, we assume the model weights are not quantized and use `dtype` to @@ -597,6 +597,14 @@ def __post_init__( self._verify_cuda_graph() self._verify_bnb_config() + @field_validator("tokenizer", "max_model_len", mode="wrap") + @classmethod + def _skip_none_validation(cls, value: Any, handler: Callable) -> Any: + """Skip validation if the value is `None` when initialisation is delayed.""" + if value is None: + return value + return handler(value) + @field_validator("tokenizer_mode", mode="after") def _lowercase_tokenizer_mode(cls, tokenizer_mode: str) -> str: return tokenizer_mode.lower() @@ -610,10 +618,19 @@ def validate_quantization_before(cls, value: Any) -> Any: @model_validator(mode="after") def validate_model_config_after(self: "ModelConfig") -> "ModelConfig": + """Called after __post_init__""" if not isinstance(self.tokenizer, str): - raise ValueError("tokenizer must be a string after __post_init__.") + raise ValueError( + f"tokenizer must be a string, got " + f"{type(self.tokenizer).__name__}: {self.tokenizer!r}. " + "Please provide a valid tokenizer path or HuggingFace model ID." + ) if not isinstance(self.max_model_len, int): - raise ValueError("max_model_len must be an integer after __post_init__.") + raise ValueError( + f"max_model_len must be a positive integer, " + f"got {type(self.max_model_len).__name__}: {self.max_model_len!r}. 
" + "Example: max_model_len=2048" + ) return self def _get_transformers_backend_cls(self) -> str: @@ -1189,7 +1206,15 @@ def get_total_num_kv_heads(self) -> int: // block.attention.n_heads_in_group ) - raise RuntimeError("Couldn't determine number of kv heads") + raise RuntimeError( + "Could not determine the number of key-value attention heads " + "from model configuration. " + f"Model: {self.model}, Architecture: {self.architectures}. " + "This usually indicates an unsupported model architecture or " + "missing configuration. " + "Please check if your model is supported at: " + "https://docs.vllm.ai/en/latest/models/supported_models.html" + ) if self.is_attention_free: return 0 @@ -1783,6 +1808,7 @@ def get_served_model_name(model: str, served_model_name: str | list[str] | None) ("ForTextEncoding", ("pooling", "embed")), ("EmbeddingModel", ("pooling", "embed")), ("ForSequenceClassification", ("pooling", "classify")), + ("ForTokenClassification", ("pooling", "classify")), ("ForAudioClassification", ("pooling", "classify")), ("ForImageClassification", ("pooling", "classify")), ("ForVideoClassification", ("pooling", "classify")), diff --git a/vllm/config/parallel.py b/vllm/config/parallel.py index 1f9dd38ac911..3fe066ec3250 100644 --- a/vllm/config/parallel.py +++ b/vllm/config/parallel.py @@ -156,6 +156,8 @@ class ParallelConfig: enable_dbo: bool = False """Enable dual batch overlap for the model executor.""" + ubatch_size: int = 0 + """Number of ubatch size.""" dbo_decode_token_threshold: int = 32 """The threshold for dual batch overlap for batches only containing decodes. 
@@ -325,6 +327,14 @@ def world_size_across_dp(self) -> int: including data parallelism.""" return self.world_size * self.data_parallel_size + @property + def use_ubatching(self) -> bool: + return self.enable_dbo or self.ubatch_size > 1 + + @property + def num_ubatches(self) -> int: + return 2 if self.enable_dbo else self.ubatch_size + def get_next_dp_init_port(self) -> int: """ We might need to initialize process groups in multiple diff --git a/vllm/config/scheduler.py b/vllm/config/scheduler.py index 8da3ae538d67..8abbe8ba0103 100644 --- a/vllm/config/scheduler.py +++ b/vllm/config/scheduler.py @@ -122,10 +122,12 @@ class SchedulerConfig: the default scheduler. Can be a class directly or the path to a class of form "mod.custom_class".""" - disable_hybrid_kv_cache_manager: bool = False + disable_hybrid_kv_cache_manager: bool | None = None """If set to True, KV cache manager will allocate the same size of KV cache for all attention layers even if there are multiple type of attention layers like full attention and sliding window attention. + If set to None, the default value will be determined based on the environment + and starting configuration. """ async_scheduling: bool = False diff --git a/vllm/config/vllm.py b/vllm/config/vllm.py index 097660d6ab75..0439dc52e7e6 100644 --- a/vllm/config/vllm.py +++ b/vllm/config/vllm.py @@ -870,9 +870,12 @@ def has_blocked_weights(): f"cudagraph_mode={self.compilation_config.cudagraph_mode}" ) - if self.parallel_config.enable_dbo: + if self.parallel_config.use_ubatching: a2a_backend = self.parallel_config.all2all_backend - assert a2a_backend in ["deepep_low_latency", "deepep_high_throughput"], ( + assert a2a_backend in [ + "deepep_low_latency", + "deepep_high_throughput", + ], ( "Microbatching currently only supports the deepep_low_latency and " f"deepep_high_throughput all2all backend. {a2a_backend} is not " "supported. 
To fix use --all2all-backend=deepep_low_latency or " @@ -887,48 +890,75 @@ def has_blocked_weights(): if not self.instance_id: self.instance_id = random_uuid()[:5] - if not self.scheduler_config.disable_hybrid_kv_cache_manager: - # logger should only print warning message for hybrid models. As we - # can't know whether the model is hybrid or not now, so we don't log - # warning message here and will log it later. - if not current_platform.support_hybrid_kv_cache(): - # Hybrid KV cache manager is not supported on non-GPU platforms. - self.scheduler_config.disable_hybrid_kv_cache_manager = True + # Hybrid KV cache manager (HMA) runtime rules: + # - Explicit enable (--no-disable-kv-cache-manager): error if runtime + # disables it + # - No preference: auto-disable for unsupported features (e.g. kv connector) + # - Explicit disable (--disable-kv-cache-manager): always respect it + need_disable_hybrid_kv_cache_manager = False + # logger should only print warning message for hybrid models. As we + # can't know whether the model is hybrid or not now, so we don't log + # warning message here and will log it later. + if not current_platform.support_hybrid_kv_cache(): + # Hybrid KV cache manager is not supported on non-GPU platforms. + need_disable_hybrid_kv_cache_manager = True + if self.kv_events_config is not None: + # Hybrid KV cache manager is not compatible with KV events. + need_disable_hybrid_kv_cache_manager = True + if ( + self.model_config is not None + and self.model_config.attention_chunk_size is not None + ): + if ( + self.speculative_config is not None + and self.speculative_config.use_eagle() + ): + # Hybrid KV cache manager is not yet supported with chunked + # local attention + eagle. + need_disable_hybrid_kv_cache_manager = True + elif not envs.VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE: + logger.warning( + "There is a latency regression when using chunked local" + " attention with the hybrid KV cache manager. Disabling" + " it, by default. 
To enable it, set the environment " + "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE=1." + ) + # Hybrid KV cache manager is not yet supported with chunked + # local attention. + need_disable_hybrid_kv_cache_manager = True + + if self.scheduler_config.disable_hybrid_kv_cache_manager is None: + # Default to disable HMA, but only if the user didn't express a preference. if self.kv_transfer_config is not None: - # NOTE(Yifan): warning when both kv connector and hybrid kv cache - # manager are enabled but don't disable hybrid kv cache manager here. - # TODO(Kuntai): have a more elegent solution to check and - # turn off HMA for connector that does not support HMA. + # NOTE(Kuntai): turn HMA off for connector unless specifically enabled. + need_disable_hybrid_kv_cache_manager = True logger.warning( - "Warning: both kv connector and hybrid kv cache manager are " - "enabled. However, not all kv connectors support HMA. Please " - "check if the kv connector you are using supports HMA, or " - "disable HMA by setting `--disable-hybrid-kv-cache-manager`." + "Turning off hybrid kv cache manager because " + "`--kv-transfer-config` is set. This will reduce the " + "performance of vLLM on LLMs with sliding window attention " + "or Mamba attention. If you are a developer of kv connector" + ", please consider supporting hybrid kv cache manager for " + "your connector by making sure your connector is a subclass" + " of `SupportsHMA` defined in kv_connector/v1/base.py and" + " use --no-disable-hybrid-kv-cache-manager to start vLLM." ) - if self.kv_events_config is not None: - # Hybrid KV cache manager is not compatible with KV events. - self.scheduler_config.disable_hybrid_kv_cache_manager = True - if ( - self.model_config is not None - and self.model_config.attention_chunk_size is not None - ): - if ( - self.speculative_config is not None - and self.speculative_config.use_eagle() - ): - # Hybrid KV cache manager is not yet supported with chunked - # local attention + eagle. 
- self.scheduler_config.disable_hybrid_kv_cache_manager = True - elif not envs.VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE: - logger.warning( - "There is a latency regression when using chunked local" - " attention with the hybrid KV cache manager. Disabling" - " it, by default. To enable it, set the environment " - "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE=1." - ) - # Hybrid KV cache manager is not yet supported with chunked - # local attention. - self.scheduler_config.disable_hybrid_kv_cache_manager = True + self.scheduler_config.disable_hybrid_kv_cache_manager = ( + need_disable_hybrid_kv_cache_manager + ) + elif ( + self.scheduler_config.disable_hybrid_kv_cache_manager is False + and need_disable_hybrid_kv_cache_manager + ): + raise ValueError( + "Hybrid KV cache manager was explicitly enabled but is not " + "supported in this configuration. Consider omitting the " + "--no-disable-hybrid-kv-cache-manager flag to let vLLM decide" + " automatically." + ) + + if self.scheduler_config.disable_hybrid_kv_cache_manager is None: + # Default to enable HMA if not explicitly disabled by user or logic above. + self.scheduler_config.disable_hybrid_kv_cache_manager = False if self.compilation_config.debug_dump_path: self.compilation_config.debug_dump_path = ( diff --git a/vllm/distributed/eplb/rebalance_execute.py b/vllm/distributed/eplb/rebalance_execute.py index 376dad8a72ef..55856d940f00 100644 --- a/vllm/distributed/eplb/rebalance_execute.py +++ b/vllm/distributed/eplb/rebalance_execute.py @@ -322,9 +322,6 @@ async def transfer_layer( num_local_physical_experts = next(iter(expert_weights[0])).shape[0] assert new_global_expert_indices.shape == (num_moe_layers, num_physical_experts) assert num_physical_experts == ep_size * num_local_physical_experts - # A buffer to hold the expert weights in one layer during the exchange. - # NOTE: Currently we assume the same weights across different layers - # have the same shape. 
is_unchanged, is_received_locally, experts_recv_loc = move_to_buffer( num_local_experts=num_local_physical_experts, diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 514b8534aaa6..fb4b8ac391af 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -202,17 +202,22 @@ def compute_nixl_compatibility_hash( return compat_hash +@dataclass +class RemoteMeta: + block_ids: list[int] + host: str + port: int + engine_id: str + request_id: str + + @dataclass class ReqMeta: local_block_ids: list[int] # To be used when logical block size does not match the kernel block size local_physical_block_ids: list[int] - remote_block_ids: list[int] - remote_host: str - remote_port: int - remote_engine_id: str - remote_request_id: str tp_size: int + remote: RemoteMeta | None = None class NixlConnectorMetadata(KVConnectorMetadata): @@ -223,31 +228,43 @@ def __init__(self): self.reqs_in_batch: set[ReqId] = set() self.reqs_not_processed: set[ReqId] = set() - def add_new_req( + def _add_new_req( self, - request_id: ReqId, local_block_ids: list[int], kv_transfer_params: dict[str, Any], - load_remote_cache: bool = True, - save_to_host: bool = False, - ): - # save and load are mutually exclusive - assert load_remote_cache ^ save_to_host - _req = ReqMeta( + ) -> ReqMeta: + return ReqMeta( local_block_ids=local_block_ids, local_physical_block_ids=local_block_ids, - remote_block_ids=kv_transfer_params["remote_block_ids"], - remote_engine_id=kv_transfer_params["remote_engine_id"], - remote_request_id=kv_transfer_params["remote_request_id"], - remote_host=kv_transfer_params["remote_host"], - remote_port=kv_transfer_params["remote_port"], # P workers don't need to receive tp_size from proxy here. 
tp_size=kv_transfer_params.get("tp_size", 1), ) - if save_to_host: - self.reqs_to_save[request_id] = _req - if load_remote_cache: - self.reqs_to_recv[request_id] = _req + + def add_new_req_to_save( + self, + request_id: ReqId, + local_block_ids: list[int], + kv_transfer_params: dict[str, Any], + ): + self.reqs_to_save[request_id] = self._add_new_req( + local_block_ids, kv_transfer_params + ) + + def add_new_req_to_recv( + self, + request_id: ReqId, + local_block_ids: list[int], + kv_transfer_params: dict[str, Any], + ): + req = self._add_new_req(local_block_ids, kv_transfer_params) + req.remote = RemoteMeta( + block_ids=kv_transfer_params["remote_block_ids"], + engine_id=kv_transfer_params["remote_engine_id"], + request_id=kv_transfer_params["remote_request_id"], + host=kv_transfer_params["remote_host"], + port=kv_transfer_params["remote_port"], + ) + self.reqs_to_recv[request_id] = req class NixlConnector(KVConnectorBase_V1): @@ -666,22 +683,18 @@ def build_connector_meta( # Loop through scheduled reqs and convert to ReqMeta. for req_id, (req, block_ids) in self._reqs_need_recv.items(): assert req.kv_transfer_params is not None - meta.add_new_req( + meta.add_new_req_to_recv( request_id=req_id, local_block_ids=block_ids, kv_transfer_params=req.kv_transfer_params, - load_remote_cache=True, - save_to_host=False, ) for req_id, (req, block_ids) in self._reqs_need_save.items(): assert req.kv_transfer_params is not None - meta.add_new_req( + meta.add_new_req_to_save( request_id=req_id, local_block_ids=block_ids, kv_transfer_params=req.kv_transfer_params, - load_remote_cache=False, - save_to_host=True, ) meta.reqs_to_send = self._reqs_need_send @@ -1124,10 +1137,11 @@ def _background_nixl_handshake( # Do NIXL handshake in background and add to _ready_requests when done. 
fut = self._handshake_futures.get(remote_engine_id) if fut is None: + assert meta.remote is not None fut = self._handshake_initiation_executor.submit( self._nixl_handshake, - meta.remote_host, - meta.remote_port, + meta.remote.host, + meta.remote.port, meta.tp_size, remote_engine_id, ) @@ -1774,6 +1788,7 @@ def get_finished(self) -> tuple[set[str], set[str]]: # clean up metadata for completed requests meta = self._recving_metadata.pop(req_id, None) assert meta is not None, f"{req_id} not found in recving_metadata list" + assert meta.remote is not None if self.use_host_buffer: self.sync_recved_kv_to_device(req_id, meta) if self.enable_permute_local_kv: @@ -1781,7 +1796,7 @@ def get_finished(self) -> tuple[set[str], set[str]]: # post processing for heteroblocksize block_size_ratio = self.kv_topo.block_size_ratio_from_engine_id( - meta.remote_engine_id + meta.remote.engine_id ) if ( not self.use_mla @@ -1916,17 +1931,18 @@ def start_load_kv(self, metadata: NixlConnectorMetadata): meta.local_physical_block_ids = self._logical_to_kernel_block_ids( meta.local_block_ids ) - meta.remote_block_ids = self._logical_to_kernel_block_ids( - meta.remote_block_ids + assert meta.remote is not None + meta.remote.block_ids = self._logical_to_kernel_block_ids( + meta.remote.block_ids ) - remote_engine_id = meta.remote_engine_id + remote_engine_id = meta.remote.engine_id logger.debug( "start_load_kv for request %s from remote engine %s. " "Num local_block_ids: %s. Num remote_block_ids: %s. 
", req_id, remote_engine_id, len(meta.local_physical_block_ids), - len(meta.remote_block_ids), + len(meta.remote.block_ids), ) # always store metadata for failure recovery self._recving_metadata[req_id] = meta @@ -1965,17 +1981,18 @@ def start_load_kv(self, metadata: NixlConnectorMetadata): self._reqs_to_send[req_id] = expiration_time def _read_blocks_for_req(self, req_id: str, meta: ReqMeta): + assert meta.remote is not None logger.debug( "Remote agent %s available, calling _read_blocks for req %s", - meta.remote_engine_id, + meta.remote.engine_id, req_id, ) self._read_blocks( request_id=req_id, - dst_engine_id=meta.remote_engine_id, - remote_request_id=meta.remote_request_id, + dst_engine_id=meta.remote.engine_id, + remote_request_id=meta.remote.request_id, local_block_ids=meta.local_physical_block_ids, - remote_block_ids=meta.remote_block_ids, + remote_block_ids=meta.remote.block_ids, ) def _read_blocks( diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 286753275645..ca19e468914c 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -408,6 +408,7 @@ class EngineArgs: enable_expert_parallel: bool = ParallelConfig.enable_expert_parallel all2all_backend: str | None = ParallelConfig.all2all_backend enable_dbo: bool = ParallelConfig.enable_dbo + ubatch_size: int = ParallelConfig.ubatch_size dbo_decode_token_threshold: int = ParallelConfig.dbo_decode_token_threshold dbo_prefill_token_threshold: int = ParallelConfig.dbo_prefill_token_threshold disable_nccl_for_dp_synchronization: bool = ( @@ -491,7 +492,7 @@ class EngineArgs: enable_chunked_prefill: bool | None = None disable_chunked_mm_input: bool = SchedulerConfig.disable_chunked_mm_input - disable_hybrid_kv_cache_manager: bool = ( + disable_hybrid_kv_cache_manager: bool | None = ( SchedulerConfig.disable_hybrid_kv_cache_manager ) @@ -841,6 +842,10 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: "--all2all-backend", 
**parallel_kwargs["all2all_backend"] ) parallel_group.add_argument("--enable-dbo", **parallel_kwargs["enable_dbo"]) + parallel_group.add_argument( + "--ubatch-size", + **parallel_kwargs["ubatch_size"], + ) parallel_group.add_argument( "--dbo-decode-token-threshold", **parallel_kwargs["dbo_decode_token_threshold"], @@ -1557,6 +1562,7 @@ def create_engine_config( enable_expert_parallel=self.enable_expert_parallel, all2all_backend=self.all2all_backend, enable_dbo=self.enable_dbo, + ubatch_size=self.ubatch_size, dbo_decode_token_threshold=self.dbo_decode_token_threshold, dbo_prefill_token_threshold=self.dbo_prefill_token_threshold, disable_nccl_for_dp_synchronization=self.disable_nccl_for_dp_synchronization, diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index 8485022024a4..ab055dfb1fb0 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -24,6 +24,7 @@ ChatCompletionContentPartInputAudioParam, ChatCompletionContentPartRefusalParam, ChatCompletionContentPartTextParam, + ChatCompletionFunctionToolParam, ChatCompletionMessageToolCallParam, ChatCompletionToolMessageParam, ) @@ -269,6 +270,9 @@ class CustomChatCompletionMessageParam(TypedDict, total=False): reasoning: str | None """The reasoning content for interleaved thinking.""" + tools: list[ChatCompletionFunctionToolParam] | None + """The tools for developer role.""" + ChatCompletionMessageParam: TypeAlias = ( OpenAIChatCompletionMessageParam @@ -300,6 +304,9 @@ class ConversationMessage(TypedDict, total=False): reasoning_content: str | None """Deprecated: The reasoning content for interleaved thinking.""" + tools: list[ChatCompletionFunctionToolParam] | None + """The tools for developer role.""" + # Passed in by user ChatTemplateContentFormatOption = Literal["auto", "string", "openai"] @@ -1619,6 +1626,8 @@ def _parse_chat_message_content( if "name" in message and isinstance(message["name"], str): result_msg["name"] = message["name"] + if role == 
"developer": + result_msg["tools"] = message.get("tools", None) return result @@ -1629,12 +1638,17 @@ def _postprocess_messages(messages: list[ConversationMessage]) -> None: # so, for messages that have tool_calls, parse the string (which we get # from openAI format) to dict for message in messages: - if ( - message["role"] == "assistant" - and "tool_calls" in message - and isinstance(message["tool_calls"], list) - ): - for item in message["tool_calls"]: + if message["role"] == "assistant" and "tool_calls" in message: + tool_calls = message.get("tool_calls") + if not isinstance(tool_calls, list): + continue + + if len(tool_calls) == 0: + # Drop empty tool_calls to keep templates on the normal assistant path. + message.pop("tool_calls", None) + continue + + for item in tool_calls: # if arguments is None or empty string, set to {} if content := item["function"].get("arguments"): if not isinstance(content, (dict, list)): diff --git a/vllm/entrypoints/cli/__init__.py b/vllm/entrypoints/cli/__init__.py index 9dff68236fe9..dc02ac563406 100644 --- a/vllm/entrypoints/cli/__init__.py +++ b/vllm/entrypoints/cli/__init__.py @@ -2,12 +2,14 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm.entrypoints.cli.benchmark.latency import BenchmarkLatencySubcommand from vllm.entrypoints.cli.benchmark.serve import BenchmarkServingSubcommand +from vllm.entrypoints.cli.benchmark.startup import BenchmarkStartupSubcommand from vllm.entrypoints.cli.benchmark.sweep import BenchmarkSweepSubcommand from vllm.entrypoints.cli.benchmark.throughput import BenchmarkThroughputSubcommand __all__: list[str] = [ "BenchmarkLatencySubcommand", "BenchmarkServingSubcommand", + "BenchmarkStartupSubcommand", "BenchmarkSweepSubcommand", "BenchmarkThroughputSubcommand", ] diff --git a/vllm/entrypoints/cli/benchmark/startup.py b/vllm/entrypoints/cli/benchmark/startup.py new file mode 100644 index 000000000000..81eefd7c174d --- /dev/null +++ 
b/vllm/entrypoints/cli/benchmark/startup.py @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import argparse + +from vllm.benchmarks.startup import add_cli_args, main +from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase + + +class BenchmarkStartupSubcommand(BenchmarkSubcommandBase): + """The `startup` subcommand for `vllm bench`.""" + + name = "startup" + help = "Benchmark the startup time of vLLM models." + + @classmethod + def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: + add_cli_args(parser) + + @staticmethod + def cmd(args: argparse.Namespace) -> None: + main(args) diff --git a/vllm/entrypoints/context.py b/vllm/entrypoints/context.py index c70eaaa082fe..b076b883b4d9 100644 --- a/vllm/entrypoints/context.py +++ b/vllm/entrypoints/context.py @@ -2,11 +2,13 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import contextlib +import copy import json import logging from abc import ABC, abstractmethod from collections.abc import Callable from contextlib import AsyncExitStack +from dataclasses import replace from typing import TYPE_CHECKING, Union from openai.types.responses.response_function_tool_call_output_item import ( @@ -34,13 +36,13 @@ ResponseRawMessageAndToken, ResponsesRequest, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ToolParser from vllm.entrypoints.responses_utils import construct_tool_dicts from vllm.entrypoints.tool import Tool from vllm.entrypoints.tool_server import ToolServer from vllm.outputs import RequestOutput from vllm.reasoning.abs_reasoning_parsers import ReasoningParser from vllm.tokenizers.protocol import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ToolParser from vllm.transformers_utils.tokenizer import AnyTokenizer from vllm.utils import random_uuid @@ -74,24 +76,24 @@ class TurnMetrics: def __init__( self, - input_tokens=0, - 
output_tokens=0, - cached_input_tokens=0, - tool_output_tokens=0, - ): + input_tokens: int = 0, + output_tokens: int = 0, + cached_input_tokens: int = 0, + tool_output_tokens: int = 0, + ) -> None: self.input_tokens = input_tokens self.output_tokens = output_tokens self.cached_input_tokens = cached_input_tokens self.tool_output_tokens = tool_output_tokens - def reset(self): + def reset(self) -> None: """Reset counters for a new turn.""" self.input_tokens = 0 self.output_tokens = 0 self.cached_input_tokens = 0 self.tool_output_tokens = 0 - def copy(self): + def copy(self) -> "TurnMetrics": """Create a copy of this turn's token counts.""" return TurnMetrics( self.input_tokens, @@ -164,6 +166,12 @@ class SimpleContext(ConversationContext): def __init__(self): self.last_output = None + + # Accumulated final output for streaming mode + self._accumulated_text: str = "" + self._accumulated_token_ids: list[int] = [] + self._accumulated_logprobs: list = [] + self.num_prompt_tokens = 0 self.num_output_tokens = 0 self.num_cached_tokens = 0 @@ -183,6 +191,13 @@ def append_output(self, output) -> None: self.num_cached_tokens = output.num_cached_tokens or 0 self.num_output_tokens += len(output.outputs[0].token_ids or []) + # Accumulate text, token_ids, and logprobs for streaming mode + delta_output = output.outputs[0] + self._accumulated_text += delta_output.text + self._accumulated_token_ids.extend(delta_output.token_ids) + if delta_output.logprobs is not None: + self._accumulated_logprobs.extend(delta_output.logprobs) + if len(self.input_messages) == 0: output_prompt = output.prompt or "" output_prompt_token_ids = output.prompt_token_ids or [] @@ -194,11 +209,26 @@ def append_output(self, output) -> None: ) self.output_messages.append( ResponseRawMessageAndToken( - message=output.outputs[0].text, - tokens=output.outputs[0].token_ids, + message=delta_output.text, + tokens=delta_output.token_ids, ) ) + @property + def final_output(self) -> RequestOutput | None: + """Return the 
final output, with complete text/token_ids/logprobs.""" + if self.last_output is not None and self.last_output.outputs: + assert isinstance(self.last_output, RequestOutput) + final_output = copy.copy(self.last_output) + # copy inner item to avoid modify last_output + final_output.outputs = [replace(item) for item in self.last_output.outputs] + final_output.outputs[0].text = self._accumulated_text + final_output.outputs[0].token_ids = tuple(self._accumulated_token_ids) + if self._accumulated_logprobs: + final_output.outputs[0].logprobs = self._accumulated_logprobs + return final_output + return self.last_output + def append_tool_output(self, output) -> None: raise NotImplementedError("Should not be called.") @@ -267,12 +297,40 @@ def __init__( self.chat_template = chat_template self.chat_template_content_format = chat_template_content_format + self.input_messages: list[ResponseRawMessageAndToken] = [] + self.output_messages: list[ResponseRawMessageAndToken] = [] + def append_output(self, output: RequestOutput) -> None: self.num_prompt_tokens = len(output.prompt_token_ids or []) self.num_cached_tokens = output.num_cached_tokens or 0 self.num_output_tokens += len(output.outputs[0].token_ids or []) self.parser.process(output.outputs[0]) + # only store if enable_response_messages is True, save memory + if self.request.enable_response_messages: + output_prompt = output.prompt or "" + output_prompt_token_ids = output.prompt_token_ids or [] + if len(self.input_messages) == 0: + self.input_messages.append( + ResponseRawMessageAndToken( + message=output_prompt, + tokens=output_prompt_token_ids, + ) + ) + else: + self.output_messages.append( + ResponseRawMessageAndToken( + message=output_prompt, + tokens=output_prompt_token_ids, + ) + ) + self.output_messages.append( + ResponseRawMessageAndToken( + message=output.outputs[0].text, + tokens=output.outputs[0].token_ids, + ) + ) + def append_tool_output(self, output: list[ResponseInputOutputItem]) -> None: 
self.parser.response_messages.extend(output) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 31319cf64aeb..2768e267f483 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -18,6 +18,7 @@ create_sort_beams_key_function, ) from vllm.config import ( + AttentionConfig, CompilationConfig, PoolerConfig, ProfilerConfig, @@ -175,6 +176,10 @@ class LLM: compilation_config: Either an integer or a dictionary. If it is an integer, it is used as the mode of compilation optimization. If it is a dictionary, it can specify the full compilation configuration. + attention_config: Configuration for attention mechanisms. Can be a + dictionary or an AttentionConfig instance. If a dictionary, it will + be converted to an AttentionConfig. Allows specifying the attention + backend and other attention-related settings. **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs]. Note: @@ -213,6 +218,7 @@ def __init__( | StructuredOutputsConfig | None = None, profiler_config: dict[str, Any] | ProfilerConfig | None = None, + attention_config: dict[str, Any] | AttentionConfig | None = None, kv_cache_memory_bytes: int | None = None, compilation_config: int | dict[str, Any] | CompilationConfig | None = None, logits_processors: list[str | type[LogitsProcessor]] | None = None, @@ -252,51 +258,28 @@ def __init__( if hf_overrides is None: hf_overrides = {} - if compilation_config is not None: - if isinstance(compilation_config, int): - compilation_config_instance = CompilationConfig( - mode=CompilationMode(compilation_config) - ) - elif isinstance(compilation_config, dict): - compilation_config_instance = CompilationConfig( - **{ - k: v - for k, v in compilation_config.items() - if is_init_field(CompilationConfig, k) - } - ) - else: - compilation_config_instance = compilation_config - else: - compilation_config_instance = CompilationConfig() - - if structured_outputs_config is not None: - if isinstance(structured_outputs_config, dict): - 
structured_outputs_instance = StructuredOutputsConfig( - **{ - k: v - for k, v in structured_outputs_config.items() - if is_init_field(StructuredOutputsConfig, k) - } - ) - else: - structured_outputs_instance = structured_outputs_config - else: - structured_outputs_instance = StructuredOutputsConfig() - - if profiler_config is not None: - if isinstance(profiler_config, dict): - profiler_config_instance = ProfilerConfig( - **{ - k: v - for k, v in profiler_config.items() - if is_init_field(ProfilerConfig, k) - } - ) - else: - profiler_config_instance = profiler_config + def _make_config(value: Any, cls: type[_R]) -> _R: + """Convert dict/None/instance to a config instance.""" + if value is None: + return cls() + if isinstance(value, dict): + return cls(**{k: v for k, v in value.items() if is_init_field(cls, k)}) # type: ignore[arg-type] + return value + + if isinstance(compilation_config, int): + compilation_config_instance = CompilationConfig( + mode=CompilationMode(compilation_config) + ) else: - profiler_config_instance = ProfilerConfig() + compilation_config_instance = _make_config( + compilation_config, CompilationConfig + ) + + structured_outputs_instance = _make_config( + structured_outputs_config, StructuredOutputsConfig + ) + profiler_config_instance = _make_config(profiler_config, ProfilerConfig) + attention_config_instance = _make_config(attention_config, AttentionConfig) # warn about single-process data parallel usage. 
_dp_size = int(kwargs.get("data_parallel_size", 1)) @@ -341,6 +324,7 @@ def __init__( pooler_config=pooler_config, structured_outputs_config=structured_outputs_instance, profiler_config=profiler_config_instance, + attention_config=attention_config_instance, compilation_config=compilation_config_instance, logits_processors=logits_processors, **kwargs, diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 7be601d824f3..5d0eacae34dd 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -72,7 +72,6 @@ OpenAIServingTranscription, OpenAIServingTranslation, ) -from vllm.entrypoints.openai.tool_parsers import ToolParserManager from vllm.entrypoints.openai.utils import validate_json_request from vllm.entrypoints.pooling.classify.serving import ServingClassification from vllm.entrypoints.pooling.embed.serving import OpenAIServingEmbedding @@ -95,6 +94,7 @@ from vllm.logger import init_logger from vllm.reasoning import ReasoningParserManager from vllm.tasks import POOLING_TASKS +from vllm.tool_parsers import ToolParserManager from vllm.usage.usage_lib import UsageContext from vllm.utils.argparse_utils import FlexibleArgumentParser from vllm.utils.gc_utils import freeze_gc_heap diff --git a/vllm/entrypoints/openai/cli_args.py b/vllm/entrypoints/openai/cli_args.py index b798b05dcfcb..a8eef76cd8ae 100644 --- a/vllm/entrypoints/openai/cli_args.py +++ b/vllm/entrypoints/openai/cli_args.py @@ -27,8 +27,8 @@ H11_MAX_INCOMPLETE_EVENT_SIZE_DEFAULT, ) from vllm.entrypoints.openai.serving_models import LoRAModulePath -from vllm.entrypoints.openai.tool_parsers import ToolParserManager from vllm.logger import init_logger +from vllm.tool_parsers import ToolParserManager from vllm.utils.argparse_utils import FlexibleArgumentParser logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/parser/responses_parser.py b/vllm/entrypoints/openai/parser/responses_parser.py index 
00045a7ccfd2..c364d6d80544 100644 --- a/vllm/entrypoints/openai/parser/responses_parser.py +++ b/vllm/entrypoints/openai/parser/responses_parser.py @@ -3,7 +3,11 @@ import logging from collections.abc import Callable -from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall +from openai.types.responses import ResponseFunctionToolCall, ResponseOutputItem +from openai.types.responses.response_function_tool_call_output_item import ( + ResponseFunctionToolCallOutputItem, +) +from openai.types.responses.response_output_item import McpCall from openai.types.responses.response_output_message import ResponseOutputMessage from openai.types.responses.response_output_text import ResponseOutputText from openai.types.responses.response_reasoning_item import ( @@ -11,11 +15,12 @@ ResponseReasoningItem, ) +from vllm.entrypoints.constants import MCP_PREFIX from vllm.entrypoints.openai.protocol import ResponseInputOutputItem, ResponsesRequest -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ToolParser from vllm.outputs import CompletionOutput from vllm.reasoning.abs_reasoning_parsers import ReasoningParser from vllm.tokenizers.protocol import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ToolParser from vllm.transformers_utils.tokenizer import AnyTokenizer from vllm.utils import random_uuid @@ -111,6 +116,37 @@ def process(self, output: CompletionOutput) -> "ResponsesParser": return self + def make_response_output_items_from_parsable_context( + self, + ) -> list[ResponseOutputItem]: + """Given a list of sentences, construct ResponseOutput Items.""" + response_messages = self.response_messages[self.num_init_messages :] + output_messages: list[ResponseOutputItem] = [] + for message in response_messages: + if not isinstance(message, ResponseFunctionToolCallOutputItem): + output_messages.append(message) + else: + if len(output_messages) == 0: + raise ValueError( + "Cannot have a FunctionToolCallOutput before 
FunctionToolCall." + ) + if isinstance(output_messages[-1], ResponseFunctionToolCall): + mcp_message = McpCall( + id=f"{MCP_PREFIX}{random_uuid()}", + arguments=output_messages[-1].arguments, + name=output_messages[-1].name, + server_label=output_messages[ + -1 + ].name, # TODO: store the server label + type="mcp_call", + status="completed", + output=message.output, + # TODO: support error output + ) + output_messages[-1] = mcp_message + + return output_messages + def get_responses_parser_for_simple_context( *, diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index aeff6bded7f0..a7c4980cd367 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -320,6 +320,7 @@ class ResponsesRequest(OpenAIBaseModel): max_tool_calls: int | None = None metadata: Metadata | None = None model: str | None = None + logit_bias: dict[str, float] | None = None parallel_tool_calls: bool | None = True previous_response_id: str | None = None prompt: ResponsePrompt | None = None @@ -333,6 +334,7 @@ class ResponsesRequest(OpenAIBaseModel): tools: list[Tool] = Field(default_factory=list) top_logprobs: int | None = 0 top_p: float | None = None + top_k: int | None = None truncation: Literal["auto", "disabled"] | None = "disabled" user: str | None = None @@ -387,6 +389,7 @@ class ResponsesRequest(OpenAIBaseModel): _DEFAULT_SAMPLING_PARAMS = { "temperature": 1.0, "top_p": 1.0, + "top_k": 0, } def to_sampling_params( @@ -408,6 +411,10 @@ def to_sampling_params( top_p = default_sampling_params.get( "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"] ) + if (top_k := self.top_k) is None: + top_k = default_sampling_params.get( + "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"] + ) stop_token_ids = default_sampling_params.get("stop_token_ids") # Structured output @@ -428,6 +435,7 @@ def to_sampling_params( return SamplingParams.from_optional( temperature=temperature, top_p=top_p, + top_k=top_k, max_tokens=max_tokens, 
logprobs=self.top_logprobs if self.is_include_output_logprobs() else None, stop_token_ids=stop_token_ids, @@ -435,6 +443,7 @@ def to_sampling_params( RequestOutputKind.DELTA if self.stream else RequestOutputKind.FINAL_ONLY ), structured_outputs=structured_outputs, + logit_bias=self.logit_bias, ) def is_include_output_logprobs(self) -> bool: diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index d94fa7dd9193..98fc7810faf9 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -57,11 +57,9 @@ clamp_prompt_logprobs, ) from vllm.entrypoints.openai.serving_models import OpenAIServingModels -from vllm.entrypoints.openai.tool_parsers import ToolParser -from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import MistralToolCall from vllm.entrypoints.openai.utils import maybe_filter_parallel_tool_calls from vllm.entrypoints.utils import get_max_tokens, should_include_usage -from vllm.inputs.data import TokensPrompt as EngineTokensPrompt +from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger from vllm.logprobs import Logprob from vllm.outputs import CompletionOutput, RequestOutput @@ -73,6 +71,8 @@ truncate_tool_call_ids, validate_request_params, ) +from vllm.tool_parsers import ToolParser +from vllm.tool_parsers.mistral_tool_parser import MistralToolCall from vllm.utils.collection_utils import as_list from vllm.v1.sample.logits_processor import validate_logits_processors_parameters @@ -234,11 +234,7 @@ async def create_chat_completion( ) if error_check_ret is not None: return error_check_ret - ( - conversation, - request_prompts, - engine_prompts, - ) = await self._preprocess_chat( + conversation, engine_prompts = await self._preprocess_chat( request, tokenizer, request.messages, @@ -254,11 +250,7 @@ async def create_chat_completion( ) else: # For GPT-OSS. 
- ( - conversation, - request_prompts, - engine_prompts, - ) = self._make_request_with_harmony(request) + conversation, engine_prompts = self._make_request_with_harmony(request) except (ValueError, TypeError, RuntimeError, jinja2.TemplateError) as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(f"{e} {e.__cause__}") @@ -278,7 +270,7 @@ async def create_chat_completion( generators: list[AsyncGenerator[RequestOutput, None]] = [] try: for i, engine_prompt in enumerate(engine_prompts): - prompt_text, _, _ = self._get_prompt_components(request_prompts[i]) + prompt_text, _, _ = self._get_prompt_components(engine_prompt) # If we are creating sub requests for multiple prompts, ensure that they # have unique request ids. sub_request_id = ( @@ -313,7 +305,7 @@ async def create_chat_completion( self._log_inputs( sub_request_id, - request_prompts[i], + engine_prompt, params=sampling_params, lora_request=lora_request, ) @@ -537,7 +529,7 @@ async def chat_completion_stream_generator( request_id: str, model_name: str, conversation: list[ConversationMessage], - tokenizer: TokenizerLike, + tokenizer: TokenizerLike | None, request_metadata: RequestResponseMetadata, ) -> AsyncGenerator[str, None]: created_time = int(time.time()) @@ -591,6 +583,11 @@ async def chat_completion_stream_generator( try: if self.reasoning_parser: + if tokenizer is None: + raise ValueError( + "Tokenizer not available when `skip_tokenizer_init=True`" + ) + reasoning_parser = self.reasoning_parser( tokenizer, chat_template_kwargs=request.chat_template_kwargs, # type: ignore @@ -604,6 +601,11 @@ async def chat_completion_stream_generator( # Prepare the tool parser if it's needed try: if tool_choice_auto and self.tool_parser: + if tokenizer is None: + raise ValueError( + "Tokenizer not available when `skip_tokenizer_init=True`" + ) + tool_parsers: list[ToolParser | None] = [ self.tool_parser(tokenizer) ] * num_choices @@ -962,21 +964,9 @@ async def 
chat_completion_stream_generator( assert reasoning_end_arr is not None output_token_ids = as_list(output.token_ids) if not reasoning_end_arr[i]: - delta_message = ( - reasoning_parser.extract_reasoning_streaming( - previous_text, - current_text, - delta_text, - previous_token_ids, - current_token_ids, - output_token_ids, - ) - ) # When encountering think end id in prompt_token_ids # i.e {"enable_thinking": False}, # set reasoning status to end. - # Remove the text and token ids related - # to 'reasoning'. if ( res.prompt_token_ids and reasoning_parser.is_reasoning_end( @@ -985,30 +975,38 @@ async def chat_completion_stream_generator( ): reasoning_end_arr[i] = True current_token_ids = output_token_ids - if delta_message and delta_message.content: - current_text = delta_message.content - delta_message.content = None - else: - current_text = "" - # When encountering think end id in delta_token_ids, - # set reasoning status to end. - # Remove the text and token ids related - # to 'reasoning'. - if reasoning_parser.is_reasoning_end(output_token_ids): - reasoning_end_arr[i] = True - current_token_ids = ( - reasoning_parser.extract_content_ids( - output_token_ids + # Don't update current_text, keep it as is from delta + else: + delta_message = ( + reasoning_parser.extract_reasoning_streaming( + previous_text, + current_text, + delta_text, + previous_token_ids, + current_token_ids, + output_token_ids, ) ) - if delta_message and delta_message.content: - current_text = delta_message.content - delta_message.content = None - else: - current_text = "" + + # When encountering think end id in delta_token_ids, + # set reasoning status to end. + # Remove the text and token ids related + # to 'reasoning'. 
+ if reasoning_parser.is_reasoning_end(output_token_ids): + reasoning_end_arr[i] = True + current_token_ids = ( + reasoning_parser.extract_content_ids( + output_token_ids + ) + ) + if delta_message and delta_message.content: + current_text = delta_message.content + delta_message.content = None + else: + current_text = "" # handle tool calls only after reasoning is done, - else: + if reasoning_end_arr[i]: delta_token_ids = output_token_ids # First time to tool call, # add the remaining text and token ids @@ -1317,7 +1315,7 @@ async def chat_completion_full_generator( request_id: str, model_name: str, conversation: list[ConversationMessage], - tokenizer: TokenizerLike, + tokenizer: TokenizerLike | None, request_metadata: RequestResponseMetadata, ) -> ErrorResponse | ChatCompletionResponse: created_time = int(time.time()) @@ -1367,6 +1365,11 @@ async def chat_completion_full_generator( reasoning = None if self.tool_parser is not None: + if tokenizer is None: + raise ValueError( + "Tokenizer not available when `skip_tokenizer_init=True`" + ) + tool_parser = self.tool_parser(tokenizer) # NOTE: We use token_ids for openai tool parser tool_call_info = tool_parser.extract_tool_calls( @@ -1409,6 +1412,11 @@ async def chat_completion_full_generator( if self.reasoning_parser: try: + if tokenizer is None: + raise ValueError( + "Tokenizer not available when `skip_tokenizer_init=True`" + ) + reasoning_parser = self.reasoning_parser( tokenizer, chat_template_kwargs=request.chat_template_kwargs, # type: ignore @@ -1648,7 +1656,7 @@ def _get_top_logprobs( self, logprobs: dict[int, Logprob], top_logprobs: int | None, - tokenizer: TokenizerLike, + tokenizer: TokenizerLike | None, should_return_as_token_id: bool, ) -> list[ChatCompletionLogProb]: return [ @@ -1672,7 +1680,7 @@ def _create_chat_logprobs( self, token_ids: GenericSequence[int], top_logprobs: GenericSequence[dict[int, Logprob] | None], - tokenizer: TokenizerLike, + tokenizer: TokenizerLike | None, num_output_top_logprobs: 
int | None = None, return_as_token_id: bool | None = None, ) -> ChatCompletionLogProbs: @@ -1690,6 +1698,11 @@ def _create_chat_logprobs( if should_return_as_token_id: token = f"token_id:{token_id}" else: + if tokenizer is None: + raise ValueError( + "Tokenizer not available when `skip_tokenizer_init=True`" + ) + token = tokenizer.decode(token_id) logprobs_content.append( @@ -1800,10 +1813,10 @@ def _make_request_with_harmony( # Render prompt token ids. prompt_token_ids = render_for_completion(messages) - engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids) + engine_prompt = TokensPrompt(prompt_token_ids=prompt_token_ids) # Add cache_salt if provided in the request if request.cache_salt is not None: engine_prompt["cache_salt"] = request.cache_salt - return messages, [prompt_token_ids], [engine_prompt] + return messages, [engine_prompt] diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index d83a7c8d59f3..5f7cfaa53ec1 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -5,60 +5,19 @@ import sys import time import traceback -from collections.abc import AsyncGenerator, Callable, Iterable, Mapping, Sequence +from collections.abc import AsyncGenerator, Callable, Iterable, Mapping from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass, field from http import HTTPStatus from typing import Any, ClassVar, Generic, TypeAlias, TypeVar import numpy as np -import torch from fastapi import Request -from pydantic import ConfigDict, TypeAdapter -from starlette.datastructures import Headers -from typing_extensions import TypeIs - -from vllm.entrypoints.context import ( - HarmonyContext, - ParsableContext, - StreamingHarmonyContext, -) -from vllm.entrypoints.openai.protocol import ( - FunctionCall, - ResponseInputOutputItem, - ResponsesRequest, -) -from vllm.entrypoints.pooling.classify.protocol import ( - ClassificationChatRequest, - 
ClassificationCompletionRequest, - ClassificationRequest, - ClassificationResponse, -) -from vllm.entrypoints.pooling.embed.protocol import ( - EmbeddingChatRequest, - EmbeddingCompletionRequest, - EmbeddingRequest, - EmbeddingResponse, -) -from vllm.entrypoints.pooling.pooling.protocol import ( - IOProcessorRequest, - PoolingResponse, -) -from vllm.entrypoints.pooling.score.protocol import ( - RerankRequest, - ScoreRequest, - ScoreResponse, -) -from vllm.transformers_utils.tokenizer import AnyTokenizer - -if sys.version_info >= (3, 12): - from typing import TypedDict -else: - from typing_extensions import TypedDict - from openai.types.responses import ( ToolChoiceFunction, ) +from pydantic import ConfigDict, TypeAdapter +from starlette.datastructures import Headers import vllm.envs as envs from vllm.beam_search import BeamSearchSequence, create_sort_beams_key_function @@ -72,7 +31,12 @@ parse_chat_messages_futures, resolve_chat_template_content_format, ) -from vllm.entrypoints.context import ConversationContext +from vllm.entrypoints.context import ( + ConversationContext, + HarmonyContext, + ParsableContext, + StreamingHarmonyContext, +) from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ( ChatCompletionNamedToolChoiceParam, @@ -83,7 +47,10 @@ DetokenizeRequest, ErrorInfo, ErrorResponse, + FunctionCall, FunctionDefinition, + ResponseInputOutputItem, + ResponsesRequest, TokenizeChatRequest, TokenizeCompletionRequest, TokenizeResponse, @@ -92,15 +59,34 @@ TranslationRequest, ) from vllm.entrypoints.openai.serving_models import OpenAIServingModels -from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager +from vllm.entrypoints.pooling.classify.protocol import ( + ClassificationChatRequest, + ClassificationCompletionRequest, + ClassificationRequest, + ClassificationResponse, +) +from vllm.entrypoints.pooling.embed.protocol import ( + EmbeddingChatRequest, + EmbeddingCompletionRequest, + 
EmbeddingRequest, + EmbeddingResponse, +) +from vllm.entrypoints.pooling.pooling.protocol import ( + IOProcessorRequest, + PoolingResponse, +) +from vllm.entrypoints.pooling.score.protocol import ( + RerankRequest, + ScoreRequest, + ScoreResponse, +) from vllm.entrypoints.renderer import BaseRenderer, CompletionRenderer, RenderConfig from vllm.entrypoints.responses_utils import ( construct_input_messages, ) from vllm.entrypoints.serve.disagg.protocol import GenerateRequest, GenerateResponse from vllm.entrypoints.utils import _validate_truncation_size -from vllm.inputs.data import PromptType -from vllm.inputs.data import TokensPrompt as EngineTokensPrompt +from vllm.inputs.data import PromptType, TokensPrompt from vllm.inputs.parse import ( PromptComponents, get_prompt_components, @@ -109,17 +95,15 @@ from vllm.logger import init_logger from vllm.logprobs import Logprob, PromptLogprobs from vllm.lora.request import LoRARequest -from vllm.multimodal import ( # noqa: F401 - Required to resolve Pydantic error in RequestProcessingMixin - MultiModalDataDict, - MultiModalUUIDDict, -) +from vllm.multimodal import MultiModalDataDict from vllm.outputs import CompletionOutput, PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.reasoning import ReasoningParser, ReasoningParserManager from vllm.sampling_params import BeamSearchParams, SamplingParams from vllm.tokenizers import TokenizerLike -from vllm.tokenizers.deepseekv32 import DeepseekV32Tokenizer +from vllm.tokenizers.deepseek_v32 import DeepseekV32Tokenizer from vllm.tokenizers.mistral import MistralTokenizer +from vllm.tool_parsers import ToolParser, ToolParserManager from vllm.tracing import ( contains_trace_headers, extract_trace_headers, @@ -185,34 +169,6 @@ def __init__(self, message: str = "Internal server error"): ) -class TextTokensPrompt(TypedDict): - prompt: str - prompt_token_ids: list[int] - - -class EmbedsPrompt(TypedDict): - prompt_embeds: torch.Tensor - - 
-RequestPrompt: TypeAlias = list[int] | str | TextTokensPrompt | EmbedsPrompt - - -def is_text_tokens_prompt(prompt: RequestPrompt) -> TypeIs[TextTokensPrompt]: - return ( - isinstance(prompt, dict) - and "prompt_token_ids" in prompt - and "prompt_embeds" not in prompt - ) - - -def is_embeds_prompt(prompt: RequestPrompt) -> TypeIs[EmbedsPrompt]: - return ( - isinstance(prompt, dict) - and "prompt_token_ids" not in prompt - and "prompt_embeds" in prompt - ) - - RequestT = TypeVar("RequestT", bound=AnyRequest) @@ -223,8 +179,7 @@ class RequestProcessingMixin: handling prompt preparation and engine input. """ - request_prompts: Sequence[RequestPrompt] | None = field(default_factory=list) - engine_prompts: list[EngineTokensPrompt] | None = field(default_factory=list) + engine_prompts: list[TokensPrompt] | None = field(default_factory=list) @dataclass(kw_only=True) @@ -425,7 +380,7 @@ async def beam_search( prompts_batch, lora_req_batch = zip( *[ ( - EngineTokensPrompt( + TokensPrompt( prompt_token_ids=beam.tokens, multi_modal_data=beam.multi_modal_data, mm_processor_kwargs=beam.mm_processor_kwargs, @@ -947,7 +902,7 @@ async def _normalize_prompt_text_to_input( prompt: str, tokenizer: TokenizerLike, add_special_tokens: bool, - ) -> TextTokensPrompt: + ) -> TokensPrompt: async_tokenizer = self._get_async_tokenizer(tokenizer) if ( @@ -988,7 +943,7 @@ async def _normalize_prompt_tokens_to_input( request: AnyRequest, prompt_ids: list[int], tokenizer: TokenizerLike | None, - ) -> TextTokensPrompt: + ) -> TokensPrompt: truncate_prompt_tokens = getattr(request, "truncate_prompt_tokens", None) if truncate_prompt_tokens is None: @@ -1011,7 +966,7 @@ def _validate_input( request: AnyRequest, input_ids: list[int], input_text: str, - ) -> TextTokensPrompt: + ) -> TokensPrompt: token_num = len(input_ids) # Note: EmbeddingRequest, ClassificationRequest, @@ -1042,7 +997,7 @@ def _validate_input( f"{token_num} tokens in the input for {operation}. 
" f"Please reduce the length of the input." ) - return TextTokensPrompt(prompt=input_text, prompt_token_ids=input_ids) + return TokensPrompt(prompt=input_text, prompt_token_ids=input_ids) # Note: TokenizeRequest and DetokenizeRequest doesn't have max_tokens # and does not require model context length validation @@ -1050,7 +1005,7 @@ def _validate_input( request, (TokenizeCompletionRequest, TokenizeChatRequest, DetokenizeRequest), ): - return TextTokensPrompt(prompt=input_text, prompt_token_ids=input_ids) + return TokensPrompt(prompt=input_text, prompt_token_ids=input_ids) # chat completion endpoint supports max_completion_tokens if isinstance(request, ChatCompletionRequest): @@ -1078,7 +1033,7 @@ def _validate_input( f" - {token_num})." ) - return TextTokensPrompt(prompt=input_text, prompt_token_ids=input_ids) + return TokensPrompt(prompt=input_text, prompt_token_ids=input_ids) async def _tokenize_prompt_input_async( self, @@ -1086,7 +1041,7 @@ async def _tokenize_prompt_input_async( tokenizer: TokenizerLike, prompt_input: str | list[int], add_special_tokens: bool = True, - ) -> TextTokensPrompt: + ) -> TokensPrompt: """ A simpler implementation that tokenizes a single prompt input. """ @@ -1105,7 +1060,7 @@ async def _tokenize_prompt_inputs_async( tokenizer: TokenizerLike, prompt_inputs: Iterable[str | list[int]], add_special_tokens: bool = True, - ) -> AsyncGenerator[TextTokensPrompt, None]: + ) -> AsyncGenerator[TokensPrompt, None]: """ A simpler implementation that tokenizes multiple prompt inputs. 
""" @@ -1158,11 +1113,7 @@ async def _preprocess_chat( chat_template_kwargs: dict[str, Any] | None = None, tool_parser: Callable[[TokenizerLike], ToolParser] | None = None, add_special_tokens: bool = False, - ) -> tuple[ - list[ConversationMessage], - Sequence[RequestPrompt], - list[EngineTokensPrompt], - ]: + ) -> tuple[list[ConversationMessage], list[TokensPrompt]]: model_config = self.model_config resolved_content_format = resolve_chat_template_content_format( @@ -1235,9 +1186,7 @@ async def _preprocess_chat( "Prompt has to be a string", "when the tokenizer is not initialised", ) - prompt_inputs = TextTokensPrompt( - prompt=request_prompt, prompt_token_ids=[1] - ) + prompt_inputs = TokensPrompt(prompt=request_prompt, prompt_token_ids=[1]) elif isinstance(request_prompt, str): prompt_inputs = await self._tokenize_prompt_input_async( request, @@ -1250,14 +1199,15 @@ async def _preprocess_chat( assert is_list_of(request_prompt, int), ( "Prompt has to be either a string or a list of token ids" ) - prompt_inputs = TextTokensPrompt( + prompt_inputs = TokensPrompt( prompt=tokenizer.decode(request_prompt), prompt_token_ids=request_prompt, ) - engine_prompt = EngineTokensPrompt( - prompt_token_ids=prompt_inputs["prompt_token_ids"] - ) + engine_prompt = TokensPrompt(prompt_token_ids=prompt_inputs["prompt_token_ids"]) + if "prompt" in prompt_inputs: + engine_prompt["prompt"] = prompt_inputs["prompt"] + if mm_data is not None: engine_prompt["multi_modal_data"] = mm_data @@ -1270,7 +1220,7 @@ async def _preprocess_chat( if hasattr(request, "cache_salt") and request.cache_salt is not None: engine_prompt["cache_salt"] = request.cache_salt - return conversation, [request_prompt], [engine_prompt] + return conversation, [engine_prompt] async def _process_inputs( self, @@ -1302,7 +1252,7 @@ async def _process_inputs( async def _render_next_turn( self, request: ResponsesRequest, - tokenizer: AnyTokenizer, + tokenizer: TokenizerLike | None, messages: list[ResponseInputOutputItem], 
tool_dicts: list[dict[str, Any]] | None, tool_parser, @@ -1313,7 +1263,7 @@ async def _render_next_turn( request_input=messages, ) - _, request_prompts, engine_prompts = await self._preprocess_chat( + _, engine_prompts = await self._preprocess_chat( request, tokenizer, new_messages, @@ -1322,20 +1272,20 @@ async def _render_next_turn( chat_template=chat_template, chat_template_content_format=chat_template_content_format, ) - return request_prompts, engine_prompts + return engine_prompts async def _generate_with_builtin_tools( self, request_id: str, - request_prompt: RequestPrompt, - engine_prompt: EngineTokensPrompt, + engine_prompt: TokensPrompt, sampling_params: SamplingParams, context: ConversationContext, lora_request: LoRARequest | None = None, priority: int = 0, **kwargs, ): - prompt_text, _, _ = self._get_prompt_components(request_prompt) + prompt_text, _, _ = self._get_prompt_components(engine_prompt) + orig_priority = priority sub_request = 0 while True: @@ -1343,7 +1293,7 @@ async def _generate_with_builtin_tools( sub_request_id = f"{request_id}_{sub_request}" self._log_inputs( sub_request_id, - request_prompt, + engine_prompt, params=sampling_params, lora_request=lora_request, ) @@ -1388,10 +1338,9 @@ async def _generate_with_builtin_tools( # Render the next prompt token ids. 
if isinstance(context, (HarmonyContext, StreamingHarmonyContext)): prompt_token_ids = context.render_for_completion() - engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids) - request_prompt = prompt_token_ids + engine_prompt = TokensPrompt(prompt_token_ids=prompt_token_ids) elif isinstance(context, ParsableContext): - request_prompts, engine_prompts = await self._render_next_turn( + engine_prompts = await self._render_next_turn( context.request, context.tokenizer, context.parser.response_messages, @@ -1401,8 +1350,7 @@ async def _generate_with_builtin_tools( context.chat_template_content_format, ) engine_prompt = engine_prompts[0] - request_prompt = request_prompts[0] - prompt_text, _, _ = self._get_prompt_components(request_prompt) + prompt_text, _, _ = self._get_prompt_components(engine_prompt) # Update the sampling params. sampling_params.max_tokens = self.max_model_len - len( @@ -1412,19 +1360,13 @@ async def _generate_with_builtin_tools( priority = orig_priority - 1 sub_request += 1 - def _get_prompt_components( - self, - prompt: RequestPrompt | PromptType, - ) -> PromptComponents: - if isinstance(prompt, list): - return PromptComponents(token_ids=prompt) - - return get_prompt_components(prompt) # type: ignore[arg-type] + def _get_prompt_components(self, prompt: PromptType) -> PromptComponents: + return get_prompt_components(prompt) def _log_inputs( self, request_id: str, - inputs: RequestPrompt | PromptType, + inputs: PromptType, params: SamplingParams | PoolingParams | BeamSearchParams | None, lora_request: LoRARequest | None, ) -> None: @@ -1486,7 +1428,7 @@ def _get_data_parallel_rank(raw_request: Request | None) -> int | None: @staticmethod def _parse_tool_calls_from_content( request: ResponsesRequest | ChatCompletionRequest, - tokenizer: TokenizerLike, + tokenizer: TokenizerLike | None, enable_auto_tools: bool, tool_parser_cls: Callable[[TokenizerLike], ToolParser] | None, content: str | None = None, @@ -1526,6 +1468,11 @@ def 
_parse_tool_calls_from_content( and enable_auto_tools and (request.tool_choice == "auto" or request.tool_choice is None) ): + if tokenizer is None: + raise ValueError( + "Tokenizer not available when `skip_tokenizer_init=True`" + ) + # Automatic Tool Call Parsing try: tool_parser = tool_parser_cls(tokenizer) diff --git a/vllm/entrypoints/openai/serving_responses.py b/vllm/entrypoints/openai/serving_responses.py index 60d14337dcaa..1f9b5704624a 100644 --- a/vllm/entrypoints/openai/serving_responses.py +++ b/vllm/entrypoints/openai/serving_responses.py @@ -104,10 +104,9 @@ construct_input_messages, construct_tool_dicts, extract_tool_types, - make_response_output_items_from_parsable_context, ) from vllm.entrypoints.tool_server import ToolServer -from vllm.inputs.data import TokensPrompt as EngineTokensPrompt +from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger from vllm.logprobs import Logprob as SampleLogprob from vllm.logprobs import SampleLogprobs @@ -258,7 +257,7 @@ def __init__( self.tool_server = tool_server def _validate_generator_input( - self, engine_prompt: EngineTokensPrompt + self, engine_prompt: TokensPrompt ) -> ErrorResponse | None: """Add validations to the input to the generator here.""" if self.max_model_len <= len(engine_prompt["prompt_token_ids"]): @@ -353,11 +352,11 @@ async def create_responses( tokenizer = await self.engine_client.get_tokenizer() if self.use_harmony: - messages, request_prompts, engine_prompts = ( - self._make_request_with_harmony(request, prev_response) + messages, engine_prompts = self._make_request_with_harmony( + request, prev_response ) else: - messages, request_prompts, engine_prompts = await self._make_request( + messages, engine_prompts = await self._make_request( request, prev_response, tokenizer ) @@ -393,7 +392,7 @@ async def create_responses( assert len(builtin_tool_list) == 0 available_tools = [] try: - for i, engine_prompt in enumerate(engine_prompts): + for engine_prompt in 
engine_prompts: maybe_error = self._validate_generator_input(engine_prompt) if maybe_error is not None: return maybe_error @@ -420,7 +419,7 @@ async def create_responses( context = HarmonyContext(messages, available_tools) else: if envs.VLLM_USE_EXPERIMENTAL_PARSER_CONTEXT: - # This is an feature in development for parsing + # This is a feature in development for parsing # tokens during generation instead of at the end context = ParsableContext( response_messages=messages, @@ -449,7 +448,6 @@ async def create_responses( ) generator = self._generate_with_builtin_tools( request_id=request.request_id, - request_prompt=request_prompts[i], engine_prompt=engine_prompt, sampling_params=sampling_params, context=context, @@ -564,7 +562,7 @@ async def _make_request( prev_msg=self.msg_store.get(prev_response.id) if prev_response else None, prev_response_output=prev_response.output if prev_response else None, ) - _, request_prompts, engine_prompts = await self._preprocess_chat( + _, engine_prompts = await self._preprocess_chat( request, tokenizer, messages, @@ -573,7 +571,7 @@ async def _make_request( chat_template=self.chat_template, chat_template_content_format=self.chat_template_content_format, ) - return messages, request_prompts, engine_prompts + return messages, engine_prompts def _make_request_with_harmony( self, @@ -586,13 +584,13 @@ def _make_request_with_harmony( ) messages = self._construct_input_messages_with_harmony(request, prev_response) prompt_token_ids = render_for_completion(messages) - engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids) + engine_prompt = TokensPrompt(prompt_token_ids=prompt_token_ids) # Add cache_salt if provided in the request if request.cache_salt is not None: engine_prompt["cache_salt"] = request.cache_salt - return messages, [prompt_token_ids], [engine_prompt] + return messages, [engine_prompt] async def _initialize_tool_sessions( self, @@ -659,24 +657,19 @@ async def responses_full_generator( else: status = 
"incomplete" elif isinstance(context, ParsableContext): - response_messages = context.parser.response_messages[ - context.parser.num_init_messages : - ] - output = make_response_output_items_from_parsable_context(response_messages) + output = context.parser.make_response_output_items_from_parsable_context() - # TODO: context for non-gptoss models doesn't use messages - # so we can't get them out yet if request.enable_response_messages: - raise NotImplementedError( - "enable_response_messages is currently only supported for gpt-oss" - ) + input_messages = context.input_messages + output_messages = context.output_messages # TODO: Calculate usage. # assert final_res.prompt_token_ids is not None num_tool_output_tokens = 0 else: assert isinstance(context, SimpleContext) - final_res = context.last_output + # Use final_output which has accumulated text/token_ids/logprobs + final_res = context.final_output assert final_res is not None assert len(final_res.outputs) == 1 final_output = final_res.outputs[0] diff --git a/vllm/entrypoints/openai/tool_parsers/__init__.py b/vllm/entrypoints/openai/tool_parsers/__init__.py index 7be1263e802d..ad1b682a9ef6 100644 --- a/vllm/entrypoints/openai/tool_parsers/__init__.py +++ b/vllm/entrypoints/openai/tool_parsers/__init__.py @@ -1,150 +1,33 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, - ToolParserManager, -) +import warnings -__all__ = ["ToolParser", "ToolParserManager"] +def __getattr__(name: str): + if name == "ToolParser": + from vllm.tool_parsers import ToolParser -""" -Register a lazy module mapping. + warnings.warn( + "`vllm.entrypoints.openai.tool_parsers.ToolParser` has been moved to " + "`vllm.tool_parsers.ToolParser`. 
" + "The old name will be removed in v0.14.", + DeprecationWarning, + stacklevel=2, + ) -Example: - ToolParserManager.register_lazy_module( - name="kimi_k2", - module_path="vllm.entrypoints.openai.tool_parsers.kimi_k2_parser", - class_name="KimiK2ToolParser", - ) -""" + return ToolParser + if name == "ToolParserManager": + from vllm.tool_parsers import ToolParserManager + warnings.warn( + "`vllm.entrypoints.openai.tool_parsers.ToolParserManager` " + "has been moved to `vllm.tool_parsers.ToolParserManager`. " + "The old name will be removed in v0.14.", + DeprecationWarning, + stacklevel=2, + ) -_TOOL_PARSERS_TO_REGISTER = { - "deepseek_v3": ( # name - "deepseekv3_tool_parser", # filename - "DeepSeekV3ToolParser", # class_name - ), - "deepseek_v31": ( - "deepseekv31_tool_parser", - "DeepSeekV31ToolParser", - ), - "deepseek_v32": ( - "deepseekv32_tool_parser", - "DeepSeekV32ToolParser", - ), - "ernie45": ( - "ernie45_tool_parser", - "Ernie45ToolParser", - ), - "glm45": ( - "glm4_moe_tool_parser", - "Glm4MoeModelToolParser", - ), - "granite-20b-fc": ( - "granite_20b_fc_tool_parser", - "Granite20bFCToolParser", - ), - "granite": ( - "granite_tool_parser", - "GraniteToolParser", - ), - "hermes": ( - "hermes_tool_parser", - "Hermes2ProToolParser", - ), - "hunyuan_a13b": ( - "hunyuan_a13b_tool_parser", - "HunyuanA13BToolParser", - ), - "internlm": ( - "internlm2_tool_parser", - "Internlm2ToolParser", - ), - "jamba": ( - "jamba_tool_parser", - "JambaToolParser", - ), - "kimi_k2": ( - "kimi_k2_tool_parser", - "KimiK2ToolParser", - ), - "llama3_json": ( - "llama_tool_parser", - "Llama3JsonToolParser", - ), - "llama4_json": ( - "llama_tool_parser", - "Llama3JsonToolParser", - ), - "llama4_pythonic": ( - "llama4_pythonic_tool_parser", - "Llama4PythonicToolParser", - ), - "longcat": ( - "longcat_tool_parser", - "LongcatFlashToolParser", - ), - "minimax_m2": ( - "minimax_m2_tool_parser", - "MinimaxM2ToolParser", - ), - "minimax": ( - "minimax_tool_parser", - "MinimaxToolParser", 
- ), - "mistral": ( - "mistral_tool_parser", - "MistralToolParser", - ), - "olmo3": ( - "olmo3_tool_parser", - "Olmo3PythonicToolParser", - ), - "openai": ( - "openai_tool_parser", - "OpenAIToolParser", - ), - "phi4_mini_json": ( - "phi4mini_tool_parser", - "Phi4MiniJsonToolParser", - ), - "pythonic": ( - "pythonic_tool_parser", - "PythonicToolParser", - ), - "qwen3_coder": ( - "qwen3coder_tool_parser", - "Qwen3CoderToolParser", - ), - "qwen3_xml": ( - "qwen3xml_tool_parser", - "Qwen3XMLToolParser", - ), - "seed_oss": ( - "seed_oss_tool_parser", - "SeedOssToolParser", - ), - "step3": ( - "step3_tool_parser", - "Step3ToolParser", - ), - "xlam": ( - "xlam_tool_parser", - "xLAMToolParser", - ), - "gigachat3": ( - "gigachat3_tool_parser", - "GigaChat3ToolParser", - ), -} + return ToolParserManager - -def register_lazy_tool_parsers(): - for name, (file_name, class_name) in _TOOL_PARSERS_TO_REGISTER.items(): - module_path = f"vllm.entrypoints.openai.tool_parsers.{file_name}" - ToolParserManager.register_lazy_module(name, module_path, class_name) - - -register_lazy_tool_parsers() + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/entrypoints/pooling/classify/serving.py b/vllm/entrypoints/pooling/classify/serving.py index d6d3825daf7b..e166405a6f05 100644 --- a/vllm/entrypoints/pooling/classify/serving.py +++ b/vllm/entrypoints/pooling/classify/serving.py @@ -72,11 +72,7 @@ async def _preprocess( if ret: return ret - ( - _, - _, - engine_prompts, - ) = await self._preprocess_chat( + _, engine_prompts = await self._preprocess_chat( cast(ChatCompletionRequest, chat_request), ctx.tokenizer, messages, diff --git a/vllm/entrypoints/pooling/embed/serving.py b/vllm/entrypoints/pooling/embed/serving.py index aafc35489710..f5a21208ed80 100644 --- a/vllm/entrypoints/pooling/embed/serving.py +++ b/vllm/entrypoints/pooling/embed/serving.py @@ -20,7 +20,6 @@ EmbeddingServeContext, OpenAIServing, ServeContext, - TextTokensPrompt, ) from 
vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.entrypoints.pooling.embed.protocol import ( @@ -32,7 +31,7 @@ EmbeddingResponseData, ) from vllm.entrypoints.renderer import RenderConfig -from vllm.inputs.data import TokensPrompt as EngineTokensPrompt +from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger from vllm.outputs import ( EmbeddingRequestOutput, @@ -83,11 +82,7 @@ async def _preprocess( renderer = self._get_renderer(tokenizer) if isinstance(ctx.request, EmbeddingChatRequest): - ( - _, - _, - ctx.engine_prompts, - ) = await self._preprocess_chat( + _, ctx.engine_prompts = await self._preprocess_chat( ctx.request, tokenizer, ctx.request.messages, @@ -209,14 +204,13 @@ def _should_use_chunked_processing(self, request) -> bool: async def _process_chunked_request( self, ctx: EmbeddingServeContext, - original_prompt: TextTokensPrompt, + token_ids: list[int], pooling_params, trace_headers, prompt_idx: int, ) -> list[AsyncGenerator[PoolingRequestOutput, None]]: """Process a single prompt using chunked processing.""" generators: list[AsyncGenerator[PoolingRequestOutput, None]] = [] - token_ids = original_prompt["prompt_token_ids"] # Split into chunks using max_position_embeddings max_pos_embeddings = self._get_max_position_embeddings() @@ -228,18 +222,12 @@ async def _process_chunked_request( chunk_request_id = f"{ctx.request_id}-prompt-{prompt_idx}-chunk-{chunk_idx}" # Create engine prompt for this chunk - chunk_engine_prompt = EngineTokensPrompt(prompt_token_ids=chunk_tokens) - - # Create chunk request prompt for logging - chunk_text = "" - chunk_request_prompt = TextTokensPrompt( - prompt=chunk_text, prompt_token_ids=chunk_tokens - ) + chunk_engine_prompt = TokensPrompt(prompt_token_ids=chunk_tokens) # Log the chunk self._log_inputs( chunk_request_id, - chunk_request_prompt, + chunk_engine_prompt, params=pooling_params, lora_request=ctx.lora_request, ) @@ -263,7 +251,7 @@ def _validate_input( request, 
input_ids: list[int], input_text: str, - ) -> TextTokensPrompt: + ) -> TokensPrompt: """Override to support chunked processing for embedding requests.""" token_num = len(input_ids) @@ -328,23 +316,15 @@ def _validate_input( ) ) - return TextTokensPrompt(prompt=input_text, prompt_token_ids=input_ids) + return TokensPrompt(prompt=input_text, prompt_token_ids=input_ids) # For other request types, use the parent's implementation return super()._validate_input(request, input_ids, input_text) - def _is_text_tokens_prompt(self, prompt) -> bool: - """Check if a prompt is a TextTokensPrompt (has prompt_token_ids).""" - return ( - isinstance(prompt, dict) - and "prompt_token_ids" in prompt - and "prompt_embeds" not in prompt - ) - async def _create_single_prompt_generator( self, ctx: EmbeddingServeContext, - engine_prompt: EngineTokensPrompt, + engine_prompt: TokensPrompt, pooling_params: PoolingParams, trace_headers: Mapping[str, str] | None, prompt_index: int, @@ -413,14 +393,16 @@ async def _prepare_generators( for i, engine_prompt in enumerate(ctx.engine_prompts): # Check if this specific prompt needs chunked processing - if self._is_text_tokens_prompt(engine_prompt): - # Cast to TextTokensPrompt since we've verified - # prompt_token_ids - text_tokens_prompt = cast(TextTokensPrompt, engine_prompt) - if len(text_tokens_prompt["prompt_token_ids"]) > max_pos_embeddings: + if "prompt_token_ids" in engine_prompt: + prompt_token_ids = engine_prompt["prompt_token_ids"] + if len(prompt_token_ids) > max_pos_embeddings: # Use chunked processing for this prompt chunk_generators = await self._process_chunked_request( - ctx, text_tokens_prompt, pooling_params, trace_headers, i + ctx, + prompt_token_ids, + pooling_params, + trace_headers, + i, ) generators.extend(chunk_generators) continue @@ -578,14 +560,13 @@ async def _collect_batch( # Get original prompt token IDs for this prompt original_prompt = ctx.engine_prompts[prompt_idx] - if not 
self._is_text_tokens_prompt(original_prompt): + if "prompt_token_ids" not in original_prompt: return self.create_error_response( - f"Chunked prompt {prompt_idx} is not a TextTokensPrompt" + f"Chunked prompt {prompt_idx} does not contain " + "token IDs" ) - original_token_ids = cast(TextTokensPrompt, original_prompt)[ - "prompt_token_ids" - ] + original_token_ids = original_prompt["prompt_token_ids"] pooling_request_output = PoolingRequestOutput( request_id=aggregator["request_id"], diff --git a/vllm/entrypoints/pooling/pooling/serving.py b/vllm/entrypoints/pooling/pooling/serving.py index 57f1a6440cf7..4e1b326806ea 100644 --- a/vllm/entrypoints/pooling/pooling/serving.py +++ b/vllm/entrypoints/pooling/pooling/serving.py @@ -137,11 +137,8 @@ async def create_pooling( ) if error_check_ret is not None: return error_check_ret - ( - _, - _, - engine_prompts, - ) = await self._preprocess_chat( + + _, engine_prompts = await self._preprocess_chat( request, tokenizer, request.messages, diff --git a/vllm/entrypoints/pooling/score/protocol.py b/vllm/entrypoints/pooling/score/protocol.py index a22219707c35..e81bda2eec3d 100644 --- a/vllm/entrypoints/pooling/score/protocol.py +++ b/vllm/entrypoints/pooling/score/protocol.py @@ -120,6 +120,7 @@ class RerankResult(BaseModel): class RerankUsage(BaseModel): + prompt_tokens: int total_tokens: int diff --git a/vllm/entrypoints/pooling/score/serving.py b/vllm/entrypoints/pooling/score/serving.py index f574d8bcebb4..edbfcd03ac92 100644 --- a/vllm/entrypoints/pooling/score/serving.py +++ b/vllm/entrypoints/pooling/score/serving.py @@ -502,5 +502,7 @@ def request_output_to_rerank_response( id=request_id, model=model_name, results=results, - usage=RerankUsage(total_tokens=num_prompt_tokens), + usage=RerankUsage( + total_tokens=num_prompt_tokens, prompt_tokens=num_prompt_tokens + ), ) diff --git a/vllm/entrypoints/renderer.py b/vllm/entrypoints/renderer.py index f31b309b8ca4..0f89c840be80 100644 --- a/vllm/entrypoints/renderer.py +++ 
b/vllm/entrypoints/renderer.py @@ -12,9 +12,7 @@ from pydantic import Field from vllm.config import ModelConfig -from vllm.inputs.data import EmbedsPrompt as EngineEmbedsPrompt -from vllm.inputs.data import TextPrompt as EngineTextPrompt -from vllm.inputs.data import TokensPrompt as EngineTokensPrompt +from vllm.inputs.data import EmbedsPrompt, TextPrompt, TokensPrompt from vllm.inputs.parse import get_prompt_components, parse_raw_prompts from vllm.tokenizers import TokenizerLike from vllm.utils.async_utils import AsyncMicrobatchTokenizer @@ -97,7 +95,7 @@ async def render_prompt( *, prompt_or_prompts: str | list[str] | list[int] | list[list[int]], config: RenderConfig, - ) -> list[EngineTokensPrompt]: + ) -> list[TokensPrompt]: """ Convert text or token inputs into engine-ready TokensPrompt objects. @@ -115,7 +113,7 @@ async def render_prompt( (e.g., tokenization and length handling). Returns: - list[EngineTokensPrompt]: Engine-ready token prompts. + list[TokensPrompt]: Engine-ready token prompts. Raises: ValueError: If input formats are invalid or length limits exceeded. @@ -129,7 +127,7 @@ async def render_prompt_and_embeds( prompt_or_prompts: str | list[str] | list[int] | list[list[int]] | None = None, prompt_embeds: bytes | list[bytes] | None = None, config: RenderConfig, - ) -> list[EngineTokensPrompt | EngineEmbedsPrompt]: + ) -> list[TokensPrompt | EmbedsPrompt]: """ Convert text/token and/or base64-encoded embeddings inputs into engine-ready prompt objects using a unified RenderConfig. @@ -146,7 +144,7 @@ async def render_prompt_and_embeds( (e.g., tokenization and length handling). Returns: - list[Union[EngineTokensPrompt, EngineEmbedsPrompt]]: + list[Union[TokensPrompt, EmbedsPrompt]]: Engine-ready prompt objects. 
Raises: @@ -161,31 +159,34 @@ def load_prompt_embeds( prompt_embeds: bytes | list[bytes], truncate_prompt_tokens: Annotated[int, Field(ge=0)] | None = None, cache_salt: str | None = None, - ) -> list[EngineEmbedsPrompt]: + ) -> list[EmbedsPrompt]: """Load and validate base64-encoded embeddings into prompt objects.""" if not self.model_config.enable_prompt_embeds: raise ValueError( "You must set `--enable-prompt-embeds` to input `prompt_embeds`." ) - def _load_and_validate_embed(embed: bytes) -> EngineEmbedsPrompt: - tensor = torch.load( - io.BytesIO(pybase64.b64decode(embed, validate=True)), - weights_only=True, - map_location=torch.device("cpu"), - ) - assert isinstance(tensor, torch.Tensor) and tensor.dtype in ( - torch.float32, - torch.bfloat16, - torch.float16, - ) - tensor = tensor.to_dense() + def _load_and_validate_embed(embed: bytes) -> EmbedsPrompt: + # Enable sparse tensor integrity checks to prevent out-of-bounds + # writes from maliciously crafted tensors + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.load( + io.BytesIO(pybase64.b64decode(embed, validate=True)), + weights_only=True, + map_location=torch.device("cpu"), + ) + assert isinstance(tensor, torch.Tensor) and tensor.dtype in ( + torch.float32, + torch.bfloat16, + torch.float16, + ) + tensor = tensor.to_dense() if tensor.dim() > 2: tensor = tensor.squeeze(0) assert tensor.dim() == 2 if truncate_prompt_tokens is not None: tensor = tensor[-truncate_prompt_tokens:] - embeds_prompt = EngineEmbedsPrompt(prompt_embeds=tensor) + embeds_prompt = EmbedsPrompt(prompt_embeds=tensor) if cache_salt is not None: embeds_prompt["cache_salt"] = cache_salt return embeds_prompt @@ -213,7 +214,7 @@ async def render_prompt( *, prompt_or_prompts: str | list[str] | list[int] | list[list[int]], config: RenderConfig, - ) -> list[EngineTokensPrompt]: + ) -> list[TokensPrompt]: """Implementation of prompt rendering for completion-style requests. Uses async tokenizer pooling for improved performance. 
See base class @@ -240,7 +241,7 @@ async def render_prompt_and_embeds( prompt_or_prompts: str | list[str] | list[int] | list[list[int]] | None = None, prompt_embeds: bytes | list[bytes] | None = None, config: RenderConfig, - ) -> list[EngineTokensPrompt | EngineEmbedsPrompt]: + ) -> list[TokensPrompt | EmbedsPrompt]: """ Render text/token prompts and/or precomputed embedding prompts. At least one of `prompt_or_prompts` or `prompt_embeds` must be provided. @@ -249,7 +250,7 @@ async def render_prompt_and_embeds( if truncate_prompt_tokens == 0: return [] - rendered: list[EngineTokensPrompt | EngineEmbedsPrompt] = [] + rendered: list[TokensPrompt | EmbedsPrompt] = [] if prompt_embeds is not None: rendered.extend( @@ -281,10 +282,10 @@ def _maybe_apply_truncation( async def _create_prompt( self, - prompt_input: EngineTextPrompt | EngineTokensPrompt, + prompt_input: TextPrompt | TokensPrompt, config: RenderConfig, truncate_prompt_tokens: int | None, - ) -> EngineTokensPrompt: + ) -> TokensPrompt: prompt, prompt_token_ids, _ = get_prompt_components(prompt_input) if prompt_token_ids is not None: @@ -317,7 +318,7 @@ async def _create_prompt_from_text( truncate_prompt_tokens: int | None, add_special_tokens: bool, cache_salt: str | None, - ) -> EngineTokensPrompt: + ) -> TokensPrompt: """Tokenize text input asynchronously.""" async_tokenizer = self._get_async_tokenizer() @@ -350,7 +351,7 @@ async def _create_prompt_from_token_ids( truncate_prompt_tokens: int | None, cache_salt: str | None, needs_detokenization: bool | None = False, - ) -> EngineTokensPrompt: + ) -> TokensPrompt: """Optionally detokenize token IDs and build a tokens prompt.""" token_ids = self._maybe_apply_truncation(token_ids, truncate_prompt_tokens) @@ -392,8 +393,8 @@ def _create_tokens_prompt( max_length: int | None = None, cache_salt: str | None = None, prompt: str | None = None, - ) -> EngineTokensPrompt: - """Create validated EngineTokensPrompt.""" + ) -> TokensPrompt: + """Create validated 
TokensPrompt.""" if max_length is not None and len(token_ids) > max_length: raise ValueError( f"This model's maximum context length is {max_length} tokens. " @@ -401,7 +402,7 @@ def _create_tokens_prompt( "Please reduce the length of the input messages." ) - tokens_prompt = EngineTokensPrompt(prompt_token_ids=token_ids) + tokens_prompt = TokensPrompt(prompt_token_ids=token_ids) if cache_salt is not None: tokens_prompt["cache_salt"] = cache_salt if prompt is not None: diff --git a/vllm/entrypoints/responses_utils.py b/vllm/entrypoints/responses_utils.py index 99080fa43cb8..df3d0495755d 100644 --- a/vllm/entrypoints/responses_utils.py +++ b/vllm/entrypoints/responses_utils.py @@ -16,7 +16,6 @@ from openai.types.responses.response_function_tool_call_output_item import ( ResponseFunctionToolCallOutputItem, ) -from openai.types.responses.response_output_item import McpCall from openai.types.responses.response_output_message import ResponseOutputMessage from openai.types.responses.response_reasoning_item import ResponseReasoningItem from openai.types.responses.tool import Tool @@ -27,38 +26,6 @@ ChatCompletionMessageParam, ResponseInputOutputItem, ) -from vllm.utils import random_uuid - - -def make_response_output_items_from_parsable_context( - response_messages: list[ResponseInputOutputItem], -) -> list[ResponseOutputItem]: - """Given a list of sentences, construct ResponseOutput Items.""" - output_messages: list[ResponseOutputItem] = [] - for message in response_messages: - if not isinstance(message, ResponseFunctionToolCallOutputItem): - output_messages.append(message) - else: - if len(output_messages) == 0: - raise ValueError( - "Cannot have a FunctionToolCallOutput before FunctionToolCall." 
- ) - if isinstance(output_messages[-1], ResponseFunctionToolCall): - mcp_message = McpCall( - id=f"{MCP_PREFIX}{random_uuid()}", - arguments=output_messages[-1].arguments, - name=output_messages[-1].name, - server_label=output_messages[ - -1 - ].name, # TODO: store the server label - type=f"{MCP_PREFIX}call", - status="completed", - output=message.output, - # TODO: support error output - ) - output_messages[-1] = mcp_message - - return output_messages def construct_input_messages( diff --git a/vllm/entrypoints/serve/disagg/serving.py b/vllm/entrypoints/serve/disagg/serving.py index 5c1d17156a90..1798b174b141 100644 --- a/vllm/entrypoints/serve/disagg/serving.py +++ b/vllm/entrypoints/serve/disagg/serving.py @@ -27,7 +27,7 @@ GenerateResponse, GenerateResponseChoice, ) -from vllm.inputs.data import TokensPrompt as EngineTokensPrompt +from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger from vllm.logprobs import Logprob from vllm.outputs import RequestOutput @@ -99,7 +99,7 @@ async def serve_tokens( # TODO(NickLucche): Change to EngineCoreRequest once Renderer work is # completed - engine_prompt = EngineTokensPrompt(prompt_token_ids=request.token_ids) + engine_prompt = TokensPrompt(prompt_token_ids=request.token_ids) if request.features is not None: engine_prompt["multi_modal_data"] = None @@ -115,7 +115,7 @@ async def serve_tokens( self._log_inputs( request_id, - request.token_ids, + TokensPrompt(prompt_token_ids=request.token_ids), params=sampling_params, lora_request=lora_request, ) diff --git a/vllm/entrypoints/serve/tokenize/serving.py b/vllm/entrypoints/serve/tokenize/serving.py index 979da02d1450..0b07f0b18dfd 100644 --- a/vllm/entrypoints/serve/tokenize/serving.py +++ b/vllm/entrypoints/serve/tokenize/serving.py @@ -21,6 +21,7 @@ from vllm.entrypoints.openai.serving_engine import OpenAIServing from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.entrypoints.renderer import RenderConfig +from vllm.inputs 
import TokensPrompt from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike @@ -80,11 +81,8 @@ async def create_tokenize( ) if error_check_ret is not None: return error_check_ret - ( - _, - _, - engine_prompts, - ) = await self._preprocess_chat( + + _, engine_prompts = await self._preprocess_chat( request, tokenizer, request.messages, @@ -141,7 +139,10 @@ async def create_detokenize( tokenizer = await self.engine_client.get_tokenizer() self._log_inputs( - request_id, request.tokens, params=None, lora_request=lora_request + request_id, + TokensPrompt(prompt_token_ids=request.tokens), + params=None, + lora_request=lora_request, ) prompt_input = await self._tokenize_prompt_input_async( diff --git a/vllm/inputs/parse.py b/vllm/inputs/parse.py index 211551be8e60..71289277eb98 100644 --- a/vllm/inputs/parse.py +++ b/vllm/inputs/parse.py @@ -33,22 +33,31 @@ def parse_raw_prompts( if len(prompt) == 0: raise ValueError("please provide at least one prompt") + # case 2: array of strings if is_list_of(prompt, str): - # case 2: array of strings prompt = cast(list[str], prompt) return [TextPrompt(prompt=elem) for elem in prompt] + + # case 3: array of tokens if is_list_of(prompt, int): - # case 3: array of tokens prompt = cast(list[int], prompt) return [TokensPrompt(prompt_token_ids=prompt)] + + # case 4: array of token arrays if is_list_of(prompt, list): - prompt = cast(list[list[int]], prompt) - if len(prompt[0]) == 0: - raise ValueError("please provide at least one prompt") + first = prompt[0] + if not isinstance(first, list): + raise ValueError("prompt expected to be a list of lists") - if is_list_of(prompt[0], int): - # case 4: array of token arrays - return [TokensPrompt(prompt_token_ids=elem) for elem in prompt] + if len(first) == 0: + raise ValueError("Please provide at least one prompt") + + # strict validation: every nested list must be list[int] + if not all(is_list_of(elem, int) for elem in prompt): + raise TypeError("Nested lists must contain 
only integers") + + prompt = cast(list[list[int]], prompt) + return [TokensPrompt(prompt_token_ids=elem) for elem in prompt] raise TypeError( "prompt must be a string, array of strings, " diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 9ef696d80712..66250f816f45 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -38,8 +38,9 @@ def __new__(cls, *args, **kwargs): ) return super().__new__(op_cls_to_instantiate) - def __init__(self): + def __init__(self, enforce_enable: bool = False): super().__init__() + self._enforce_enable = enforce_enable self._forward_method = self.dispatch_forward() def forward(self, *args, **kwargs): @@ -84,7 +85,11 @@ def dispatch_forward(self): # NOTE(woosuk): Here we assume that vLLM was built for only one # specific backend. Currently, we do not support dynamic dispatching. compilation_config = get_cached_compilation_config() - enabled = self.enabled() + + # CustomOp object can be enforce enabled, e.g., enable device-specific + # kernels in ViT models when enabling graph mode. By default, it will + # follow the compilation_config to determine whether enable itself. 
+ enabled = self._enforce_enable or self.enabled() if enabled: compilation_config.enabled_custom_ops.update([self.__class__.name]) else: diff --git a/vllm/model_executor/layers/batch_invariant.py b/vllm/model_executor/layers/batch_invariant.py index 4f31e5afa1ac..fde0826779eb 100644 --- a/vllm/model_executor/layers/batch_invariant.py +++ b/vllm/model_executor/layers/batch_invariant.py @@ -6,7 +6,7 @@ import torch -import vllm.envs as envs +from vllm.attention.backends.registry import AttentionBackendEnum from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.triton_utils import tl, triton @@ -1004,27 +1004,30 @@ def vllm_is_batch_invariant() -> bool: return VLLM_BATCH_INVARIANT -def override_envs_for_invariance(): - curr_attn_backend = envs.VLLM_ATTENTION_BACKEND +def override_envs_for_invariance( + attention_backend: AttentionBackendEnum | None, +): supported_backends = [ - "FLASH_ATTN", # best supported backend - "FLASHINFER", - "FLASH_ATTN_MLA", - "TRITON_MLA", + AttentionBackendEnum.FLASH_ATTN, # best supported backend + AttentionBackendEnum.FLASHINFER, + AttentionBackendEnum.FLASH_ATTN_MLA, + AttentionBackendEnum.TRITON_MLA, # Not yet supported MLA backends - # "FLASHMLA", - # "FLEX_ATTENTION", # IMA issue even if we disable batch invariance - # "FLASHINFER_MLA", https://github.com/vllm-project/vllm/pull/28967 + # AttentionBackendEnum.FLASHMLA, + # AttentionBackendEnum.FLEX_ATTENTION, # IMA issue + # AttentionBackendEnum.FLASHINFER_MLA, # PR #28967 ] - if curr_attn_backend not in supported_backends: + if attention_backend not in supported_backends: + supported_names = [b.name for b in supported_backends] + backend_name = attention_backend.name if attention_backend else None error = ( "VLLM batch_invariant mode requires an attention backend in " - f"{supported_backends}, but got '{curr_attn_backend}'. 
" - "Please set the 'VLLM_ATTENTION_BACKEND' environment variable " - "to one of the supported backends before enabling batch_invariant." + f"{supported_names}, but got '{backend_name}'. " + "Please use --attention-backend or attention_config to set " + "one of the supported backends before enabling batch_invariant." ) raise RuntimeError(error) - if os.environ["VLLM_ATTENTION_BACKEND"] != supported_backends[0]: + if attention_backend != supported_backends[0]: warning = ( "You are using a decode-invariant form of batch invariance. " "This will not be invariant between prefill and decode." @@ -1050,10 +1053,12 @@ def override_envs_for_invariance(): os.environ["VLLM_USE_AOT_COMPILE"] = "0" -def init_batch_invariance(): +def init_batch_invariance( + attention_backend: AttentionBackendEnum | None, +): # this will hit all the csrc overrides as well if vllm_is_batch_invariant(): - override_envs_for_invariance() + override_envs_for_invariance(attention_backend) enable_batch_invariant_mode() # Disable TF32 for batch invariance - it causes non-deterministic rounding diff --git a/vllm/model_executor/layers/fused_moe/cutlass_moe.py b/vllm/model_executor/layers/fused_moe/cutlass_moe.py index 552e38a71bf9..4a0b4e82c1b3 100644 --- a/vllm/model_executor/layers/fused_moe/cutlass_moe.py +++ b/vllm/model_executor/layers/fused_moe/cutlass_moe.py @@ -460,7 +460,6 @@ def cutlass_moe_fp8( expert_map: torch.Tensor | None = None, apply_router_weight_on_input: bool = False, global_num_experts: int = -1, - parallel_config=None, ) -> torch.Tensor: """ This function computes a a8w8-quantized Mixture of Experts (MoE) layer @@ -538,7 +537,6 @@ def cutlass_moe_fp8( c_strides2=c_strides2, quant_config=quant_config, ), - parallel_config=parallel_config, ) return fn( diff --git a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py index 4a64736ed767..5ca91768c976 100644 --- a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +++ 
b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py @@ -293,7 +293,7 @@ def deep_gemm_moe_fp8( expert_map: torch.Tensor | None = None, a1_scale: torch.Tensor | None = None, a2_scale: torch.Tensor | None = None, - apply_router_weight_on_input=False, + apply_router_weight_on_input: bool = False, ) -> torch.Tensor: """ This function computes a a8w8-quantized Mixture of Experts (MoE) layer diff --git a/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py b/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py index 1947423bf477..9c9bc2514bb4 100644 --- a/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +++ b/vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py @@ -43,11 +43,6 @@ def make( prepare_finalize: FusedMoEPrepareAndFinalize, shared_experts: torch.nn.Module | None, ) -> "FusedMoEModularMethod": - parallel_config = getattr( - getattr(moe_layer, "vllm_config", None), - "parallel_config", - None, - ) return FusedMoEModularMethod( old_quant_method, FusedMoEModularKernel( @@ -55,7 +50,7 @@ def make( old_quant_method.select_gemm_impl(prepare_finalize, moe_layer), shared_experts, getattr(moe_layer, "shared_experts_stream", None), - parallel_config=parallel_config, + moe_parallel_config=moe_layer.moe_parallel_config, ), ) diff --git a/vllm/model_executor/layers/fused_moe/modular_kernel.py b/vllm/model_executor/layers/fused_moe/modular_kernel.py index 9e75a7c08070..484314091cb1 100644 --- a/vllm/model_executor/layers/fused_moe/modular_kernel.py +++ b/vllm/model_executor/layers/fused_moe/modular_kernel.py @@ -10,10 +10,12 @@ import torch import vllm.envs as envs -from vllm.config import ParallelConfig, get_current_vllm_config from vllm.forward_context import get_forward_context, is_forward_context_available from vllm.logger import init_logger -from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig +from vllm.model_executor.layers.fused_moe.config import ( + FusedMoEParallelConfig, + 
FusedMoEQuantConfig, +) from vllm.model_executor.layers.fused_moe.utils import ( _resize_cache, count_expert_num_tokens, @@ -681,7 +683,7 @@ def __init__( fused_experts: FusedMoEPermuteExpertsUnpermute, shared_experts: torch.nn.Module | None = None, shared_experts_stream: torch.cuda.Stream | None = None, - parallel_config: ParallelConfig | None = None, + moe_parallel_config: FusedMoEParallelConfig | None = None, ): super().__init__() self.prepare_finalize = prepare_finalize @@ -689,12 +691,15 @@ def __init__( self.shared_experts = shared_experts self.shared_experts_stream = shared_experts_stream - # cache whether this worker is using DP+EP - if parallel_config is None: - parallel_config = get_current_vllm_config().parallel_config + # prefer an explicit FusedMoEParallelConfig when available (from + # FusedMoE layers / tests). + # if not provided, assume this kernel is + # running in a non-DP+EP context + self.moe_parallel_config: FusedMoEParallelConfig | None = moe_parallel_config self.is_dp_ep = ( - parallel_config.data_parallel_size > 1 - and parallel_config.enable_expert_parallel + moe_parallel_config is not None + and moe_parallel_config.dp_size > 1 + and moe_parallel_config.use_ep ) self._post_init_setup() diff --git a/vllm/model_executor/layers/fused_moe/shared_fused_moe.py b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py index 9aaeec4f98a6..60aa1c088b4d 100644 --- a/vllm/model_executor/layers/fused_moe/shared_fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/shared_fused_moe.py @@ -30,8 +30,8 @@ def __init__( # Disable shared expert overlap if: # - we are using eplb, because of correctness issues - # - we are using flashinfer with DP, since there nothint to gain - # - we are using marlin kjernels + # - we are using flashinfer with DP, since there nothing to gain + # - we are using marlin kernels self.use_overlapped = ( use_overlapped and not ( diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py 
b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py index 5ad26f9318df..18c2ab026b2b 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py @@ -1266,9 +1266,6 @@ def apply( ab_strides2=self.ab_strides2, c_strides1=self.c_strides1, c_strides2=self.ab_strides1_c_strides2, - parallel_config=getattr( - getattr(layer, "vllm_config", None), "parallel_config", None - ), ) else: diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index 6909bac1efc7..f2b66a2beb6d 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -332,7 +332,10 @@ def get_quant_method( fused_mapping=self.packed_modules_mapping, ): return UnquantizedFusedMoEMethod(layer.moe_config) - moe_quant_method = Fp8MoEMethod(self, layer) + if self.is_checkpoint_fp8_serialized: + moe_quant_method = Fp8MoEMethod(self, layer) + else: + moe_quant_method = Fp8OnlineMoEMethod(self, layer) moe_quant_method.marlin_input_dtype = get_marlin_input_dtype(prefix) return moe_quant_method elif isinstance(layer, Attention): @@ -745,8 +748,9 @@ def create_weights( layer.orig_dtype = params_dtype layer.weight_block_size = None - if self.quant_config.is_checkpoint_fp8_serialized: - params_dtype = torch.float8_e4m3fn + assert self.quant_config.is_checkpoint_fp8_serialized + params_dtype = torch.float8_e4m3fn + if self.block_quant: assert self.weight_block_size is not None layer.weight_block_size = self.weight_block_size @@ -773,41 +777,6 @@ def create_weights( f"weight quantization block_k = {block_k}." 
) - # if we are doing online quantization, patch the weight - # loaded to call `process_weights_after_loading` in a streaming fashion - # as soon as the last weight chunk is loaded - if not self.quant_config.is_checkpoint_fp8_serialized: - weight_loader = extra_weight_attrs["weight_loader"] - # create a new holder to prevent modifying behavior of any other - # objects which might depend on the old one - new_extra_weight_attrs = extra_weight_attrs - - def patched_weight_loader(param, loaded_weight, *args, **kwargs): - # load the current weight chunk - res = weight_loader(param, loaded_weight, *args, **kwargs) # type: ignore[misc] - - # add a counter to track how many elements we have updated - if not hasattr(layer, "_loaded_numel"): - layer._loaded_numel = 0 - layer._loaded_numel += loaded_weight.numel() - - # if we have loaded all of the elements, call - # process_weights_after_loading - target_loaded_numel = layer.w13_weight.numel() + layer.w2_weight.numel() - if layer._loaded_numel == target_loaded_numel: - self.process_weights_after_loading(layer) - - # Delete the bookkeeping - del layer._loaded_numel - # Prevent the usual `process_weights_after_loading` call - # from doing anything - layer._already_called_process_weights_after_loading = True - - return res - - new_extra_weight_attrs["weight_loader"] = patched_weight_loader - extra_weight_attrs = new_extra_weight_attrs - # WEIGHTS w13_weight = torch.nn.Parameter( torch.empty( @@ -875,21 +844,11 @@ def patched_weight_loader(param, loaded_weight, *args, **kwargs): if self.block_quant else {"quant_method": FusedMoeWeightScaleSupported.TENSOR.value} ) - # If loading fp8 checkpoint, pass the weight loaders. 
- # If loading an fp16 checkpoint, do not (we will quantize in - # process_weights_after_loading() - if self.quant_config.is_checkpoint_fp8_serialized: - set_weight_attrs(w13_weight_scale, extra_weight_attrs) - set_weight_attrs(w2_weight_scale, extra_weight_attrs) + set_weight_attrs(w13_weight_scale, extra_weight_attrs) + set_weight_attrs(w2_weight_scale, extra_weight_attrs) # INPUT_SCALES if self.quant_config.activation_scheme == "static": - if not self.quant_config.is_checkpoint_fp8_serialized: - raise ValueError( - "Found static activation scheme for checkpoint that " - "was not serialized fp8." - ) - w13_input_scale = torch.nn.Parameter( torch.ones(num_experts, dtype=torch.float32), requires_grad=False ) @@ -986,45 +945,6 @@ def process_weights_after_loading(self, layer: Module) -> None: layer.w2_weight_scale_inv = Parameter( dg_w2_weight_scale_inv, requires_grad=False ) - - # If checkpoint is fp16, quantize in place. - elif not self.quant_config.is_checkpoint_fp8_serialized: - fp8_dtype = current_platform.fp8_dtype() - w13_weight = torch.empty_like(layer.w13_weight.data, dtype=fp8_dtype) - w2_weight = torch.empty_like(layer.w2_weight.data, dtype=fp8_dtype) - - # Re-initialize w13_scale because we directly quantize - # merged w13 weights and generate a single scaling factor. - replace_parameter( - layer, - "w13_weight_scale", - torch.ones( - layer.local_num_experts, - dtype=torch.float32, - device=w13_weight.device, - ), - ) - for expert in range(layer.local_num_experts): - w13_weight[expert, :, :], layer.w13_weight_scale[expert] = ( - ops.scaled_fp8_quant(layer.w13_weight.data[expert, :, :]) - ) - w2_weight[expert, :, :], layer.w2_weight_scale[expert] = ( - ops.scaled_fp8_quant(layer.w2_weight.data[expert, :, :]) - ) - replace_parameter(layer, "w13_weight", w13_weight) - replace_parameter(layer, "w2_weight", w2_weight) - - if self.rocm_aiter_moe_enabled: - # reshaping weights is required for aiter moe kernel. 
- shuffled_w13, shuffled_w2 = rocm_aiter_ops.shuffle_weights( - layer.w13_weight, layer.w2_weight - ) - - replace_parameter(layer, "w13_weight", shuffled_w13) - replace_parameter(layer, "w2_weight", shuffled_w2) - # If checkpoint is fp8, we need to handle that the - # MoE kernels require single activation scale and single weight - # scale for w13 per expert. else: # Fp8 moe kernels require a single activation scale. # We take the max of all the scales in case they differ. @@ -1387,6 +1307,151 @@ def apply( return result +class Fp8OnlineMoEMethod(Fp8MoEMethod): + """MoE method for online FP8 quantization. + Supports loading quantized FP16/BF16 model checkpoints with dynamic + activation scaling. The weight scaling factor will be initialized after + the model weights are loaded. + + Args: + quant_config: The quantization config. + """ + + def __init__(self, quant_config: Fp8Config, layer: torch.nn.Module): + super().__init__(quant_config, layer) + assert not quant_config.is_checkpoint_fp8_serialized + assert quant_config.activation_scheme == "dynamic" + assert quant_config.weight_block_size is None + assert self.flashinfer_moe_backend is None + + def create_weights( + self, + layer: Module, + num_experts: int, + hidden_size: int, + intermediate_size_per_partition: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ): + layer.intermediate_size_per_partition = intermediate_size_per_partition + layer.hidden_size = hidden_size + layer.num_experts = num_experts + layer.orig_dtype = params_dtype + layer.weight_block_size = None + + # We are doing online quantization, patch the weight loaded + # to call `process_weights_after_loading` in a streaming fashion + # as soon as the last weight chunk is loaded. 
+ weight_loader = extra_weight_attrs["weight_loader"] + # create a new holder to prevent modifying behavior of any other + # objects which might depend on the old one + new_extra_weight_attrs = extra_weight_attrs + + def patched_weight_loader(param, loaded_weight, *args, **kwargs): + # load the current weight chunk + res = weight_loader(param, loaded_weight, *args, **kwargs) # type: ignore[misc] + + # add a counter to track how many elements we have updated + if not hasattr(layer, "_loaded_numel"): + layer._loaded_numel = 0 + layer._loaded_numel += loaded_weight.numel() + + # if we have loaded all of the elements, call + # process_weights_after_loading + target_loaded_numel = layer.w13_weight.numel() + layer.w2_weight.numel() + if layer._loaded_numel == target_loaded_numel: + self.process_weights_after_loading(layer) + + # Delete the bookkeeping + del layer._loaded_numel + # Prevent the usual `process_weights_after_loading` call + # from doing anything + layer._already_called_process_weights_after_loading = True + + return res + + new_extra_weight_attrs["weight_loader"] = patched_weight_loader + extra_weight_attrs = new_extra_weight_attrs + + # WEIGHTS + w13_weight = torch.nn.Parameter( + torch.empty( + num_experts, + 2 * intermediate_size_per_partition, + hidden_size, + dtype=params_dtype, + ), + requires_grad=False, + ) + layer.register_parameter("w13_weight", w13_weight) + set_weight_attrs(w13_weight, extra_weight_attrs) + + w2_weight = torch.nn.Parameter( + torch.empty( + num_experts, + hidden_size, + intermediate_size_per_partition, + dtype=params_dtype, + ), + requires_grad=False, + ) + layer.register_parameter("w2_weight", w2_weight) + set_weight_attrs(w2_weight, extra_weight_attrs) + + # WEIGHT_SCALES + # Allocate 2 scales for w1 and w3 respectively. + # They will be combined to a single scale after weight loading. 
+ w13_weight_scale = torch.nn.Parameter( + torch.ones(num_experts, dtype=torch.float32), requires_grad=False + ) + w2_weight_scale = torch.nn.Parameter( + torch.ones(num_experts, dtype=torch.float32), requires_grad=False + ) + layer.register_parameter("w13_weight_scale", w13_weight_scale) + layer.register_parameter("w2_weight_scale", w2_weight_scale) + + layer.w13_input_scale = None + layer.w2_input_scale = None + + self.rocm_aiter_moe_enabled = False + + def process_weights_after_loading(self, layer: Module) -> None: + if getattr(layer, "_already_called_process_weights_after_loading", False): + return + + # Lazy import to avoid importing triton too early. + self.rocm_aiter_moe_enabled = rocm_aiter_ops.is_fused_moe_enabled() + + # If checkpoint is fp16, quantize in place. + fp8_dtype = current_platform.fp8_dtype() + w13_weight = torch.empty_like(layer.w13_weight.data, dtype=fp8_dtype) + w2_weight = torch.empty_like(layer.w2_weight.data, dtype=fp8_dtype) + + for expert in range(layer.local_num_experts): + w13_weight[expert, :, :], layer.w13_weight_scale[expert] = ( + ops.scaled_fp8_quant(layer.w13_weight.data[expert, :, :]) + ) + w2_weight[expert, :, :], layer.w2_weight_scale[expert] = ( + ops.scaled_fp8_quant(layer.w2_weight.data[expert, :, :]) + ) + replace_parameter(layer, "w13_weight", w13_weight) + replace_parameter(layer, "w2_weight", w2_weight) + + # Reshuffle weights for AITER if needed. + if self.rocm_aiter_moe_enabled: + shuffled_w13, shuffled_w2 = rocm_aiter_ops.shuffle_weights( + layer.w13_weight, layer.w2_weight + ) + replace_parameter(layer, "w13_weight", shuffled_w13) + replace_parameter(layer, "w2_weight", shuffled_w2) + + # Rushuffle weights for MARLIN if needed. + if self.use_marlin: + prepare_moe_fp8_layer_for_marlin( + layer, False, input_dtype=self.marlin_input_dtype + ) + + class Fp8KVCacheMethod(BaseKVCacheMethod): """ Supports loading kv-cache scaling factors from FP8 checkpoints. 
diff --git a/vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py b/vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py index bd1d39971530..20d050d387d4 100644 --- a/vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +++ b/vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py @@ -62,7 +62,7 @@ def choose_scaled_mm_linear_kernel( continue # If the current platform uses compute_capability, - # make sure the kernel supports the compute cability. + # make sure the kernel supports the compute capability. is_supported, reason = kernel.is_supported(compute_capability) if not is_supported: failure_reasons.append(f"{kernel.__name__}: {reason}") diff --git a/vllm/model_executor/layers/quantization/modelopt.py b/vllm/model_executor/layers/quantization/modelopt.py index a3a8ec738dae..030d85080a34 100644 --- a/vllm/model_executor/layers/quantization/modelopt.py +++ b/vllm/model_executor/layers/quantization/modelopt.py @@ -188,7 +188,24 @@ def get_quant_method( def apply_vllm_mapper(self, hf_to_vllm_mapper: "WeightsMapper"): if len(self.exclude_modules) > 0: - self.exclude_modules = hf_to_vllm_mapper.apply_list(self.exclude_modules) + # This is a workaround for the weights remapping issue: + # https://github.com/vllm-project/vllm/issues/28072 + # Right now, the Nvidia ModelOpt library use just one wildcard pattern: + # module_path* + # It gets applied if the whole tree of modules rooted at module_path + # is not quantized. 
Here we replace such pattern by 2 patterns that are + # collectively equivalent to the original pattern: + # module_path + # module_path.* + new_exclude_modules = [] + for exclude in self.exclude_modules: + if len(exclude) >= 2 and exclude[-1] == "*" and exclude[-2] != ".": + new_exclude_modules.append(exclude[:-1]) + new_exclude_modules.append(exclude[:-1] + ".*") + else: + new_exclude_modules.append(exclude) + + self.exclude_modules = hf_to_vllm_mapper.apply_list(new_exclude_modules) @staticmethod def get_config_filenames() -> list[str]: diff --git a/vllm/model_executor/layers/quantization/moe_wna16.py b/vllm/model_executor/layers/quantization/moe_wna16.py index 0131a330f70d..4bedb951a33f 100644 --- a/vllm/model_executor/layers/quantization/moe_wna16.py +++ b/vllm/model_executor/layers/quantization/moe_wna16.py @@ -17,6 +17,9 @@ FusedMoEMethodBase, FusedMoeWeightScaleSupported, ) +from vllm.model_executor.layers.fused_moe.unquantized_fused_moe_method import ( + UnquantizedFusedMoEMethod, +) from vllm.model_executor.layers.linear import LinearBase, UnquantizedLinearMethod from vllm.model_executor.layers.quantization import QuantizationMethods from vllm.model_executor.layers.quantization.base_config import ( @@ -162,6 +165,8 @@ def get_quant_method( self, layer: torch.nn.Module, prefix: str ) -> Optional["QuantizeMethodBase"]: if is_layer_skipped_quant(prefix, self.modules_to_not_convert): + if isinstance(layer, FusedMoE): + return UnquantizedFusedMoEMethod(layer.moe_config) return UnquantizedLinearMethod() elif isinstance(layer, LinearBase): # Avoid circular import diff --git a/vllm/model_executor/layers/quantization/utils/flashinfer_utils.py b/vllm/model_executor/layers/quantization/utils/flashinfer_utils.py index 09d0fe6a2f3a..3d6e9cda8766 100644 --- a/vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +++ b/vllm/model_executor/layers/quantization/utils/flashinfer_utils.py @@ -247,11 +247,6 @@ def flashinfer_cutlass_moe_fp8( assert quant_config is 
not None # Construct modular kernel with block-scale support when requested. - parallel_config = getattr( - getattr(layer, "vllm_config", None), - "parallel_config", - None, - ) fused_experts = mk.FusedMoEModularKernel( build_flashinfer_fp8_cutlass_moe_prepare_finalize( moe=moe, use_deepseek_fp8_block_scale=use_deepseek_fp8_block_scale @@ -262,7 +257,7 @@ def flashinfer_cutlass_moe_fp8( out_dtype=hidden_states.dtype, use_deepseek_fp8_block_scale=use_deepseek_fp8_block_scale, ), - parallel_config=parallel_config, + moe_parallel_config=layer.moe_parallel_config, ) return fused_experts( diff --git a/vllm/model_executor/layers/rotary_embedding/base.py b/vllm/model_executor/layers/rotary_embedding/base.py index 4114b21168cc..afa69324c4e2 100644 --- a/vllm/model_executor/layers/rotary_embedding/base.py +++ b/vllm/model_executor/layers/rotary_embedding/base.py @@ -7,7 +7,7 @@ from vllm._aiter_ops import rocm_aiter_ops from vllm.model_executor.custom_op import CustomOp -from .common import apply_rotary_emb_torch +from .common import ApplyRotaryEmb @CustomOp.register("rotary_embedding") @@ -49,6 +49,10 @@ def __init__( rocm_aiter_ops.is_triton_rotary_embed_enabled() ) + self.apply_rotary_emb = ApplyRotaryEmb( + is_neox_style=self.is_neox_style, + ) + def _compute_inv_freq(self, base: float) -> torch.Tensor: """Compute the inverse frequency.""" # NOTE(woosuk): To exactly match the HF implementation, we need to @@ -123,7 +127,12 @@ def forward_static( query = query.view(num_tokens, -1, head_size) query_rot = query[..., :rotary_dim] query_pass = query[..., rotary_dim:] - query_rot = apply_rotary_emb_torch(query_rot, cos, sin, is_neox_style) + query_rot = ApplyRotaryEmb.forward_static( + query_rot, + cos, + sin, + is_neox_style, + ) query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) # key may be None in some cases, e.g. 
cross-layer KV sharing @@ -132,7 +141,12 @@ def forward_static( key = key.view(num_tokens, -1, head_size) key_rot = key[..., :rotary_dim] key_pass = key[..., rotary_dim:] - key_rot = apply_rotary_emb_torch(key_rot, cos, sin, is_neox_style) + key_rot = ApplyRotaryEmb.forward_static( + key_rot, + cos, + sin, + is_neox_style, + ) key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) return query, key diff --git a/vllm/model_executor/layers/rotary_embedding/common.py b/vllm/model_executor/layers/rotary_embedding/common.py index 13f8d15cc0f7..3e6584dbc3da 100644 --- a/vllm/model_executor/layers/rotary_embedding/common.py +++ b/vllm/model_executor/layers/rotary_embedding/common.py @@ -2,19 +2,14 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import math -from collections.abc import Callable -from functools import cache from importlib.util import find_spec import torch from vllm.logger import init_logger -from vllm.platforms import current_platform +from vllm.model_executor.custom_op import CustomOp from vllm.utils.torch_utils import direct_register_custom_op -if current_platform.is_cuda(): - from vllm.vllm_flash_attn.layers.rotary import apply_rotary_emb - logger = init_logger(__name__) @@ -32,71 +27,6 @@ def rotate_gptj(x: torch.Tensor) -> torch.Tensor: return x.flatten(-2) -def apply_rotary_emb_torch( - x: torch.Tensor, - cos: torch.Tensor, - sin: torch.Tensor, - is_neox_style: bool, -) -> torch.Tensor: - cos = cos.unsqueeze(-2).to(x.dtype) - sin = sin.unsqueeze(-2).to(x.dtype) - if is_neox_style: - x1, x2 = torch.chunk(x, 2, dim=-1) - else: - x1 = x[..., ::2] - x2 = x[..., 1::2] - o1 = x1 * cos - x2 * sin - o2 = x2 * cos + x1 * sin - if is_neox_style: - return torch.cat((o1, o2), dim=-1) - else: - return torch.stack((o1, o2), dim=-1).flatten(-2) - - -def apply_rotary_emb_dispatch( - x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, is_neox_style: bool -) -> torch.Tensor: - """ - Args: - x: [num_tokens, num_heads, head_size] - 
cos: [num_tokens, head_size // 2] - sin: [num_tokens, head_size // 2] - is_neox_style: Whether to use the Neox-style or GPT-J-style rotary - positional embeddings. - """ - if current_platform.is_cuda(): - return apply_rotary_emb(x.unsqueeze(0), cos, sin, not is_neox_style).squeeze(0) - else: - return apply_rotary_emb_torch(x, cos, sin, is_neox_style) - - -@cache -def dispatch_rotary_emb_function( - default: Callable[..., torch.Tensor] | None = None, -) -> Callable[..., torch.Tensor]: - if current_platform.is_cuda(): - return apply_rotary_emb - - # if torch compile is not enabled - # use rotary embedding function from flash_attn package - # otherwise use the naive pytorch embedding implementation - # is faster when torch compile is enabled. - if current_platform.is_rocm() and not torch.compiler.is_compiling(): - if find_spec("flash_attn") is not None: - from flash_attn.ops.triton.rotary import apply_rotary - - return apply_rotary - else: - logger.warning( - "flash_attn is not installed. Falling back to PyTorch " - "implementation for rotary embeddings." 
- ) - if default is not None: - return default - - return apply_rotary_emb_torch - - # yarn functions # Inverse dim formula to find dim based on number of rotations def yarn_find_correction_dim( @@ -186,3 +116,155 @@ def _flashinfer_rotary_embedding_fake( mutates_args=["query", "key"], # These tensors are modified in-place fake_impl=_flashinfer_rotary_embedding_fake, ) + + +@CustomOp.register("apply_rotary_emb") +class ApplyRotaryEmb(CustomOp): + def __init__( + self, + enforce_enable: bool = False, + is_neox_style: bool = True, + enable_fp32_compute: bool = False, + ) -> None: + super().__init__(enforce_enable) + self.is_neox_style = is_neox_style + self.enable_fp32_compute = enable_fp32_compute + + self.apply_rotary_emb_flash_attn = None + if find_spec("flash_attn") is not None: + from flash_attn.ops.triton.rotary import apply_rotary + + self.apply_rotary_emb_flash_attn = apply_rotary + + @staticmethod + def forward_static( + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + is_neox_style: bool = True, + enable_fp32_compute: bool = False, + ) -> torch.Tensor: + """ + Args: + x: [batch_size (optional), seq_len, num_heads, head_size] + cos: [seq_len, head_size // 2] + sin: [seq_len, head_size // 2] + is_neox_style: Whether to use the Neox-style or GPT-J-style. + enable_fp32_compute: Temporarily convert x, cos, sin to FP32 dtype + for higher accuracy. 
+ """ + origin_dtype = x.dtype + if enable_fp32_compute: + x = x.float() + + cos = cos.unsqueeze(-2).to(x.dtype) + sin = sin.unsqueeze(-2).to(x.dtype) + + if is_neox_style: + x1, x2 = torch.chunk(x, 2, dim=-1) + else: + x1 = x[..., ::2] + x2 = x[..., 1::2] + + o1 = x1 * cos - x2 * sin + o2 = x2 * cos + x1 * sin + + if is_neox_style: + output = torch.cat((o1, o2), dim=-1) + else: + output = torch.stack((o1, o2), dim=-1).flatten(-2) + + if enable_fp32_compute: + output = output.to(origin_dtype) + return output + + def forward_native( + self, + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + ) -> torch.Tensor: + output = self.forward_static( + x, cos, sin, self.is_neox_style, self.enable_fp32_compute + ) + return output + + def forward_cuda( + self, + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + ) -> torch.Tensor: + from vllm.vllm_flash_attn.layers.rotary import apply_rotary_emb + + origin_dtype = x.dtype + if self.enable_fp32_compute: + x = x.float() + cos = cos.float() + sin = sin.float() + + origin_shape = x.shape + if len(origin_shape) == 3: + # x: [seq_len, num_heads, head_size] + x = x.unsqueeze(0) + + """ + Arguments of apply_rotary_emb() in vllm_flash_attn: + x: [batch_size, seq_len, nheads, headdim] + cos, sin: [seqlen_rotary, rotary_dim / 2] + interleaved: defalut as False (Neox-style). + ... 
+ """ + interleaved = not self.is_neox_style + output = apply_rotary_emb(x, cos, sin, interleaved) + + if len(origin_shape) == 3: + output = output.squeeze(0) + if self.enable_fp32_compute: + output = output.to(origin_dtype) + return output + + def forward_hip( + self, + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + ) -> torch.Tensor: + if self.apply_rotary_emb_flash_attn is not None: + origin_dtype = x.dtype + if self.enable_fp32_compute: + x = x.float() + cos = cos.float() + sin = sin.float() + + origin_shape = x.shape + if len(origin_shape) == 3: + # x: [seq_len, num_heads, head_size] + x = x.unsqueeze(0) + + """ + Arguments of apply_rotary() in flash_attn: + x: [batch_size, seq_len, nheads, headdim] + cos, sin: [seqlen_rotary, rotary_dim / 2] + interleaved: defalut as False (Neox-style). + ... + """ + interleaved = not self.is_neox_style + output = self.apply_rotary_emb_flash_attn( + x, cos, sin, interleaved=interleaved + ).type_as(x) + + if len(origin_shape) == 3: + output = output.squeeze(0) + if self.enable_fp32_compute: + output = output.to(origin_dtype) + else: + # Falling back to PyTorch native implementation. 
+ output = self.forward_native(x, cos, sin) + + return output + + def extra_repr(self) -> str: + s = f"is_neox_style={self.is_neox_style}" + s += f"enable_fp32_compute={self.enable_fp32_compute}" + return s diff --git a/vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py b/vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py index 749cdbe88a62..2eda63a34ac4 100644 --- a/vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +++ b/vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py @@ -4,7 +4,6 @@ import torch -from .common import apply_rotary_emb_dispatch from .mrope import MRotaryEmbedding @@ -55,14 +54,22 @@ def forward_native( # type: ignore[override] query = query.view(num_tokens, -1, self.head_size) query_rot = query[..., : self.rotary_dim] query_pass = query[..., self.rotary_dim :] - query_rot = apply_rotary_emb_dispatch(query_rot, cos, sin, self.is_neox_style) + query_rot = self.apply_rotary_emb.forward_native( + query_rot, + cos, + sin, + ) query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) key_shape = key.shape key = key.view(num_tokens, -1, self.head_size) key_rot = key[..., : self.rotary_dim] key_pass = key[..., self.rotary_dim :] - key_rot = apply_rotary_emb_dispatch(key_rot, cos, sin, self.is_neox_style) + key_rot = self.apply_rotary_emb.forward_native( + key_rot, + cos, + sin, + ) key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) return query, key diff --git a/vllm/model_executor/layers/rotary_embedding/mrope.py b/vllm/model_executor/layers/rotary_embedding/mrope.py index 0592aa8f967a..a74bf092b182 100644 --- a/vllm/model_executor/layers/rotary_embedding/mrope.py +++ b/vllm/model_executor/layers/rotary_embedding/mrope.py @@ -8,7 +8,6 @@ from vllm.triton_utils import tl, triton from .base import RotaryEmbeddingBase -from .common import apply_rotary_emb_dispatch from .yarn_scaling_rope import YaRNScalingRotaryEmbedding, yarn_get_mscale @@ -301,14 +300,22 @@ def forward_native( 
query = query.view(num_tokens, -1, self.head_size) query_rot = query[..., : self.rotary_dim] query_pass = query[..., self.rotary_dim :] - query_rot = apply_rotary_emb_dispatch(query_rot, cos, sin, self.is_neox_style) + query_rot = self.apply_rotary_emb.forward_native( + query_rot, + cos, + sin, + ) query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) key_shape = key.shape key = key.view(num_tokens, -1, self.head_size) key_rot = key[..., : self.rotary_dim] key_pass = key[..., self.rotary_dim :] - key_rot = apply_rotary_emb_dispatch(key_rot, cos, sin, self.is_neox_style) + key_rot = self.apply_rotary_emb.forward_native( + key_rot, + cos, + sin, + ) key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) return query, key @@ -347,13 +354,21 @@ def forward_cuda( query = query.view(num_tokens, -1, self.head_size) query_rot = query[..., : self.rotary_dim] query_pass = query[..., self.rotary_dim :] - query_rot = apply_rotary_emb_dispatch(query_rot, cos, sin, self.is_neox_style) + query_rot = self.apply_rotary_emb( + query_rot, + cos, + sin, + ) query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) key = key.view(num_tokens, -1, self.head_size) key_rot = key[..., : self.rotary_dim] key_pass = key[..., self.rotary_dim :] - key_rot = apply_rotary_emb_dispatch(key_rot, cos, sin, self.is_neox_style) + key_rot = self.apply_rotary_emb( + key_rot, + cos, + sin, + ) key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) return query, key diff --git a/vllm/model_executor/layers/rotary_embedding/xdrope.py b/vllm/model_executor/layers/rotary_embedding/xdrope.py index 2432273faf19..dab7aad9759a 100644 --- a/vllm/model_executor/layers/rotary_embedding/xdrope.py +++ b/vllm/model_executor/layers/rotary_embedding/xdrope.py @@ -4,7 +4,6 @@ import numpy as np import torch -from .common import apply_rotary_emb_dispatch from .dynamic_ntk_alpha_rope import DynamicNTKAlphaRotaryEmbedding @@ -36,7 +35,7 @@ def __init__( dtype, ) - def 
forward( + def forward_native( self, positions: torch.Tensor, query: torch.Tensor, @@ -68,14 +67,73 @@ def forward( query = query.view(num_tokens, -1, self.head_size) query_rot = query[..., : self.rotary_dim] query_pass = query[..., self.rotary_dim :] - query_rot = apply_rotary_emb_dispatch(query_rot, cos, sin, self.is_neox_style) + query_rot = self.apply_rotary_emb.forward_native( + query_rot, + cos, + sin, + ) + query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) + + key_shape = key.shape + key = key.view(num_tokens, -1, self.head_size) + key_rot = key[..., : self.rotary_dim] + key_pass = key[..., self.rotary_dim :] + key_rot = self.apply_rotary_emb.forward_native( + key_rot, + cos, + sin, + ) + key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) + return query, key + + def forward_cuda( + self, + positions: torch.Tensor, + query: torch.Tensor, + key: torch.Tensor | None = None, + offsets: torch.Tensor | None = None, + ) -> tuple[torch.Tensor, torch.Tensor | None]: + """PyTorch-native implementation equivalent to forward(). 
+ + Args: + positions: + [4, num_tokens] (P/W/H/T positions with multimodal inputs) + query: [num_tokens, num_heads * head_size] + key: [num_tokens, num_kv_heads * head_size] + """ + assert positions.ndim == 2 + assert key is not None + + num_tokens = positions.shape[-1] + cos_sin = self.cos_sin_cache[positions] + cos, sin = cos_sin.chunk(2, dim=-1) + cos = torch.cat( + [m[i] for i, m in enumerate(cos.split(self.xdrope_section, dim=-1))], dim=-1 + ) + sin = torch.cat( + [m[i] for i, m in enumerate(sin.split(self.xdrope_section, dim=-1))], dim=-1 + ) + + query_shape = query.shape + query = query.view(num_tokens, -1, self.head_size) + query_rot = query[..., : self.rotary_dim] + query_pass = query[..., self.rotary_dim :] + query_rot = self.apply_rotary_emb( + query_rot, + cos, + sin, + ) query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) key_shape = key.shape key = key.view(num_tokens, -1, self.head_size) key_rot = key[..., : self.rotary_dim] key_pass = key[..., self.rotary_dim :] - key_rot = apply_rotary_emb_dispatch(key_rot, cos, sin, self.is_neox_style) + key_rot = self.apply_rotary_emb( + key_rot, + cos, + sin, + ) key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) return query, key diff --git a/vllm/model_executor/models/adapters.py b/vllm/model_executor/models/adapters.py index 9ba76f312eda..504de9fe1087 100644 --- a/vllm/model_executor/models/adapters.py +++ b/vllm/model_executor/models/adapters.py @@ -337,6 +337,18 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]): tokens = getattr(text_config, "classifier_from_token", None) method = getattr(text_config, "method", None) + def auto_set_score_bias(weights): + for name, weight in weights: + if name == "score.bias": + device = self.score.weight.device + dtype = self.score.weight.dtype + bias = weight.to(device).to(dtype) + self.score.bias = torch.nn.Parameter(bias) + self.score.skip_bias_add = False + else: + yield name, weight + + weights = 
auto_set_score_bias(weights) if tokens is None and method is None: return super().load_weights(weights) else: diff --git a/vllm/model_executor/models/audioflamingo3.py b/vllm/model_executor/models/audioflamingo3.py new file mode 100644 index 000000000000..0ca5f2c4e0a7 --- /dev/null +++ b/vllm/model_executor/models/audioflamingo3.py @@ -0,0 +1,639 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +# Copyright 2025 The vLLM team. +# Copyright 2025 NVIDIA CORPORATION and the HuggingFace Inc. team. All rights +# reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections.abc import Iterable, Mapping, Sequence +from typing import Annotated, Any, Literal, TypeAlias + +import torch +import torch.nn as nn +from transformers import BatchFeature, PretrainedConfig +from transformers.models.audioflamingo3 import ( + AudioFlamingo3Config, + AudioFlamingo3Processor, +) +from transformers.models.qwen2_audio import Qwen2AudioEncoder + +from vllm.config import VllmConfig +from vllm.config.multimodal import BaseDummyOptions +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.models.module_mapping import MultiModelKeys +from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import ( + MultiModalDataDict, + MultiModalFieldConfig, + MultiModalKwargsItems, +) +from vllm.multimodal.parse import ( + DictEmbeddingItems, + ModalityData, + ModalityDataItems, + MultiModalDataItems, + MultiModalDataParser, +) +from vllm.multimodal.processing import ( + BaseMultiModalProcessor, + BaseProcessingInfo, + PromptReplacement, + PromptUpdate, + PromptUpdateDetails, +) +from vllm.multimodal.profiling import BaseDummyInputsBuilder +from vllm.sequence import IntermediateTensors +from vllm.utils.tensor_schema import TensorSchema, TensorShape + +from .interfaces import ( + MultiModalEmbeddings, + SupportsLoRA, + SupportsMultiModal, + SupportsPP, +) +from .utils import ( + AutoWeightsLoader, + init_vllm_registered_model, + maybe_prefix, +) + +MAX_AUDIO_LEN = 10 * 60 + + +# === Audio Inputs === # +class AudioFlamingo3FeatureInputs(TensorSchema): + """ + Dimensions: + - num_chunks: Number of audio chunks (flattened) + - nmb: Number of mel bins + - num_audios: Number of original audio files + """ + + type: Literal["audio_features"] + input_features: Annotated[ + torch.Tensor | list[torch.Tensor], + TensorShape("num_chunks", "nmb", 3000), + ] + + feature_attention_mask: Annotated[ + torch.Tensor, + TensorShape("num_chunks", 3000), + ] + + chunk_counts: Annotated[ + torch.Tensor, + 
class AudioFlamingo3EmbeddingInputs(TensorSchema):
    """
    Pre-computed audio embeddings passed in place of raw features.

    Dimensions:
        - bn: Batch size
        - naf: Number of audio features
        - hs: Hidden size (must match the hidden size of language model
          backbone)
    """

    type: Literal["audio_embeds"] = "audio_embeds"

    audio_embeds: Annotated[
        list[torch.Tensor],
        TensorShape("bn", "naf", "hs"),
    ]


AudioFlamingo3Inputs: TypeAlias = (
    AudioFlamingo3FeatureInputs | AudioFlamingo3EmbeddingInputs
)


class AudioFlamingo3Encoder(Qwen2AudioEncoder):
    """Whisper-style audio encoder with an extra time/2 average pool.

    Identical to ``Qwen2AudioEncoder`` except that the transformer output is
    average-pooled along the time axis (kernel 2, stride 2) before the final
    layer norm, halving the number of audio tokens.
    """

    def __init__(
        self,
        config: PretrainedConfig,
    ):
        super().__init__(config)
        self.avg_pooler = nn.AvgPool1d(kernel_size=2, stride=2)
        # self.layer_norm is already initialized in super().__init__

    def forward(
        self,
        input_features: torch.Tensor | list[torch.Tensor],
        attention_mask: torch.Tensor | None = None,
    ):
        """Encode mel features into pooled hidden states.

        Args:
            input_features: (batch, num_mel_bins, seq_len) tensor, or a list
                of such tensors that is stacked along a new batch dim.
            attention_mask: optional additive attention mask forwarded to
                every encoder layer (already in -inf/0 form — see caller).

        Returns:
            (batch, seq_len // 2, hidden_size) hidden states.
        """
        # input_features: (batch, num_mel_bins, seq_len)
        if isinstance(input_features, list):
            input_features = torch.stack(input_features)

        hidden_states = nn.functional.gelu(self.conv1(input_features))
        hidden_states = nn.functional.gelu(self.conv2(hidden_states))
        hidden_states = hidden_states.transpose(-1, -2)
        # Positional embeddings are sliced to the (conv-downsampled) length.
        hidden_states = (
            hidden_states + self.embed_positions.weight[: hidden_states.size(-2), :]
        ).to(hidden_states.dtype)

        for layer in self.layers:
            layer_outputs = layer(hidden_states, attention_mask)
            hidden_states = layer_outputs[0]

        # AvgPool (time/2) + LayerNorm
        # hidden_states: (batch, seq_len, hidden_size)
        hidden_states = hidden_states.permute(0, 2, 1)  # (batch, hidden_size, seq_len)
        hidden_states = self.avg_pooler(hidden_states)
        hidden_states = hidden_states.permute(
            0, 2, 1
        )  # (batch, seq_len/2, hidden_size)
        hidden_states = self.layer_norm(hidden_states)

        return hidden_states

    def _get_feat_extract_output_lengths(self, input_lengths: torch.Tensor):
        """
        Computes the output length of the convolutional layers and the output length
        of the audio encoder.

        Returns a tuple ``(conv_lengths, pooled_lengths)``: lengths after the
        stride-2 conv stack and after the additional stride-2 average pool.
        """
        input_lengths = (input_lengths - 1) // 2 + 1
        output_lengths = (input_lengths - 2) // 2 + 1
        return input_lengths, output_lengths


class AudioFlamingo3MultiModalProjector(nn.Module):
    """Two-layer MLP projecting encoder features into the LLM hidden size."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.linear_1 = nn.Linear(
            config.audio_config.hidden_size,
            config.text_config.hidden_size,
            bias=config.projector_bias,
        )
        self.act = get_act_fn(config.projector_hidden_act)
        self.linear_2 = nn.Linear(
            config.text_config.hidden_size,
            config.text_config.hidden_size,
            bias=config.projector_bias,
        )

    def forward(self, audio_features):
        hidden_states = self.linear_1(audio_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states


class AudioFlamingo3ProcessingInfo(BaseProcessingInfo):
    """Exposes the HF config/processor/feature-extractor for AudioFlamingo3."""

    def get_hf_config(self):
        return self.ctx.get_hf_config(AudioFlamingo3Config)

    def get_hf_processor(self, **kwargs: object):
        return self.ctx.get_hf_processor(AudioFlamingo3Processor, **kwargs)

    def get_feature_extractor(self, **kwargs: object):
        hf_processor = self.get_hf_processor(**kwargs)
        feature_extractor = hf_processor.feature_extractor
        return feature_extractor

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No fixed cap on the number of audios per prompt.
        return {"audio": None}


class AudioFlamingo3DummyInputsBuilder(
    BaseDummyInputsBuilder[AudioFlamingo3ProcessingInfo]
):
    """Builds dummy text/audio inputs for memory profiling."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_audios = mm_counts.get("audio", 0)
        hf_processor = self.info.get_hf_processor()
        audio_token = hf_processor.audio_token
        return audio_token * num_audios

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions] | None = None,
    ) -> MultiModalDataDict:
        feature_extractor = self.info.get_feature_extractor()
        sampling_rate = feature_extractor.sampling_rate
        # Profile with the longest supported audio (MAX_AUDIO_LEN seconds).
        audio_len = MAX_AUDIO_LEN * sampling_rate
        num_audios = mm_counts.get("audio", 0)
        audio_overrides = mm_options.get("audio") if mm_options else None

        return {
            "audio": self._get_dummy_audios(
                length=audio_len,
                num_audios=num_audios,
                overrides=audio_overrides,
            )
        }


def _audioflamingo3_field_config(hf_inputs: Mapping[str, torch.Tensor]):
    # When chunk_counts is present, each audio item may span several
    # fixed-size chunks along dim 0, so features/masks are flat-batched
    # with per-item sizes; otherwise one row per audio item.
    chunk_counts = hf_inputs.get("chunk_counts")
    if chunk_counts is not None:
        return dict(
            audio_embeds=MultiModalFieldConfig.batched("audio"),
            input_features=MultiModalFieldConfig.flat_from_sizes(
                "audio", chunk_counts, dim=0
            ),
            feature_attention_mask=MultiModalFieldConfig.flat_from_sizes(
                "audio", chunk_counts, dim=0
            ),
            chunk_counts=MultiModalFieldConfig.batched("audio"),
        )
    return dict(
        audio_embeds=MultiModalFieldConfig.batched("audio"),
        input_features=MultiModalFieldConfig.batched("audio"),
        feature_attention_mask=MultiModalFieldConfig.batched("audio"),
        chunk_counts=MultiModalFieldConfig.batched("audio"),
    )


class AudioFlamingo3MultiModalDataParser(MultiModalDataParser):
    """Data parser that also accepts pre-computed ``audio_embeds`` dicts."""

    def _parse_audio_data(
        self,
        data: dict[str, torch.Tensor] | ModalityData[Any],
    ) -> ModalityDataItems[Any, Any] | None:
        if isinstance(data, dict):
            return DictEmbeddingItems(
                data,
                modality="audio",
                required_fields={"audio_embeds"},
                fields_factory=_audioflamingo3_field_config,
            )
        return super()._parse_audio_data(data)


class AudioFlamingo3MultiModalProcessor(
    BaseMultiModalProcessor[AudioFlamingo3ProcessingInfo]
):
    """Multimodal processor handling multi-chunk audio windows."""

    def _get_data_parser(self) -> MultiModalDataParser:
        feature_extractor = self.info.get_feature_extractor()
        return AudioFlamingo3MultiModalDataParser(
            target_sr=feature_extractor.sampling_rate
        )

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: dict[str, object],
        mm_kwargs: Mapping[str, Any],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """Run the HF processor, computing per-audio chunk counts.

        Chunk counts record how many fixed-length feature windows each audio
        item produced, so the outputs can later be regrouped per item.
        """
        audios = mm_data.pop("audios", [])
        if audios:
            mm_data["audio"] = audios

        # Text-only path: tokenize without invoking the audio pipeline.
        if not mm_data.get("audio", []):
            prompt_ids = self.info.get_tokenizer().encode(prompt)
            prompt_ids = self._apply_hf_processor_tokens_only(prompt_ids)
            return BatchFeature(dict(input_ids=[prompt_ids]), tensor_type="pt")

        feature_extractor = self.info.get_feature_extractor(**mm_kwargs)
        mm_kwargs = dict(
            **mm_kwargs,
            sampling_rate=feature_extractor.sampling_rate,
        )

        # Calculate chunk counts
        audio_list = mm_data.get("audio")
        if not isinstance(audio_list, list):
            audio_list = [audio_list]

        chunk_counts = []
        sampling_rate = feature_extractor.sampling_rate
        chunk_length = feature_extractor.chunk_length
        window_size = int(sampling_rate * chunk_length)
        # MAX_AUDIO_LEN is 10 * 60 in HF processor.
        max_windows = int(MAX_AUDIO_LEN // chunk_length)

        for audio in audio_list:
            # audio is numpy array or list
            n_samples = len(audio) if isinstance(audio, list) else audio.shape[0]

            # Ceil-divide into windows; clamp to the processor's maximum.
            n_win = max(1, (n_samples + window_size - 1) // window_size)
            if n_win > max_windows:
                n_win = max_windows
            chunk_counts.append(n_win)

        outputs = super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )

        # Normalize HF's mask key to vLLM's expected field name.
        if "input_features_mask" in outputs:
            outputs["feature_attention_mask"] = outputs.pop("input_features_mask")

        outputs["chunk_counts"] = torch.tensor(chunk_counts, dtype=torch.long)

        return outputs

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return _audioflamingo3_field_config(hf_inputs)

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Expand each audio placeholder to one token per encoder output."""
        processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        tokenizer = self.info.get_tokenizer()
        vocab = tokenizer.get_vocab()

        audio_token = getattr(processor, "audio_token", "")
        audio_token_id = vocab.get(audio_token)
        if audio_token_id is None:
            # Fallback if not found, though it should be there
            audio_token_id = processor.audio_token_id

        out_mm_data = out_mm_kwargs.get_data()
        feature_attention_mask = out_mm_data.get("feature_attention_mask")
        chunk_counts = out_mm_data.get("chunk_counts")

        def get_replacement_audioflamingo3(item_idx: int):
            if feature_attention_mask is not None:
                if chunk_counts is not None:
                    counts = (
                        chunk_counts.tolist()
                        if isinstance(chunk_counts, torch.Tensor)
                        else chunk_counts
                    )
                    # Chunks for item `item_idx` occupy a contiguous slice of
                    # the flat-batched mask.
                    start_idx = sum(counts[:item_idx])
                    count = counts[item_idx]
                    end_idx = start_idx + count

                    if isinstance(feature_attention_mask, list):
                        mask_list = feature_attention_mask[start_idx:end_idx]
                        if len(mask_list) > 0 and isinstance(
                            mask_list[0], torch.Tensor
                        ):
                            mask = torch.stack(mask_list)
                        else:
                            mask = torch.tensor(mask_list)
                    else:
                        mask = feature_attention_mask[start_idx:end_idx]
                else:
                    # feature_attention_mask is list[Tensor] or Tensor
                    if isinstance(feature_attention_mask, list):
                        mask = feature_attention_mask[item_idx]
                    else:
                        mask = feature_attention_mask[item_idx].unsqueeze(0)

                # mask shape: (num_chunks, 3000)
                # Mirror the encoder's conv (stride 2) + avg-pool (stride 2)
                # downsampling to count output tokens per chunk.
                input_lengths = mask.sum(-1)
                conv_lengths = (input_lengths - 1) // 2 + 1
                audio_output_lengths = (conv_lengths - 2) // 2 + 1
                num_features = audio_output_lengths.sum().item()
            else:
                audio_embeds = out_mm_data["audio_embeds"][item_idx]
                num_features = audio_embeds.shape[0]

            if num_features == 0:
                raise ValueError("Audio is too short")

            audio_tokens = [audio_token_id] * int(num_features)
            return PromptUpdateDetails.select_token_id(
                audio_tokens,
                embed_token_id=audio_token_id,
            )

        return [
            PromptReplacement(
                modality="audio",
                target=audio_token,
                replacement=get_replacement_audioflamingo3,
            )
        ]


@MULTIMODAL_REGISTRY.register_processor(
    AudioFlamingo3MultiModalProcessor,
    info=AudioFlamingo3ProcessingInfo,
    dummy_inputs=AudioFlamingo3DummyInputsBuilder,
)
class AudioFlamingo3ForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA
):
    """
    AudioFlamingo3 model for conditional generation.

    This model integrates a Whisper-based audio encoder with a Qwen2 language model.
    It supports multi-chunk audio processing.
    """

    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models.
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model.",
            connector="multi_modal_projector.",
            tower_model="audio_tower.",
        )

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config

        self.audio_tower = AudioFlamingo3Encoder(
            config.audio_config,
        )
        self.multi_modal_projector = AudioFlamingo3MultiModalProjector(config)

        self.quant_config = quant_config

        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            hf_config=config.text_config,
            prefix=maybe_prefix(prefix, "language_model"),
            architectures=["Qwen2ForCausalLM"],
        )

        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _parse_and_validate_audio_input(
        self, **kwargs: object
    ) -> AudioFlamingo3Inputs | None:
        """Extract audio kwargs into a typed input, or None if no audio."""
        input_features = kwargs.pop("input_features", None)
        audio_embeds = kwargs.pop("audio_embeds", None)
        feature_attention_mask = kwargs.pop("feature_attention_mask", None)
        chunk_counts = kwargs.pop("chunk_counts", None)

        if input_features is None and audio_embeds is None:
            return None

        # Pre-computed embeddings take precedence over raw features.
        if audio_embeds is not None:
            return AudioFlamingo3EmbeddingInputs(
                type="audio_embeds", audio_embeds=audio_embeds
            )

        if input_features is not None:
            return AudioFlamingo3FeatureInputs(
                type="audio_features",
                input_features=input_features,
                feature_attention_mask=feature_attention_mask,
                chunk_counts=chunk_counts,
            )

        raise AssertionError("This line should be unreachable.")

    def _process_audio_input(
        self, audio_input: AudioFlamingo3Inputs
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        """Encode audio features and regroup chunk embeddings per audio item.

        Returns a tuple with one (num_tokens, hidden) tensor per input audio.
        """
        if audio_input["type"] == "audio_embeds":
            audio_embeds = audio_input["audio_embeds"]
            return tuple(audio_embeds)

        input_features = audio_input["input_features"]
        feature_attention_mask = audio_input["feature_attention_mask"]
        chunk_counts = audio_input.get("chunk_counts")

        if isinstance(input_features, list):
            input_features = torch.cat(input_features, dim=0)
            feature_attention_mask = torch.cat(feature_attention_mask, dim=0)

        if chunk_counts is None:
            # Assume one chunk per feature row if counts were not provided.
            chunk_counts = [1] * input_features.shape[0]
        elif isinstance(chunk_counts, torch.Tensor):
            chunk_counts = chunk_counts.tolist()
        elif (
            isinstance(chunk_counts, list)
            and chunk_counts
            and isinstance(chunk_counts[0], torch.Tensor)
        ):
            chunk_counts = [c.item() for c in chunk_counts]

        # Calculate output lengths
        input_lengths = feature_attention_mask.sum(-1)
        # Conv downsampling
        conv_lengths = (input_lengths - 1) // 2 + 1
        # AvgPool downsampling
        audio_output_lengths = (conv_lengths - 2) // 2 + 1

        batch_size, _, max_mel_seq_len = input_features.shape

        # Calculate max_seq_len after convs (before pooling) for attention mask
        max_seq_len = (max_mel_seq_len - 1) // 2 + 1

        # Create a sequence tensor of shape (batch_size, max_seq_len)
        seq_range = (
            torch.arange(
                0,
                max_seq_len,
                dtype=conv_lengths.dtype,
                device=conv_lengths.device,
            )
            .unsqueeze(0)
            .expand(batch_size, max_seq_len)
        )
        lengths_expand = conv_lengths.unsqueeze(-1).expand(batch_size, max_seq_len)
        # Create mask
        padding_mask = seq_range >= lengths_expand

        # Convert the boolean padding mask into an additive -inf mask of
        # shape (batch, 1, max_seq_len, max_seq_len) for the encoder layers.
        audio_attention_mask_ = padding_mask.view(batch_size, 1, 1, max_seq_len).expand(
            batch_size, 1, max_seq_len, max_seq_len
        )
        audio_attention_mask = audio_attention_mask_.to(
            dtype=self.audio_tower.conv1.weight.dtype,
            device=self.audio_tower.conv1.weight.device,
        )
        audio_attention_mask[audio_attention_mask_] = float("-inf")

        # Forward pass
        audio_features = self.audio_tower(
            input_features, attention_mask=audio_attention_mask
        )

        # Project
        audio_features = self.multi_modal_projector(audio_features)

        # Masking after pooling
        num_audios, max_audio_tokens, embed_dim = audio_features.shape
        audio_output_lengths = audio_output_lengths.unsqueeze(1)
        audio_features_mask = (
            torch.arange(max_audio_tokens)
            .expand(num_audios, max_audio_tokens)
            .to(audio_output_lengths.device)
            < audio_output_lengths
        )
        masked_audio_features = audio_features[audio_features_mask].view(-1, embed_dim)

        # Split to tuple of embeddings for individual audio input.
        chunk_embeddings = torch.split(
            masked_audio_features, audio_output_lengths.flatten().tolist()
        )

        # Concatenate the chunks belonging to the same original audio item.
        grouped_embeddings = []
        current_idx = 0
        for count in chunk_counts:
            audio_chunks = chunk_embeddings[current_idx : current_idx + count]
            grouped_embeddings.append(torch.cat(audio_chunks, dim=0))
            current_idx += count
        return tuple(grouped_embeddings)

    def get_language_model(self) -> torch.nn.Module:
        return self.language_model

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        """Return per-audio embedding tensors, or [] if no audio inputs."""
        audio_input = self._parse_and_validate_audio_input(**kwargs)
        if audio_input is None:
            return []
        masked_audio_features = self._process_audio_input(audio_input)
        return masked_audio_features

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        # In pipeline-parallel mode, non-first ranks consume intermediate
        # tensors instead of input embeddings.
        if intermediate_tensors is not None:
            inputs_embeds = None

        hidden_states = self.language_model.model(
            input_ids,
            positions,
            intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
"""Inference-only BAGEL model compatible with HuggingFace weights.

BAGEL is a unified multimodal model for image understanding and generation.
For vLLM, we focus on the image understanding (vision-to-text) capabilities.
"""

from collections.abc import Iterable, Mapping, Sequence
from typing import Any, Literal, TypeAlias

import torch
import torch.nn as nn

from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.linear import (
    ColumnParallelLinear,
    RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
    MultiModalDataDict,
    MultiModalFieldConfig,
    MultiModalKwargsItems,
)
from vllm.multimodal.parse import MultiModalDataItems
from vllm.multimodal.processing import (
    BaseMultiModalProcessor,
    BaseProcessingInfo,
    PromptReplacement,
)
from vllm.multimodal.profiling import BaseDummyInputsBuilder
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.processors.bagel import BagelProcessor
from vllm.utils.tensor_schema import TensorSchema

from .interfaces import (
    MultiModalEmbeddings,
    SupportsLoRA,
    SupportsMultiModal,
    SupportsPP,
)
from .siglip import SiglipVisionModel
from .utils import (
    AutoWeightsLoader,
    WeightsMapper,
    init_vllm_registered_model,
    maybe_prefix,
)

logger = init_logger(__name__)


class BagelImagePixelInputs(TensorSchema):
    """
    Dimensions:
        - bn: Batch size * number of images
        - c: Number of channels (3)
        - h: Height of each image
        - w: Width of each image
    """

    type: Literal["pixel_values"]
    pixel_values: torch.Tensor  # Shape: (bn, 3, h, w)


BagelImageInputs: TypeAlias = BagelImagePixelInputs


class BagelVisionMLP(nn.Module):
    """MLP connector for vision features.

    Two tensor-parallel linear layers (column- then row-parallel) with an
    activation in between, mapping vision hidden size to LLM hidden size.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: int,
        out_features: int,
        act_layer: str = "gelu_pytorch_tanh",
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.fc1 = ColumnParallelLinear(
            in_features,
            hidden_features,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc1",
        )
        self.act = get_act_fn(act_layer)
        self.fc2 = RowParallelLinear(
            hidden_features,
            out_features,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc2",
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Parallel linear layers return (output, bias-or-None) tuples.
        x, _ = self.fc1(x)
        x = self.act(x)
        x, _ = self.fc2(x)
        return x


class PositionEmbedding(nn.Module):
    """2D position embedding for vision tokens using sin-cos embeddings."""

    def __init__(self, max_num_patch_per_side: int, hidden_size: int):
        super().__init__()
        self.max_num_patch_per_side = max_num_patch_per_side
        self.hidden_size = hidden_size

        # Create learnable 2D position embeddings (frozen sin-cos)
        pos_embed = self._get_2d_sincos_pos_embed(hidden_size, max_num_patch_per_side)
        # Non-persistent: not saved in state_dict (also skipped at load time).
        self.register_buffer(
            "pos_embed",
            torch.from_numpy(pos_embed).float(),
            persistent=False,
        )

    @staticmethod
    def _get_2d_sincos_pos_embed(embed_dim: int, grid_size: int):
        """Generate 2D sin-cos position embeddings."""
        import numpy as np

        grid_h = np.arange(grid_size, dtype=np.float32)
        grid_w = np.arange(grid_size, dtype=np.float32)
        grid = np.meshgrid(grid_w, grid_h)  # w goes first
        grid = np.stack(grid, axis=0)
        grid = grid.reshape([2, 1, grid_size, grid_size])
        pos_embed = PositionEmbedding._get_2d_sincos_pos_embed_from_grid(
            embed_dim, grid
        )
        return pos_embed

    @staticmethod
    def _get_2d_sincos_pos_embed_from_grid(embed_dim: int, grid):
        """Generate 2D sin-cos position embeddings from grid."""
        import numpy as np

        assert embed_dim % 2 == 0
        # use half of dimensions to encode grid_h
        emb_h = PositionEmbedding._get_1d_sincos_pos_embed_from_grid(
            embed_dim // 2, grid[0]
        )
        emb_w = PositionEmbedding._get_1d_sincos_pos_embed_from_grid(
            embed_dim // 2, grid[1]
        )
        emb = np.concatenate([emb_h, emb_w], axis=1)
        return emb

    @staticmethod
    def _get_1d_sincos_pos_embed_from_grid(embed_dim: int, pos):
        """Generate 1D sin-cos position embeddings."""
        import numpy as np

        assert embed_dim % 2 == 0
        omega = np.arange(embed_dim // 2, dtype=np.float64)
        omega /= embed_dim / 2.0
        omega = 1.0 / 10000**omega

        pos = pos.reshape(-1)
        out = np.einsum("m,d->md", pos, omega)

        emb_sin = np.sin(out)
        emb_cos = np.cos(out)
        emb = np.concatenate([emb_sin, emb_cos], axis=1)
        return emb

    def forward(self, position_ids: torch.Tensor) -> torch.Tensor:
        """
        Args:
            position_ids: Flattened position IDs, shape (N,) where each ID
                corresponds to a position in the flattened grid
        Returns:
            Position embeddings of shape (N, hidden_size)
        """
        # Ensure position_ids are on the same device as pos_embed
        position_ids = position_ids.to(self.pos_embed.device)
        return self.pos_embed[position_ids]


class BagelProcessingInfo(BaseProcessingInfo):
    """Processing information for BAGEL model."""

    def get_hf_processor(self, **kwargs: object) -> BagelProcessor:
        from vllm.transformers_utils.processor import cached_get_image_processor

        image_processor = cached_get_image_processor(
            self.ctx.model_config.model,
            revision=self.ctx.model_config.revision,
            trust_remote_code=self.ctx.model_config.trust_remote_code,
        )

        tokenizer = self.get_tokenizer()

        return BagelProcessor(
            image_processor=image_processor,
            tokenizer=tokenizer,
            **kwargs,
        )

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No fixed cap on the number of images per prompt.
        return {"image": None}

    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int]:
        hf_config = self.get_hf_config()
        # Calculate max tokens per image
        # For BAGEL: (vit_max_num_patch_per_side) ** 2
        max_num_patches = hf_config.vit_max_num_patch_per_side**2
        return {"image": max_num_patches}

    def get_num_image_tokens(
        self,
        *,
        image_width: int,
        image_height: int,
    ) -> int:
        hf_config = self.get_hf_config()
        vit_config = hf_config.vit_config
        patch_size = vit_config.patch_size

        # Calculate number of patches
        num_patches_h = image_height // patch_size
        num_patches_w = image_width // patch_size
        return num_patches_h * num_patches_w


class BagelDummyInputsBuilder(BaseDummyInputsBuilder[BagelProcessingInfo]):
    """Build dummy inputs for BAGEL model profiling."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_images = mm_counts.get("image", 0)
        # Use a simple placeholder for each image
        return "<|image_pad|>" * num_images

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions] | None = None,
    ) -> MultiModalDataDict:
        num_images = mm_counts.get("image", 0)
        hf_config = self.info.get_hf_config()
        vit_config = hf_config.vit_config

        # Use the configured image size
        image_size = vit_config.image_size
        image_overrides = mm_options.get("image") if mm_options else None

        return {
            "image": self._get_dummy_images(
                width=image_size,
                height=image_size,
                num_images=num_images,
                overrides=image_overrides,
            ),
        }


class BagelMultiModalProcessor(BaseMultiModalProcessor[BagelProcessingInfo]):
    """Multimodal processor for BAGEL model."""

    def _hf_processor_applies_updates(
        self,
        prompt_text: str,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> bool:
        # Placeholder expansion is done by vLLM (_get_prompt_updates),
        # not by the HF processor.
        return False

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptReplacement]:
        """Replace image placeholders with the correct number of tokens."""
        hf_config = self.info.get_hf_config()

        # Get the tokenizer to look up the image token ID
        tokenizer = self.info.get_tokenizer()
        image_token_id = tokenizer.get_vocab().get("<|image_pad|>")
        if image_token_id is None:
            raise ValueError(
                "Image token '<|image_pad|>' not found in tokenizer vocabulary"
            )

        def get_replacement_bagel(item_idx: int):
            # For BAGEL, calculate number of tokens based on max patch size
            num_tokens = hf_config.vit_max_num_patch_per_side**2
            # Use the image token ID from tokenizer
            return [image_token_id] * num_tokens

        return [
            PromptReplacement(
                modality="image",
                target=[image_token_id],
                replacement=get_replacement_bagel,
            )
        ]

    def _get_mm_fields_config(
        self,
        hf_inputs: Any,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return {
            "pixel_values": MultiModalFieldConfig.batched("image"),
        }


@MULTIMODAL_REGISTRY.register_processor(
    BagelMultiModalProcessor,
    info=BagelProcessingInfo,
    dummy_inputs=BagelDummyInputsBuilder,
)
class BagelForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsLoRA, SupportsPP
):
    """
    BAGEL: A unified multimodal model for image understanding and generation.

    For vLLM, we focus on the image understanding (vision-to-text) capabilities.
    The image generation part is not supported in vLLM.
    """

    # Weight mapping from HF to vLLM
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "language_model.": "language_model.",
            "vit_model.": "vit_model.",
            "connector.": "connector.",
            "vit_pos_embed.": "vit_pos_embed.",
        }
    )

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()

        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config

        # Ensure we have a BagelConfig (check by name to handle trust_remote_code)
        # When trust_remote_code=True, the config comes from transformers_modules
        if type(config).__name__ != "BagelConfig":
            raise ValueError(
                f"Expected BagelConfig, got {type(config).__name__}. "
                "Make sure the model config is properly loaded."
            )

        self.config = config
        self.multimodal_config = multimodal_config

        # Initialize language model (Qwen2)
        # Pass the llm_config from BagelConfig to initialize Qwen2 properly
        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            hf_config=config.llm_config,
            prefix=maybe_prefix(prefix, "language_model"),
            architectures=["Qwen2ForCausalLM"],
        )

        # Initialize vision model (SigLIP) if visual understanding is enabled
        if config.visual_und:
            # Fix vit_config: checkpoint has 26 layers (0-25) but config says 27
            # Also disable head as it's not in checkpoint
            vit_config = config.vit_config
            if vit_config.num_hidden_layers == 27:
                logger.warning(
                    "Overriding vit_config.num_hidden_layers from 27 to 26 "
                    "to match the Bagel model checkpoint."
                )
                vit_config.num_hidden_layers = 26
            if not hasattr(vit_config, "vision_use_head"):
                logger.warning(
                    "Setting vit_config.vision_use_head to False as it is not "
                    "present in the Bagel model checkpoint."
                )
                vit_config.vision_use_head = False

            self.vit_model = SiglipVisionModel(
                config=vit_config,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "vit_model"),
            )

            # Initialize connector (MLP)
            vit_hidden_size = config.vit_config.hidden_size
            llm_hidden_size = config.llm_config.hidden_size

            self.connector = BagelVisionMLP(
                in_features=vit_hidden_size,
                hidden_features=llm_hidden_size,
                out_features=llm_hidden_size,
                act_layer=config.connector_act,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "connector"),
            )

            # Position embedding for vision tokens
            self.vit_pos_embed = PositionEmbedding(
                max_num_patch_per_side=config.vit_max_num_patch_per_side,
                hidden_size=llm_hidden_size,
            )
        else:
            self.vit_model = None
            self.connector = None
            self.vit_pos_embed = None

        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> BagelImageInputs | None:
        """Extract image kwargs into a typed input, or None if no images."""
        pixel_values = kwargs.pop("pixel_values", None)

        if pixel_values is None:
            return None

        return BagelImagePixelInputs(
            type="pixel_values",
            pixel_values=pixel_values,
        )

    def _process_image_input(
        self, image_input: BagelImageInputs
    ) -> tuple[torch.Tensor, ...]:
        """Process image inputs through vision encoder and connector."""
        pixel_values = image_input["pixel_values"]

        # Handle potential extra batch dimension
        # Expected shape: (batch_size * num_images, 3, H, W)
        # But might receive: (batch_size, num_images, 3, H, W)
        if pixel_values.ndim == 5:
            # Flatten batch and num_images dimensions
            batch_size, num_images, channels, height, width = pixel_values.shape
            pixel_values = pixel_values.reshape(
                batch_size * num_images, channels, height, width
            )

        # Get vision features from SigLIP
        # pixel_values shape: (batch_size * num_images, 3, H, W)
        vision_features = self.vit_model(pixel_values)

        # Pass through connector
        vision_embeds = self.connector(vision_features)

        # Add position embeddings
        batch_size, num_patches, hidden_size = vision_embeds.shape
        patch_size = self.config.vit_config.patch_size
        image_size = self.config.vit_config.image_size

        # Calculate grid dimensions
        num_patches_per_side = image_size // patch_size

        # Create flattened position IDs (0 to num_patches-1)
        # For BAGEL, we use extrapolate mode by default
        h_coords = torch.arange(num_patches_per_side, device=vision_embeds.device)
        w_coords = torch.arange(num_patches_per_side, device=vision_embeds.device)
        # Row-major position IDs into the max_num_patch_per_side grid.
        position_ids = (
            h_coords[:, None] * self.config.vit_max_num_patch_per_side + w_coords
        ).flatten()
        position_ids = position_ids.unsqueeze(0).expand(batch_size, -1).flatten()

        # Add position embeddings
        pos_embeds = self.vit_pos_embed(position_ids)
        pos_embeds = pos_embeds.reshape(batch_size, num_patches, hidden_size)
        # Ensure pos_embeds are on the same device as vision_embeds
        pos_embeds = pos_embeds.to(vision_embeds.device)
        vision_embeds = vision_embeds + pos_embeds

        # Split by image
        return tuple(vision_embeds)

    def get_multimodal_embeddings(self, **kwargs: object) -> MultiModalEmbeddings:
        """Get multimodal embeddings from input."""
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return []

        return self._process_image_input(image_input)

    def get_language_model(self) -> nn.Module:
        return self.language_model

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        """Run forward pass for BAGEL.

        Args:
            input_ids: Flattened (concatenated) input_ids corresponding to a batch.
            positions: Flattened (concatenated) position ids corresponding to a batch.
            intermediate_tensors: Intermediate tensors from prior forward pass.
            inputs_embeds: Optional tensor of input embeddings.
        """
        if intermediate_tensors is not None:
            inputs_embeds = None

        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights from checkpoint."""
        skip_prefixes = []
        # Skip vit_pos_embed.pos_embed as it's handled by PositionEmbedding module
        skip_prefixes.append("vit_pos_embed.pos_embed")

        # If visual understanding is disabled, skip vision-related weights
        if self.vit_model is None:
            skip_prefixes.extend(["vit_model.", "connector.", "vit_pos_embed"])

        # Skip generation-related weights since we only support text2text and image2text
        # Filter out all image generation components:
        # - 'moe_gen': MoE generation weights
        # - 'latent_pos_embed': Latent position embeddings for VAE
        # - 'llm2vae', 'vae2llm': LLM-VAE projections
        # - 'time_embedder': Timestep embeddings for diffusion
        # - VAE encoder/decoder: Use specific prefixes to avoid matching vision encoder
        generation_keywords = [
            "moe_gen",
            "latent_pos_embed",
            "llm2vae",
            "vae2llm",
            "time_embedder",
        ]
        vae_prefixes = [
            "decoder.",
            "encoder.",
        ]  # VAE encoder/decoder, not vision encoder
        filtered_weights = []
        for name, tensor in weights:
            # Skip generation-related keywords
            if any(skip in name for skip in generation_keywords):
                continue
            if any(name.startswith(prefix) for prefix in vae_prefixes):
                continue

            # Checkpoint may store patch embedding as a flattened 2D matrix;
            # restore it to conv weight layout (out, in, kh, kw) if the sizes
            # match the configured patch/channel dimensions.
            if "patch_embedding.weight" in name and tensor.ndim == 2:
                out_channels = tensor.shape[0]
                in_features = tensor.shape[1]
                patch_size = self.config.vit_config.patch_size
                in_channels = self.config.vit_config.num_channels
                if in_features == in_channels * patch_size * patch_size:
                    tensor = tensor.reshape(
                        out_channels, patch_size, patch_size, in_channels
                    )
                    tensor = tensor.permute(0, 3, 1, 2).contiguous()

            filtered_weights.append((name, tensor))

        loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes)
        return loader.load_weights(filtered_weights, mapper=self.hf_to_vllm_mapper)
sin = freqs.sin() - - cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() - sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() - - output = (tensor * cos) + (rotate_half(tensor) * sin) - - output = output.to(orig_dtype) - - return output - - class VisionRotaryEmbedding(nn.Module): def __init__(self, dim: int, theta: float = 10000.0) -> None: super().__init__() @@ -254,11 +230,15 @@ def __init__( bias: bool = True, *, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.embed_dim = dim self.tp_size = ( @@ -287,31 +267,18 @@ def __init__( prefix=f"{prefix}.proj", disable_tp=use_data_parallel, ) - # Select attention backend - self.attn_backend = get_vit_attn_backend( - self.hidden_size_per_attention_head, - torch.get_default_dtype(), - attn_backend_override=attn_backend_override, + + self.attn = MMEncoderAttention( + num_heads=self.num_attention_heads_per_partition, + head_size=self.hidden_size_per_attention_head, + multimodal_config=multimodal_config, + prefix=f"{prefix}.attn", ) - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) + self.apply_rotary_emb = ApplyRotaryEmb( + enforce_enable=True, + enable_fp32_compute=True, ) - if self.attn_backend not in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.TORCH_SDPA, - AttentionBackendEnum.ROCM_AITER_FA, - }: - raise RuntimeError( - f"Unsupported vision attention backend: {self.attn_backend}" - ) - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } def forward( self, @@ -319,7 +286,7 @@ def forward( 
cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor | None = None, *, - max_seqlen: int | None = None, + max_seqlen: torch.Tensor | None = None, ) -> torch.Tensor: # [S, C] -> [S, B=1, C] x = hidden_states.unsqueeze(1) @@ -333,44 +300,20 @@ def forward( if rotary_pos_emb is not None: qk_concat = torch.cat([q, k], dim=0) - qk_rotated = apply_rotary_pos_emb_vision(qk_concat, rotary_pos_emb) + qk_rotated = self.apply_rotary_emb( + qk_concat, + rotary_pos_emb.cos(), + rotary_pos_emb.sin(), + ) q, k = torch.chunk(qk_rotated, 2, dim=0) - if self.is_flash_attn_backend: - q_ = q.reshape(bs * q.shape[1], q.shape[2], q.shape[3]) - k_ = k.reshape(bs * k.shape[1], k.shape[2], k.shape[3]) - v_ = v.reshape(bs * v.shape[1], v.shape[2], v.shape[3]) - output = self.flash_attn_varlen_func( - q_, - k_, - v_, - cu_seqlens_q=cu_seqlens, - cu_seqlens_k=cu_seqlens, - max_seqlen_q=max_seqlen, - max_seqlen_k=max_seqlen, - dropout_p=0.0, - causal=False, - ) - context_layer = output.view( - bs, - -1, - self.num_attention_heads_per_partition, - self.hidden_size_per_attention_head, - ) - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - outputs = [] - for i in range(1, len(cu_seqlens)): - s = int(cu_seqlens[i - 1]) - e = int(cu_seqlens[i]) - q_i = q[:, s:e].permute(0, 2, 1, 3) - k_i = k[:, s:e].permute(0, 2, 1, 3) - v_i = v[:, s:e].permute(0, 2, 1, 3) - out_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0) - out_i = out_i.permute(0, 2, 1, 3) - outputs.append(out_i) - context_layer = torch.cat(outputs, dim=1) if outputs else q[:, :0] - else: - raise RuntimeError("Unsupported attention backend") + context_layer = self.attn( + query=q, + key=k, + value=v, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) # [B,S,H,D] -> [S,B,H*D] -> [S, C] context_layer = context_layer.permute(1, 0, 2, 3).contiguous() @@ -385,14 +328,19 @@ def __init__( config, *, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", 
- use_data_parallel: bool = False, ): super().__init__() hidden_features = config.intermediate_size in_features = config.embed_dim bias = config.use_bias + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) # Referenced aimv2.py AIMv2SwiGLUFFN self.fc13 = MergedColumnParallelLinear( in_features, @@ -498,9 +446,8 @@ def __init__( config, *, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() @@ -510,16 +457,15 @@ def __init__( num_heads=config.num_attention_heads, bias=config.use_bias, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.attn", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) self.norm1 = RMSNorm(config.embed_dim, eps=config.rms_norm_eps) self.mlp = DotsSwiGLUFFN( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.mlp", - use_data_parallel=use_data_parallel, ) self.norm2 = RMSNorm(config.embed_dim, eps=config.rms_norm_eps) @@ -546,12 +492,11 @@ def __init__( self, config: DotsVisionConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, *, num_hidden_layers_override: int | None = None, require_post_norm: bool | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() self.config = config @@ -561,6 +506,11 @@ def __init__( head_dim = config.embed_dim // config.num_attention_heads self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2) + attn_backend_override = ( + multimodal_config.mm_encoder_attn_backend + if multimodal_config is not None + else None + ) self.attn_backend = get_vit_attn_backend( head_size=head_dim, dtype=torch.get_default_dtype(), 
@@ -578,9 +528,8 @@ def __init__( DotsVisionBlock( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.blocks.{i}", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) for i in range(num_layers) ] @@ -592,6 +541,11 @@ def __init__( else: self.post_trunk_norm = None + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.merger = PatchMerger( dim=config.hidden_size, context_dim=config.embed_dim, @@ -647,7 +601,7 @@ def compute_attn_mask_seqlen(self, cu_seqlens: torch.Tensor) -> int | None: self.attn_backend == AttentionBackendEnum.FLASH_ATTN or self.attn_backend == AttentionBackendEnum.ROCM_AITER_FA ): - max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() return max_seqlen def forward( @@ -733,17 +687,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.config.vision_config = vision_config else: vision_config = self.config.vision_config - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) + self.vision_tower = DotsVisionTransformer( vision_config, quant_config=self.quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "vision_tower"), - use_data_parallel=self.use_data_parallel, - attn_backend_override=attn_backend_override, ) self.language_model: Qwen2ForCausalLM = init_vllm_registered_model( vllm_config=vllm_config, diff --git a/vllm/model_executor/models/ernie45_vl.py b/vllm/model_executor/models/ernie45_vl.py index 053d260cc09b..61cf78fdb5a6 100644 --- a/vllm/model_executor/models/ernie45_vl.py +++ b/vllm/model_executor/models/ernie45_vl.py @@ -33,14 +33,14 @@ import torch import torch.nn as nn import torch.nn.functional as F -from einops import rearrange, repeat +from einops import rearrange from transformers import BatchFeature from 
vllm.attention.backends.registry import AttentionBackendEnum -from vllm.attention.layer import ( - maybe_get_vit_flash_attn_backend, +from vllm.attention.layers.mm_encoder_attention import ( + MMEncoderAttention, ) -from vllm.config import VllmConfig +from vllm.config import MultiModalConfig, VllmConfig from vllm.config.multimodal import BaseDummyOptions from vllm.distributed import parallel_state from vllm.distributed import utils as dist_utils @@ -53,6 +53,9 @@ RowParallelLinear, ) from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.rotary_embedding.common import ( + ApplyRotaryEmb, +) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import ( @@ -69,7 +72,6 @@ PromptUpdate, ) from vllm.multimodal.profiling import BaseDummyInputsBuilder -from vllm.platforms import current_platform from vllm.sequence import IntermediateTensors from vllm.utils.tensor_schema import TensorSchema, TensorShape @@ -89,52 +91,6 @@ # === Vision Transformer === # -def rotate_half(x: torch.Tensor, interleaved: bool = False) -> torch.Tensor: - if not interleaved: - x1, x2 = x.chunk(2, dim=-1) - return torch.cat((-x2, x1), dim=-1) - else: - x1, x2 = x[..., ::2], x[..., 1::2] - return rearrange( - torch.stack((-x2, x1), dim=-1), "... d two -> ... (d two)", two=2 - ) - - -def apply_rotary_emb_torch( - x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, interleaved: bool = False -) -> torch.Tensor: - """ - x: (batch_size, seqlen, nheads, headdim) - cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2) - """ - ro_dim = cos.shape[-1] * 2 - assert ro_dim <= x.shape[-1] - cos = repeat( - cos, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)" - ) - sin = repeat( - sin, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 
1 (d 2)" - ) - return torch.cat( - [ - x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, - x[..., ro_dim:], - ], - dim=-1, - ) - - -def apply_rotary_pos_emb_vision(t: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor: - t_ = t.float() - cos = freqs.cos() - sin = freqs.sin() - apply_rotary_emb = apply_rotary_emb_torch - if current_platform.is_cuda(): - from vllm.vllm_flash_attn.layers.rotary import apply_rotary_emb - output = apply_rotary_emb(t_, cos, sin).type_as(t) - return output - - def all_gather_interleave(local_tensor, hidden_size: int, tp_size: int): """All-gather the input tensor interleavely across model parallel group.""" import torch.distributed as dist @@ -163,8 +119,8 @@ def __init__( num_heads: int, projection_size: int, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() # Per attention head and per partition values. @@ -193,33 +149,18 @@ def __init__( prefix=f"{prefix}.proj", ) - # Detect attention implementation. - self.attn_backend = get_vit_attn_backend( + self.attn = MMEncoderAttention( + num_heads=self.num_attention_heads_per_partition, head_size=self.hidden_size_per_attention_head, - dtype=torch.get_default_dtype(), - attn_backend_override=attn_backend_override, + multimodal_config=multimodal_config, + prefix=f"{prefix}.attn", ) - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) + self.apply_rotary_emb = ApplyRotaryEmb( + enforce_enable=True, + enable_fp32_compute=True, ) - if self.attn_backend not in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.TORCH_SDPA, - AttentionBackendEnum.ROCM_AITER_FA, - }: - raise RuntimeError( - f"Ernie45-VL does not support {self.attn_backend} backend now." 
- ) - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } - def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]: # [s, b, 3 * head * head_dim] seq_len, bs, _ = qkv.shape @@ -253,58 +194,32 @@ def forward( x: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor, - max_seqlen: int | None = None, # Only used for Flash Attention + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention ) -> torch.Tensor: # [s, b, c] --> [s, b, head * 3 * head_dim] x, _ = self.qkv(x) # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim] q, k, v = self.split_qkv(x) - batch_size = q.shape[1] q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous() for x in (q, k, v)) if rotary_pos_emb is not None: qk_concat = torch.cat([q, k], dim=0) - qk_rotated = apply_rotary_pos_emb_vision(qk_concat, rotary_pos_emb) - q, k = torch.chunk(qk_rotated, 2, dim=0) - - if self.is_flash_attn_backend: - q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]) - - output = self.flash_attn_varlen_func( - q, - k, - v, - cu_seqlens_q=cu_seqlens, - cu_seqlens_k=cu_seqlens, - max_seqlen_q=max_seqlen, - max_seqlen_k=max_seqlen, - dropout_p=0.0, - causal=False, + qk_rotated = self.apply_rotary_emb( + qk_concat, + rotary_pos_emb.cos(), + rotary_pos_emb.sin(), ) + q, k = torch.chunk(qk_rotated, 2, dim=0) - context_layer = rearrange( - output, "(b s) h d -> s b (h d)", b=batch_size - ).contiguous() - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - # Execute attention entry by entry for speed & less VRAM. 
- outputs = [] - - lens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist() - q_chunks = torch.split(q, lens, dim=1) - k_chunks = torch.split(k, lens, dim=1) - v_chunks = torch.split(v, lens, dim=1) - for q_i, k_i, v_i in zip(q_chunks, k_chunks, v_chunks): - q_i, k_i, v_i = ( - rearrange(x, "b s h d -> b h s d") for x in [q_i, k_i, v_i] - ) - output_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0) - output_i = rearrange(output_i, "b h s d -> b s h d ") - outputs.append(output_i) - context_layer = torch.cat(outputs, dim=1) - context_layer = rearrange( - context_layer, "b s h d -> s b (h d)" - ).contiguous() + output = self.attn( + query=q, + key=k, + value=v, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) + context_layer = rearrange(output, "b s h d -> s b (h d)").contiguous() output, _ = self.proj(context_layer) return output @@ -350,8 +265,8 @@ def __init__( act_layer: type[nn.Module] = QuickGELU, norm_layer: Callable[[int], nn.Module] | None = None, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() @@ -366,8 +281,8 @@ def __init__( num_heads=num_heads, projection_size=dim, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.attn", - attn_backend_override=attn_backend_override, ) self.mlp = Ernie4_5_VisionMLP( @@ -383,7 +298,7 @@ def forward( hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor, - max_seqlen: int | None = None, # Only used for Flash Attention + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention ) -> torch.Tensor: hidden_states = hidden_states + self.attn( self.norm1(hidden_states), @@ -441,8 +356,8 @@ def __init__( vision_config, norm_eps: float = 1e-6, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - 
attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() patch_size = vision_config.patch_size @@ -477,8 +392,8 @@ def __init__( mlp_ratio=mlp_ratio, norm_layer=norm_layer, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.blocks.{layer_idx}", - attn_backend_override=attn_backend_override, ) for layer_idx in range(depth) ] @@ -489,6 +404,9 @@ def __init__( ) self.ln = nn.LayerNorm(hidden_size, eps=1e-6) + attn_backend_override = ( + multimodal_config.mm_encoder_attn_backend if multimodal_config else None + ) self.attn_backend = get_vit_attn_backend( head_size=head_dim, dtype=torch.get_default_dtype(), @@ -535,13 +453,13 @@ def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor: rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb - def compute_attn_mask_seqlen(self, cu_seqlens: torch.Tensor) -> int | None: + def compute_attn_mask_seqlen(self, cu_seqlens: torch.Tensor) -> torch.Tensor | None: max_seqlen = None if ( self.attn_backend == AttentionBackendEnum.FLASH_ATTN or self.attn_backend == AttentionBackendEnum.ROCM_AITER_FA ): - max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() return max_seqlen def forward( @@ -1304,17 +1222,12 @@ def __init__(self, vllm_config: VllmConfig, prefix: str = "") -> None: self.config = config self.multimodal_config = multimodal_config - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.vision_model = Ernie4_5_VisionTransformer( config.vision_config, norm_eps=getattr(config, "rms_norm_eps", 1e-6), quant_config=quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "vision_model"), - attn_backend_override=attn_backend_override, ) self.language_model = Ernie4_5_VLMoeForCausalLM( diff --git a/vllm/model_executor/models/glm4_1v.py b/vllm/model_executor/models/glm4_1v.py index 
786482d77a1d..84989537da6e 100644 --- a/vllm/model_executor/models/glm4_1v.py +++ b/vllm/model_executor/models/glm4_1v.py @@ -47,8 +47,10 @@ from transformers.video_utils import VideoMetadata from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.attention.layer import maybe_get_vit_flash_attn_backend -from vllm.config import VllmConfig +from vllm.attention.layers.mm_encoder_attention import ( + MMEncoderAttention, +) +from vllm.config import MultiModalConfig, VllmConfig from vllm.config.multimodal import BaseDummyOptions, VideoDummyOptions from vllm.distributed import get_tensor_model_parallel_world_size, parallel_state from vllm.distributed import utils as dist_utils @@ -63,6 +65,9 @@ ) from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.rotary_embedding.common import ( + ApplyRotaryEmb, +) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.module_mapping import MultiModelKeys from vllm.multimodal import MULTIMODAL_REGISTRY @@ -93,7 +98,7 @@ SupportsMultiModal, SupportsPP, ) -from .qwen2_vl import _create_qwen2vl_field_factory, apply_rotary_pos_emb_vision +from .qwen2_vl import _create_qwen2vl_field_factory from .utils import ( AutoWeightsLoader, WeightsMapper, @@ -191,10 +196,15 @@ def __init__( hidden_features: int, bias: bool = False, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, ): super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.gate_up_proj = MergedColumnParallelLinear( input_size=in_features, output_sizes=[hidden_features] * 2, @@ -248,12 +258,16 @@ def __init__( num_heads: int, projection_size: int, quant_config: QuantizationConfig | None = None, + multimodal_config: 
MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() # Per attention head and per partition values. + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.tp_size = ( 1 if use_data_parallel else get_tensor_model_parallel_world_size() ) @@ -287,33 +301,13 @@ def __init__( disable_tp=use_data_parallel, ) - # Detect attention implementation. - self.attn_backend = get_vit_attn_backend( + self.attn = MMEncoderAttention( + num_heads=self.num_attention_heads_per_partition, head_size=self.hidden_size_per_attention_head, - dtype=torch.get_default_dtype(), - attn_backend_override=attn_backend_override, + multimodal_config=multimodal_config, ) - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) - ) - - if self.attn_backend not in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.TORCH_SDPA, - AttentionBackendEnum.ROCM_AITER_FA, - }: - raise RuntimeError( - f"GLM-4V does not support {self.attn_backend} backend now." 
- ) - - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } + self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True) def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]: # [s, b, 3 * head * head_dim] @@ -338,61 +332,33 @@ def forward( cu_seqlens: torch.Tensor, rotary_pos_emb_cos: torch.Tensor, rotary_pos_emb_sin: torch.Tensor, - max_seqlen: int | None = None, # Only used for Flash Attention + max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention ) -> torch.Tensor: # [s, b, c] --> [s, b, head * 3 * head_dim] x, _ = self.qkv(x) # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim] q, k, v = self.split_qkv(x) - batch_size = q.shape[1] q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous() for x in (q, k, v)) if rotary_pos_emb_cos is not None and rotary_pos_emb_sin is not None: # [2 * b, s, heads, head_dim] qk_concat = torch.cat([q, k], dim=0) - qk_rotated = apply_rotary_pos_emb_vision( - qk_concat, rotary_pos_emb_cos, rotary_pos_emb_sin + qk_rotated = self.apply_rotary_emb( + qk_concat, + rotary_pos_emb_cos, + rotary_pos_emb_sin, ) q, k = torch.chunk(qk_rotated, 2, dim=0) - if self.is_flash_attn_backend: - q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]) - - output = self.flash_attn_varlen_func( - q, - k, - v, - cu_seqlens_q=cu_seqlens, - cu_seqlens_k=cu_seqlens, - max_seqlen_q=max_seqlen, - max_seqlen_k=max_seqlen, - dropout_p=0.0, - causal=False, - ) - - context_layer = rearrange( - output, "(b s) h d -> s b (h d)", b=batch_size - ).contiguous() - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - # Execute attention entry by entry for speed & less VRAM. 
- outputs = [] - - lens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist() - q_chunks = torch.split(q, lens, dim=1) - k_chunks = torch.split(k, lens, dim=1) - v_chunks = torch.split(v, lens, dim=1) - for q_i, k_i, v_i in zip(q_chunks, k_chunks, v_chunks): - q_i, k_i, v_i = ( - rearrange(x, "b s h d -> b h s d") for x in [q_i, k_i, v_i] - ) - output_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0) - output_i = rearrange(output_i, "b h s d -> b s h d ") - outputs.append(output_i) - context_layer = torch.cat(outputs, dim=1) - context_layer = rearrange( - context_layer, "b s h d -> s b (h d)" - ).contiguous() + context_layer = self.attn( + query=q, + key=k, + value=v, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) + context_layer = rearrange(context_layer, "b s h d -> s b (h d)").contiguous() output, _ = self.proj(context_layer) return output @@ -406,9 +372,8 @@ def __init__( mlp_hidden_dim: int, norm_layer: Callable[[int], nn.Module] | None = None, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() if norm_layer is None: @@ -420,17 +385,16 @@ def __init__( num_heads=num_heads, projection_size=dim, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.attn", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) self.mlp = Glm4vVisionMLP( dim, mlp_hidden_dim, bias=False, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.mlp", - use_data_parallel=use_data_parallel, ) def forward( @@ -489,11 +453,16 @@ def __init__( d_model: int, context_dim: int, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, bias: bool = False, prefix: str = "", - use_data_parallel: bool = False, ) -> None: super().__init__() + use_data_parallel 
= ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.hidden_size = d_model self.proj = ColumnParallelLinear( self.hidden_size, @@ -649,19 +618,19 @@ def __init__( vision_config: Glm4vVisionConfig, norm_eps: float = 1e-6, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() + assert multimodal_config is not None, "multimodal_config must be provided" + patch_size = vision_config.patch_size temporal_patch_size = vision_config.temporal_patch_size in_channels = vision_config.in_channels depth = vision_config.depth self.hidden_size = vision_config.hidden_size self.num_heads = vision_config.num_heads - self.use_data_parallel = use_data_parallel self.patch_size = vision_config.patch_size self.spatial_merge_size = vision_config.spatial_merge_size @@ -690,9 +659,8 @@ def __init__( mlp_hidden_dim=vision_config.out_hidden_size, norm_layer=norm_layer, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.blocks.{layer_idx}", - use_data_parallel=self.use_data_parallel, - attn_backend_override=attn_backend_override, ) for layer_idx in range(depth) ] @@ -701,9 +669,9 @@ def __init__( d_model=vision_config.out_hidden_size, context_dim=vision_config.intermediate_size, quant_config=quant_config, + multimodal_config=multimodal_config, bias=False, prefix=f"{prefix}.merger", - use_data_parallel=self.use_data_parallel, ) self.embeddings = Glm4vVisionEmbeddings(vision_config) @@ -723,7 +691,7 @@ def __init__( self.attn_backend = get_vit_attn_backend( head_size=head_dim, dtype=torch.get_default_dtype(), - attn_backend_override=attn_backend_override, + attn_backend_override=multimodal_config.mm_encoder_attn_backend, ) @property @@ -775,13 +743,13 @@ def rot_pos_emb( def compute_attn_mask_seqlen( self, cu_seqlens: torch.Tensor, - 
) -> int | None: + ) -> torch.Tensor | None: max_seqlen = None if ( self.attn_backend == AttentionBackendEnum.FLASH_ATTN or self.attn_backend == AttentionBackendEnum.ROCM_AITER_FA ): - max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() return max_seqlen def forward( @@ -1465,18 +1433,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.multimodal_config = multimodal_config self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data" - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual = Glm4vVisionTransformer( config.vision_config, norm_eps=getattr(config, "rms_norm_eps", 1e-5), quant_config=quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "visual"), - use_data_parallel=self.use_data_parallel, - attn_backend_override=attn_backend_override, ) if config.model_type == "glm4v": diff --git a/vllm/model_executor/models/keye.py b/vllm/model_executor/models/keye.py index f31da0ee302b..fcf88953ba20 100644 --- a/vllm/model_executor/models/keye.py +++ b/vllm/model_executor/models/keye.py @@ -9,7 +9,6 @@ import numpy as np import torch import torch.nn as nn -import torch.nn.functional as F from einops import rearrange from transformers import PretrainedConfig from transformers.activations import GELUActivation @@ -17,11 +16,10 @@ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from transformers.utils import torch_int -from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.attention.layer import ( - maybe_get_vit_flash_attn_backend, +from vllm.attention.layers.mm_encoder_attention import ( + MMEncoderAttention, ) -from vllm.config import VllmConfig +from vllm.config import MultiModalConfig, VllmConfig from vllm.config.multimodal import BaseDummyOptions from vllm.distributed import 
get_tensor_model_parallel_world_size from vllm.logger import init_logger @@ -32,6 +30,9 @@ RowParallelLinear, ) from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.rotary_embedding.common import ( + ApplyRotaryEmb, +) from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, maybe_remap_kv_scale_name, @@ -61,7 +62,6 @@ PromptUpdate, ) from vllm.multimodal.profiling import BaseDummyInputsBuilder -from vllm.platforms import current_platform from vllm.sequence import IntermediateTensors from vllm.utils.tensor_schema import TensorSchema, TensorShape @@ -80,7 +80,6 @@ is_pp_missing_parameter, maybe_prefix, ) -from .vision import get_vit_attn_backend logger = init_logger(__name__) @@ -344,20 +343,14 @@ def apply_rotary_pos_emb_flashatt( cos = cos.chunk(2, dim=-1)[0].contiguous() sin = sin.chunk(2, dim=-1)[0].contiguous() - if current_platform.is_cuda(): - from vllm.vllm_flash_attn.layers.rotary import apply_rotary_emb - elif current_platform.is_rocm(): - from flash_attn.ops.triton.rotary import apply_rotary as apply_rotary_emb - else: - # For other platforms, use PyTorch fallback - from vllm.model_executor.layers.rotary_embedding.common import ( - apply_rotary_emb_torch, - ) + apply_rotary_emb = ApplyRotaryEmb( + enforce_enable=True, + enable_fp32_compute=True, + ) - apply_rotary_emb = partial(apply_rotary_emb_torch, is_neox_style=True) + q_embed = apply_rotary_emb(q, cos, sin) + k_embed = apply_rotary_emb(k, cos, sin) - q_embed = apply_rotary_emb(q.float(), cos.float(), sin.float()).type_as(q) - k_embed = apply_rotary_emb(k.float(), cos.float(), sin.float()).type_as(k) return q_embed, k_embed @@ -369,8 +362,8 @@ def __init__( self, config: PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config @@ -408,34 +401,14 
@@ def __init__( prefix=f"{prefix}.out_proj", ) - # Detect attention implementation. - self.attn_backend = get_vit_attn_backend( + self.attn = MMEncoderAttention( + num_heads=self.num_heads, head_size=self.head_dim, - dtype=torch.get_default_dtype(), - attn_backend_override=attn_backend_override, - ) - - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) + num_kv_heads=self.num_kv_heads, + prefix=f"{prefix}.attn", + multimodal_config=multimodal_config, ) - if self.attn_backend not in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.TORCH_SDPA, - AttentionBackendEnum.ROCM_AITER_FA, - }: - raise RuntimeError( - f"Keye-VL does not support {self.attn_backend} backend now." - ) - - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } - def forward( self, hidden_states: torch.Tensor, @@ -450,8 +423,7 @@ def forward( dim=-1, ) - max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() - batch_size = q.shape[0] + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() if rope_emb is None: q = q.view(*q.shape[:-1], self.num_heads, self.head_dim) @@ -482,38 +454,14 @@ def forward( self.head_dim, ) - if self.is_flash_attn_backend: - q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]) - - output = self.flash_attn_varlen_func( - q, - k, - v, - cu_seqlens_q=cu_seqlens, - cu_seqlens_k=cu_seqlens, - max_seqlen_q=max_seqlen, - max_seqlen_k=max_seqlen, - causal=False, - softmax_scale=self.scale, - ) - context_layer = rearrange(output, "(b s) ... 
-> b s ...", b=batch_size) - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - outputs = [] - for i in range(1, len(cu_seqlens)): - start_idx = cu_seqlens[i - 1] - end_idx = cu_seqlens[i] - q_i = q[:, start_idx:end_idx] - k_i = k[:, start_idx:end_idx] - v_i = v[:, start_idx:end_idx] - q_i, k_i, v_i = ( - rearrange(x, "b s h d -> b h s d") for x in (q_i, k_i, v_i) - ) - output_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0) - output_i = rearrange(output_i, "b h s d -> b s h d ") - outputs.append(output_i) - context_layer = torch.cat(outputs, dim=1) if outputs else q[:, :0] - - context_layer = rearrange(context_layer, "b s h d -> b s (h d)").contiguous() + context_layer = self.attn( + query=q, + key=k, + value=v, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) + context_layer = rearrange(context_layer, "b s h d -> b s (h d)") output, _ = self.out_proj(context_layer) return output @@ -547,8 +495,8 @@ def __init__( self, config: PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.embed_dim = config.hidden_size @@ -556,8 +504,8 @@ def __init__( self.self_attn = KeyeSiglipAttention( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.self_attn", - attn_backend_override=attn_backend_override, ) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = SiglipMLP( @@ -601,8 +549,8 @@ def __init__( self, config: PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config @@ -614,8 +562,8 @@ def __init__( KeyeSiglipEncoderLayer( config, quant_config=quant_config, + multimodal_config=multimodal_config, 
prefix=f"{prefix}.layers.{layer_idx}", - attn_backend_override=attn_backend_override, ) for layer_idx in range(config.num_hidden_layers) ] @@ -696,8 +644,8 @@ def __init__( self, config: PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config @@ -707,8 +655,8 @@ def __init__( self.encoder = KeyeSiglipEncoder( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.encoder", - attn_backend_override=attn_backend_override, ) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @@ -779,16 +727,16 @@ def __init__( self, config: PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.vision_model = KeyeSiglipVisionTransformer( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.vision_model", - attn_backend_override=attn_backend_override, ) self.quant_config = quant_config @@ -1329,16 +1277,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.config = config self.multimodal_config = multimodal_config - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual = KeyeSiglipVisionModel( config.vision_config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "visual"), - attn_backend_override=attn_backend_override, ) self.mlp_AR = self._build_projector( diff --git a/vllm/model_executor/models/opencua.py b/vllm/model_executor/models/opencua.py index 23668cc2b746..35a6a78f653e 100644 --- a/vllm/model_executor/models/opencua.py +++ b/vllm/model_executor/models/opencua.py @@ -240,18 +240,12 @@ def 
__init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) if multimodal_config.get_limit_per_prompt("image"): - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual = OpenCUAVisionTransformer( vision_config=config.vision_config, norm_eps=getattr(config, "rms_norm_eps", 1e-6), quant_config=self.quant_config, + multimodal_config=self.multimodal_config, prefix=maybe_prefix(prefix, "visual"), - use_data_parallel=self.use_data_parallel, - attn_backend_override=attn_backend_override, ) else: self.visual = None diff --git a/vllm/model_executor/models/ovis2_5.py b/vllm/model_executor/models/ovis2_5.py index 0ad22aab748e..945138b5972f 100644 --- a/vllm/model_executor/models/ovis2_5.py +++ b/vllm/model_executor/models/ovis2_5.py @@ -10,8 +10,7 @@ import torch.nn as nn from transformers import BaseImageProcessor, BatchFeature, PretrainedConfig -from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.config import VllmConfig +from vllm.config import MultiModalConfig, VllmConfig from vllm.config.multimodal import BaseDummyOptions from vllm.model_executor.layers.linear import ReplicatedLinear from vllm.model_executor.layers.quantization import QuantizationConfig @@ -104,18 +103,16 @@ def __init__( config: PretrainedConfig, visual_vocab_size: int, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config self.vit = self._init_backbone( config=config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.vit", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) # reserved tokens for INDICATOR_IDS head_dim = visual_vocab_size - len(INDICATOR_IDS) @@ -133,18 +130,16 @@ def _init_backbone( self, config: 
PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: QuantizationConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ): model_type = config.model_type if model_type == "siglip2_navit": return Siglip2NavitModel( config=config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=prefix, - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) raise ValueError(f"Unsupported visual tokenizer model_type: {model_type}") @@ -468,17 +463,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): prefix=maybe_prefix(prefix, "llm"), ) - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual_tokenizer = VisualTokenizer( config=config.vit_config, visual_vocab_size=config.visual_vocab_size, + multimodal_config=multimodal_config, quant_config=quant_config, prefix=f"{prefix}.visual_tokenizer", - attn_backend_override=attn_backend_override, ) self.vte = VisualEmbedding(config.visual_vocab_size, config.hidden_size) diff --git a/vllm/model_executor/models/paddleocr_vl.py b/vllm/model_executor/models/paddleocr_vl.py index 9703a5b417d0..56565266c0dc 100644 --- a/vllm/model_executor/models/paddleocr_vl.py +++ b/vllm/model_executor/models/paddleocr_vl.py @@ -22,8 +22,7 @@ import numpy as np import torch import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange, repeat +from einops import rearrange from transformers import BatchFeature, PretrainedConfig from transformers.activations import GELUActivation from transformers.modeling_outputs import ( @@ -32,13 +31,10 @@ from transformers.utils import torch_int from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.attention.layer import ( - maybe_get_vit_flash_attn_backend, +from vllm.attention.layers.mm_encoder_attention import ( + 
MMEncoderAttention, ) -from vllm.attention.ops.vit_attn_wrappers import ( - vit_flash_attn_wrapper, -) -from vllm.config import VllmConfig +from vllm.config import MultiModalConfig, VllmConfig from vllm.config.multimodal import BaseDummyOptions from vllm.distributed import parallel_state from vllm.distributed import utils as dist_utils @@ -51,7 +47,7 @@ ) from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding.common import ( - dispatch_rotary_emb_function, + ApplyRotaryEmb, ) from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, @@ -134,47 +130,6 @@ def smart_resize( return h_bar, w_bar -def rotate_half(x: torch.Tensor, interleaved: bool = False) -> torch.Tensor: - if not interleaved: - x1, x2 = x.chunk(2, dim=-1) - return torch.cat((-x2, x1), dim=-1) - x1, x2 = x[..., ::2], x[..., 1::2] - return rearrange(torch.stack((-x2, x1), dim=-1), "... d two -> ... (d two)", two=2) - - -def apply_rotary_emb_torch( - x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, interleaved: bool = False -) -> torch.Tensor: - """ - x: (batch_size, seqlen, nheads, headdim) - cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2) - """ - ro_dim = cos.shape[-1] * 2 - assert ro_dim <= x.shape[-1] - cos = repeat( - cos, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)" - ) - sin = repeat( - sin, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 
1 (d 2)" - ) - return torch.cat( - [ - x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, - x[..., ro_dim:], - ], - dim=-1, - ) - - -def apply_rotary_pos_emb_vision(t: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor: - rotary_emb_function = dispatch_rotary_emb_function(default=apply_rotary_emb_torch) - t_ = t.float() - cos = freqs.cos() - sin = freqs.sin() - output = rotary_emb_function(t_, cos, sin).type_as(t) - return output - - class PaddleOCRVLProcessingInfo(BaseProcessingInfo): def get_hf_config(self): return self.ctx.get_hf_config() @@ -578,9 +533,8 @@ def __init__( num_heads: int, projection_size: int, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend: AttentionBackendEnum = AttentionBackendEnum.TORCH_SDPA, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() @@ -608,18 +562,16 @@ def __init__( quant_config=quant_config, prefix=f"{prefix}.out_proj", ) - - self.attn_backend = attn_backend - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) + self.attn = MMEncoderAttention( + num_heads=self.num_attention_heads_per_partition, + head_size=self.hidden_size_per_attention_head, + multimodal_config=multimodal_config, + prefix=f"{prefix}.attn", + ) + self.apply_rotary_emb = ApplyRotaryEmb( + enforce_enable=True, + enable_fp32_compute=True, ) - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]: seq_len, bs, _ = qkv.shape @@ -662,47 +614,23 @@ def forward( if rotary_pos_emb is not None: qk_concat = torch.cat([q, k], dim=0) - qk_rotated = apply_rotary_pos_emb_vision(qk_concat, rotary_pos_emb) + qk_rotated = self.apply_rotary_emb( + qk_concat, + rotary_pos_emb.cos(), + 
rotary_pos_emb.sin(), + ) q, k = torch.chunk(qk_rotated, 2, dim=0) - if self.is_flash_attn_backend: - if max_seqlen is None: - raise ValueError("Flash attention backend requires max_seqlen.") - context_layer = vit_flash_attn_wrapper( - q, - k, - v, - cu_seqlens, - max_seqlen, - batch_size, - self.attn_backend == AttentionBackendEnum.ROCM_AITER_FA, - ) - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - outputs = [] - for i in range(1, len(cu_seqlens)): - start_idx = cu_seqlens[i - 1] - end_idx = cu_seqlens[i] - q_i = q[:, start_idx:end_idx] - k_i = k[:, start_idx:end_idx] - v_i = v[:, start_idx:end_idx] - q_i, k_i, v_i = ( - rearrange(tensor, "b s h d -> b h s d") - for tensor in (q_i, k_i, v_i) - ) - output_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0) - output_i = rearrange(output_i, "b h s d -> b s h d") - outputs.append(output_i) - context_layer = torch.cat(outputs, dim=1) - context_layer = rearrange( - context_layer, "b s h d -> s b (h d)" - ).contiguous() - else: - raise RuntimeError( - f"PaddleOCR-VL does not support {self.attn_backend} backend now." 
- ) + context_layer = self.attn( + query=q, + key=k, + value=v, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) + context_layer = rearrange(context_layer, "b s h d -> b s (h d)") output, _ = self.out_proj(context_layer) - output = rearrange(output, "s b d -> b s d") return output @@ -774,10 +702,8 @@ def __init__( self, config: PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - *, - attn_backend: AttentionBackendEnum = AttentionBackendEnum.TORCH_SDPA, - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.embed_dim = config.hidden_size @@ -787,9 +713,8 @@ def __init__( num_heads=config.num_attention_heads, projection_size=config.hidden_size, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.self_attn", - attn_backend=attn_backend, - attn_backend_override=attn_backend_override, ) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = SiglipMLP( @@ -832,14 +757,18 @@ def __init__( self, config: PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config embed_dim = config.hidden_size num_heads = config.num_attention_heads head_dim = embed_dim // num_heads + + attn_backend_override = ( + multimodal_config.mm_encoder_attn_backend if multimodal_config else None + ) self.attn_backend = get_vit_attn_backend( head_size=head_dim, dtype=torch.get_default_dtype(), @@ -858,9 +787,8 @@ def __init__( SiglipEncoderLayer( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.layers.{layer_idx}", - attn_backend=self.attn_backend, - attn_backend_override=attn_backend_override, ) for layer_idx in range(config.num_hidden_layers) ] @@ -941,8 +869,8 @@ def __init__( self, config: 
PretrainedConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config @@ -952,8 +880,8 @@ def __init__( self.encoder = SiglipEncoder( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.encoder", - attn_backend_override=attn_backend_override, ) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @@ -991,16 +919,16 @@ def __init__( self, config, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.vision_model = SiglipVisionTransformer( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.vision_model", - attn_backend_override=attn_backend_override, ) self.quant_config = quant_config @@ -1119,17 +1047,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.config = config self.multimodal_config = multimodal_config - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) - self.visual = SiglipVisionModel( config=config.vision_config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "visual"), - attn_backend_override=attn_backend_override, ) self.mlp_AR = Projector(config, config.vision_config) diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 492ba2fb1214..61a6e67805d6 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -281,6 +281,9 @@ def __init__( self.transformer.make_empty_intermediate_tensors ) + def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.wte(input_ids) + def compute_logits( self, 
hidden_states: torch.Tensor, diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 3af4a49cd77c..f4c2d3cb75d2 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -122,6 +122,8 @@ def __init__( prefix: str = "", attn_type: str = AttentionType.DECODER, dual_chunk_attention_config: dict[str, Any] | None = None, + qk_norm: bool = False, + rms_norm_eps: float = 1e-6, ) -> None: super().__init__() self.hidden_size = hidden_size @@ -144,6 +146,7 @@ def __init__( self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim**-0.5 self.dual_chunk_attention_config = dual_chunk_attention_config + self.qk_norm = qk_norm self.qkv_proj = QKVParallelLinear( hidden_size, @@ -162,6 +165,11 @@ def __init__( prefix=f"{prefix}.o_proj", ) + # QK Normalization support (used in BAGEL and some other models) + if self.qk_norm: + self.q_norm = RMSNorm(self.head_dim, eps=rms_norm_eps) + self.k_norm = RMSNorm(self.head_dim, eps=rms_norm_eps) + self.rotary_emb = get_rope( self.head_dim, max_position=max_position, @@ -197,6 +205,23 @@ def forward( ) -> torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + + # Apply QK normalization if enabled (before RoPE) + if self.qk_norm: + # Reshape to apply per-head normalization + # q shape: (total_tokens, q_size) -> (total_tokens, num_heads, head_dim) + total_tokens = q.shape[0] + q = q.view(total_tokens, self.num_heads, self.head_dim) + k = k.view(total_tokens, self.num_kv_heads, self.head_dim) + + # Apply normalization + q = self.q_norm(q) + k = self.k_norm(k) + + # Reshape back + q = q.view(total_tokens, self.q_size) + k = k.view(total_tokens, self.kv_size) + q, k = self.rotary_emb(positions, q, k) attn_output = self.attn(q, k, v) output, _ = self.o_proj(attn_output) @@ -227,6 +252,9 @@ def __init__( else: attn_type = AttentionType.ENCODER_ONLY + # Check if QK normalization is enabled 
(used in BAGEL and some other models) + qk_norm = getattr(config, "qk_norm", False) + self.self_attn = Qwen2Attention( hidden_size=self.hidden_size, num_heads=config.num_attention_heads, @@ -238,6 +266,8 @@ def __init__( prefix=f"{prefix}.self_attn", attn_type=attn_type, dual_chunk_attention_config=dual_chunk_attention_config, + qk_norm=qk_norm, + rms_norm_eps=config.rms_norm_eps, ) self.mlp = Qwen2MLP( hidden_size=self.hidden_size, @@ -480,6 +510,8 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: continue if is_pp_missing_parameter(name, self): continue + if name not in params_dict: + continue param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py index 3438406c4fac..f9bce4bf981b 100644 --- a/vllm/model_executor/models/qwen2_5_omni_thinker.py +++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py @@ -845,6 +845,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): norm_eps=getattr(thinker_config.text_config, "rms_norm_eps", 1e-6), quant_config=quant_config, prefix=maybe_prefix(prefix, "visual"), + multimodal_config=multimodal_config, ) else: self.visual = None diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py index fba06e34f622..b730ac031589 100644 --- a/vllm/model_executor/models/qwen2_5_vl.py +++ b/vllm/model_executor/models/qwen2_5_vl.py @@ -42,13 +42,9 @@ ) from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.attention.layer import maybe_get_vit_flash_attn_backend -from vllm.attention.ops.vit_attn_wrappers import ( - vit_flash_attn_wrapper, - vit_torch_sdpa_wrapper, -) +from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention from vllm.compilation.decorators import support_torch_compile -from vllm.config import VllmConfig +from 
vllm.config import MultiModalConfig, VllmConfig from vllm.distributed import parallel_state from vllm.distributed import utils as dist_utils from vllm.forward_context import set_forward_context @@ -64,6 +60,9 @@ ) from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.rotary_embedding.common import ( + ApplyRotaryEmb, +) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.module_mapping import MultiModelKeys from vllm.model_executor.models.vision import should_torch_compile_mm_vit @@ -99,7 +98,6 @@ from .qwen2_vl import ( Qwen2VLMultiModalProcessor, Qwen2VLProcessingInfo, - apply_rotary_pos_emb_vision, ) from .utils import ( AutoWeightsLoader, @@ -267,10 +265,15 @@ def __init__( bias: bool = False, act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, ): super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.gate_up_proj = MergedColumnParallelLinear( input_size=in_features, output_sizes=[hidden_features] * 2, # [gate_proj, up_proj] @@ -304,13 +307,16 @@ def __init__( num_heads: int, projection_size: int, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend: AttentionBackendEnum = AttentionBackendEnum.TORCH_SDPA, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() # Per attention head and per partition values. 
+ use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.tp_size = ( 1 if use_data_parallel @@ -342,18 +348,14 @@ def __init__( prefix=f"{prefix}.proj", disable_tp=use_data_parallel, ) - self.attn_backend = attn_backend - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) + + self.attn = MMEncoderAttention( + num_heads=self.num_attention_heads_per_partition, + head_size=self.hidden_size_per_attention_head, + multimodal_config=multimodal_config, ) - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } + self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True) def forward( self, @@ -380,8 +382,10 @@ def forward( qk_reshaped = einops.rearrange( qk, "b s two head head_dim -> (two b) s head head_dim", two=2 ) - qk_rotated = apply_rotary_pos_emb_vision( - qk_reshaped, cos=rotary_pos_emb_cos, sin=rotary_pos_emb_sin + qk_rotated = self.apply_rotary_emb( + qk_reshaped, + rotary_pos_emb_cos, + rotary_pos_emb_sin, ) qk_rotated = qk_rotated.view( 2, @@ -394,32 +398,17 @@ def forward( else: q, k, v = qkv.unbind(dim=2) - if self.is_flash_attn_backend: - context_layer = vit_flash_attn_wrapper( - q, - k, - v, - cu_seqlens, - max_seqlen, - batch_size, - self.attn_backend == AttentionBackendEnum.ROCM_AITER_FA, - ) - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - # Execute attention entry by entry for speed & less VRAM. 
- from vllm.platforms import current_platform - - # Never remove the next contiguous logic - # Without it, hallucinations occur with the backend - if current_platform.is_rocm(): - q = q.contiguous() - k = k.contiguous() - v = v.contiguous() - context_layer = vit_torch_sdpa_wrapper( - q, - k, - v, - cu_seqlens, - ) + context_layer = self.attn( + query=q, + key=k, + value=v, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) + + context_layer = einops.rearrange( + context_layer, "b s h d -> s b (h d)", b=batch_size + ).contiguous() output, _ = self.proj(context_layer) return output @@ -443,10 +432,8 @@ def __init__( act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu, norm_layer: Callable[[int], nn.Module] | None = None, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend: AttentionBackendEnum = AttentionBackendEnum.TORCH_SDPA, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() if norm_layer is None: @@ -458,10 +445,8 @@ def __init__( num_heads=num_heads, projection_size=dim, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.attn", - use_data_parallel=use_data_parallel, - attn_backend=attn_backend, - attn_backend_override=attn_backend_override, ) self.mlp = Qwen2_5_VisionMLP( dim, @@ -469,8 +454,8 @@ def __init__( act_fn=act_fn, bias=True, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.mlp", - use_data_parallel=use_data_parallel, ) def forward( @@ -542,10 +527,15 @@ def __init__( norm_layer: Callable[[int], nn.Module] | None = None, spatial_merge_size: int = 2, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, ) -> None: super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config 
+ else False + ) self.hidden_size = context_dim * (spatial_merge_size**2) if norm_layer is None: norm_layer = partial(nn.LayerNorm, eps=1e-6) @@ -586,9 +576,8 @@ def __init__( vision_config: Qwen2_5_VLVisionConfig, norm_eps: float = 1e-6, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() @@ -598,7 +587,6 @@ def __init__( depth = vision_config.depth self.hidden_size = vision_config.hidden_size self.num_heads = vision_config.num_heads - self.use_data_parallel = use_data_parallel self.out_hidden_size = vision_config.out_hidden_size # args for get_window_index_thw @@ -612,7 +600,7 @@ def __init__( # DO NOT MOVE THIS IMPORT from vllm.compilation.backends import set_model_tag - with set_model_tag("Qwen2_5_VisionPatchEmbed"): + with set_model_tag("Qwen2_5_VisionPatchEmbed", is_encoder=True): self.patch_embed = Qwen2_5_VisionPatchEmbed( patch_size=patch_size, temporal_patch_size=temporal_patch_size, @@ -629,19 +617,17 @@ def __init__( rope_parameters={"partial_rotary_factor": 0.5}, ) + attn_backend_override = ( + multimodal_config.mm_encoder_attn_backend + if multimodal_config is not None + else None + ) self.attn_backend = get_vit_attn_backend( head_size=head_dim, dtype=torch.get_default_dtype(), attn_backend_override=attn_backend_override, ) - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) - ) - if self.attn_backend not in { AttentionBackendEnum.FLASH_ATTN, AttentionBackendEnum.TORCH_SDPA, @@ -651,7 +637,7 @@ def __init__( f"Qwen2.5-VL does not support {self.attn_backend} backend now." 
) - with set_model_tag("Qwen2_5_VisionBlock"): + with set_model_tag("Qwen2_5_VisionBlock", is_encoder=True): self.blocks = nn.ModuleList( [ Qwen2_5_VisionBlock( @@ -661,24 +647,22 @@ def __init__( act_fn=get_act_and_mul_fn(vision_config.hidden_act), norm_layer=norm_layer, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.blocks.{layer_idx}", - use_data_parallel=use_data_parallel, - attn_backend=self.attn_backend, - attn_backend_override=attn_backend_override, ) for layer_idx in range(depth) ] ) - with set_model_tag("Qwen2_5_VisionPatchMerger"): + with set_model_tag("Qwen2_5_VisionPatchMerger", is_encoder=True): self.merger = Qwen2_5_VisionPatchMerger( d_model=vision_config.out_hidden_size, context_dim=self.hidden_size, norm_layer=norm_layer, spatial_merge_size=self.spatial_merge_size, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.merger", - use_data_parallel=use_data_parallel, ) @property @@ -1200,18 +1184,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): if multimodal_config.get_limit_per_prompt( "image" ) or multimodal_config.get_limit_per_prompt("video"): - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual = Qwen2_5_VisionTransformer( vision_config=config.vision_config, norm_eps=getattr(config, "rms_norm_eps", 1e-6), quant_config=self.quant_config, prefix=maybe_prefix(prefix, "visual"), - use_data_parallel=self.use_data_parallel, - attn_backend_override=attn_backend_override, + multimodal_config=multimodal_config, ) else: self.visual = None diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 4e54208a59b6..321fbd764c0f 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -33,7 +33,6 @@ import numpy as np import torch import torch.nn as nn -import torch.nn.functional as F from einops import 
rearrange from transformers import BatchFeature from transformers.models.qwen2_vl import Qwen2VLImageProcessor, Qwen2VLProcessor @@ -45,12 +44,10 @@ from transformers.models.qwen2_vl.video_processing_qwen2_vl import Qwen2VLVideoProcessor from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.attention.layer import ( - maybe_get_vit_flash_attn_backend, -) -from vllm.config import VllmConfig +from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention +from vllm.config import MultiModalConfig, VllmConfig from vllm.config.multimodal import BaseDummyOptions -from vllm.distributed import parallel_state +from vllm.distributed import parallel_state, tensor_model_parallel_all_gather from vllm.distributed import utils as dist_utils from vllm.logger import init_logger from vllm.model_executor.layers.activation import QuickGELU @@ -62,8 +59,7 @@ from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope from vllm.model_executor.layers.rotary_embedding.common import ( - apply_rotary_emb_torch, - dispatch_rotary_emb_function, + ApplyRotaryEmb, ) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.module_mapping import MultiModelKeys @@ -251,10 +247,15 @@ def __init__( hidden_features: int, act_layer: type[nn.Module] = QuickGELU, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, ): super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.fc1 = ColumnParallelLinear( in_features, hidden_features, @@ -278,16 +279,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return x -def apply_rotary_pos_emb_vision( - t: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor -) -> torch.Tensor: - rotary_emb_function = 
dispatch_rotary_emb_function( - default=partial(apply_rotary_emb_torch, is_neox_style=True) - ) - output = rotary_emb_function(t, cos, sin).type_as(t) - return output - - class Qwen2VisionAttention(nn.Module): def __init__( self, @@ -295,12 +286,16 @@ def __init__( num_heads: int, projection_size: int, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() # Per attention head and per partition values. + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.tp_size = ( 1 if use_data_parallel @@ -329,41 +324,32 @@ def __init__( disable_tp=use_data_parallel, ) - # Detect attention implementation. - self.attn_backend = get_vit_attn_backend( + self.attn = MMEncoderAttention( + num_heads=self.num_attention_heads_per_partition, head_size=self.hidden_size_per_attention_head, - dtype=torch.get_default_dtype(), - attn_backend_override=attn_backend_override, - ) - - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) + multimodal_config=multimodal_config, ) - if self.attn_backend not in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.TORCH_SDPA, - AttentionBackendEnum.ROCM_AITER_FA, - }: - raise RuntimeError( - f"Qwen2-VL does not support {self.attn_backend} backend now." 
- ) - - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } + self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True) def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]: # [s, b, 3 * head * head_dim] seq_len, bs, _ = qkv.shape + if self.tp_size > 1: + qkv = tensor_model_parallel_all_gather(qkv) # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim] q, k, v = qkv.chunk(3, dim=2) + # 3 * [s, b, head * head_dim] + if self.tp_size > 1: + splitter = partial( + dist_utils.split_tensor_along_last_dim, num_partitions=self.tp_size + ) + q = splitter(q)[self.tp_rank] + k = splitter(k)[self.tp_rank] + v = splitter(v)[self.tp_rank] + # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim] new_shape = ( seq_len, @@ -387,60 +373,27 @@ def forward( # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim] q, k, v = self.split_qkv(x) - batch_size = q.shape[1] q, k, v = (rearrange(x, "s b ... -> b s ...") for x in (q, k, v)) # [2 * b, s, heads, head_dim] qk_concat = torch.cat([q, k], dim=0) - qk_rotated = apply_rotary_pos_emb_vision( - qk_concat, rotary_pos_emb_cos, rotary_pos_emb_sin + qk_rotated = self.apply_rotary_emb( + qk_concat, + rotary_pos_emb_cos, + rotary_pos_emb_sin, ) q, k = torch.chunk(qk_rotated, 2, dim=0) - if self.is_flash_attn_backend: - q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]) - - output = self.flash_attn_varlen_func( - q, - k, - v, - cu_seqlens_q=cu_seqlens, - cu_seqlens_k=cu_seqlens, - max_seqlen_q=max_seqlen, - max_seqlen_k=max_seqlen, - dropout_p=0.0, - causal=False, - ) + context_layer = self.attn( + query=q, + key=k, + value=v, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) - context_layer = rearrange( - output, "(b s) h d -> s b (h d)", b=batch_size - ).contiguous() - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - # Execute attention entry by entry for speed & less VRAM. 
- from vllm.platforms import current_platform - - if current_platform.is_rocm(): - q = q.contiguous() - k = k.contiguous() - v = v.contiguous() - outputs = [] - - lens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist() - q_chunks = torch.split(q, lens, dim=1) - k_chunks = torch.split(k, lens, dim=1) - v_chunks = torch.split(v, lens, dim=1) - for q_i, k_i, v_i in zip(q_chunks, k_chunks, v_chunks): - q_i, k_i, v_i = ( - rearrange(x, "b s h d -> b h s d") for x in [q_i, k_i, v_i] - ) - output_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0) - output_i = rearrange(output_i, "b h s d -> b s h d ") - outputs.append(output_i) - context_layer = torch.cat(outputs, dim=1) - context_layer = rearrange( - context_layer, "b s h d -> s b (h d)" - ).contiguous() + context_layer = rearrange(context_layer, "b s h d -> s b (h d)").contiguous() output, _ = self.proj(context_layer) return output @@ -455,9 +408,8 @@ def __init__( act_layer: type[nn.Module] = QuickGELU, norm_layer: Callable[[int], nn.Module] | None = None, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() if norm_layer is None: @@ -471,17 +423,16 @@ def __init__( num_heads=num_heads, projection_size=dim, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.attn", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) self.mlp = Qwen2VisionMLP( dim, mlp_hidden_dim, act_layer=act_layer, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.mlp", - use_data_parallel=use_data_parallel, ) def forward( @@ -541,10 +492,15 @@ def __init__( norm_layer: Callable[[int], nn.Module] | None = None, spatial_merge_size: int = 2, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = 
"", - use_data_parallel: bool = False, ) -> None: super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.hidden_size = context_dim * (spatial_merge_size**2) if norm_layer is None: norm_layer = partial(nn.LayerNorm, eps=1e-6) @@ -588,9 +544,8 @@ def __init__( vision_config: Qwen2VLVisionConfig, norm_eps: float = 1e-6, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() @@ -604,7 +559,11 @@ def __init__( num_heads = vision_config.num_heads mlp_ratio = vision_config.mlp_ratio - self.use_data_parallel = use_data_parallel + self.use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.out_hidden_size = vision_config.hidden_size self.spatial_merge_size = spatial_merge_size @@ -636,8 +595,7 @@ def __init__( norm_layer=norm_layer, quant_config=quant_config, prefix=f"{prefix}.blocks.{layer_idx}", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, + multimodal_config=multimodal_config, ) for layer_idx in range(depth) ] @@ -648,7 +606,10 @@ def __init__( norm_layer=norm_layer, quant_config=quant_config, prefix=f"{prefix}.merger", - use_data_parallel=use_data_parallel, + multimodal_config=multimodal_config, + ) + attn_backend_override = ( + multimodal_config.mm_encoder_attn_backend if multimodal_config else None ) self.attn_backend = get_vit_attn_backend( head_size=head_dim, @@ -709,7 +670,7 @@ def compute_attn_mask_seqlen(self, cu_seqlens: torch.Tensor) -> int | None: AttentionBackendEnum.FLASH_ATTN, AttentionBackendEnum.ROCM_AITER_FA, }: - max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() return max_seqlen def forward( @@ -1313,18 +1274,12 @@ def 
__init__(self, *, vllm_config: VllmConfig, prefix: str = ""): if multimodal_config.get_limit_per_prompt( "image" ) or multimodal_config.get_limit_per_prompt("video"): - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual = Qwen2VisionTransformer( config.vision_config, norm_eps=getattr(config, "rms_norm_eps", 1e-6), quant_config=quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "visual"), - use_data_parallel=self.use_data_parallel, - attn_backend_override=attn_backend_override, ) else: self.visual = None diff --git a/vllm/model_executor/models/qwen3_omni_moe_thinker.py b/vllm/model_executor/models/qwen3_omni_moe_thinker.py index 635c3bfdc65c..089129e443c0 100755 --- a/vllm/model_executor/models/qwen3_omni_moe_thinker.py +++ b/vllm/model_executor/models/qwen3_omni_moe_thinker.py @@ -48,7 +48,7 @@ from vllm.attention.backends.registry import AttentionBackendEnum from vllm.compilation.decorators import support_torch_compile -from vllm.config import VllmConfig +from vllm.config import MultiModalConfig, VllmConfig from vllm.distributed import get_pp_group from vllm.logger import init_logger from vllm.model_executor.layers.activation import _ACTIVATION_REGISTRY @@ -192,6 +192,7 @@ def __init__( mlp_hidden_dim: int, act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu, norm_layer: Callable[[int], nn.Module] | None = None, + multimodal_config: MultiModalConfig | None = None, quant_config: QuantizationConfig | None = None, prefix: str = "", ) -> None: @@ -205,6 +206,7 @@ def __init__( num_heads=num_heads, projection_size=dim, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.attn", ) self.mlp = Qwen3_VisionMLP( @@ -299,8 +301,8 @@ def __init__( vision_config, norm_eps: float = 1e-6, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - attn_backend_override: 
AttentionBackendEnum | None = None, ) -> None: super().__init__() self.hidden_size = vision_config.hidden_size @@ -347,6 +349,7 @@ def __init__( act_fn=_ACTIVATION_REGISTRY[vision_config.hidden_act], norm_layer=norm_layer, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.blocks.{layer_idx}", ) for layer_idx in range(vision_config.depth) @@ -376,6 +379,12 @@ def __init__( ] ) + attn_backend_override = ( + multimodal_config.mm_encoder_attn_backend + if multimodal_config is not None + else None + ) + self.attn_backend = get_vit_attn_backend( head_size=head_dim, dtype=torch.get_default_dtype(), @@ -1188,17 +1197,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.audio_tower = Qwen3OmniMoeAudioEncoder(thinker_config.audio_config) - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual = Qwen3Omni_VisionTransformer( vision_config=thinker_config.vision_config, norm_eps=getattr(thinker_config.text_config, "rms_norm_eps", 1e-6), quant_config=quant_config, prefix=maybe_prefix(prefix, "visual"), - attn_backend_override=attn_backend_override, + multimodal_config=multimodal_config, ) self.quant_config = quant_config diff --git a/vllm/model_executor/models/qwen3_vl.py b/vllm/model_executor/models/qwen3_vl.py index fcd58c4d33cd..c0589986d1fe 100644 --- a/vllm/model_executor/models/qwen3_vl.py +++ b/vllm/model_executor/models/qwen3_vl.py @@ -50,7 +50,7 @@ from vllm.attention.backends.registry import AttentionBackendEnum from vllm.compilation.decorators import support_torch_compile -from vllm.config import VllmConfig +from vllm.config import MultiModalConfig, VllmConfig from vllm.config.multimodal import BaseDummyOptions, VideoDummyOptions from vllm.distributed import get_pp_group from vllm.logger import init_logger @@ -67,12 +67,19 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from 
vllm.model_executor.models.module_mapping import MultiModelKeys from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.evs import ( + compute_mrope_for_media, + compute_retained_tokens_count, + compute_retention_mask, + recompute_mrope_positions, +) from vllm.multimodal.inputs import ( MultiModalDataDict, MultiModalFeatureSpec, MultiModalFieldConfig, MultiModalKwargsItem, MultiModalKwargsItems, + PlaceholderRange, VideoItem, ) from vllm.multimodal.parse import ImageSize, MultiModalDataItems, MultiModalDataParser @@ -92,6 +99,7 @@ SupportsLoRA, SupportsMRoPE, SupportsMultiModal, + SupportsMultiModalPruning, SupportsPP, _require_is_multimodal, ) @@ -161,10 +169,15 @@ def __init__( bias: bool = False, act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, ): super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.linear_fc1 = ColumnParallelLinear( in_features, hidden_features, @@ -198,10 +211,9 @@ def __init__( mlp_hidden_dim: int, act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu, norm_layer: Callable[[int], nn.Module] | None = None, + multimodal_config: MultiModalConfig | None = None, quant_config: QuantizationConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend: AttentionBackendEnum = AttentionBackendEnum.TORCH_SDPA, ) -> None: super().__init__() if norm_layer is None: @@ -213,9 +225,8 @@ def __init__( num_heads=num_heads, projection_size=dim, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.attn", - use_data_parallel=use_data_parallel, - attn_backend=attn_backend, ) self.mlp = Qwen3_VisionMLP( dim, @@ -223,8 +234,8 @@ def __init__( act_fn=act_fn, bias=True, quant_config=quant_config, + multimodal_config=multimodal_config, 
prefix=f"{prefix}.mlp", - use_data_parallel=use_data_parallel, ) def forward( @@ -256,10 +267,15 @@ def __init__( spatial_merge_size: int = 2, use_postshuffle_norm: bool = False, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, ) -> None: super().__init__() + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.hidden_size = context_dim * (spatial_merge_size**2) self.use_postshuffle_norm = use_postshuffle_norm @@ -305,9 +321,8 @@ def __init__( vision_config: Qwen3VLVisionConfig, norm_eps: float = 1e-6, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ) -> None: super().__init__() self.hidden_size = vision_config.hidden_size @@ -318,7 +333,6 @@ def __init__( self.spatial_merge_unit = self.spatial_merge_size**2 self.temporal_patch_size = vision_config.temporal_patch_size self.deepstack_visual_indexes = vision_config.deepstack_visual_indexes - self.use_data_parallel = use_data_parallel self.num_grid_per_side = int(self.num_position_embeddings**0.5) # NOTE: This is used for creating empty tensor for all_gather for @@ -351,8 +365,8 @@ def __init__( norm_layer=norm_layer, spatial_merge_size=self.spatial_merge_size, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.merger", - use_data_parallel=use_data_parallel, ) self.deepstack_merger_list = nn.ModuleList( @@ -364,13 +378,16 @@ def __init__( use_postshuffle_norm=True, norm_layer=norm_layer, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.deepstack_merger_list.{layer_idx}", - use_data_parallel=use_data_parallel, ) for layer_idx in range(len(self.deepstack_visual_indexes)) ] ) + attn_backend_override = ( + 
multimodal_config.mm_encoder_attn_backend if multimodal_config else None + ) self.attn_backend = get_vit_attn_backend( head_size=head_dim, dtype=torch.get_default_dtype(), @@ -394,9 +411,8 @@ def __init__( act_fn=_ACTIVATION_REGISTRY[vision_config.hidden_act], norm_layer=norm_layer, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.blocks.{layer_idx}", - use_data_parallel=use_data_parallel, - attn_backend=self.attn_backend, ) for layer_idx in range(vision_config.depth) ] @@ -1043,13 +1059,39 @@ def get_video_replacement_qwen3vl(item_idx: int): tokenizer.encode(f"<{curr_time:.1f} seconds>", add_special_tokens=False) for curr_time in timestamps ] - num_tokens_per_frame = int(grid_thw[1:].prod()) // merge_length + tokens_per_frame = int(grid_thw[1:].prod()) // merge_length + per_frame_token_counts = [tokens_per_frame for _ in frames_idx_token] + + video_pruning_rate = self.info.ctx.get_mm_config().video_pruning_rate + if video_pruning_rate is not None and video_pruning_rate > 0.0: + total_retained = compute_retained_tokens_count( + tokens_per_frame, + len(frames_idx_token), + video_pruning_rate, + ) + if len(frames_idx_token) == 0: + per_frame_token_counts = [] + elif len(frames_idx_token) == 1: + per_frame_token_counts = [tokens_per_frame] + else: + first_frame_tokens = tokens_per_frame + remaining_tokens = max(total_retained - first_frame_tokens, 0) + base = remaining_tokens // (len(frames_idx_token) - 1) + remainder = remaining_tokens % (len(frames_idx_token) - 1) + per_frame_token_counts = [first_frame_tokens] + for frame_idx in range(1, len(frames_idx_token)): + extra = base + (1 if (frame_idx - 1) < remainder else 0) + per_frame_token_counts.append(extra) + placeholder = [] - for frame_idx in frames_idx_token: - placeholder.extend(frame_idx) + for frame_idx, timestamp_tokens in enumerate(frames_idx_token): + placeholder.extend(timestamp_tokens) + tokens_this_frame = per_frame_token_counts[ + frame_idx if frame_idx < 
len(per_frame_token_counts) else -1 + ] placeholder.extend( [vision_start_token_id] - + [video_token_id] * num_tokens_per_frame + + [video_token_id] * tokens_this_frame + [vision_end_token_id] ) return PromptUpdateDetails.select_token_id(placeholder, video_token_id) @@ -1190,6 +1232,7 @@ class Qwen3VLForConditionalGeneration( SupportsPP, SupportsMRoPE, SupportsEagle3, + SupportsMultiModalPruning, ): packed_modules_mapping = { "qkv_proj": [ @@ -1232,23 +1275,22 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"): self.config = config self.multimodal_config = multimodal_config self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data" + self.video_pruning_rate = multimodal_config.video_pruning_rate + self.is_multimodal_pruning_enabled = ( + multimodal_config.is_multimodal_pruning_enabled() + ) + if not multimodal_config.get_limit_per_prompt( "image" ) and not multimodal_config.get_limit_per_prompt("video"): self.visual = None else: - attn_backend_override = ( - multimodal_config.mm_encoder_attn_backend - if multimodal_config is not None - else None - ) self.visual = Qwen3_VisionTransformer( config.vision_config, norm_eps=getattr(config, "rms_norm_eps", 1e-6), quant_config=quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "visual"), - use_data_parallel=self.use_data_parallel, - attn_backend_override=attn_backend_override, ) self.language_model = Qwen3LLMForCausalLM( @@ -1418,6 +1460,109 @@ def _process_video_input( sizes = (grid_thw.prod(-1) // merge_size // merge_size).tolist() return video_embeds.split(sizes) + def _postprocess_image_embeds_evs( + self, + image_embeds_split: tuple[torch.Tensor, ...], + image_input: Qwen2_5_VLImageInputs, + ) -> tuple[torch.Tensor, ...]: + """ + Append mrope positions for each for images. + This is necessary to recover correct mrope + positions after video pruning + + Args: + image_embeds_split: Tuple of image embeddings for + each image item. 
+ image_input: Image input data. + + Returns: + Tuple of image embeddings for each image item. + Resulting embeddings will have extra 4 channels for + computed mrope positions. + """ + merge_size = self.visual.spatial_merge_size + grid_thw = image_input["image_grid_thw"] + grid_thw_list = grid_thw.tolist() + image_embeds_out = [] + for emb, size in zip(image_embeds_split, grid_thw_list): + positions = compute_mrope_for_media(size, merge_size).to(emb.device) + emb = torch.cat([emb, positions], dim=1) + image_embeds_out.append(emb) + image_embeds_split = image_embeds_out + return tuple(image_embeds_split) + + def _postprocess_video_embeds_evs( + self, + video_embeds_split: tuple[torch.Tensor, ...], + video_input: Qwen2_5_VLVideoInputs, + ) -> tuple[torch.Tensor, ...]: + """ + Prunes video embeddings via Efficient Video Sampling (EVS) + and then appends mrope positions for each retained embeddings + + Args: + video_embeds_split: Tuple of video embeddings for each video item. + video_input: Video input data. + + Returns: + Tuple of video embeddings for each video item. + Resulting embeddings will have extra 4 channels for + computed mrope positions. 
+ """ + grid_thw = video_input["video_grid_thw"] + assert grid_thw.ndim == 2 + grid_thw_list = grid_thw.tolist() + merge_size = self.visual.spatial_merge_size + + # Cast to long to match the original code + # https://github.com/huggingface/transformers/blob/41980ce93e775f6c88500c51c8db7946fc6a2add/src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py#L491 # noqa + second_per_grid_ts = video_input.get("second_per_grid_ts") + if second_per_grid_ts is None: + # For Qwen3-VL, second_per_grid_ts might not be available + # Use default value of 1.0 for each video + second_per_grid_ts = torch.ones(len(grid_thw_list), dtype=torch.long) + else: + second_per_grid_ts = second_per_grid_ts.long() + tokens_per_second = getattr(self.config.vision_config, "tokens_per_second", 1.0) + + video_embeds_out = [] + for emb, size, video_second_per_grid_t in zip( + video_embeds_split, grid_thw_list, second_per_grid_ts + ): + # For each video, we compute retention mask using EVS + retention_mask = compute_retention_mask( + emb, + size, + spatial_merge_size=self.visual.spatial_merge_size, + q=self.video_pruning_rate, + ) + + # Debug logging for EVS pruning + logger.debug( + "EVS: Video tokens pruned from %d to %d (T=%d,H=%d,W=%d, " + "pruning_rate=%.2f, reduction=%.1f%%)", + emb.shape[0], + retention_mask.sum().item(), + size[0], + size[1], + size[2], + self.video_pruning_rate, + (1 - retention_mask.float().mean().item()) * 100, + ) + + positions = compute_mrope_for_media( + size, + merge_size, + tokens_per_second=tokens_per_second, + video_second_per_grid=video_second_per_grid_t.item(), + ).to(emb.device) + + emb = emb[retention_mask] + positions = positions[retention_mask] + emb = torch.cat([emb, positions], dim=1) + video_embeds_out.append(emb) + return tuple(video_embeds_out) + def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict: mm_input_by_modality = {} for input_key in kwargs: @@ -1440,6 +1585,20 @@ def _parse_and_validate_multimodal_inputs(self, **kwargs: 
object) -> dict: def iter_mm_grid_hw( self, input_tokens: list[int], mm_features: list[MultiModalFeatureSpec] ) -> Iterator[tuple[int, int, int]]: + """ + Iterate over multimodal features and yield grid information. + + For videos with EVS (Efficient Video Sampling) enabled, this function + computes the offset based on the pruned token count rather than relying + on input_tokens.index(), which would fail when tokens are pruned. + + Args: + input_tokens: List of token IDs in the prompt + mm_features: List of multimodal feature specifications + + Yields: + Tuple of (offset, grid_h, grid_w) for each frame/image + """ video_token_id = self.config.video_token_id spatial_merge_size = self.config.vision_config.spatial_merge_size for mm_feature in sorted(mm_features, key=lambda f: f.mm_position.offset): @@ -1452,42 +1611,289 @@ def iter_mm_grid_hw( t, h, w = mm_feature.data["video_grid_thw"].data.tolist() llm_grid_h = h // spatial_merge_size llm_grid_w = w // spatial_merge_size - for _ in range(t): - offset = input_tokens.index(video_token_id, offset) - yield offset, llm_grid_h, llm_grid_w - offset += llm_grid_h * llm_grid_w + + # Check if EVS (Efficient Video Sampling) is enabled + is_evs_enabled = ( + hasattr(self, "video_pruning_rate") + and self.video_pruning_rate is not None + and self.video_pruning_rate > 0.0 + ) + + if is_evs_enabled: + frame_offsets = self._extract_frame_offsets_from_mask( + mm_feature.mm_position, t + ) + if frame_offsets is not None: + for rel_offset in frame_offsets: + yield offset + rel_offset, llm_grid_h, llm_grid_w + continue + + # If EVS is enabled but mask is missing, this indicates a bug + # in the prompt processing pipeline. The is_embed mask should + # always be present when video_pruning_rate > 0. + raise RuntimeError( + f"EVS is enabled (pruning_rate={self.video_pruning_rate}) " + "but is_embed mask is missing from mm_position. " + "This indicates a bug in prompt processing." 
+ ) + else: + # Non-EVS mode: Use original logic with input_tokens.index() + for _ in range(t): + offset = input_tokens.index(video_token_id, offset) + yield offset, llm_grid_h, llm_grid_w + offset += llm_grid_h * llm_grid_w else: raise ValueError(f"Unsupported modality: {mm_feature.modality}") + def _get_evs_mask_segments( + self, mm_position: PlaceholderRange, expected_frames: int + ) -> list[torch.Tensor] | None: + """Extract contiguous segments from EVS is_embed mask. + + The EVS (Efficient Video Sampling) mask marks which placeholder + positions should be filled with video embeddings. This method splits + the mask into contiguous segments, where each segment represents one + retained frame. + + This is a pure function - it does not modify any state and always + returns the same output for the same input (idempotent). + + Args: + mm_position: MultiModal position containing the is_embed mask + expected_frames: Expected number of frame segments + + Returns: + List of tensors, each containing indices for one frame segment, + or None if EVS is not enabled or validation fails. 
+ """ + is_embed_mask = getattr(mm_position, "is_embed", None) + if is_embed_mask is None: + return None + + # Find all True positions in the mask + mask_tensor = torch.as_tensor(is_embed_mask, dtype=torch.bool).view(-1) + true_indices = torch.nonzero(mask_tensor, as_tuple=False).flatten() + if true_indices.numel() == 0: + return None + + # Split into contiguous segments (where diff > 1 indicates a gap) + if true_indices.numel() == 1: + segments = [true_indices] + else: + diffs = torch.diff(true_indices) + split_points = torch.nonzero(diffs != 1, as_tuple=False).flatten() + if split_points.numel() == 0: + segments = [true_indices] + else: + segments = torch.tensor_split( + true_indices, split_points.add(1).tolist() + ) + + # Validate segment count matches expected frames + if len(segments) < expected_frames: + logger.debug( + "EVS mask segments (%d) do not match expected frames (%d)", + len(segments), + expected_frames, + ) + return None + + return segments[:expected_frames] + + def _extract_frame_offsets_from_mask( + self, mm_position: PlaceholderRange, expected_frames: int + ) -> list[int] | None: + """Return relative offsets for each EVS-retained frame. + + The prompt processor stores a boolean mask inside ``mm_position`` that + marks which placeholder locations should be populated with video + embeddings. By splitting that mask into contiguous runs we can recover + the start of every retained frame without probing ``input_tokens``. + + Args: + mm_position: MultiModal position containing the is_embed mask + expected_frames: Expected number of frames + + Returns: + List of starting offsets (relative to mm_position) for each frame, + or None if EVS is not enabled. 
+ """ + segments = self._get_evs_mask_segments(mm_position, expected_frames) + if segments is None: + return None + + return [int(segment[0].item()) for segment in segments] + + def _get_actual_frame_token_counts( + self, mm_position: PlaceholderRange, expected_frames: int + ) -> list[int] | None: + """Return actual token count for each EVS-retained frame. + + This function calculates the actual number of tokens per frame by + analyzing the is_embed mask, accounting for EVS pruning. Each frame + may have a different token count due to content-aware pruning. + + Args: + mm_position: MultiModal position containing the is_embed mask + expected_frames: Expected number of frames + + Returns: + List of token counts for each frame, or None if EVS is not enabled. + """ + segments = self._get_evs_mask_segments(mm_position, expected_frames) + if segments is None: + return None + + return [len(seg) for seg in segments] + + def recompute_mrope_positions( + self, + input_ids: list[int], + multimodal_embeddings: tuple[torch.Tensor, ...], + mrope_positions: torch.LongTensor, + num_computed_tokens: int, + ) -> tuple[tuple[torch.Tensor, ...], torch.Tensor, int]: + """ + Update part of input mrope positions (starting with + num_computed_tokens index). Original mrope_positions are computed + for unpruned sequence and becomes incorrect once pruning occurs, + so once we prune media tokens we should reflect this in the + mrope_positions before we feed it to LLM. + + Args: + input_ids: (N,) All input tokens of the prompt (Containing + entire sequence). + multimodal_embeddings: Tuple of multimodal embeddings. + mrope_positions: Existing mrope positions (3, N) for entire + sequence + num_computed_tokens: A number of computed tokens so far. + + Returns: + Tuple of (multimodal_embeddings, mrope_positions, + mrope_position_delta). 
+ """ + image_token_id = self.config.image_token_id + video_token_id = self.config.video_token_id + vision_start_token_id = self.config.vision_start_token_id + + # Device + device = ( + multimodal_embeddings[0].device + if len(multimodal_embeddings) + else mrope_positions.device + ) + + # Tensors + input_ids_t = torch.as_tensor(input_ids, device=device, dtype=torch.long) + + mm_embeddings_out = [mm[:, :-4] for mm in multimodal_embeddings] + mm_embeddings_pos = [ + mm[:, -4:].permute(1, 0).long() for mm in multimodal_embeddings + ] + + positions, mrope_positions_delta = recompute_mrope_positions( + input_ids_t, + mm_embeddings_pos, + mrope_positions, + num_computed_tokens, + vision_start_token_id, + image_token_id, + video_token_id, + ) + + return tuple(mm_embeddings_out), positions, mrope_positions_delta + def get_mrope_input_positions( self, input_tokens: list[int], mm_features: list[MultiModalFeatureSpec], ) -> tuple[torch.Tensor, int]: + # Pre-collect actual frame token counts for EVS mode + frame_token_counts_map = {} + for mm_feature in mm_features: + if mm_feature.modality == "video": + is_evs_enabled = ( + hasattr(self, "video_pruning_rate") + and self.video_pruning_rate is not None + and self.video_pruning_rate > 0.0 + ) + if is_evs_enabled: + t = mm_feature.data["video_grid_thw"].data.tolist()[0] + token_counts = self._get_actual_frame_token_counts( + mm_feature.mm_position, t + ) + assert token_counts is not None, ( + "EVS enabled but failed to extract frame token counts " + "from is_embed mask" + ) + frame_token_counts_map[mm_feature.mm_position.offset] = token_counts + llm_pos_ids_list = [] st = 0 + frame_counts_idx = {} + for offset, llm_grid_h, llm_grid_w in self.iter_mm_grid_hw( input_tokens, mm_features ): text_len = offset - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 - llm_pos_ids_list.append( + + # Determine actual token count for this frame + base_offset = None + for feat_offset in frame_token_counts_map: + if 
offset >= feat_offset: + base_offset = feat_offset + + if base_offset is not None: + # EVS mode: use actual token count from is_embed mask + assert base_offset in frame_token_counts_map, ( + f"Found base_offset {base_offset} but not in frame_token_counts_map" + ) + + if base_offset not in frame_counts_idx: + frame_counts_idx[base_offset] = 0 + + counts = frame_token_counts_map[base_offset] + idx = frame_counts_idx[base_offset] + + assert idx < len(counts), ( + f"EVS frame index {idx} out of range (total frames: {len(counts)})" + ) + + actual_frame_tokens = counts[idx] + frame_counts_idx[base_offset] += 1 + else: + # Non-EVS mode (or image): use theoretical grid size + actual_frame_tokens = llm_grid_h * llm_grid_w + + # Add text segment + text_positions = ( np.broadcast_to(np.arange(text_len), (3, text_len)) + st_idx ) + llm_pos_ids_list.append(text_positions) + st_idx += text_len + # Add frame segment with actual token count (not theoretical) grid_indices = np.indices((1, llm_grid_h, llm_grid_w)).reshape(3, -1) - llm_pos_ids_list.append(grid_indices + text_len + st_idx) - st = offset + llm_grid_h * llm_grid_w + # Only take the first actual_frame_tokens positions + frame_positions = grid_indices[:, :actual_frame_tokens] + st_idx + llm_pos_ids_list.append(frame_positions) + + # Update st using actual token count + st = offset + actual_frame_tokens + # Handle final text segment if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st - llm_pos_ids_list.append( + final_text_positions = ( np.broadcast_to(np.arange(text_len), (3, text_len)) + st_idx ) + llm_pos_ids_list.append(final_text_positions) llm_positions = np.concatenate(llm_pos_ids_list, axis=1).reshape(3, -1) mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item() + return torch.from_numpy(llm_positions), mrope_position_delta def get_language_model(self) -> torch.nn.Module: @@ -1508,9 +1914,17 @@ def 
embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None: multimodal_input = mm_input_by_modality[modality] if modality == "image": image_embeddings = self._process_image_input(multimodal_input) + if self.is_multimodal_pruning_enabled: + image_embeddings = self._postprocess_image_embeds_evs( + image_embeddings, multimodal_input + ) multimodal_embeddings += tuple(image_embeddings) if modality == "video": video_embeddings = self._process_video_input(multimodal_input) + if self.is_multimodal_pruning_enabled: + video_embeddings = self._postprocess_video_embeds_evs( + video_embeddings, multimodal_input + ) multimodal_embeddings += tuple(video_embeddings) return multimodal_embeddings diff --git a/vllm/model_executor/models/qwen3_vl_moe.py b/vllm/model_executor/models/qwen3_vl_moe.py index a054bd5b3831..3186804488e5 100644 --- a/vllm/model_executor/models/qwen3_vl_moe.py +++ b/vllm/model_executor/models/qwen3_vl_moe.py @@ -419,6 +419,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.config = config self.multimodal_config = multimodal_config self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data" + self.video_pruning_rate = multimodal_config.video_pruning_rate + self.is_multimodal_pruning_enabled = ( + multimodal_config.is_multimodal_pruning_enabled() + ) if not multimodal_config.get_limit_per_prompt( "image" @@ -429,8 +433,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.vision_config, norm_eps=getattr(config, "rms_norm_eps", 1e-6), quant_config=quant_config, + multimodal_config=multimodal_config, prefix=maybe_prefix(prefix, "visual"), - use_data_parallel=self.use_data_parallel, ) self.language_model = Qwen3MoeLLMForCausalLM( diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index a4a964bc7c1a..4575e91e1395 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -264,10 +264,15 @@ _MULTIMODAL_MODELS = 
{ # [Decoder-only] "AriaForConditionalGeneration": ("aria", "AriaForConditionalGeneration"), + "AudioFlamingo3ForConditionalGeneration": ( + "audioflamingo3", + "AudioFlamingo3ForConditionalGeneration", + ), "AyaVisionForConditionalGeneration": ( "aya_vision", "AyaVisionForConditionalGeneration", ), + "BagelForConditionalGeneration": ("bagel", "BagelForConditionalGeneration"), "BeeForConditionalGeneration": ("bee", "BeeForConditionalGeneration"), "Blip2ForConditionalGeneration": ("blip2", "Blip2ForConditionalGeneration"), "ChameleonForConditionalGeneration": ( diff --git a/vllm/model_executor/models/siglip2navit.py b/vllm/model_executor/models/siglip2navit.py index bbce01995412..efdee255ab5e 100644 --- a/vllm/model_executor/models/siglip2navit.py +++ b/vllm/model_executor/models/siglip2navit.py @@ -6,14 +6,14 @@ from collections.abc import Iterable import torch -from einops import rearrange, repeat from torch import nn from torch.nn import functional as F from transformers import Siglip2VisionConfig from transformers.configuration_utils import PretrainedConfig from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.attention.layer import maybe_get_vit_flash_attn_backend +from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention +from vllm.config import MultiModalConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.model_executor.layers.activation import get_act_fn from vllm.model_executor.layers.conv import Conv2dLayer @@ -25,11 +25,12 @@ RowParallelLinear, ) from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.rotary_embedding.common import ( + ApplyRotaryEmb, +) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.platforms import current_platform -from .vision import get_vit_attn_backend - class VisionRotaryEmbedding(nn.Module): def __init__(self, dim: int, theta: float = 10000.0) -> None: @@ -147,40 
+148,6 @@ def forward( return patch_embeds -# copy from flash_attn/layers/rotary.py -def rotate_half(x, interleaved=False): - if not interleaved: - x1, x2 = x.chunk(2, dim=-1) - return torch.cat((-x2, x1), dim=-1) - else: - x1, x2 = x[..., ::2], x[..., 1::2] - return rearrange( - torch.stack((-x2, x1), dim=-1), "... d two -> ... (d two)", two=2 - ) - - -def apply_rotary_emb_torch(x, cos, sin, interleaved=False): - """ - x: (batch_size, seqlen, nheads, headdim) - cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2) - """ - ro_dim = cos.shape[-1] * 2 - assert ro_dim <= x.shape[-1] - cos = repeat( - cos, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)" - ) - sin = repeat( - sin, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)" - ) - return torch.cat( - [ - x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, - x[..., ro_dim:], - ], - dim=-1, - ) - - def apply_rotary_pos_emb( q: torch.Tensor, k: torch.Tensor, @@ -190,14 +157,20 @@ def apply_rotary_pos_emb( ) -> tuple[torch.Tensor, torch.Tensor]: cos = cos.chunk(2, dim=-1)[0].contiguous() sin = sin.chunk(2, dim=-1)[0].contiguous() - if is_flash_attn_backend and not current_platform.is_xpu(): - from vllm.vllm_flash_attn.layers.rotary import apply_rotary_emb - apply_rotary_emb_func = apply_rotary_emb + apply_rotary_emb = ApplyRotaryEmb( + enforce_enable=True, + enable_fp32_compute=True, + ) + + if is_flash_attn_backend and not current_platform.is_cuda(): + apply_rotary_emb_func = apply_rotary_emb.forward_cuda else: - apply_rotary_emb_func = apply_rotary_emb_torch - q_embed = apply_rotary_emb_func(q.float(), cos.float(), sin.float()).type_as(q) - k_embed = apply_rotary_emb_func(k.float(), cos.float(), sin.float()).type_as(k) + apply_rotary_emb_func = apply_rotary_emb.forward_native + + q_embed = apply_rotary_emb_func(q, cos, sin) + k_embed = apply_rotary_emb_func(k, cos, sin) + return q_embed, k_embed @@ -208,6 +181,7 @@ def __init__( self, 
config: Siglip2VisionConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", use_data_parallel: bool = False, attn_backend_override: AttentionBackendEnum | None = None, @@ -227,20 +201,25 @@ def __init__( self.dropout = config.attention_dropout self.is_causal = False - # TODO(Isotr0py): Enable data parallel after we support - # disabling TP on parallel linear layer + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.qkv_proj = QKVParallelLinear( hidden_size=self.embed_dim, head_size=self.head_dim, total_num_heads=self.num_heads, quant_config=quant_config, prefix=f"{prefix}.qkv_proj", + disable_tp=use_data_parallel, ) self.out_proj = RowParallelLinear( input_size=self.embed_dim, output_size=self.embed_dim, quant_config=quant_config, prefix=f"{prefix}.out_proj", + disable_tp=use_data_parallel, ) self.tp_size = ( @@ -249,31 +228,13 @@ def __init__( self.num_heads_per_partition = divide(self.num_heads, self.tp_size) self.use_rope = config.use_rope - # Detect attention implementation. 
- self.attn_backend = get_vit_attn_backend( + self.attn = MMEncoderAttention( + num_heads=self.num_heads_per_partition, head_size=self.head_dim, - dtype=torch.get_default_dtype(), - attn_backend_override=attn_backend_override, + prefix=f"{prefix}.attn", + multimodal_config=multimodal_config, ) - self.attn_backend, self.flash_attn_varlen_func = ( - maybe_get_vit_flash_attn_backend( - self.attn_backend, - attn_backend_override=attn_backend_override, - ) - ) - - if self.attn_backend not in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.TORCH_SDPA, - AttentionBackendEnum.ROCM_AITER_FA, - }: - self.attn_backend = AttentionBackendEnum.TORCH_SDPA - self.is_flash_attn_backend = self.attn_backend in { - AttentionBackendEnum.FLASH_ATTN, - AttentionBackendEnum.ROCM_AITER_FA, - } - def forward( self, hidden_states: torch.Tensor, @@ -298,46 +259,23 @@ def forward( keys.unsqueeze(0), cos, sin, - self.is_flash_attn_backend, + self.attn.is_flash_attn_backend, ) queries = queries.squeeze(0) keys = keys.squeeze(0) - max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() - if self.is_flash_attn_backend: - attn_output = self.flash_attn_varlen_func( - queries, - keys, - values, - cu_seqlens_q=cu_seqlens, - cu_seqlens_k=cu_seqlens, - max_seqlen_q=max_seqlen, - max_seqlen_k=max_seqlen, - ).reshape(seq_length, -1) - elif self.attn_backend == AttentionBackendEnum.TORCH_SDPA: - # Execute attention entry by entry for speed & less VRAM. - batch_size = cu_seqlens.shape[0] - 1 - outputs = [] - cu = cu_seqlens.tolist() - for i in range(batch_size): - start_idx = cu[i] - end_idx = cu[i + 1] - - # Each sequence is processed independently. 
- q_i = queries[start_idx:end_idx].unsqueeze(0) - k_i = keys[start_idx:end_idx].unsqueeze(0) - v_i = values[start_idx:end_idx].unsqueeze(0) - - # (1, seq_len, num_heads, head_dim) -> - # (1, num_heads, seq_len, head_dim) - q_i, k_i, v_i = [x.transpose(1, 2) for x in (q_i, k_i, v_i)] - - output_i = F.scaled_dot_product_attention(q_i, k_i, v_i, dropout_p=0.0) - # (1, num_heads, seq_len, head_dim) -> (seq_len, embed_dim) - output_i = output_i.transpose(1, 2).reshape(end_idx - start_idx, -1) - outputs.append(output_i) - - attn_output = torch.cat(outputs, dim=0) + max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() + attn_output = self.attn( + query=queries.unsqueeze(0), + key=keys.unsqueeze(0), + value=values.unsqueeze(0), + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + ) + attn_output = attn_output.reshape( + seq_length, self.num_heads_per_partition * self.head_dim + ) + attn_output, _ = self.out_proj(attn_output) return attn_output @@ -347,25 +285,30 @@ def __init__( self, config: Siglip2VisionConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, ): super().__init__() self.config = config + use_data_parallel = ( + multimodal_config.mm_encoder_tp_mode == "data" + if multimodal_config + else False + ) self.activation_fn = get_act_fn(config.hidden_act) - # TODO(Isotr0py): Enable data parallel after we support - # disabling TP on parallel linear layer self.fc1 = ColumnParallelLinear( config.hidden_size, config.intermediate_size, quant_config=quant_config, prefix=f"{prefix}.fc1", + disable_tp=use_data_parallel, ) self.fc2 = RowParallelLinear( config.intermediate_size, config.hidden_size, quant_config=quant_config, prefix=f"{prefix}.fc2", + disable_tp=use_data_parallel, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: @@ -380,9 +323,8 @@ def __init__( self, config: Siglip2VisionConfig, quant_config: QuantizationConfig | None = None, + 
multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.embed_dim = config.hidden_size @@ -390,16 +332,15 @@ def __init__( self.self_attn = Siglip2Attention( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.self_attn", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Siglip2MLP( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.mlp", - use_data_parallel=use_data_parallel, ) def forward( @@ -444,9 +385,8 @@ def __init__( self, config: Siglip2VisionConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config @@ -455,9 +395,8 @@ def __init__( Siglip2EncoderLayer( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.layers.{idx}", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) for idx in range(config.num_hidden_layers) ] @@ -630,9 +569,8 @@ def __init__( self, config: Siglip2VisionConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.config = config @@ -642,9 +580,8 @@ def __init__( self.encoder = Siglip2Encoder( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.encoder", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) self.post_layernorm = nn.LayerNorm(embed_dim, 
eps=config.layer_norm_eps) @@ -671,18 +608,16 @@ def __init__( self, config: Siglip2VisionConfig, quant_config: QuantizationConfig | None = None, + multimodal_config: MultiModalConfig | None = None, prefix: str = "", - use_data_parallel: bool = False, - attn_backend_override: AttentionBackendEnum | None = None, ): super().__init__() self.vision_model = Siglip2VisionTransformer( config, quant_config=quant_config, + multimodal_config=multimodal_config, prefix=f"{prefix}.vision_model", - use_data_parallel=use_data_parallel, - attn_backend_override=attn_backend_override, ) def forward( diff --git a/vllm/model_executor/models/vision.py b/vllm/model_executor/models/vision.py index 7602eca9c325..024c50f1207e 100644 --- a/vllm/model_executor/models/vision.py +++ b/vllm/model_executor/models/vision.py @@ -11,7 +11,7 @@ from transformers import PretrainedConfig from vllm.attention.backends.registry import AttentionBackendEnum -from vllm.config import VllmConfig, get_current_vllm_config +from vllm.config import VllmConfig from vllm.distributed import ( get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, @@ -88,14 +88,11 @@ def get_vit_attn_backend( """ Get the available attention backend for Vision Transformer. 
""" - if attn_backend_override is not None: - return attn_backend_override - - selected_backend = get_current_vllm_config().attention_config.backend - if selected_backend is not None: - return selected_backend - - return current_platform.get_vit_attn_backend(head_size, dtype) + return current_platform.get_vit_attn_backend( + head_size, + dtype, + backend=attn_backend_override, + ) def should_torch_compile_mm_vit(vllm_config: VllmConfig) -> bool: diff --git a/vllm/multimodal/audio.py b/vllm/multimodal/audio.py index 062547401c3c..51b8f77f2908 100644 --- a/vllm/multimodal/audio.py +++ b/vllm/multimodal/audio.py @@ -127,13 +127,21 @@ def __init__(self) -> None: def load_bytes(self, data: bytes) -> torch.Tensor: buffer = BytesIO(data) - return torch.load(buffer, weights_only=True) + # Enable sparse tensor integrity checks to prevent out-of-bounds + # writes from maliciously crafted tensors + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.load(buffer, weights_only=True) + return tensor.to_dense() def load_base64(self, media_type: str, data: str) -> torch.Tensor: return self.load_bytes(pybase64.b64decode(data, validate=True)) def load_file(self, filepath: Path) -> torch.Tensor: - return torch.load(filepath, weights_only=True) + # Enable sparse tensor integrity checks to prevent out-of-bounds + # writes from maliciously crafted tensors + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.load(filepath, weights_only=True) + return tensor.to_dense() def encode_base64(self, media: torch.Tensor) -> str: return tensor2base64(media) diff --git a/vllm/multimodal/image.py b/vllm/multimodal/image.py index 789421e9e0c3..1506ecb8c7aa 100644 --- a/vllm/multimodal/image.py +++ b/vllm/multimodal/image.py @@ -122,13 +122,21 @@ def __init__(self) -> None: def load_bytes(self, data: bytes) -> torch.Tensor: buffer = BytesIO(data) - return torch.load(buffer, weights_only=True) + # Enable sparse tensor integrity checks to prevent out-of-bounds + # 
writes from maliciously crafted tensors + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.load(buffer, weights_only=True) + return tensor.to_dense() def load_base64(self, media_type: str, data: str) -> torch.Tensor: return self.load_bytes(pybase64.b64decode(data, validate=True)) def load_file(self, filepath: Path) -> torch.Tensor: - return torch.load(filepath, weights_only=True) + # Enable sparse tensor integrity checks to prevent out-of-bounds + # writes from maliciously crafted tensors + with torch.sparse.check_sparse_tensor_invariants(): + tensor = torch.load(filepath, weights_only=True) + return tensor.to_dense() def encode_base64(self, media: torch.Tensor) -> str: return pybase64.b64encode(media.numpy()).decode("utf-8") diff --git a/vllm/multimodal/parse.py b/vllm/multimodal/parse.py index c3c7cc2c3da0..a69afc3176ca 100644 --- a/vllm/multimodal/parse.py +++ b/vllm/multimodal/parse.py @@ -120,7 +120,7 @@ def get_item_for_hash(self, index: int) -> _T | MediaWithBytes[_T]: return self.data[index] def get_processor_data(self) -> Mapping[str, object]: - return {f"{self.modality}s": self.data} + return {f"{self.modality}s": self.get_all()} def get_passthrough_data(self) -> Mapping[str, object]: return {} diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index d961dcf13e53..e1b461d79a65 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -23,6 +23,7 @@ logger = init_logger(__name__) if TYPE_CHECKING: + from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig else: VllmConfig = None @@ -126,21 +127,13 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", - head_size: int, - dtype: torch.dtype, - kv_cache_dtype: str | None, - block_size: int, - use_mla: bool, - has_sink: bool, - use_sparse: bool, - use_mm_prefix: bool, - attn_type: str | None = None, + attn_selector_config: "AttentionSelectorConfig", ) -> str: if 
selected_backend and selected_backend != AttentionBackendEnum.CPU_ATTN: logger.info("Cannot use %s backend on CPU.", selected_backend) - if use_mla: + if attn_selector_config.use_mla: raise NotImplementedError("MLA is not supported on CPU.") - if use_sparse: + if attn_selector_config.use_sparse: raise NotImplementedError("Sparse Attention is not supported on CPU.") return AttentionBackendEnum.CPU_ATTN.get_path() diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 38adf5dda07f..2dc4ba5d70ca 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -7,14 +7,13 @@ import os from collections.abc import Callable from functools import cache, wraps -from typing import TYPE_CHECKING, TypeVar +from typing import TYPE_CHECKING, Optional, TypeVar import torch from typing_extensions import ParamSpec # import custom ops, trigger op registration import vllm._C # noqa -from vllm.attention.backends.abstract import AttentionType from vllm.attention.backends.registry import AttentionBackendEnum from vllm.logger import init_logger from vllm.utils.import_utils import import_pynvml @@ -23,6 +22,7 @@ from .interface import DeviceCapability, Platform, PlatformEnum if TYPE_CHECKING: + from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig from vllm.config.cache import CacheDType else: @@ -255,36 +255,11 @@ def get_current_memory_usage( torch.cuda.reset_peak_memory_stats(device) return torch.cuda.max_memory_allocated(device) - @classmethod - def get_vit_attn_backend( - cls, head_size: int, dtype: torch.dtype - ) -> "AttentionBackendEnum": - # Try FlashAttention first - if (cc := cls.get_device_capability()) and cc.major >= 8: - try: - backend_class = AttentionBackendEnum.FLASH_ATTN.get_class() - if backend_class.supports_head_size( - head_size - ) and backend_class.supports_dtype(dtype): - return AttentionBackendEnum.FLASH_ATTN - except ImportError: - pass - - return AttentionBackendEnum.TORCH_SDPA - @classmethod def 
get_valid_backends( cls, - head_size, - dtype, - kv_cache_dtype, - block_size, - use_mla, - has_sink, - use_sparse, - use_mm_prefix, - device_capability, - attn_type, + device_capability: DeviceCapability, + attn_selector_config: "AttentionSelectorConfig", ) -> tuple[ list[tuple["AttentionBackendEnum", int]], dict["AttentionBackendEnum", list[str]], @@ -292,21 +267,15 @@ def get_valid_backends( valid_backends_priorities = [] invalid_reasons = {} - backend_priorities = _get_backend_priorities(use_mla, device_capability) + backend_priorities = _get_backend_priorities( + attn_selector_config.use_mla, device_capability + ) for priority, backend in enumerate(backend_priorities): try: backend_class = backend.get_class() invalid_reasons_i = backend_class.validate_configuration( - head_size, - dtype, - kv_cache_dtype, - block_size, - use_mla, - has_sink, - use_sparse, - use_mm_prefix, - device_capability, - attn_type, + device_capability=device_capability, + **attn_selector_config._asdict(), ) except ImportError: invalid_reasons_i = ["ImportError"] @@ -321,37 +290,19 @@ def get_valid_backends( def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", - head_size: int, - dtype: torch.dtype, - kv_cache_dtype: "CacheDType | None", - block_size: int | None, - use_mla: bool, - has_sink: bool, - use_sparse: bool, - use_mm_prefix: bool, - attn_type: str | None = None, + attn_selector_config: "AttentionSelectorConfig", ) -> str: - if attn_type is None: - attn_type = AttentionType.DECODER - device_capability = cls.get_device_capability() assert device_capability is not None + attn_selector_config = attn_selector_config._replace(block_size=None) # First try checking just the selected backend, if there is one. 
if selected_backend is not None: try: backend_class = selected_backend.get_class() invalid_reasons = backend_class.validate_configuration( - head_size, - dtype, - kv_cache_dtype, - None, - use_mla, - has_sink, - use_sparse, - use_mm_prefix, - device_capability, - attn_type, + device_capability=device_capability, + **attn_selector_config._asdict(), ) except ImportError: invalid_reasons = ["ImportError"] @@ -367,16 +318,8 @@ def get_attn_backend_cls( # No selected backend or the selected backend is invalid, # so we try finding a valid backend. valid_backends_priorities, invalid_reasons = cls.get_valid_backends( - head_size, - dtype, - kv_cache_dtype, - None, - use_mla, - has_sink, - use_sparse, - use_mm_prefix, - device_capability, - attn_type, + device_capability=device_capability, + attn_selector_config=attn_selector_config, ) reasons_str = ( "{" @@ -386,11 +329,7 @@ def get_attn_backend_cls( ) + "}" ) - config_str = ( - f"head_size: {head_size}, dtype: {dtype}, " - f"kv_cache_dtype: {kv_cache_dtype}, block_size: {block_size}, " - f"use_mla: {use_mla}, has_sink: {has_sink}, use_sparse: {use_sparse}" - ) + config_str = attn_selector_config.__repr__() logger.debug_once( f"Some attention backends are not valid for {cls.device_name} with " f"{config_str}. Reasons: {reasons_str}." @@ -418,6 +357,41 @@ def get_attn_backend_cls( return selected_backend.get_path() + @classmethod + def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: + return [ + AttentionBackendEnum.TORCH_SDPA, + AttentionBackendEnum.FLASH_ATTN, + ] + + @classmethod + def get_vit_attn_backend( + cls, + head_size: int, + dtype: torch.dtype, + backend: Optional["AttentionBackendEnum"] = None, + ) -> "AttentionBackendEnum": + if backend is not None: + assert backend in cls.get_supported_vit_attn_backends(), ( + f"Backend {backend} is not supported for vit attention. 
" + f"Supported backends are: {cls.get_supported_vit_attn_backends()}" + ) + logger.info_once(f"Using backend {backend} for vit attention") + return backend + + # Try FlashAttention first + if (cc := cls.get_device_capability()) and cc.major >= 8: + try: + backend_class = AttentionBackendEnum.FLASH_ATTN.get_class() + if backend_class.supports_head_size( + head_size + ) and backend_class.supports_dtype(dtype): + return AttentionBackendEnum.FLASH_ATTN + except ImportError: + pass + + return AttentionBackendEnum.TORCH_SDPA + @classmethod def get_punica_wrapper(cls) -> str: return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU" diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 49437c7d56d1..d4b40045df38 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -7,7 +7,7 @@ import random import sys from datetime import timedelta -from typing import TYPE_CHECKING, Any, NamedTuple +from typing import TYPE_CHECKING, Any, NamedTuple, Optional import numpy as np import torch @@ -18,8 +18,8 @@ if TYPE_CHECKING: from torch.distributed import PrefixStore, ProcessGroup + from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig - from vllm.config.cache import CacheDType from vllm.inputs import ProcessorInputs, PromptType from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams @@ -222,29 +222,52 @@ def import_kernels(cls) -> None: with contextlib.suppress(ImportError): import vllm._moe_C # noqa: F401 - @classmethod - def get_vit_attn_backend( - cls, head_size: int, dtype: torch.dtype - ) -> "AttentionBackendEnum": - return AttentionBackendEnum.TORCH_SDPA - @classmethod def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", - head_size: int, - dtype: torch.dtype, - kv_cache_dtype: "CacheDType | None", - block_size: int, - use_mla: bool, - has_sink: bool, - use_sparse: bool, - use_mm_prefix: bool, - attn_type: str | None = None, + 
attn_selector_config: "AttentionSelectorConfig", ) -> str: """Get the attention backend class of a device.""" return "" + @classmethod + def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: + return [ + AttentionBackendEnum.TORCH_SDPA, + ] + + @classmethod + def get_vit_attn_backend( + cls, + head_size: int, + dtype: torch.dtype, + backend: Optional["AttentionBackendEnum"] = None, + ) -> "AttentionBackendEnum": + """ + Get the vision attention backend class of a device. + + NOTE: ViT Attention should be checked and override in the platform-specific + implementation. we should not override this in any other places, like + the model_executor/models/.py. + + We check if the backend is None or not: + 1. If not, check if the backend is supported by the platform. + 2. If None, continue to the default selection logic. + """ + if backend is not None: + assert backend in cls.get_supported_vit_attn_backends(), ( + f"Backend {backend} is not supported for vit attention" + f"Supported backends are: {cls.get_supported_vit_attn_backends()}" + ) + logger.info_once(f"Using backend {backend} for vit attention") + return backend + + logger.info_once( + f"Using default backend {AttentionBackendEnum.TORCH_SDPA} for vit attention" + ) + return AttentionBackendEnum.TORCH_SDPA + @classmethod def get_device_capability( cls, diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 876114c2d33a..e469a928da22 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -3,7 +3,7 @@ import os from functools import cache, lru_cache, wraps -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -15,6 +15,7 @@ from .interface import DeviceCapability, Platform, PlatformEnum if TYPE_CHECKING: + from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig logger = init_logger(__name__) @@ -187,42 +188,19 @@ class RocmPlatform(Platform): if not on_gfx9(): supported_quantization += 
["bitsandbytes"] - @classmethod - def get_vit_attn_backend( - cls, head_size: int, dtype: torch.dtype - ) -> AttentionBackendEnum: - from importlib.util import find_spec - - from vllm._aiter_ops import rocm_aiter_ops - - if rocm_aiter_ops.is_mha_enabled(): - # Note: AITER FA is only supported for Qwen-VL models. - # TODO: Add support for other VL models in their model class. - return AttentionBackendEnum.ROCM_AITER_FA - - if on_gfx9() and find_spec("flash_attn") is not None: - return AttentionBackendEnum.FLASH_ATTN - - return AttentionBackendEnum.TORCH_SDPA - @classmethod def get_attn_backend_cls( cls, - selected_backend, - head_size, - dtype, - kv_cache_dtype, - block_size, - use_mla, - has_sink, - use_sparse, - use_mm_prefix, - attn_type: str | None = None, + selected_backend: "AttentionBackendEnum", + attn_selector_config: "AttentionSelectorConfig", ) -> str: from vllm._aiter_ops import rocm_aiter_ops - if use_sparse: - if kv_cache_dtype.startswith("fp8"): + block_size = attn_selector_config.block_size + kv_cache_dtype = attn_selector_config.kv_cache_dtype + + if attn_selector_config.use_sparse: + if kv_cache_dtype and kv_cache_dtype.startswith("fp8"): raise ValueError( "ROCMAiterMLASparseBackend doesn't support fp8 kv_cache_dtype." ) @@ -232,7 +210,7 @@ def get_attn_backend_cls( logger.info_once("Using Sparse MLA backend on V1 engine.") return AttentionBackendEnum.ROCM_AITER_MLA_SPARSE.get_path() - if use_mla: + if attn_selector_config.use_mla: if selected_backend is None: selected_backend = ( AttentionBackendEnum.ROCM_AITER_MLA @@ -322,6 +300,43 @@ def get_attn_backend_cls( "ROCm. Note that V0 attention backends have been removed." 
) + @classmethod + def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: + return [ + AttentionBackendEnum.FLASH_ATTN, + AttentionBackendEnum.ROCM_AITER_FA, + AttentionBackendEnum.TORCH_SDPA, + ] + + @classmethod + def get_vit_attn_backend( + cls, + head_size: int, + dtype: torch.dtype, + backend: Optional["AttentionBackendEnum"] = None, + ) -> "AttentionBackendEnum": + if backend is not None: + assert backend in cls.get_supported_vit_attn_backends(), ( + f"Backend {backend} is not supported for vit attention. " + f"Supported backends are: {cls.get_supported_vit_attn_backends()}" + ) + logger.info_once(f"Using backend {backend} for vit attention") + return backend + + from importlib.util import find_spec + + from vllm._aiter_ops import rocm_aiter_ops + + if rocm_aiter_ops.is_mha_enabled(): + # Note: AITER FA is only supported for Qwen-VL models. + # TODO: Add support for other VL models in their model class. + return AttentionBackendEnum.ROCM_AITER_FA + + if on_gfx9() and find_spec("flash_attn") is not None: + return AttentionBackendEnum.FLASH_ATTN + + return AttentionBackendEnum.TORCH_SDPA + @classmethod def set_device(cls, device: torch.device) -> None: """ diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index d6998e7a308a..7c479bf2b6a0 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -2,7 +2,7 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import contextlib -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, Optional, cast import torch from tpu_info import device @@ -16,6 +16,7 @@ if TYPE_CHECKING: from typing import TypeAlias + from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig from vllm.config.cache import BlockSize from vllm.pooling_params import PoolingParams @@ -57,17 +58,9 @@ def import_kernels(cls) -> None: def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", - head_size: int, - dtype: torch.dtype, 
- kv_cache_dtype: str | None, - block_size: int, - use_mla: bool, - has_sink: bool, - use_sparse: bool, - use_mm_prefix: bool, - attn_type: str | None = None, + attn_selector_config: "AttentionSelectorConfig", ) -> str: - if use_sparse: + if attn_selector_config.use_sparse: raise NotImplementedError("Sparse Attention is not supported on TPU.") if selected_backend != AttentionBackendEnum.PALLAS: logger.info("Cannot use %s backend on TPU.", selected_backend) @@ -75,6 +68,32 @@ def get_attn_backend_cls( logger.info("Using Pallas V1 backend.") return AttentionBackendEnum.PALLAS.get_path() + @classmethod + def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: + return [ + AttentionBackendEnum.PALLAS, + ] + + @classmethod + def get_vit_attn_backend( + cls, + head_size: int, + dtype: torch.dtype, + backend: Optional["AttentionBackendEnum"] = None, + ) -> "AttentionBackendEnum": + if backend is not None: + assert backend in cls.get_supported_vit_attn_backends(), ( + f"Backend {backend} is not supported for vit attention" + f"Supported backends are: {cls.get_supported_vit_attn_backends()}." + ) + logger.info_once(f"Using backend {backend} for vit attention.") + return backend + + logger.info_once( + f"Using default backend {AttentionBackendEnum.PALLAS} for vit attention." 
+ ) + return AttentionBackendEnum.PALLAS + @classmethod def set_device(cls, device: torch.device) -> None: """ diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 0a05750764d8..af8979af3664 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -3,7 +3,7 @@ import contextlib import os -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -14,6 +14,7 @@ from .interface import DeviceCapability, Platform, PlatformEnum if TYPE_CHECKING: + from vllm.attention.selector import AttentionSelectorConfig from vllm.config import VllmConfig else: VllmConfig = None @@ -42,15 +43,7 @@ def import_kernels(cls) -> None: def get_attn_backend_cls( cls, selected_backend: "AttentionBackendEnum", - head_size: int, - dtype: torch.dtype, - kv_cache_dtype: str | None, - block_size: int, - use_mla: bool, - has_sink: bool, - use_sparse: bool, - use_mm_prefix: bool, - attn_type: str | None = None, + attn_selector_config: "AttentionSelectorConfig", ) -> str: from vllm.v1.attention.backends.utils import set_kv_cache_layout @@ -60,7 +53,7 @@ def get_attn_backend_cls( "only NHD layout is supported by XPU attention kernels." ) - if use_sparse: + if attn_selector_config.use_sparse: raise NotImplementedError("Sparse Attention is not supported on XPU.") if selected_backend == AttentionBackendEnum.TRITON_ATTN: logger.info_once("Using Triton backend.") @@ -71,12 +64,40 @@ def get_attn_backend_cls( elif selected_backend: raise ValueError( f"Invalid attention backend for {cls.device_name}, " - f"with use_mla: {use_mla}" + f"with use_mla: {attn_selector_config.use_mla}" ) logger.info("Using Flash Attention backend.") return AttentionBackendEnum.FLASH_ATTN.get_path() + @classmethod + def get_supported_vit_attn_backends(cls) -> list["AttentionBackendEnum"]: + # XPU only supports FLASH_ATTN for vision attention. 
+ return [ + AttentionBackendEnum.FLASH_ATTN, + ] + + @classmethod + def get_vit_attn_backend( + cls, + head_size: int, + dtype: torch.dtype, + backend: Optional["AttentionBackendEnum"] = None, + ) -> "AttentionBackendEnum": + if backend is not None: + assert backend in cls.get_supported_vit_attn_backends(), ( + f"Backend {backend} is not supported for vit attention. " + f"Supported backends are: " + f"{cls.get_supported_vit_attn_backends()}." + ) + logger.info_once(f"Using backend {backend} for vit attention") + return backend + + logger.info_once( + f"Using backend {AttentionBackendEnum.FLASH_ATTN} for vit attention" + ) + return AttentionBackendEnum.FLASH_ATTN + @classmethod def set_device(cls, device: torch.device) -> None: """ @@ -110,12 +131,6 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.xpu.get_device_properties(device_id) return device_props.total_memory - @classmethod - def get_vit_attn_backend( - cls, head_size: int, dtype: torch.dtype - ) -> "AttentionBackendEnum": - return AttentionBackendEnum.FLASH_ATTN - @classmethod def inference_mode(cls): return torch.no_grad() diff --git a/vllm/tokenizers/deepseekv32.py b/vllm/tokenizers/deepseek_v32.py similarity index 100% rename from vllm/tokenizers/deepseekv32.py rename to vllm/tokenizers/deepseek_v32.py diff --git a/vllm/tokenizers/registry.py b/vllm/tokenizers/registry.py index 1296ce62ae69..72447ef04e87 100644 --- a/vllm/tokenizers/registry.py +++ b/vllm/tokenizers/registry.py @@ -30,7 +30,7 @@ _VLLM_TOKENIZERS = { - "deepseekv32": ("deepseekv32", "DeepseekV32Tokenizer"), + "deepseek_v32": ("deepseek_v32", "DeepseekV32Tokenizer"), "hf": ("hf", "CachedHfTokenizer"), "mistral": ("mistral", "MistralTokenizer"), } diff --git a/vllm/tool_parsers/__init__.py b/vllm/tool_parsers/__init__.py new file mode 100644 index 000000000000..181d8bcba955 --- /dev/null +++ b/vllm/tool_parsers/__init__.py @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: 
Copyright contributors to the vLLM project + +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, + ToolParserManager, +) + +__all__ = ["ToolParser", "ToolParserManager"] + + +""" +Register a lazy module mapping. + +Example: + ToolParserManager.register_lazy_module( + name="kimi_k2", + module_path="vllm.tool_parsers.kimi_k2_parser", + class_name="KimiK2ToolParser", + ) +""" + + +_TOOL_PARSERS_TO_REGISTER = { + "deepseek_v3": ( # name + "deepseekv3_tool_parser", # filename + "DeepSeekV3ToolParser", # class_name + ), + "deepseek_v31": ( + "deepseekv31_tool_parser", + "DeepSeekV31ToolParser", + ), + "deepseek_v32": ( + "deepseekv32_tool_parser", + "DeepSeekV32ToolParser", + ), + "ernie45": ( + "ernie45_tool_parser", + "Ernie45ToolParser", + ), + "glm45": ( + "glm4_moe_tool_parser", + "Glm4MoeModelToolParser", + ), + "granite-20b-fc": ( + "granite_20b_fc_tool_parser", + "Granite20bFCToolParser", + ), + "granite": ( + "granite_tool_parser", + "GraniteToolParser", + ), + "hermes": ( + "hermes_tool_parser", + "Hermes2ProToolParser", + ), + "hunyuan_a13b": ( + "hunyuan_a13b_tool_parser", + "HunyuanA13BToolParser", + ), + "internlm": ( + "internlm2_tool_parser", + "Internlm2ToolParser", + ), + "jamba": ( + "jamba_tool_parser", + "JambaToolParser", + ), + "kimi_k2": ( + "kimi_k2_tool_parser", + "KimiK2ToolParser", + ), + "llama3_json": ( + "llama_tool_parser", + "Llama3JsonToolParser", + ), + "llama4_json": ( + "llama_tool_parser", + "Llama3JsonToolParser", + ), + "llama4_pythonic": ( + "llama4_pythonic_tool_parser", + "Llama4PythonicToolParser", + ), + "longcat": ( + "longcat_tool_parser", + "LongcatFlashToolParser", + ), + "minimax_m2": ( + "minimax_m2_tool_parser", + "MinimaxM2ToolParser", + ), + "minimax": ( + "minimax_tool_parser", + "MinimaxToolParser", + ), + "mistral": ( + "mistral_tool_parser", + "MistralToolParser", + ), + "olmo3": ( + "olmo3_tool_parser", + "Olmo3PythonicToolParser", + ), + "openai": ( + "openai_tool_parser", + "OpenAIToolParser", + 
), + "phi4_mini_json": ( + "phi4mini_tool_parser", + "Phi4MiniJsonToolParser", + ), + "pythonic": ( + "pythonic_tool_parser", + "PythonicToolParser", + ), + "qwen3_coder": ( + "qwen3coder_tool_parser", + "Qwen3CoderToolParser", + ), + "qwen3_xml": ( + "qwen3xml_tool_parser", + "Qwen3XMLToolParser", + ), + "seed_oss": ( + "seed_oss_tool_parser", + "SeedOssToolParser", + ), + "step3": ( + "step3_tool_parser", + "Step3ToolParser", + ), + "xlam": ( + "xlam_tool_parser", + "xLAMToolParser", + ), + "gigachat3": ( + "gigachat3_tool_parser", + "GigaChat3ToolParser", + ), +} + + +def register_lazy_tool_parsers(): + for name, (file_name, class_name) in _TOOL_PARSERS_TO_REGISTER.items(): + module_path = f"vllm.tool_parsers.{file_name}" + ToolParserManager.register_lazy_module(name, module_path, class_name) + + +register_lazy_tool_parsers() diff --git a/vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py b/vllm/tool_parsers/abstract_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py rename to vllm/tool_parsers/abstract_tool_parser.py index 87ef2e0786a9..e2ccb1dad990 100644 --- a/vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +++ b/vllm/tool_parsers/abstract_tool_parser.py @@ -17,12 +17,12 @@ ResponsesRequest, ResponseTextConfig, ) -from vllm.entrypoints.openai.tool_parsers.utils import get_json_schema_from_tools from vllm.logger import init_logger from vllm.sampling_params import ( StructuredOutputsParams, ) from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.utils import get_json_schema_from_tools from vllm.utils.collection_utils import is_list_of from vllm.utils.import_utils import import_from_path @@ -203,7 +203,7 @@ def register_lazy_module(cls, name: str, module_path: str, class_name: str) -> N Example: ToolParserManager.register_lazy_module( name="kimi_k2", - module_path="vllm.entrypoints.openai.tool_parsers.kimi_k2_parser", + module_path="vllm.tool_parsers.kimi_k2_parser", 
class_name="KimiK2ToolParser", ) """ diff --git a/vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py b/vllm/tool_parsers/deepseekv31_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py rename to vllm/tool_parsers/deepseekv31_tool_parser.py index 10de3dabf985..33383e1bc073 100644 --- a/vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +++ b/vllm/tool_parsers/deepseekv31_tool_parser.py @@ -15,11 +15,9 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ToolParser logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/deepseekv32_tool_parser.py b/vllm/tool_parsers/deepseekv32_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/deepseekv32_tool_parser.py rename to vllm/tool_parsers/deepseekv32_tool_parser.py index 4973deb7cefa..db081178fdea 100644 --- a/vllm/entrypoints/openai/tool_parsers/deepseekv32_tool_parser.py +++ b/vllm/tool_parsers/deepseekv32_tool_parser.py @@ -17,11 +17,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py b/vllm/tool_parsers/deepseekv3_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py rename to vllm/tool_parsers/deepseekv3_tool_parser.py index 66b14875dce2..f8cf559f2284 100644 --- a/vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +++ b/vllm/tool_parsers/deepseekv3_tool_parser.py @@ -15,11 +15,11 @@ FunctionCall, 
ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py b/vllm/tool_parsers/ernie45_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py rename to vllm/tool_parsers/ernie45_tool_parser.py index d054d8e4b865..79193787b3b3 100644 --- a/vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +++ b/vllm/tool_parsers/ernie45_tool_parser.py @@ -15,11 +15,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/gigachat3_tool_parser.py b/vllm/tool_parsers/gigachat3_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/gigachat3_tool_parser.py rename to vllm/tool_parsers/gigachat3_tool_parser.py index dd27ffa83cfc..27a6bc1a7bad 100644 --- a/vllm/entrypoints/openai/tool_parsers/gigachat3_tool_parser.py +++ b/vllm/tool_parsers/gigachat3_tool_parser.py @@ -16,9 +16,9 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ToolParser from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ToolParser logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py b/vllm/tool_parsers/glm4_moe_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py rename to vllm/tool_parsers/glm4_moe_tool_parser.py 
index 165346adb3d9..d254fcb5240a 100644 --- a/vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +++ b/vllm/tool_parsers/glm4_moe_tool_parser.py @@ -18,11 +18,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py b/vllm/tool_parsers/granite_20b_fc_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py rename to vllm/tool_parsers/granite_20b_fc_tool_parser.py index df1b590526b1..d841fb57ac87 100644 --- a/vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +++ b/vllm/tool_parsers/granite_20b_fc_tool_parser.py @@ -19,17 +19,17 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.logger import init_logger +from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from vllm.entrypoints.openai.tool_parsers.utils import ( +from vllm.tool_parsers.utils import ( consume_space, find_common_prefix, is_complete_json, partial_json_loads, ) -from vllm.logger import init_logger -from vllm.tokenizers import TokenizerLike logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py b/vllm/tool_parsers/granite_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py rename to vllm/tool_parsers/granite_tool_parser.py index 14b0ca0abe35..7abfdf72849d 100644 --- a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +++ b/vllm/tool_parsers/granite_tool_parser.py @@ -17,17 +17,17 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser 
import ( +from vllm.logger import init_logger +from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from vllm.entrypoints.openai.tool_parsers.utils import ( +from vllm.tool_parsers.utils import ( consume_space, find_common_prefix, is_complete_json, partial_json_loads, ) -from vllm.logger import init_logger -from vllm.tokenizers import TokenizerLike logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py b/vllm/tool_parsers/hermes_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py rename to vllm/tool_parsers/hermes_tool_parser.py index 14cf2f38b70c..4b1dea7edf27 100644 --- a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +++ b/vllm/tool_parsers/hermes_tool_parser.py @@ -18,12 +18,12 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike from vllm.tokenizers.mistral import MistralTokenizer +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py b/vllm/tool_parsers/hunyuan_a13b_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py rename to vllm/tool_parsers/hunyuan_a13b_tool_parser.py index d2419b5d84ea..c73982136804 100644 --- a/vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +++ b/vllm/tool_parsers/hunyuan_a13b_tool_parser.py @@ -17,12 +17,12 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) -from vllm.entrypoints.openai.tool_parsers.utils import consume_space from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + 
ToolParser, +) +from vllm.tool_parsers.utils import consume_space from vllm.utils import random_uuid logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py b/vllm/tool_parsers/internlm2_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py rename to vllm/tool_parsers/internlm2_tool_parser.py index 67788358543e..e87efe3275a7 100644 --- a/vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +++ b/vllm/tool_parsers/internlm2_tool_parser.py @@ -17,12 +17,12 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) -from vllm.entrypoints.openai.tool_parsers.utils import extract_intermediate_diff from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) +from vllm.tool_parsers.utils import extract_intermediate_diff logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py b/vllm/tool_parsers/jamba_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py rename to vllm/tool_parsers/jamba_tool_parser.py index 92b09917c252..7f3de0b38a33 100644 --- a/vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +++ b/vllm/tool_parsers/jamba_tool_parser.py @@ -18,11 +18,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers import ToolParser -from vllm.entrypoints.openai.tool_parsers.utils import extract_intermediate_diff from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike from vllm.tokenizers.mistral import MistralTokenizer +from vllm.tool_parsers import ToolParser +from vllm.tool_parsers.utils import extract_intermediate_diff logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py b/vllm/tool_parsers/kimi_k2_tool_parser.py 
similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py rename to vllm/tool_parsers/kimi_k2_tool_parser.py index 07db52ebd5af..c215b7978854 100644 --- a/vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +++ b/vllm/tool_parsers/kimi_k2_tool_parser.py @@ -15,11 +15,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py b/vllm/tool_parsers/llama4_pythonic_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py rename to vllm/tool_parsers/llama4_pythonic_tool_parser.py index 1d6de9244066..3c5409bbfaf4 100644 --- a/vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +++ b/vllm/tool_parsers/llama4_pythonic_tool_parser.py @@ -18,10 +18,10 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.logger import init_logger +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from vllm.logger import init_logger logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py b/vllm/tool_parsers/llama_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py rename to vllm/tool_parsers/llama_tool_parser.py index e1fe6e90dfd0..b0dfe05c8e55 100644 --- a/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +++ b/vllm/tool_parsers/llama_tool_parser.py @@ -20,15 +20,15 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.logger import init_logger +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from 
vllm.entrypoints.openai.tool_parsers.utils import ( +from vllm.tool_parsers.utils import ( find_common_prefix, is_complete_json, partial_json_loads, ) -from vllm.logger import init_logger logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py b/vllm/tool_parsers/longcat_tool_parser.py similarity index 93% rename from vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py rename to vllm/tool_parsers/longcat_tool_parser.py index 76d76a4aa35a..72f13559a922 100644 --- a/vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +++ b/vllm/tool_parsers/longcat_tool_parser.py @@ -3,8 +3,8 @@ import regex as re -from vllm.entrypoints.openai.tool_parsers.hermes_tool_parser import Hermes2ProToolParser from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.hermes_tool_parser import Hermes2ProToolParser class LongcatFlashToolParser(Hermes2ProToolParser): diff --git a/vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py b/vllm/tool_parsers/minimax_m2_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py rename to vllm/tool_parsers/minimax_m2_tool_parser.py index b595a98f3555..dcb2b64f6e73 100644 --- a/vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +++ b/vllm/tool_parsers/minimax_m2_tool_parser.py @@ -17,11 +17,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py b/vllm/tool_parsers/minimax_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py rename to vllm/tool_parsers/minimax_tool_parser.py index 1025041037c6..86e1433c6e36 100644 --- 
a/vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +++ b/vllm/tool_parsers/minimax_tool_parser.py @@ -17,12 +17,12 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) -from vllm.entrypoints.openai.tool_parsers.utils import extract_intermediate_diff from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) +from vllm.tool_parsers.utils import extract_intermediate_diff logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py b/vllm/tool_parsers/mistral_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py rename to vllm/tool_parsers/mistral_tool_parser.py index f60c379d2671..49a175f69f43 100644 --- a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +++ b/vllm/tool_parsers/mistral_tool_parser.py @@ -21,12 +21,12 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike from vllm.tokenizers.mistral import MistralTokenizer +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py b/vllm/tool_parsers/olmo3_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py rename to vllm/tool_parsers/olmo3_tool_parser.py index baff33bd7e8a..8cd6a84a9f6b 100644 --- a/vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +++ b/vllm/tool_parsers/olmo3_tool_parser.py @@ -18,10 +18,10 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.logger import init_logger +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from vllm.logger 
import init_logger logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py b/vllm/tool_parsers/openai_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py rename to vllm/tool_parsers/openai_tool_parser.py index a3cf793ed3a6..db92ea8982d7 100644 --- a/vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +++ b/vllm/tool_parsers/openai_tool_parser.py @@ -12,10 +12,10 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.logger import init_logger +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from vllm.logger import init_logger if TYPE_CHECKING: from vllm.tokenizers import TokenizerLike diff --git a/vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py b/vllm/tool_parsers/phi4mini_tool_parser.py similarity index 98% rename from vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py rename to vllm/tool_parsers/phi4mini_tool_parser.py index acb25ea2768e..9003429d8c6f 100644 --- a/vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +++ b/vllm/tool_parsers/phi4mini_tool_parser.py @@ -16,10 +16,10 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.logger import init_logger +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from vllm.logger import init_logger logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py b/vllm/tool_parsers/pythonic_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py rename to vllm/tool_parsers/pythonic_tool_parser.py index abeb923b9322..476a62d5f527 100644 --- a/vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +++ b/vllm/tool_parsers/pythonic_tool_parser.py @@ -19,10 +19,10 @@ FunctionCall, ToolCall, ) -from 
vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.logger import init_logger +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, ) -from vllm.logger import init_logger logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py b/vllm/tool_parsers/qwen3coder_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py rename to vllm/tool_parsers/qwen3coder_tool_parser.py index d49b14690ef0..d1a3cbeaafc7 100644 --- a/vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +++ b/vllm/tool_parsers/qwen3coder_tool_parser.py @@ -18,11 +18,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py b/vllm/tool_parsers/qwen3xml_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py rename to vllm/tool_parsers/qwen3xml_tool_parser.py index 03862ff432a5..107f791654a1 100644 --- a/vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +++ b/vllm/tool_parsers/qwen3xml_tool_parser.py @@ -19,11 +19,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py b/vllm/tool_parsers/seed_oss_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py rename to vllm/tool_parsers/seed_oss_tool_parser.py index 
c7947faad192..206072e65b10 100644 --- a/vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +++ b/vllm/tool_parsers/seed_oss_tool_parser.py @@ -21,11 +21,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py b/vllm/tool_parsers/step3_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py rename to vllm/tool_parsers/step3_tool_parser.py index 9213d6859dd9..acd99bf56d0b 100644 --- a/vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +++ b/vllm/tool_parsers/step3_tool_parser.py @@ -17,11 +17,11 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( - ToolParser, -) from vllm.logger import init_logger from vllm.tokenizers import TokenizerLike +from vllm.tool_parsers.abstract_tool_parser import ( + ToolParser, +) from vllm.utils import random_uuid logger = init_logger(__name__) diff --git a/vllm/entrypoints/openai/tool_parsers/utils.py b/vllm/tool_parsers/utils.py similarity index 100% rename from vllm/entrypoints/openai/tool_parsers/utils.py rename to vllm/tool_parsers/utils.py diff --git a/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py b/vllm/tool_parsers/xlam_tool_parser.py similarity index 99% rename from vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py rename to vllm/tool_parsers/xlam_tool_parser.py index effd2bd08b42..9c2b585fe9fd 100644 --- a/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +++ b/vllm/tool_parsers/xlam_tool_parser.py @@ -17,7 +17,7 @@ FunctionCall, ToolCall, ) -from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( +from vllm.tool_parsers.abstract_tool_parser import ( ToolParser, 
) from vllm.logger import init_logger diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index fb88c62dc5b2..a11d37b4b2ed 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -66,6 +66,7 @@ def __getitem__(self, key): _CONFIG_REGISTRY: dict[str, type[PretrainedConfig]] = LazyConfigDict( afmoe="AfmoeConfig", + bagel="BagelConfig", chatglm="ChatGLMConfig", deepseek_vl_v2="DeepseekVLV2Config", deepseek_v32="DeepseekV3Config", @@ -617,6 +618,28 @@ def get_config( hf_overrides=hf_overrides_kw, **kwargs, ) + + # Patching defaults for GGUF models + if _is_gguf: + # Some models have different default values between GGUF and HF. + def apply_gguf_default(key: str, gguf_default: Any): + """ + Apply GGUF defaults unless explicitly configured. + + This function reads/writes external `config` and `config_dict`. + If the specified `key` is not in `config_dict` (i.e. not explicitly + configured and the default HF value is used), it updates the + corresponding `config` value to `gguf_default`. + """ + if key not in config_dict: + config.update({key: gguf_default}) + + # Apply architecture-specific GGUF defaults. + if config.model_type in {"qwen3_moe"}: + # Qwen3 MoE: norm_topk_prob is always true. + # Note that, this parameter is always false (HF default) on Qwen2 MoE. 
+ apply_gguf_default("norm_topk_prob", True) + # Special architecture mapping check for GGUF models if _is_gguf: if config.model_type not in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES: diff --git a/vllm/transformers_utils/configs/__init__.py b/vllm/transformers_utils/configs/__init__.py index e536ca852132..54fe1b8d7b52 100644 --- a/vllm/transformers_utils/configs/__init__.py +++ b/vllm/transformers_utils/configs/__init__.py @@ -16,6 +16,7 @@ _CLASS_TO_MODULE: dict[str, str] = { "AfmoeConfig": "vllm.transformers_utils.configs.afmoe", + "BagelConfig": "vllm.transformers_utils.configs.bagel", "ChatGLMConfig": "vllm.transformers_utils.configs.chatglm", "DeepseekVLV2Config": "vllm.transformers_utils.configs.deepseek_vl2", "DotsOCRConfig": "vllm.transformers_utils.configs.dotsocr", @@ -54,6 +55,7 @@ __all__ = [ "AfmoeConfig", + "BagelConfig", "ChatGLMConfig", "DeepseekVLV2Config", "DeepseekV3Config", diff --git a/vllm/transformers_utils/configs/bagel.py b/vllm/transformers_utils/configs/bagel.py new file mode 100644 index 000000000000..53347ef45213 --- /dev/null +++ b/vllm/transformers_utils/configs/bagel.py @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from transformers import PretrainedConfig, SiglipVisionConfig +from transformers.models.qwen2 import Qwen2Config + + +class BagelConfig(PretrainedConfig): + """Configuration class for BAGEL model.""" + + model_type = "bagel" + + def __init__( + self, + visual_gen: bool = True, + visual_und: bool = True, + llm_config: dict | Qwen2Config | None = None, + vit_config: dict | SiglipVisionConfig | None = None, + vae_config: dict | None = None, + latent_patch_size: int = 2, + max_latent_size: int = 32, + vit_max_num_patch_per_side: int = 70, + connector_act: str = "gelu_pytorch_tanh", + interpolate_pos: bool = False, + timestep_shift: float = 1.0, + **kwargs, + ): + super().__init__(**kwargs) + self.visual_gen = visual_gen + self.visual_und = visual_und + + 
# Convert dict configs to proper config objects + if isinstance(llm_config, dict): + self.llm_config = Qwen2Config(**llm_config) + else: + self.llm_config = llm_config or Qwen2Config() + + if isinstance(vit_config, dict): + self.vit_config = SiglipVisionConfig(**vit_config) + else: + self.vit_config = vit_config or SiglipVisionConfig() + + self.vae_config = vae_config or {"z_channels": 16, "downsample": 8} + self.latent_patch_size = latent_patch_size + self.max_latent_size = max_latent_size + self.vit_max_num_patch_per_side = vit_max_num_patch_per_side + self.connector_act = connector_act + self.interpolate_pos = interpolate_pos + self.timestep_shift = timestep_shift + + @property + def hidden_size(self) -> int: + """Return the hidden size of the language model.""" + return self.llm_config.hidden_size diff --git a/vllm/transformers_utils/processors/__init__.py b/vllm/transformers_utils/processors/__init__.py index b49fdbe9ce77..af25dbe4ccdf 100644 --- a/vllm/transformers_utils/processors/__init__.py +++ b/vllm/transformers_utils/processors/__init__.py @@ -8,6 +8,7 @@ - There is a need to override the existing processor to support vLLM. 
""" +from vllm.transformers_utils.processors.bagel import BagelProcessor from vllm.transformers_utils.processors.deepseek_vl2 import DeepseekVLV2Processor from vllm.transformers_utils.processors.hunyuan_vl import HunYuanVLProcessor from vllm.transformers_utils.processors.hunyuan_vl_image import HunYuanVLImageProcessor @@ -15,6 +16,7 @@ from vllm.transformers_utils.processors.ovis2_5 import Ovis2_5Processor __all__ = [ + "BagelProcessor", "DeepseekVLV2Processor", "HunYuanVLProcessor", "HunYuanVLImageProcessor", diff --git a/vllm/transformers_utils/processors/bagel.py b/vllm/transformers_utils/processors/bagel.py new file mode 100644 index 000000000000..850e64f2fad1 --- /dev/null +++ b/vllm/transformers_utils/processors/bagel.py @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +# Copyright 2025 Bytedance Ltd. and/or its affiliates. +"""BAGEL processor for image and text inputs.""" + +from transformers import AutoProcessor +from transformers.image_utils import ImageInput +from transformers.processing_utils import ProcessorMixin +from transformers.tokenization_utils_base import PreTokenizedInput, TextInput + + +class BagelProcessor(ProcessorMixin): + """ + Constructs a BAGEL processor which wraps a + SigLIP image processor and a Qwen2 tokenizer. + """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "SiglipImageProcessor" + tokenizer_class = "AutoTokenizer" + + def __call__( + self, + text: TextInput + | PreTokenizedInput + | list[TextInput] + | list[PreTokenizedInput] = None, + images: ImageInput = None, + **kwargs, + ): + """ + Main method to prepare for the model one or several sequences(s) and image(s). 
+ """ + if images is not None: + # Process images with the image processor + # Ensure return_tensors is set to "pt" for PyTorch tensors + image_kwargs = {**kwargs} + if "return_tensors" not in image_kwargs: + image_kwargs["return_tensors"] = "pt" + pixel_values = self.image_processor(images, **image_kwargs) + else: + pixel_values = None + + text_inputs = self.tokenizer(text, **kwargs) if text is not None else None + + if pixel_values is not None and text_inputs is not None: + text_inputs["pixel_values"] = pixel_values["pixel_values"] + return text_inputs + elif pixel_values is not None: + return pixel_values + else: + return text_inputs + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to Qwen2TokenizerFast's batch_decode. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to Qwen2TokenizerFast's decode. + """ + return self.tokenizer.decode(*args, **kwargs) + + @property + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +AutoProcessor.register("BagelProcessor", BagelProcessor) diff --git a/vllm/utils/deep_gemm.py b/vllm/utils/deep_gemm.py index 46be3e2cd5c5..3d4f8449ad3b 100644 --- a/vllm/utils/deep_gemm.py +++ b/vllm/utils/deep_gemm.py @@ -381,22 +381,6 @@ def should_use_deepgemm_for_fp8_linear( ) -def should_use_deepgemm_for_fp8_linear_for_nk( - output_dtype: torch.dtype, - shape0: int, - shape1: int, - supports_deep_gemm: bool | None = None, -): - if supports_deep_gemm is None: - supports_deep_gemm = is_deep_gemm_supported() - return ( - supports_deep_gemm - and output_dtype == torch.bfloat16 - and shape0 % 128 == 0 - and shape1 % 128 == 0 - ) - - __all__ = [ "calc_diff", "DeepGemmQuantScaleFMT", @@ -411,7 +395,6 @@ def 
should_use_deepgemm_for_fp8_linear_for_nk( "is_deep_gemm_supported", "get_num_sms", "should_use_deepgemm_for_fp8_linear", - "should_use_deepgemm_for_fp8_linear_for_nk", "get_col_major_tma_aligned_tensor", "get_mk_alignment_for_contiguous_layout", ] diff --git a/vllm/utils/torch_utils.py b/vllm/utils/torch_utils.py index edcb79fbc9cd..c97efce312b5 100644 --- a/vllm/utils/torch_utils.py +++ b/vllm/utils/torch_utils.py @@ -194,33 +194,12 @@ def get_kv_cache_torch_dtype( return torch_dtype -def get_kv_cache_quant_algo_dtype(quant_cfg: dict[str, Any]) -> torch.dtype | None: - quant_method = quant_cfg.get("quant_method", "") - if quant_method.startswith("modelopt"): - quantization_inner = quant_cfg.get("quantization", quant_cfg) - # Check if quant config is specified and use kv cache quant algo - kv_algo = quantization_inner.get("kv_cache_quant_algo") or quant_cfg.get( - "kv_cache_quant_algo" - ) - if isinstance(kv_algo, str): - return STR_DTYPE_TO_TORCH_DTYPE[kv_algo.lower()] - return None - - def kv_cache_dtype_str_to_dtype( kv_cache_dtype: str, model_config: ModelConfig ) -> torch.dtype: - # Model config may not be specified for unit tests, default to float16 - dtype = model_config.dtype if model_config else torch.half if kv_cache_dtype == "auto": - hf_cfg = getattr(model_config, "hf_config", None) - if hf_cfg is not None: - quant_cfg = getattr(hf_cfg, "quantization_config", None) - if quant_cfg is not None: - kv_algo_dtype = get_kv_cache_quant_algo_dtype(quant_cfg) - return kv_algo_dtype if kv_algo_dtype is not None else dtype - return dtype - + # Model config may not be specified for unit tests, default to float16 + return model_config.dtype if model_config else torch.half return STR_DTYPE_TO_TORCH_DTYPE[kv_cache_dtype] diff --git a/vllm/v1/attention/backends/gdn_attn.py b/vllm/v1/attention/backends/gdn_attn.py index 3a2f92d9921c..ace2cbb0564c 100644 --- a/vllm/v1/attention/backends/gdn_attn.py +++ b/vllm/v1/attention/backends/gdn_attn.py @@ -211,7 +211,7 @@ def 
build( # type: ignore[override] spec_token_masks = torch.repeat_interleave( spec_sequence_masks, query_lens ) - index = torch.argsort(spec_token_masks) + index = torch.argsort(spec_token_masks, stable=True) num_non_spec_tokens = num_prefill_tokens + num_decode_tokens non_spec_token_indx = index[:num_non_spec_tokens] spec_token_indx = index[num_non_spec_tokens:] diff --git a/vllm/v1/attention/backends/utils.py b/vllm/v1/attention/backends/utils.py index da43d8703823..1cbe929fc57a 100644 --- a/vllm/v1/attention/backends/utils.py +++ b/vllm/v1/attention/backends/utils.py @@ -201,10 +201,11 @@ def _make_metadata_with_slice( ) # NOTE: last token can be outside of the last request if we have CG padding. - # If the "middle" request has tokens in both ubatches, we have to split it. - # If ubatch_slice is the first ubatch then we will be splitting the last - # request. If it's the second microbatch, then we will be splitting the - # first request + # If the request is split across ubatches, we have to adjust the metadata. + # splits_first_request: The first request in this slice is the continuation of + # a request that started in a previous slice. + # splits_last_request: The last request in this slice continues into the + # next slice. splits_first_request = first_tok > start_locs[first_req] splits_last_request = last_tok < start_locs[last_req + 1] - 1 @@ -225,7 +226,10 @@ def _make_metadata_with_slice( seq_lens_cpu = attn_metadata.seq_lens_cpu[request_slice] if splits_last_request: - tokens_skipped = query_start_loc_cpu[-1] - token_slice.stop + # NOTE: We use start_locs (the original query_start_loc_cpu) to calculate + # the tokens skipped because query_start_loc_cpu might have been modified + # if splits_first_request is True. 
+ tokens_skipped = start_locs[last_req + 1] - token_slice.stop query_start_loc[-1] -= tokens_skipped query_start_loc_cpu[-1] -= tokens_skipped diff --git a/vllm/v1/kv_offload/cpu.py b/vllm/v1/kv_offload/cpu.py index 2f2e85c0ff33..e1cf7b14a785 100644 --- a/vllm/v1/kv_offload/cpu.py +++ b/vllm/v1/kv_offload/cpu.py @@ -13,7 +13,7 @@ from vllm.v1.kv_offload.lru_manager import LRUOffloadingManager from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec from vllm.v1.kv_offload.spec import OffloadingSpec -from vllm.v1.kv_offload.worker.cpu_gpu import CpuGpuOffloadingHandler +from vllm.v1.kv_offload.worker.cpu_gpu import CpuGpuOffloadingHandlers from vllm.v1.kv_offload.worker.worker import OffloadingHandler @@ -32,7 +32,7 @@ def __init__(self, vllm_config: VllmConfig): self._manager: OffloadingManager | None = None # worker-side - self._handler: OffloadingHandler | None = None + self._handlers: CpuGpuOffloadingHandlers | None = None self.eviction_policy: str = self.extra_config.get("eviction_policy", "lru") @@ -67,13 +67,13 @@ def get_handlers( kv_caches: dict[str, torch.Tensor], attn_backends: dict[str, type[AttentionBackend]], ) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]: - if not self._handler: + if not self._handlers: if not current_platform.is_cuda_alike(): raise Exception( "CPU Offloading is currently only supported on CUDA-alike GPUs" ) - self._handler = CpuGpuOffloadingHandler( + self._handlers = CpuGpuOffloadingHandlers( attn_backends=attn_backends, gpu_block_size=self.gpu_block_size, cpu_block_size=self.offloaded_block_size, @@ -81,6 +81,6 @@ def get_handlers( gpu_caches=kv_caches, ) - assert self._handler is not None - yield GPULoadStoreSpec, CPULoadStoreSpec, self._handler - yield CPULoadStoreSpec, GPULoadStoreSpec, self._handler + assert self._handlers is not None + yield GPULoadStoreSpec, CPULoadStoreSpec, self._handlers.gpu_to_cpu_handler + yield CPULoadStoreSpec, GPULoadStoreSpec, 
self._handlers.cpu_to_gpu_handler diff --git a/vllm/v1/kv_offload/worker/cpu_gpu.py b/vllm/v1/kv_offload/worker/cpu_gpu.py index 461458c1f6ce..42ae4f1413ad 100644 --- a/vllm/v1/kv_offload/worker/cpu_gpu.py +++ b/vllm/v1/kv_offload/worker/cpu_gpu.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from collections import deque import numpy as np import torch @@ -8,7 +9,7 @@ from vllm.attention.backends.abstract import AttentionBackend from vllm.logger import init_logger from vllm.utils.platform_utils import is_pin_memory_available -from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec +from vllm.v1.kv_offload.mediums import BlockIDsLoadStoreSpec from vllm.v1.kv_offload.worker.worker import ( OffloadingHandler, TransferResult, @@ -51,7 +52,123 @@ def expand_block_ids( output_idx = output_end_idx -class CpuGpuOffloadingHandler(OffloadingHandler): +class SingleDirectionOffloadingHandler(OffloadingHandler): + """ + SingleDirectionOffloadingHandler handles transfers for a single direction, + either CPU->GPU or GPU->CPU. + Transfers are guaranteed to be executed in order of their submission. + Each transfer uses a unique CUDA stream, and its stream will start + executing only after the streams of previous transfers have finished. + """ + + def __init__( + self, + src_tensors: list[torch.Tensor], + dst_tensors: list[torch.Tensor], + kv_dim_before_num_blocks: list[bool], + src_block_size_factor: int, + dst_block_size_factor: int, + priority: int, + ): + """ + Initialize a SingleDirectionOffloadingHandler. + + Args: + src_tensors: list of KV cache tensors to copy from. + dst_tensors: list of KV cache tensors to copy to. + Order should match src_tensors. + kv_dim_before_num_blocks: list of bools, indicating + whether the respective KV cache tensor has a KV + dimension before its num_blocks dimension. + e.g. (2, num_blocks, ...) 
+ src_block_size_factor: The number of kernel blocks + per KV block in a source tensor. + dst_block_size_factor: The number of kernel blocks + per KV block in a destination tensor. + priority: The priority of the backing CUDA streams. + Lower numbers indicate higher priority. + """ + assert len(src_tensors) == len(dst_tensors) == len(kv_dim_before_num_blocks) + + self.src_tensors: list[torch.Tensor] = src_tensors + self.dst_tensors: list[torch.Tensor] = dst_tensors + self.kv_dim_before_num_blocks: list[bool] = kv_dim_before_num_blocks + self.src_block_size_factor: int = src_block_size_factor + self.dst_block_size_factor: int = dst_block_size_factor + self.priority = priority + + # queue of transfers (job_id, stream, event) + self._transfers: deque[tuple[int, torch.cuda.Stream, torch.Event]] = deque() + # list of CUDA streams available for re-use + self._stream_pool: list[torch.cuda.Stream] = [] + # list of CUDA events available for re-use + self._event_pool: list[torch.Event] = [] + + def transfer_async(self, job_id: int, transfer_spec: TransferSpec) -> bool: + src_spec, dst_spec = transfer_spec + assert isinstance(src_spec, BlockIDsLoadStoreSpec) + assert isinstance(dst_spec, BlockIDsLoadStoreSpec) + + src_blocks = src_spec.block_ids + dst_blocks = dst_spec.block_ids + assert src_blocks.ndim == 1 + assert dst_blocks.ndim == 1 + + src_sub_block_count = src_blocks.size * self.src_block_size_factor + dst_sub_block_count = dst_blocks.size * self.dst_block_size_factor + src_sub_blocks_to_skip = -dst_blocks.size % self.src_block_size_factor + + assert dst_sub_block_count == src_sub_block_count - src_sub_blocks_to_skip + + src_to_dst = np.empty((dst_sub_block_count, 2), dtype=np.int64) + expand_block_ids( + src_blocks, + self.src_block_size_factor, + src_to_dst[:, 0], + skip_count=src_sub_blocks_to_skip, + ) + expand_block_ids(dst_blocks, self.dst_block_size_factor, src_to_dst[:, 1]) + src_to_dst_tensor = torch.from_numpy(src_to_dst) + + stream = ( + 
self._stream_pool.pop() + if self._stream_pool + else torch.cuda.Stream(priority=self.priority) + ) + event = self._event_pool.pop() if self._event_pool else torch.Event() + if self._transfers: + _, _, last_event = self._transfers[-1] + # assure job will start only after the previous one completes + stream.wait_event(last_event) + with torch.cuda.stream(stream): + for src_tensor, dst_tensor, kv_dim in zip( + self.src_tensors, self.dst_tensors, self.kv_dim_before_num_blocks + ): + if kv_dim: + src_key_cache, src_value_cache = src_tensor + dst_key_cache, dst_value_cache = dst_tensor + ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst_tensor) + ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst_tensor) + else: + ops.swap_blocks(src_tensor, dst_tensor, src_to_dst_tensor) + event.record(stream) + + self._transfers.append((job_id, stream, event)) + + # success + return True + + def get_finished(self) -> list[TransferResult]: + results: list[TransferResult] = [] + while self._transfers and self._transfers[0][2].query(): + job_id, stream, event = self._transfers.popleft() + results.append((job_id, True)) + self._stream_pool.append(stream) + self._event_pool.append(event) + return results + + +class CpuGpuOffloadingHandlers: def __init__( self, gpu_block_size: int, @@ -60,27 +177,20 @@ def __init__( gpu_caches: dict[str, torch.Tensor], attn_backends: dict[str, type[AttentionBackend]], ): + assert gpu_caches assert cpu_block_size % gpu_block_size == 0 - self.block_size_factor = cpu_block_size // gpu_block_size - - # cuda streams for gpu->cpu and cpu->gpu - self.d2h_stream = torch.cuda.Stream() - self.h2d_stream = torch.cuda.Stream() - - # job_id -> transfer cuda event - self.transfer_events: dict[int, torch.Event] = {} - # list of cuda events available for re-use - self.events_pool: list[torch.Event] = [] + block_size_factor = cpu_block_size // gpu_block_size pin_memory = is_pin_memory_available() # allocate cpu tensors logger.info("Allocating %d CPU 
tensors...", len(gpu_caches)) - self.gpu_tensors: list[torch.Tensor] = [] - self.cpu_tensors: list[torch.Tensor] = [] - self.kv_dim_before_num_blocks: list[bool] = [] + gpu_tensors: list[torch.Tensor] = [] + cpu_tensors: list[torch.Tensor] = [] + kv_dim_before_num_blocks: list[bool] = [] + kernel_block_size: int | None = None for layer_name, gpu_tensor in gpu_caches.items(): - self.gpu_tensors.append(gpu_tensor) + gpu_tensors.append(gpu_tensor) gpu_shape = gpu_tensor.shape attn_backend = attn_backends[layer_name] @@ -88,16 +198,21 @@ def __init__( num_blocks=1234, block_size=16, num_kv_heads=8, head_size=256 ) + has_layers_dim = False if len(gpu_shape) != len(test_shape): # cross-layers tensor # shape is (num_blocks, ...) assert len(gpu_shape) == len(test_shape) + 1 num_blocks_idx = 0 - self.kv_dim_before_num_blocks.append(False) + has_layers_dim = True + kv_dim_before_num_blocks.append(False) + + # prepend a dummy num_layers=80 to test_shape + test_shape = (80,) + test_shape elif test_shape[0] == 1234: # shape is (num_blocks, ...) num_blocks_idx = 0 - self.kv_dim_before_num_blocks.append(False) + kv_dim_before_num_blocks.append(False) else: # shape should be (2, num_blocks, ...) 
assert test_shape[0] == 2 @@ -105,13 +220,32 @@ def __init__( assert gpu_shape[0] == 2 num_blocks_idx = 1 - self.kv_dim_before_num_blocks.append(True) + kv_dim_before_num_blocks.append(True) + + try: + kv_cache_stride_order = attn_backend.get_kv_cache_stride_order( + include_num_layers_dimension=has_layers_dim + ) + assert len(kv_cache_stride_order) == len(gpu_shape) + except (AttributeError, NotImplementedError): + kv_cache_stride_order = tuple(range(len(gpu_shape))) + + # permute test_shape according to stride_order + test_shape = tuple(test_shape[i] for i in kv_cache_stride_order) + + # find block_size (16) dimension index + block_size_idx = test_shape.index(16) + if kernel_block_size is not None: + assert kernel_block_size == gpu_shape[block_size_idx] + else: + kernel_block_size = gpu_shape[block_size_idx] + assert gpu_block_size % kernel_block_size == 0 cpu_shape = list(gpu_shape) - cpu_shape[num_blocks_idx] = num_cpu_blocks * self.block_size_factor + cpu_shape[num_blocks_idx] = num_cpu_blocks * block_size_factor logger.debug("Allocating CPU tensor of shape %r", cpu_shape) - self.cpu_tensors.append( + cpu_tensors.append( torch.zeros( cpu_shape, dtype=gpu_tensor.dtype, @@ -120,72 +254,27 @@ def __init__( ) ) - def transfer_async(self, job_id: int, spec: TransferSpec) -> bool: - src_spec, dst_spec = spec - if isinstance(src_spec, CPULoadStoreSpec): - assert isinstance(dst_spec, GPULoadStoreSpec) - stream = self.h2d_stream - src_tensors = self.cpu_tensors - dst_tensors = self.gpu_tensors - src_block_size_factor = self.block_size_factor - dst_block_size_factor = 1 - else: - assert isinstance(src_spec, GPULoadStoreSpec) - assert isinstance(dst_spec, CPULoadStoreSpec) - stream = self.d2h_stream - src_tensors = self.gpu_tensors - dst_tensors = self.cpu_tensors - src_block_size_factor = 1 - dst_block_size_factor = self.block_size_factor - - src_blocks = src_spec.block_ids - dst_blocks = dst_spec.block_ids - assert src_blocks.ndim == 1 - assert dst_blocks.ndim == 1 + 
assert kernel_block_size is not None + gpu_block_size_factor = gpu_block_size // kernel_block_size + cpu_block_size_factor = cpu_block_size // kernel_block_size - src_sub_block_count = src_blocks.size * src_block_size_factor - dst_sub_block_count = dst_blocks.size * dst_block_size_factor - src_sub_blocks_to_skip = -dst_blocks.size % src_block_size_factor + # TODO (orozery): adapt swap_blocks to support gpu_block_size_factor + assert gpu_block_size_factor == 1 - assert dst_sub_block_count == src_sub_block_count - src_sub_blocks_to_skip - - src_to_dst = np.empty((dst_sub_block_count, 2), dtype=np.int64) - expand_block_ids( - src_blocks, - src_block_size_factor, - src_to_dst[:, 0], - skip_count=src_sub_blocks_to_skip, + self.gpu_to_cpu_handler = SingleDirectionOffloadingHandler( + src_tensors=gpu_tensors, + dst_tensors=cpu_tensors, + kv_dim_before_num_blocks=kv_dim_before_num_blocks, + src_block_size_factor=gpu_block_size_factor, + dst_block_size_factor=cpu_block_size_factor, + priority=1, ) - expand_block_ids(dst_blocks, dst_block_size_factor, src_to_dst[:, 1]) - src_to_dst_tensor = torch.from_numpy(src_to_dst) - event = self.events_pool.pop() if self.events_pool else torch.Event() - with torch.cuda.stream(stream): - for src_tensor, dst_tensor, kv_dim in zip( - src_tensors, dst_tensors, self.kv_dim_before_num_blocks - ): - if kv_dim: - src_key_cache = src_tensor[0] - dst_key_cache = dst_tensor[0] - ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst_tensor) - src_value_cache = src_tensor[1] - dst_value_cache = dst_tensor[1] - ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst_tensor) - else: - ops.swap_blocks(src_tensor, dst_tensor, src_to_dst_tensor) - event.record(stream) - - self.transfer_events[job_id] = event - - # success - return True - - def get_finished(self) -> list[TransferResult]: - results: list[TransferResult] = [] - for job_id, event in self.transfer_events.items(): - if event.query(): - results.append((job_id, True)) - 
self.events_pool.append(event) - for job_id, _ in results: - del self.transfer_events[job_id] - return results + self.cpu_to_gpu_handler = SingleDirectionOffloadingHandler( + src_tensors=cpu_tensors, + dst_tensors=gpu_tensors, + kv_dim_before_num_blocks=kv_dim_before_num_blocks, + src_block_size_factor=cpu_block_size_factor, + dst_block_size_factor=gpu_block_size_factor, + priority=-1, + ) diff --git a/vllm/v1/structured_output/backend_xgrammar.py b/vllm/v1/structured_output/backend_xgrammar.py index c5e7165026d1..9dd506880389 100644 --- a/vllm/v1/structured_output/backend_xgrammar.py +++ b/vllm/v1/structured_output/backend_xgrammar.py @@ -10,7 +10,7 @@ import vllm.envs from vllm.logger import init_logger from vllm.sampling_params import SamplingParams -from vllm.tokenizers.deepseekv32 import DeepseekV32Tokenizer +from vllm.tokenizers.deepseek_v32 import DeepseekV32Tokenizer from vllm.tokenizers.mistral import MistralTokenizer from vllm.utils.import_utils import LazyLoader from vllm.v1.structured_output.backend_types import ( @@ -268,13 +268,7 @@ def check_object(obj: dict[str, Any]) -> bool: # Unsupported keywords for objects if obj.get("type") == "object" and any( - key in obj - for key in ( - "minProperties", - "maxProperties", - "propertyNames", - "patternProperties", - ) + key in obj for key in ("patternProperties", "propertyNames") ): return True diff --git a/vllm/v1/worker/dp_utils.py b/vllm/v1/worker/dp_utils.py index 1b9646e1980a..82de0cba9194 100644 --- a/vllm/v1/worker/dp_utils.py +++ b/vllm/v1/worker/dp_utils.py @@ -11,7 +11,7 @@ from vllm.logger import init_logger from vllm.v1.worker.ubatch_utils import ( check_ubatch_thresholds, - is_second_ubatch_empty, + is_last_ubatch_empty, ) logger = init_logger(__name__) @@ -56,7 +56,7 @@ def _run_ar( return tensor -def _post_process_ubatch(tensor: torch.Tensor) -> bool: +def _post_process_ubatch(tensor: torch.Tensor, num_ubatches: int) -> bool: orig_num_tokens_tensor = tensor[0, :] padded_num_tokens_tensor = 
tensor[1, :] @@ -68,7 +68,7 @@ def _post_process_ubatch(tensor: torch.Tensor) -> bool: # there are no "empty" second ubatches orig_min_num_tokens = int(orig_num_tokens_tensor.min().item()) padded_max_num_tokens = int(padded_num_tokens_tensor.max().item()) - if is_second_ubatch_empty(orig_min_num_tokens, padded_max_num_tokens): + if is_last_ubatch_empty(orig_min_num_tokens, padded_max_num_tokens, num_ubatches): logger.debug( "Aborting ubatching %s %s", orig_min_num_tokens, padded_max_num_tokens ) @@ -146,7 +146,7 @@ def _synchronize_dp_ranks( assert should_attempt_dp_padding == should_dp_pad # Check conditions for microbatching - should_ubatch = _post_process_ubatch(tensor) + should_ubatch = _post_process_ubatch(tensor, parallel_config.num_ubatches) if should_ubatch and not should_dp_pad: logger.debug_once( diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 978224faae65..1aa2ec6bb655 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -2987,7 +2987,7 @@ def execute_model( cascade_attn_prefix_lens = None # Disable cascade attention when using microbatching (DBO) - if self.cascade_attn_enabled and not self.parallel_config.enable_dbo: + if self.cascade_attn_enabled and not self.parallel_config.use_ubatching: # Pre-compute cascade attention prefix lengths cascade_attn_prefix_lens = self._compute_cascade_attn_prefix_lens( num_scheduled_tokens_np, @@ -3028,6 +3028,13 @@ def execute_model( num_scheduled_tokens_np, num_tokens_padded, num_reqs_padded, + self.parallel_config.num_ubatches, + ) + + logger.debug( + "ubatch_slices: %s, ubatch_slices_padded: %s", + ubatch_slices, + ubatch_slices_padded, ) pad_attn = cudagraph_mode == CUDAGraphMode.FULL @@ -3710,11 +3717,14 @@ def load_model(self, eep_scale_up: bool = False) -> None: # wrap the model with full cudagraph wrapper if needed. 
cudagraph_mode = self.compilation_config.cudagraph_mode assert cudagraph_mode is not None - if cudagraph_mode.has_full_cudagraphs() and not self.parallel_config.enable_dbo: + if ( + cudagraph_mode.has_full_cudagraphs() + and not self.parallel_config.use_ubatching + ): self.model = CUDAGraphWrapper( self.model, self.vllm_config, runtime_mode=CUDAGraphMode.FULL ) - elif self.parallel_config.enable_dbo: + elif self.parallel_config.use_ubatching: if cudagraph_mode.has_full_cudagraphs(): self.model = UBatchWrapper( self.model, self.vllm_config, CUDAGraphMode.FULL, self.device @@ -4095,7 +4105,16 @@ def _dummy_run( batch_desc.num_reqs if batch_desc.num_reqs is not None else num_reqs ) ubatch_slices, ubatch_slices_padded = maybe_create_ubatch_slices( - should_ubatch, num_scheduled_tokens, num_tokens_padded, num_reqs_padded + should_ubatch, + num_scheduled_tokens, + num_tokens_padded, + num_reqs_padded, + self.vllm_config.parallel_config.num_ubatches, + ) + logger.debug( + "ubatch_slices: %s, ubatch_slices_padded: %s", + ubatch_slices, + ubatch_slices_padded, ) attn_metadata: PerLayerAttnMetadata | None = None @@ -4644,7 +4663,7 @@ def _capture_cudagraphs( # is above the threshold. 
Otherwise we just capture a non-ubatched # version of the graph allow_microbatching = ( - self.parallel_config.enable_dbo + self.parallel_config.use_ubatching and cudagraph_runtime_mode == CUDAGraphMode.FULL and uniform_decode and check_ubatch_thresholds( @@ -4779,8 +4798,8 @@ def initialize_metadata_builders( if kv_cache_group_id < len(kernel_block_sizes) else None, num_metadata_builders=1 - if not self.parallel_config.enable_dbo - else 2, + if not self.parallel_config.use_ubatching + else self.parallel_config.num_ubatches, ) # Calculate reorder batch threshold (if needed) # Note (tdoublep): do this *after* constructing builders, diff --git a/vllm/v1/worker/gpu_ubatch_wrapper.py b/vllm/v1/worker/gpu_ubatch_wrapper.py index 2ce2b6451256..af09129e67b1 100644 --- a/vllm/v1/worker/gpu_ubatch_wrapper.py +++ b/vllm/v1/worker/gpu_ubatch_wrapper.py @@ -103,8 +103,10 @@ def __init__( self.vllm_config = vllm_config self.compilation_config = vllm_config.compilation_config self.comm_stream = torch.cuda.Stream(device=device) - # Two ubatch threads plus the main thread - self.ready_barrier = threading.Barrier(3) + # Ubatch threads plus the main thread + self.ready_barrier = threading.Barrier( + self.vllm_config.parallel_config.num_ubatches + 1 + ) self.cudagraphs: dict[int, CUDAGraphMetaData] = {} @@ -309,7 +311,7 @@ def _make_ubatch_metadata( create_forward_context( attn_metadata[i] if attn_metadata is not None else None, self.vllm_config, - dp_metadata=dp_metadata, + dp_metadata=dp_metadata[i], batch_descriptor=batch_descriptor, cudagraph_runtime_mode=cudagraph_runtime_mode, ) @@ -417,18 +419,19 @@ def __call__(self, *args, **kwargs): # We shouldn't be here unless we are running with multiple DP ranks assert dp_metadata is not None - num_tokens_per_ubatch = ( - ubatch_slices[0].token_slice.stop - ubatch_slices[0].token_slice.start - ) - dp_size = self.vllm_config.parallel_config.data_parallel_size - ubatch_num_tokens_across_dp = torch.tensor( - [num_tokens_per_ubatch] * 
dp_size, device="cpu", dtype=torch.int32 - ) - ubatch_dp_metadata = DPMetadata.make( - self.vllm_config.parallel_config, - num_tokens_per_ubatch, - ubatch_num_tokens_across_dp, - ) + ubatch_dp_metadata = [] + for ubatch_slice in ubatch_slices: + dp_size = self.vllm_config.parallel_config.data_parallel_size + ubatch_num_tokens_across_dp = torch.tensor( + [ubatch_slice.num_tokens] * dp_size, device="cpu", dtype=torch.int32 + ) + ubatch_dp_metadata.append( + DPMetadata.make( + self.vllm_config.parallel_config, + ubatch_slice.num_tokens, + ubatch_num_tokens_across_dp, + ) + ) if ( num_tokens not in self.cudagraphs @@ -464,7 +467,7 @@ def __call__(self, *args, **kwargs): intermediate_tensors=intermediate_tensors, inputs_embeds=inputs_embeds, compute_stream=compute_stream, - dp_metadata=dp_metadata, + dp_metadata=ubatch_dp_metadata, batch_descriptor=batch_descriptor, cudagraph_runtime_mode=CUDAGraphMode.NONE, ) diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index c767cbf1c2e6..f30038dd9307 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -931,10 +931,11 @@ def init_worker_distributed_environment( backend: str = "nccl", ) -> None: """Initialize the distributed environment.""" + attention_config = vllm_config.attention_config parallel_config = vllm_config.parallel_config from vllm.model_executor.layers.batch_invariant import init_batch_invariance - init_batch_invariance() + init_batch_invariance(attention_config.backend) set_custom_all_reduce(not parallel_config.disable_custom_all_reduce) init_method = distributed_init_method or "env://" diff --git a/vllm/v1/worker/ubatch_utils.py b/vllm/v1/worker/ubatch_utils.py index 44788476fc9c..f6889173578d 100644 --- a/vllm/v1/worker/ubatch_utils.py +++ b/vllm/v1/worker/ubatch_utils.py @@ -27,14 +27,16 @@ def num_tokens(self) -> int: UBatchSlices: TypeAlias = list[UBatchSlice] -def is_second_ubatch_empty(orig_num_tokens: int, padded_num_tokens: int) -> bool: - return 
(padded_num_tokens // 2) >= orig_num_tokens +def is_last_ubatch_empty( + orig_num_tokens: int, padded_num_tokens: int, num_ubatches: int +) -> bool: + return (padded_num_tokens // num_ubatches) * (num_ubatches - 1) >= orig_num_tokens def check_ubatch_thresholds( config: ParallelConfig, num_tokens: int, uniform_decode: bool ) -> bool: - if not config.enable_dbo: + if not config.use_ubatching: return False if uniform_decode: return num_tokens >= config.dbo_decode_token_threshold @@ -42,21 +44,17 @@ def check_ubatch_thresholds( return num_tokens >= config.dbo_prefill_token_threshold -# This just pads the second ubatch slice out to the total number of tokens +# This pads the last ubatch slice out to the total number of tokens # (num_tokens + padding) since we do `create_ubatch_slices` before applying DP padding. def _pad_out_ubatch_slices( ubatch_slices: UBatchSlices, num_total_tokens: int, num_reqs_padded: int ) -> UBatchSlices: - # TODO(lucas): handle empty second ubatch - padded_second_request_slice = slice( - ubatch_slices[1].request_slice.start, num_reqs_padded - ) - padded_second_token_slice = slice( - ubatch_slices[1].token_slice.start, num_total_tokens - ) - return [ - ubatch_slices[0], - UBatchSlice(padded_second_request_slice, padded_second_token_slice), + last_slice = ubatch_slices[-1] + padded_last_request_slice = slice(last_slice.request_slice.start, num_reqs_padded) + padded_last_token_slice = slice(last_slice.token_slice.start, num_total_tokens) + + return ubatch_slices[:-1] + [ + UBatchSlice(padded_last_request_slice, padded_last_token_slice) ] @@ -65,40 +63,45 @@ def maybe_create_ubatch_slices( num_scheduled_tokens: np.ndarray, num_tokens_padded: int, num_reqs_padded: int, - split_point: int | None = None, + num_ubatches: int, + split_point: list[int] | int | None = None, ) -> tuple[UBatchSlices | None, UBatchSlices | None]: if not should_ubatch: return None, None if split_point is None: - split_point = int(num_tokens_padded) // 2 + split_point = 
int(num_tokens_padded) // num_ubatches + + token_split_points = [split_point * i for i in range(1, num_ubatches)] # TODO(lucas): Refactor the gpu_model_runner.py so we can pass # in cu_num_tokens directly (i.e. query_start_loc) cu_num_tokens = np.zeros(len(num_scheduled_tokens) + 1, dtype=np.int32) np.cumsum(num_scheduled_tokens, dtype=np.int32, out=cu_num_tokens[1:]) - first_ubatch_token_slice = slice(0, split_point) - second_ubatch_token_slice = slice(split_point, cu_num_tokens[-1]) + ubatch_slices = [] + start_token = 0 - # Determine request slices using exclusive stop semantics - # First ubatch includes requests whose tokens overlap [0, split_point) - first_ubatch_req_stop = int( - np.searchsorted(cu_num_tokens, split_point, side="left") - ) - first_ubatch_req_slice = slice(0, first_ubatch_req_stop) + # Add the end point to the split points to make iteration easier + all_points = token_split_points + [cu_num_tokens[-1]] - # Second ubatch starts at the request that contains the split_point - # or the request starting exactly at split_point (if on boundary) - second_ubatch_req_start = int( - np.searchsorted(cu_num_tokens, split_point, side="right") - 1 - ) - second_ubatch_req_slice = slice(second_ubatch_req_start, len(cu_num_tokens) - 1) + for end_token in all_points: + token_slice = slice(start_token, end_token) - ubatch_slices = [ - UBatchSlice(first_ubatch_req_slice, first_ubatch_token_slice), - UBatchSlice(second_ubatch_req_slice, second_ubatch_token_slice), - ] + # Determine request slices using exclusive stop semantics + # Ubatch includes requests whose tokens overlap [start_token, end_token) + + # Start at the request that contains the start_token + # or the request starting exactly at start_token (if on boundary) + req_start = int(np.searchsorted(cu_num_tokens, start_token, side="right") - 1) + + # Stop at the request that starts at or after end_token + req_stop = int(np.searchsorted(cu_num_tokens, end_token, side="left")) + + req_slice = slice(req_start, 
req_stop) + ubatch_slices.append(UBatchSlice(req_slice, token_slice)) + + start_token = end_token ubatch_slices_padded = _pad_out_ubatch_slices( ubatch_slices, num_tokens_padded, num_reqs_padded diff --git a/vllm/v1/worker/ubatching.py b/vllm/v1/worker/ubatching.py index be8326e2fdbc..e7a947f2ea8c 100644 --- a/vllm/v1/worker/ubatching.py +++ b/vllm/v1/worker/ubatching.py @@ -7,10 +7,15 @@ from vllm import forward_context from vllm.forward_context import ForwardContext +from vllm.logger import init_logger from vllm.utils.torch_utils import current_stream +logger = init_logger(__name__) + _THREAD_ID_TO_CONTEXT: dict = {} -_CURRENT_CONTEXTS: list[Optional["UBatchContext"]] = [None, None] +# Here we hardcode the number of microbatches to 2 for default. +_NUM_UBATCHES: int = 2 +_CURRENT_CONTEXTS: list[Optional["UBatchContext"]] = [] class UBatchContext: @@ -48,6 +53,7 @@ def __enter__(self): global _CURRENT_CONTEXTS, _THREAD_ID_TO_CONTEXT _THREAD_ID_TO_CONTEXT[threading.get_ident()] = self.id _CURRENT_CONTEXTS[self.id] = self + # _NUM_UBATCHES is set in make_ubatch_contexts self.ready_barrier.wait() self.cpu_wait_event.wait() @@ -181,7 +187,7 @@ def wrapper(*args, **kwargs): def dbo_register_recv_hook(recv_hook): if len(_THREAD_ID_TO_CONTEXT) > 0: ctx_idx = _THREAD_ID_TO_CONTEXT[threading.get_ident()] - next_ctx = _CURRENT_CONTEXTS[(ctx_idx + 1) % 2] + next_ctx = _CURRENT_CONTEXTS[(ctx_idx + 1) % _NUM_UBATCHES] next_ctx.recv_hook = recv_hook @@ -202,7 +208,14 @@ def make_ubatch_contexts( ready_barrier: threading.Barrier, schedule: str = "default", ) -> list[UBatchContext]: - assert num_micro_batches == 2, "only been tested with 2 micro-batches" + global _NUM_UBATCHES, _CURRENT_CONTEXTS + assert num_micro_batches > 1, "num_micro_batches must be greater than 1" + + _NUM_UBATCHES = num_micro_batches + # Ensure the global context list is large enough + if len(_CURRENT_CONTEXTS) < num_micro_batches: + _CURRENT_CONTEXTS.extend([None] * (num_micro_batches - 
len(_CURRENT_CONTEXTS))) + """ Create a context manager for micro-batching synchronization. """ @@ -210,8 +223,6 @@ def make_ubatch_contexts( gpu_comm_done_events = [torch.Event() for _ in range(num_micro_batches)] gpu_compute_done_events = [torch.Event() for _ in range(num_micro_batches)] - assert len(forward_contexts) == 2 - ctxs = [] for i in range(num_micro_batches): ctx = UBatchContext( From 6c9afb3e04af515185663038427cf9f7991d6429 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Tue, 16 Dec 2025 20:20:18 +0000 Subject: [PATCH 22/73] nits: revise function name and comments Signed-off-by: Yifan Qiao --- vllm/v1/core/kv_cache_coordinator.py | 7 ++++--- vllm/v1/core/kv_cache_manager.py | 2 +- vllm/v1/core/single_type_kv_cache_manager.py | 11 +++++++++-- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/vllm/v1/core/kv_cache_coordinator.py b/vllm/v1/core/kv_cache_coordinator.py index a6324dc4397e..e1d1eed17cc7 100644 --- a/vllm/v1/core/kv_cache_coordinator.py +++ b/vllm/v1/core/kv_cache_coordinator.py @@ -109,7 +109,7 @@ def get_num_blocks_to_allocate( ) return num_blocks_to_allocate - def save_new_computed_blocks( + def allocate_new_computed_blocks( self, request_id: str, new_computed_blocks: tuple[Sequence[KVCacheBlock], ...], @@ -117,7 +117,8 @@ def save_new_computed_blocks( num_external_computed_tokens: int, ) -> None: """ - Add the new computed blocks to the request. + Add the new computed blocks to the request. Optionally allocate new + blocks for external computed tokens (if any). Args: request_id: The request ID. @@ -127,7 +128,7 @@ def save_new_computed_blocks( num_external_computed_tokens: The number of external computed tokens. 
""" for i, manager in enumerate(self.single_type_managers): - manager.save_new_computed_blocks( + manager.allocate_new_computed_blocks( request_id, new_computed_blocks[i], num_local_computed_tokens, diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 4aa748e11772..dc08894e4e77 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -338,7 +338,7 @@ def allocate_slots( ): # Append the new computed blocks to the request blocks until now to # avoid the case where the new blocks cannot be allocated. - self.coordinator.save_new_computed_blocks( + self.coordinator.allocate_new_computed_blocks( request_id=request.request_id, new_computed_blocks=new_computed_block_list, num_local_computed_tokens=num_local_computed_tokens, diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index f8aa90d01b90..67718621663b 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -141,7 +141,7 @@ def get_num_blocks_to_allocate( ) return num_new_blocks + num_evictable_blocks - def save_new_computed_blocks( + def allocate_new_computed_blocks( self, request_id: str, new_computed_blocks: Sequence[KVCacheBlock], @@ -149,12 +149,19 @@ def save_new_computed_blocks( num_external_computed_tokens: int, ) -> None: """ - Add the new computed blocks to the request. + Add the new computed blocks to the request. This involves three steps: + 1. Touch the computed blocks to make sure they won't be evicted. + 1.5. (Optional) For sliding window, skip blocks are padded with null blocks. + 2. Add the remaining computed blocks. + 3. (Optional) For KV connectors, allocate new blocks for external computed + tokens (if any). Args: request_id: The request ID. new_computed_blocks: The new computed blocks just hitting the prefix cache. + num_local_computed_tokens: The number of local computed tokens. 
+ num_external_computed_tokens: The number of external computed tokens. """ if request_id in self.num_cached_block: From 00ac78e09a931d226fbc11b1ebdba10f6fe887b4 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Tue, 16 Dec 2025 14:28:34 -0500 Subject: [PATCH 23/73] [CI] Generalize gsm8k test args and add Qwen3-Next MTP B200 test (#30723) Signed-off-by: mgoin --- .buildkite/test-pipeline.yaml | 4 +- tests/evals/gsm8k/README.md | 13 ++-- .../DeepSeek-V2-Lite-Instruct-FP8.yaml | 3 +- .../Llama-3-8B-Instruct-nonuniform-CT.yaml | 2 +- .../Llama-3.2-1B-Instruct-INT8-CT.yaml | 2 +- .../gsm8k/configs/Qwen1.5-MoE-W4A16-CT.yaml | 2 +- .../Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml | 2 +- tests/evals/gsm8k/configs/Qwen3-0.6B-FP8.yaml | 2 +- .../gsm8k/configs/Qwen3-30B-A3B-NVFP4.yaml | 3 +- .../configs/Qwen3-Next-80B-A3B-NVFP4-EP2.yaml | 12 ++++ .../evals/gsm8k/configs/models-blackwell.txt | 1 + tests/evals/gsm8k/conftest.py | 8 +-- tests/evals/gsm8k/test_gsm8k_correctness.py | 70 +++++++++++-------- .../compressed_tensors_moe.py | 11 +-- 14 files changed, 78 insertions(+), 57 deletions(-) create mode 100644 tests/evals/gsm8k/configs/Qwen3-Next-80B-A3B-NVFP4-EP2.yaml diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 9d0b3fdd3a02..8e6d32f71f22 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -654,7 +654,7 @@ steps: - vllm/model_executor/layers/quantization autorun_on_main: true commands: - - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt --tp-size=1 + - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt - label: OpenAI API correctness # 22min timeout_in_minutes: 30 @@ -1064,7 +1064,7 @@ steps: - csrc/ - vllm/model_executor/layers/quantization commands: - - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-blackwell.txt --tp-size=1 + - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py 
--config-list-file=configs/models-blackwell.txt ##### 1 GPU test ##### ##### multi gpus test ##### diff --git a/tests/evals/gsm8k/README.md b/tests/evals/gsm8k/README.md index 29c5199e1e87..dcbfd85bfeee 100644 --- a/tests/evals/gsm8k/README.md +++ b/tests/evals/gsm8k/README.md @@ -7,9 +7,8 @@ This directory contains a replacement for the lm-eval-harness GSM8K evaluation, ### Run tests with pytest (like buildkite) ```bash -pytest -s -v tests/gsm8k/test_gsm8k_correctness.py \ - --config-list-file=configs/models-small.txt \ - --tp-size=1 +pytest -s -v tests/evals/gsm8k/test_gsm8k_correctness.py \ + --config-list-file=configs/models-small.txt ``` ### Run standalone evaluation script @@ -31,5 +30,11 @@ model_name: "Qwen/Qwen2.5-1.5B-Instruct" accuracy_threshold: 0.54 # Minimum expected accuracy num_questions: 1319 # Number of questions (default: full test set) num_fewshot: 5 # Few-shot examples from train set -max_model_len: 4096 # Model context length +server_args: "--max-model-len 4096 --tensor-parallel-size 2" # Server arguments +env: # Environment variables (optional) + VLLM_USE_FLASHINFER_MOE_FP4: "1" ``` + +The `server_args` field accepts any arguments that can be passed to `vllm serve`. + +The `env` field accepts a dictionary of environment variables to set for the server process. 
diff --git a/tests/evals/gsm8k/configs/DeepSeek-V2-Lite-Instruct-FP8.yaml b/tests/evals/gsm8k/configs/DeepSeek-V2-Lite-Instruct-FP8.yaml index 7ec6a1e0be27..72fa7e8a38c7 100644 --- a/tests/evals/gsm8k/configs/DeepSeek-V2-Lite-Instruct-FP8.yaml +++ b/tests/evals/gsm8k/configs/DeepSeek-V2-Lite-Instruct-FP8.yaml @@ -2,5 +2,4 @@ model_name: "RedHatAI/DeepSeek-Coder-V2-Lite-Instruct-FP8" accuracy_threshold: 0.72 num_questions: 1319 num_fewshot: 5 -max_model_len: 4096 - +server_args: "--enforce-eager --max-model-len 4096" diff --git a/tests/evals/gsm8k/configs/Llama-3-8B-Instruct-nonuniform-CT.yaml b/tests/evals/gsm8k/configs/Llama-3-8B-Instruct-nonuniform-CT.yaml index caa0448f23d4..b7b59e9dcd5c 100644 --- a/tests/evals/gsm8k/configs/Llama-3-8B-Instruct-nonuniform-CT.yaml +++ b/tests/evals/gsm8k/configs/Llama-3-8B-Instruct-nonuniform-CT.yaml @@ -2,4 +2,4 @@ model_name: "nm-testing/Meta-Llama-3-8B-Instruct-nonuniform-test" accuracy_threshold: 0.74 num_questions: 1319 num_fewshot: 5 -max_model_len: 4096 \ No newline at end of file +server_args: "--enforce-eager --max-model-len 4096" diff --git a/tests/evals/gsm8k/configs/Llama-3.2-1B-Instruct-INT8-CT.yaml b/tests/evals/gsm8k/configs/Llama-3.2-1B-Instruct-INT8-CT.yaml index 615aa69a2d2b..8b3c9ff645e8 100644 --- a/tests/evals/gsm8k/configs/Llama-3.2-1B-Instruct-INT8-CT.yaml +++ b/tests/evals/gsm8k/configs/Llama-3.2-1B-Instruct-INT8-CT.yaml @@ -2,4 +2,4 @@ model_name: "RedHatAI/Llama-3.2-1B-Instruct-quantized.w8a8" accuracy_threshold: 0.31 num_questions: 1319 num_fewshot: 5 -max_model_len: 4096 \ No newline at end of file +server_args: "--enforce-eager --max-model-len 4096" diff --git a/tests/evals/gsm8k/configs/Qwen1.5-MoE-W4A16-CT.yaml b/tests/evals/gsm8k/configs/Qwen1.5-MoE-W4A16-CT.yaml index 9297bf6ddf2d..4a1b1948acac 100644 --- a/tests/evals/gsm8k/configs/Qwen1.5-MoE-W4A16-CT.yaml +++ b/tests/evals/gsm8k/configs/Qwen1.5-MoE-W4A16-CT.yaml @@ -2,4 +2,4 @@ model_name: "nm-testing/Qwen1.5-MoE-A2.7B-Chat-quantized.w4a16" 
accuracy_threshold: 0.45 num_questions: 1319 num_fewshot: 5 -max_model_len: 4096 +server_args: "--enforce-eager --max-model-len 4096" diff --git a/tests/evals/gsm8k/configs/Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml b/tests/evals/gsm8k/configs/Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml index 5319ada30f64..5ce3af8be346 100644 --- a/tests/evals/gsm8k/configs/Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml +++ b/tests/evals/gsm8k/configs/Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml @@ -2,4 +2,4 @@ model_name: "RedHatAI/Qwen2.5-VL-3B-Instruct-FP8-Dynamic" accuracy_threshold: 0.60 num_questions: 1319 num_fewshot: 5 -max_model_len: 4096 \ No newline at end of file +server_args: "--enforce-eager --max-model-len 4096" diff --git a/tests/evals/gsm8k/configs/Qwen3-0.6B-FP8.yaml b/tests/evals/gsm8k/configs/Qwen3-0.6B-FP8.yaml index c39fb979d98a..5452ebe753f0 100644 --- a/tests/evals/gsm8k/configs/Qwen3-0.6B-FP8.yaml +++ b/tests/evals/gsm8k/configs/Qwen3-0.6B-FP8.yaml @@ -2,4 +2,4 @@ model_name: "Qwen/Qwen3-0.6B-FP8" accuracy_threshold: 0.375 num_questions: 1319 num_fewshot: 5 -max_model_len: 4096 \ No newline at end of file +server_args: "--enforce-eager --max-model-len 4096" diff --git a/tests/evals/gsm8k/configs/Qwen3-30B-A3B-NVFP4.yaml b/tests/evals/gsm8k/configs/Qwen3-30B-A3B-NVFP4.yaml index 6b7bdd1e65bb..f162aa8bfe5b 100644 --- a/tests/evals/gsm8k/configs/Qwen3-30B-A3B-NVFP4.yaml +++ b/tests/evals/gsm8k/configs/Qwen3-30B-A3B-NVFP4.yaml @@ -2,5 +2,4 @@ model_name: "nvidia/Qwen3-30B-A3B-FP4" accuracy_threshold: 0.89 num_questions: 1319 num_fewshot: 5 -max_model_len: 4096 - +server_args: "--enforce-eager --max-model-len 4096" diff --git a/tests/evals/gsm8k/configs/Qwen3-Next-80B-A3B-NVFP4-EP2.yaml b/tests/evals/gsm8k/configs/Qwen3-Next-80B-A3B-NVFP4-EP2.yaml new file mode 100644 index 000000000000..673b473f817e --- /dev/null +++ b/tests/evals/gsm8k/configs/Qwen3-Next-80B-A3B-NVFP4-EP2.yaml @@ -0,0 +1,12 @@ +model_name: "nm-testing/Qwen3-Next-80B-A3B-Instruct-NVFP4" +accuracy_threshold: 0.75 
+num_questions: 1319 +num_fewshot: 5 +server_args: >- + --enforce-eager + --max-model-len 4096 + --tensor-parallel-size 2 + --enable-expert-parallel + --speculative-config '{"method":"qwen3_next_mtp","num_speculative_tokens":1}' +env: + VLLM_USE_FLASHINFER_MOE_FP4: "1" diff --git a/tests/evals/gsm8k/configs/models-blackwell.txt b/tests/evals/gsm8k/configs/models-blackwell.txt index 3c9b1084de7b..39978aa6ffbe 100644 --- a/tests/evals/gsm8k/configs/models-blackwell.txt +++ b/tests/evals/gsm8k/configs/models-blackwell.txt @@ -3,3 +3,4 @@ Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml Qwen1.5-MoE-W4A16-CT.yaml DeepSeek-V2-Lite-Instruct-FP8.yaml Qwen3-30B-A3B-NVFP4.yaml +Qwen3-Next-80B-A3B-NVFP4-EP2.yaml diff --git a/tests/evals/gsm8k/conftest.py b/tests/evals/gsm8k/conftest.py index 1932a13cdfc6..6f25fe6414af 100644 --- a/tests/evals/gsm8k/conftest.py +++ b/tests/evals/gsm8k/conftest.py @@ -11,14 +11,12 @@ def pytest_addoption(parser): default="configs/models-small.txt", help="File containing list of config files to test", ) - parser.addoption("--tp-size", default=1, type=int, help="Tensor parallel size") def pytest_generate_tests(metafunc): """Generate test parameters from config files.""" if "config_filename" in metafunc.fixturenames: config_list_file = metafunc.config.getoption("--config-list-file") - tp_size = metafunc.config.getoption("--tp-size") # Handle both relative and absolute paths config_list_path = Path(config_list_file) @@ -55,9 +53,9 @@ def pytest_generate_tests(metafunc): # Generate test parameters if config_files: metafunc.parametrize( - ["config_filename", "tp_size"], - [(config_file, int(tp_size)) for config_file in config_files], - ids=[f"{config_file.stem}-tp{tp_size}" for config_file in config_files], + "config_filename", + config_files, + ids=[config_file.stem for config_file in config_files], ) else: print("No config files found, test will be skipped") diff --git a/tests/evals/gsm8k/test_gsm8k_correctness.py 
b/tests/evals/gsm8k/test_gsm8k_correctness.py index b5d67df7bf3d..ea6715f5cb53 100644 --- a/tests/evals/gsm8k/test_gsm8k_correctness.py +++ b/tests/evals/gsm8k/test_gsm8k_correctness.py @@ -5,30 +5,31 @@ Replacement for lm-eval-harness with better performance and control. Usage: -pytest -s -v test_gsm8k_correctness.py \ - --config-list-file=configs/models-small.txt \ - --tp-size=1 +pytest -s -v tests/evals/gsm8k/test_gsm8k_correctness.py \ + --config-list-file=configs/models-small.txt """ +import shlex + import yaml from tests.utils import RemoteOpenAIServer from .gsm8k_eval import evaluate_gsm8k -RTOL = 0.08 # Relative tolerance for accuracy comparison +TOL = 0.08 # Absolute tolerance for accuracy comparison -def launch_gsm8k_eval(eval_config, server_url, tp_size): - """Launch GSM8K evaluation using our isolated script.""" +def run_gsm8k_eval(eval_config: dict, server_url: str) -> dict: + """Run GSM8K evaluation using our isolated script.""" # Extract host and port from server URL if "://" in server_url: server_url = server_url.split("://")[1] host_port = server_url.split("/")[0] # Remove path if present if ":" in host_port: - host, port = host_port.split(":") - port = int(port) + host, p = host_port.split(":") + port = int(p) else: host = host_port port = 8000 @@ -48,46 +49,57 @@ def launch_gsm8k_eval(eval_config, server_url, tp_size): return results -def test_gsm8k_correctness_param(config_filename, tp_size): +def test_gsm8k_correctness(config_filename): """Test GSM8K correctness for a given model configuration.""" eval_config = yaml.safe_load(config_filename.read_text(encoding="utf-8")) - # Server arguments - server_args = [ - "--max-model-len", - str(eval_config.get("max_model_len", 4096)), - "--enforce-eager", - "--trust-remote-code", - "--tensor-parallel-size", - str(tp_size), - ] + # Parse server arguments from config (use shlex to handle quoted strings) + server_args_str = eval_config.get("server_args", "") + server_args = shlex.split(server_args_str) if 
server_args_str else [] + + # Add standard server arguments + server_args.extend( + [ + "--trust-remote-code", + ] + ) env_dict = eval_config.get("env", None) + print(f"Starting GSM8K evaluation for model: {eval_config['model_name']}") + print(f"Expected metric threshold: {eval_config['accuracy_threshold']}") + print(f"Number of questions: {eval_config['num_questions']}") + print(f"Number of few-shot examples: {eval_config['num_fewshot']}") + print(f"Server args: {' '.join(server_args)}") + # Launch server and run evaluation with RemoteOpenAIServer( - eval_config["model_name"], server_args, env_dict=env_dict, max_wait_seconds=480 + eval_config["model_name"], + server_args, + env_dict=env_dict, + max_wait_seconds=600, ) as remote_server: server_url = remote_server.url_for("v1") + print(f"Server started at: {server_url}") - results = launch_gsm8k_eval(eval_config, server_url, tp_size) + results = run_gsm8k_eval(eval_config, server_url) - # Check accuracy against threshold - measured_accuracy = results["accuracy"] - expected_accuracy = eval_config["accuracy_threshold"] + measured_metric = results["accuracy"] + expected_metric = eval_config["accuracy_threshold"] print(f"GSM8K Results for {eval_config['model_name']}:") - print(f" Accuracy: {measured_accuracy:.3f}") - print(f" Expected: {expected_accuracy:.3f}") + print(f" Measured metric: {measured_metric:.4f}") + print(f" Expected metric: {expected_metric:.4f}") + print(f" Tolerance: {TOL:.4f}") print(f" Questions: {results['num_questions']}") print(f" Invalid rate: {results['invalid_rate']:.3f}") print(f" Latency: {results['latency']:.1f}s") print(f" QPS: {results['questions_per_second']:.1f}") - # Verify accuracy is within tolerance - assert measured_accuracy >= expected_accuracy - RTOL, ( - f"Accuracy too low: {measured_accuracy:.3f} < " - f"{expected_accuracy:.3f} - {RTOL:.3f}" + # Verify metric is within tolerance + assert measured_metric >= expected_metric - TOL, ( + f"GSM8K metric too low: {measured_metric:.4f} 
< " + f"{expected_metric:.4f} - {TOL:.4f} = {expected_metric - TOL:.4f}" ) print(f"✅ GSM8K test passed for {eval_config['model_name']}") diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py index f650a6eabbb9..c302e465aedb 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py @@ -626,17 +626,11 @@ def apply( apply_router_weight_on_input=layer.apply_router_weight_on_input, ) else: + # If no modular kernel is provided, use cutlass_moe_fp4 for TP case + # only (no EP). from vllm.model_executor.layers.fused_moe.cutlass_moe import cutlass_moe_fp4 - assert layer.expert_map is None, ( - "Expert Parallelism / expert_map " - "is currently not supported for " - "CompressedTensorsW4A4Nvfp4MoEMethod." - ) assert self.moe_quant_config is not None - - # Cutlass moe takes in activations in BF16/Half precision - # and fp4 quantized weights loaded from the checkpoint return cutlass_moe_fp4( a=x, w1_fp4=layer.w13_weight, @@ -644,6 +638,7 @@ def apply( topk_weights=topk_weights, topk_ids=topk_ids, quant_config=self.moe_quant_config, + expert_map=layer.expert_map, apply_router_weight_on_input=layer.apply_router_weight_on_input, # TODO(bnell): derive these from arguments m=x.shape[0], From b81b822b55b329f51eea1e78e2a2e75abe8cb278 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Tue, 16 Dec 2025 20:36:49 +0100 Subject: [PATCH 24/73] [Frontend] Add `max-completion-token` option to transcription/translation endpoints (#30769) Signed-off-by: NickLucche --- .../test_transcription_validation_whisper.py | 32 ++++++++++++++++++ .../openai/test_translation_validation.py | 33 +++++++++++++++++++ vllm/entrypoints/openai/protocol.py | 6 ++++ vllm/entrypoints/openai/speech_to_text.py | 10 ++++-- 4 files changed, 
79 insertions(+), 2 deletions(-) diff --git a/tests/entrypoints/openai/test_transcription_validation_whisper.py b/tests/entrypoints/openai/test_transcription_validation_whisper.py index 3c507ee0a3fa..8bf729c517f7 100644 --- a/tests/entrypoints/openai/test_transcription_validation_whisper.py +++ b/tests/entrypoints/openai/test_transcription_validation_whisper.py @@ -244,3 +244,35 @@ async def test_audio_with_timestamp(mary_had_lamb, whisper_client): ) assert transcription.segments is not None assert len(transcription.segments) > 0 + + +@pytest.mark.asyncio +async def test_audio_with_max_tokens(whisper_client, mary_had_lamb): + transcription = await whisper_client.audio.transcriptions.create( + model=MODEL_NAME, + file=mary_had_lamb, + language="en", + response_format="text", + temperature=0.0, + extra_body={"max_completion_tokens": 1}, + ) + out = json.loads(transcription) + out_text = out["text"] + from transformers import AutoTokenizer + + tok = AutoTokenizer.from_pretrained(MODEL_NAME) + out_tokens = tok(out_text, add_special_tokens=False)["input_ids"] + assert len(out_tokens) == 1 + # max_completion_tokens > max_model_len + transcription = await whisper_client.audio.transcriptions.create( + model=MODEL_NAME, + file=mary_had_lamb, + language="en", + response_format="text", + temperature=0.0, + extra_body={"max_completion_tokens": int(1e6)}, + ) + out = json.loads(transcription) + out_text = out["text"] + out_tokens = tok(out_text, add_special_tokens=False)["input_ids"] + assert len(out_tokens) < 450 # ~Whisper max output len diff --git a/tests/entrypoints/openai/test_translation_validation.py b/tests/entrypoints/openai/test_translation_validation.py index d7d407484f16..2c577237691a 100644 --- a/tests/entrypoints/openai/test_translation_validation.py +++ b/tests/entrypoints/openai/test_translation_validation.py @@ -227,3 +227,36 @@ async def test_long_audio_request(foscolo, client_and_model): ) out = json.loads(translation)["text"].strip().lower() assert 
out.count("greek sea") == 2 + + +@pytest.mark.asyncio +async def test_audio_with_max_tokens(mary_had_lamb, client_and_model): + client, model_name = client_and_model + transcription = await client.audio.translations.create( + model=model_name, + file=mary_had_lamb, + response_format="text", + temperature=0.0, + extra_body={"max_completion_tokens": 1}, + ) + out = json.loads(transcription) + out_text = out["text"] + print(out_text) + from transformers import AutoTokenizer + + tok = AutoTokenizer.from_pretrained(model_name) + out_tokens = tok(out_text, add_special_tokens=False)["input_ids"] + assert len(out_tokens) == 1 + # max_completion_tokens > max_model_len + transcription = await client.audio.transcriptions.create( + model=model_name, + file=mary_had_lamb, + response_format="text", + temperature=0.0, + extra_body={"max_completion_tokens": int(1e6)}, + ) + out = json.loads(transcription) + out_text = out["text"] + print(out_text) + out_tokens = tok(out_text, add_special_tokens=False)["input_ids"] + assert len(out_tokens) < 450 # ~Whisper max output len diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index a7c4980cd367..94dde4564ea0 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -2054,6 +2054,9 @@ class TranscriptionRequest(OpenAIBaseModel): presence_penalty: float | None = 0.0 """The presence penalty to use for sampling.""" + + max_completion_tokens: int | None = None + """The maximum number of tokens to generate.""" # --8<-- [end:transcription-sampling-params] # Default sampling parameters for transcription requests. @@ -2300,6 +2303,9 @@ class TranslationRequest(OpenAIBaseModel): # Flattened stream option to simplify form data. 
stream_include_usage: bool | None = False stream_continuous_usage_stats: bool | None = False + + max_completion_tokens: int | None = None + """The maximum number of tokens to generate.""" # --8<-- [end:translation-extra-params] # Default sampling parameters for translation requests. diff --git a/vllm/entrypoints/openai/speech_to_text.py b/vllm/entrypoints/openai/speech_to_text.py index cea9924ebbac..df9c06adb105 100644 --- a/vllm/entrypoints/openai/speech_to_text.py +++ b/vllm/entrypoints/openai/speech_to_text.py @@ -293,8 +293,14 @@ async def _create_speech_to_text( try: # Unlike most decoder-only models, whisper generation length is not # constrained by the size of the input audio, which is mapped to a - # fixed-size log-mel-spectogram. - default_max_tokens = self.model_config.max_model_len + # fixed-size log-mel-spectogram. Still, allow for fewer tokens to be + # generated by respecting the extra completion tokens arg. + if request.max_completion_tokens is None: + default_max_tokens = self.model_config.max_model_len + else: + default_max_tokens = min( + self.model_config.max_model_len, request.max_completion_tokens + ) sampling_params = request.to_sampling_params( default_max_tokens, self.default_sampling_params ) From 03adf8ac2a95b571188aa35739aa7e944e8b057c Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Tue, 16 Dec 2025 14:50:59 -0500 Subject: [PATCH 25/73] [Refactor] Small refactor for group topk (#30562) Signed-off-by: yewentao256 Co-authored-by: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com> --- csrc/moe/grouped_topk_kernels.cu | 13 ++++++++++--- tests/v1/determinism/test_batch_invariance.py | 1 - 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/csrc/moe/grouped_topk_kernels.cu b/csrc/moe/grouped_topk_kernels.cu index 5fa367abd96f..7229e420d3fe 100644 --- a/csrc/moe/grouped_topk_kernels.cu +++ b/csrc/moe/grouped_topk_kernels.cu @@ -446,9 +446,13 @@ __device__ inline T 
apply_sigmoid(T val) { template __device__ inline T apply_scoring(T val) { - if constexpr (SF == SCORING_SIGMOID) { + if constexpr (SF == SCORING_NONE) { + return val; + } else if constexpr (SF == SCORING_SIGMOID) { return apply_sigmoid(val); } else { + static_assert(SF == SCORING_NONE || SF == SCORING_SIGMOID, + "Unsupported ScoringFunc in apply_scoring"); return val; } } @@ -670,10 +674,13 @@ __global__ void group_idx_and_topk_idx_kernel( if (case_id < num_tokens) { if (if_proceed_next_topk) { + float scale = routed_scaling_factor; + if (renormalize) { + scale /= topk_sum; + } for (int i = lane_id; i < topk; i += WARP_SIZE) { float base = cuda_cast(s_topk_value[i]); - float value = renormalize ? (base / topk_sum * routed_scaling_factor) - : (base * routed_scaling_factor); + float value = base * scale; topk_indices[i] = s_topk_idx[i]; topk_values[i] = value; } diff --git a/tests/v1/determinism/test_batch_invariance.py b/tests/v1/determinism/test_batch_invariance.py index 1c45e7fe366f..7a58e1c9bad0 100644 --- a/tests/v1/determinism/test_batch_invariance.py +++ b/tests/v1/determinism/test_batch_invariance.py @@ -188,7 +188,6 @@ def test_logprobs_bitwise_batch_invariance_bs1_vs_bsN( llm = LLM( model=model_name, tensor_parallel_size=tp_size, - # enable_prefix_caching=False, max_num_seqs=32, max_model_len=8192, dtype="bfloat16", # not everything is supported From 15110e185d2cd305d48796597aee91cfb281a6aa Mon Sep 17 00:00:00 2001 From: jiahanc <173873397+jiahanc@users.noreply.github.com> Date: Tue, 16 Dec 2025 13:01:48 -0800 Subject: [PATCH 26/73] [Perf] Do FP4 quant before All gather on flashinfer trtllmgen MOE (#30014) Signed-off-by: jiahanc <173873397+jiahanc@users.noreply.github.com> --- .../device_communicators/all2all.py | 29 ++++++++++--- .../base_device_communicator.py | 7 +++- .../device_communicators/cuda_communicator.py | 16 +++++--- vllm/distributed/parallel_state.py | 13 ++++-- .../layers/fused_moe/fused_moe_method_base.py | 12 ++++++ 
vllm/model_executor/layers/fused_moe/layer.py | 41 ++++++++++++++++++- .../layers/quantization/modelopt.py | 25 ++++++++++- .../quantization/utils/flashinfer_fp4_moe.py | 36 +++++++++------- vllm/utils/flashinfer.py | 17 ++++++++ 9 files changed, 165 insertions(+), 31 deletions(-) diff --git a/vllm/distributed/device_communicators/all2all.py b/vllm/distributed/device_communicators/all2all.py index c40dde26b741..7a4e81cf967d 100644 --- a/vllm/distributed/device_communicators/all2all.py +++ b/vllm/distributed/device_communicators/all2all.py @@ -64,7 +64,12 @@ def dispatch( hidden_states: torch.Tensor, router_logits: torch.Tensor, is_sequence_parallel: bool = False, + extra_tensors: list[torch.Tensor] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: + if extra_tensors is not None: + raise NotImplementedError( + "extra_tensors is not supported for NaiveAll2AllManager" + ) sp_size = self.tp_group.world_size if is_sequence_parallel else 1 dp_metadata = get_forward_context().dp_metadata assert dp_metadata is not None @@ -76,6 +81,7 @@ def dispatch( router_logits = self.naive_multicast( router_logits, cu_tokens_across_sp_cpu, is_sequence_parallel ) + return hidden_states, router_logits def combine( @@ -113,7 +119,11 @@ def dispatch( hidden_states: torch.Tensor, router_logits: torch.Tensor, is_sequence_parallel: bool = False, - ) -> tuple[torch.Tensor, torch.Tensor]: + extra_tensors: list[torch.Tensor] | None = None, + ) -> ( + tuple[torch.Tensor, torch.Tensor] + | tuple[torch.Tensor, torch.Tensor, list[torch.Tensor]] + ): """ Gather hidden_states and router_logits from all dp ranks. 
""" @@ -121,15 +131,22 @@ def dispatch( assert dp_metadata is not None sizes = dp_metadata.get_chunk_sizes_across_dp_rank() assert sizes is not None - dist_group = get_ep_group() if is_sequence_parallel else get_dp_group() assert sizes[dist_group.rank_in_group] == hidden_states.shape[0] - hidden_states, router_logits = dist_group.all_gatherv( - [hidden_states, router_logits], + + tensors_to_gather = [hidden_states, router_logits] + if extra_tensors is not None: + tensors_to_gather.extend(extra_tensors) + + gathered_tensors = dist_group.all_gatherv( + tensors_to_gather, dim=0, sizes=sizes, ) - return hidden_states, router_logits + + if extra_tensors is not None: + return (gathered_tensors[0], gathered_tensors[1], gathered_tensors[2:]) + return gathered_tensors[0], gathered_tensors[1] def combine( self, hidden_states: torch.Tensor, is_sequence_parallel: bool = False @@ -204,6 +221,7 @@ def dispatch( hidden_states: torch.Tensor, router_logits: torch.Tensor, is_sequence_parallel: bool = False, + extra_tensors: list[torch.Tensor] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError @@ -251,6 +269,7 @@ def dispatch( hidden_states: torch.Tensor, router_logits: torch.Tensor, is_sequence_parallel: bool = False, + extra_tensors: list[torch.Tensor] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError diff --git a/vllm/distributed/device_communicators/base_device_communicator.py b/vllm/distributed/device_communicators/base_device_communicator.py index 3a849da70e4c..caeff54406b5 100644 --- a/vllm/distributed/device_communicators/base_device_communicator.py +++ b/vllm/distributed/device_communicators/base_device_communicator.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import threading +from typing import Any from weakref import WeakValueDictionary import torch @@ -68,7 +69,11 @@ def dispatch( hidden_states: torch.Tensor, router_logits: 
torch.Tensor, is_sequence_parallel: bool = False, - ): + extra_tensors: list[torch.Tensor] | None = None, + ) -> Any: + # Subclasses should either: + # - implement handling for extra_tensors, or + # - raise a clear error if extra_tensors is not supported. raise NotImplementedError def set_num_sms(self, num_sms: int): diff --git a/vllm/distributed/device_communicators/cuda_communicator.py b/vllm/distributed/device_communicators/cuda_communicator.py index cd9c267beb5b..9542498c453e 100644 --- a/vllm/distributed/device_communicators/cuda_communicator.py +++ b/vllm/distributed/device_communicators/cuda_communicator.py @@ -318,17 +318,23 @@ def _all_gather_single(input_: torch.Tensor, sizes: list[int] | None = None): return output_list - def dispatch( + def dispatch( # type: ignore[override] self, hidden_states: torch.Tensor, router_logits: torch.Tensor, is_sequence_parallel: bool = False, - ) -> tuple[torch.Tensor, torch.Tensor]: + extra_tensors: list[torch.Tensor] | None = None, + ) -> ( + tuple[torch.Tensor, torch.Tensor] + | tuple[torch.Tensor, torch.Tensor, list[torch.Tensor]] + ): assert self.all2all_manager is not None - hidden_states, router_logits = self.all2all_manager.dispatch( - hidden_states, router_logits, is_sequence_parallel + return self.all2all_manager.dispatch( + hidden_states, + router_logits, + is_sequence_parallel, + extra_tensors, # type: ignore[call-arg] ) - return hidden_states, router_logits def combine( self, hidden_states: torch.Tensor, is_sequence_parallel: bool = False diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index 338cb1f1814b..f5ada5a009ec 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -1007,10 +1007,17 @@ def dispatch( hidden_states: torch.Tensor, router_logits: torch.Tensor, is_sequence_parallel: bool = False, - ) -> tuple[torch.Tensor, torch.Tensor]: + extra_tensors: list[torch.Tensor] | None = None, + ) -> ( + tuple[torch.Tensor, torch.Tensor] + | 
tuple[torch.Tensor, torch.Tensor, list[torch.Tensor]] + ): if self.device_communicator is not None: - return self.device_communicator.dispatch( - hidden_states, router_logits, is_sequence_parallel + return self.device_communicator.dispatch( # type: ignore[call-arg] + hidden_states, + router_logits, + is_sequence_parallel, + extra_tensors, ) else: return hidden_states, router_logits diff --git a/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py b/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py index 8c9d8a2777d5..a46e3972ed8e 100644 --- a/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +++ b/vllm/model_executor/layers/fused_moe/fused_moe_method_base.py @@ -71,6 +71,18 @@ def select_gemm_impl( "implementation based on the prepare_finalize" ) + def prepare_dp_allgather_tensor( + self, + layer: "FusedMoE", # type: ignore[name-defined] # noqa: F821 + hidden_states: torch.Tensor, + router_logits: torch.Tensor, + ) -> tuple[torch.Tensor, list[torch.Tensor]]: + """Hook to prepare tensors and extra tensors for DP allgather + EP dispatch.""" + raise NotImplementedError( + "Method 'prepare_dp_allgather_tensor' is not implemented in " + f"{self.__class__.__name__}." 
+ ) + @abstractmethod def get_fused_moe_quant_config( self, layer: torch.nn.Module diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index cc3afade709d..b39ce415a0f8 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -44,6 +44,7 @@ is_flashinfer_supporting_global_sf, ) from vllm.platforms import current_platform +from vllm.utils.flashinfer import has_flashinfer_trtllm_fused_moe from vllm.utils.math_utils import cdiv, round_up from vllm.utils.torch_utils import ( aux_stream, @@ -1933,10 +1934,46 @@ def forward_impl( ) with sp_ctx: + extra_tensors = None if do_naive_dispatch_combine: - hidden_states_combined, router_logits = get_ep_group().dispatch( - hidden_states, router_logits, self.is_sequence_parallel + # Avoid circular import + from vllm.model_executor.layers.quantization.modelopt import ( + ModelOptNvFp4FusedMoE, + ) + + post_quant_allgather = ( + has_flashinfer_trtllm_fused_moe() + and self.quant_method is not None + and self.dp_size > 1 + and self.use_ep + and isinstance(self.quant_method, ModelOptNvFp4FusedMoE) + ) + if post_quant_allgather: + hidden_states_to_dispatch, extra_tensors = ( + self.quant_method.prepare_dp_allgather_tensor( + self, hidden_states, router_logits + ) + ) + else: + hidden_states_to_dispatch = hidden_states + + dispatch_res = get_ep_group().dispatch( + hidden_states_to_dispatch, + router_logits, + self.is_sequence_parallel, + extra_tensors=extra_tensors, ) + if extra_tensors is not None: + hidden_states_combined, router_logits, extra_tensors_combined = ( + dispatch_res + ) + hidden_states_combined = ( + hidden_states_combined, + extra_tensors_combined[0], + ) + else: + hidden_states_combined, router_logits = dispatch_res + # Run shared experts before matrix multiply. # because matrix multiply maybe modify the hidden_states. 
if has_separate_shared_experts and not use_shared_experts_stream: diff --git a/vllm/model_executor/layers/quantization/modelopt.py b/vllm/model_executor/layers/quantization/modelopt.py index f71854e6b63c..d5d7e7bfaae7 100644 --- a/vllm/model_executor/layers/quantization/modelopt.py +++ b/vllm/model_executor/layers/quantization/modelopt.py @@ -1522,6 +1522,24 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None: w2_blockscale_swizzled, requires_grad=False ) + def prepare_dp_allgather_tensor( + self, + layer: FusedMoE, + hidden_states: torch.Tensor, + router_logits: torch.Tensor, + ) -> tuple[torch.Tensor, list[torch.Tensor]]: + """Optionally prepare extra tensors to carry through DP allgather/EP.""" + import flashinfer + + a1_gscale = layer.w13_input_scale_quant + hidden_states_fp4, hidden_states_sf = flashinfer.fp4_quantize( + hidden_states, + a1_gscale, + is_sf_swizzled_layout=False, + ) + extra_tensors: list[torch.Tensor] = [hidden_states_sf] + return hidden_states_fp4, extra_tensors + def get_fused_moe_quant_config( self, layer: torch.nn.Module ) -> FusedMoEQuantConfig | None: @@ -1576,8 +1594,13 @@ def apply( e_score_correction_bias=layer.e_score_correction_bias, ) + # Hidden_states in select_experts is only used to extract metadata + if isinstance(x, tuple): + x_routing, _ = x + else: + x_routing = x topk_weights, topk_ids, _ = layer.select_experts( - hidden_states=x, + hidden_states=x_routing, router_logits=router_logits, ) diff --git a/vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py b/vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py index 76bce8a8d98d..1d410316d629 100644 --- a/vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +++ b/vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py @@ -238,7 +238,7 @@ def prepare_static_weights_for_trtllm_fp4_moe( def flashinfer_trtllm_fp4_moe( layer: torch.nn.Module, - x: torch.Tensor, + x: torch.Tensor | tuple[torch.Tensor, 
torch.Tensor], router_logits: torch.Tensor, top_k: int, global_num_experts: int, @@ -269,12 +269,16 @@ def flashinfer_trtllm_fp4_moe( from vllm.model_executor.models.llama4 import Llama4MoE # Quantize input to FP4 - a1_gscale = layer.w13_input_scale_quant - (hidden_states_fp4, hidden_states_scale_linear_fp4) = flashinfer.fp4_quantize( - x, - a1_gscale, - is_sf_swizzled_layout=False, - ) + if isinstance(x, tuple): + hidden_states_fp4, hidden_states_scale_linear_fp4 = x + else: + # hidden_states is the already quantized + a1_gscale = layer.w13_input_scale_quant + (hidden_states_fp4, hidden_states_scale_linear_fp4) = flashinfer.fp4_quantize( + x, + a1_gscale, + is_sf_swizzled_layout=False, + ) # Determine routing method type use_llama4_routing = custom_routing_function is Llama4MoE.custom_routing_function @@ -360,13 +364,17 @@ def flashinfer_trtllm_fp4_routed_moe( torch.bfloat16 ).view(torch.int16) - # Quantize input to FP4 - a1_gscale = layer.w13_input_scale_quant - (hidden_states_fp4, hidden_states_scale_linear_fp4) = flashinfer.fp4_quantize( - x, - a1_gscale, - is_sf_swizzled_layout=False, - ) + if isinstance(x, tuple): + # Hidden_states is the already quantized + hidden_states_fp4, hidden_states_scale_linear_fp4 = x + else: + # Quantize input to FP4 + a1_gscale = layer.w13_input_scale_quant + (hidden_states_fp4, hidden_states_scale_linear_fp4) = flashinfer.fp4_quantize( + x, + a1_gscale, + is_sf_swizzled_layout=False, + ) # Call TRT-LLM FP4 block-scale MoE kernel out = flashinfer.fused_moe.trtllm_fp4_block_scale_routed_moe( diff --git a/vllm/utils/flashinfer.py b/vllm/utils/flashinfer.py index 5019b771f4a1..1c2710be3173 100644 --- a/vllm/utils/flashinfer.py +++ b/vllm/utils/flashinfer.py @@ -184,6 +184,23 @@ def has_flashinfer_cutedsl() -> bool: ) +@functools.cache +def has_flashinfer_trtllm_fused_moe() -> bool: + """Return `True` if FlashInfer TRTLLM fused MoE is available.""" + if not has_flashinfer_moe(): + return False + required_functions = [ + 
("flashinfer.fused_moe", "trtllm_fp8_block_scale_moe"), + ("flashinfer.fused_moe", "trtllm_fp8_per_tensor_scale_moe"), + ("flashinfer.fused_moe", "trtllm_fp4_block_scale_moe"), + ] + for module_name, attr_name in required_functions: + mod = _get_submodule(module_name) + if not mod or not hasattr(mod, attr_name): + return False + return True + + @functools.cache def has_flashinfer_cutlass_fused_moe() -> bool: """Return `True` if FlashInfer CUTLASS fused MoE is available.""" From dfbea50f59074e6a7e4379db39c3f741227f9687 Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Tue, 16 Dec 2025 17:10:16 -0500 Subject: [PATCH 27/73] [Attention] Cache attention metadata builds across hybrid KV-cache groups (#29627) Signed-off-by: Lucas Wilkinson Co-authored-by: Stanislaw Wozniak --- .../attention/test_chunked_local_attention.py | 2 +- .../layers/chunked_local_attention.py | 16 +++++++--- vllm/envs.py | 4 +-- vllm/v1/attention/backends/flash_attn.py | 13 ++++++++ vllm/v1/attention/backends/mamba2_attn.py | 27 ++++++++++++++++ vllm/v1/attention/backends/utils.py | 32 ++++++++++++++++--- vllm/v1/worker/gpu_model_runner.py | 24 +++++++++++++- 7 files changed, 105 insertions(+), 13 deletions(-) diff --git a/tests/v1/attention/test_chunked_local_attention.py b/tests/v1/attention/test_chunked_local_attention.py index faace3473a28..4529c2cfc29b 100644 --- a/tests/v1/attention/test_chunked_local_attention.py +++ b/tests/v1/attention/test_chunked_local_attention.py @@ -172,7 +172,7 @@ def test_local_attention_virtual_batches(test_data: LocalAttentionTestData): ) # Call the function - result = make_local_attention_virtual_batches( + result, _ = make_local_attention_virtual_batches( attn_chunk_size, common_attn_metadata, block_size ) diff --git a/vllm/attention/layers/chunked_local_attention.py b/vllm/attention/layers/chunked_local_attention.py index 0ced0028ded9..7e3794d40833 100644 --- a/vllm/attention/layers/chunked_local_attention.py +++ 
b/vllm/attention/layers/chunked_local_attention.py @@ -4,7 +4,7 @@ import torch -from vllm.attention.backends.abstract import AttentionBackend, AttentionMetadata +from vllm.attention.backends.abstract import AttentionBackend from vllm.attention.layer import Attention from vllm.attention.selector import get_attn_backend from vllm.config import CacheConfig @@ -51,11 +51,19 @@ def build( common_prefix_len: int, common_attn_metadata: CommonAttentionMetadata, fast_build: bool = False, - ) -> AttentionMetadata: - common_attn_metadata = make_local_attention_virtual_batches( + ): + cm, make_virtual_batches_block_table = make_local_attention_virtual_batches( attention_chunk_size, common_attn_metadata, block_size ) - return super().build(common_prefix_len, common_attn_metadata, fast_build) + metadata = super().build(common_prefix_len, cm, fast_build) + metadata.make_virtual_batches_block_table = make_virtual_batches_block_table + return metadata + + def update_block_table( + self, metadata, blk_table: torch.Tensor, slot_mapping: torch.Tensor + ): + blk_table = metadata.make_virtual_batches_block_table(blk_table) + return super().update_block_table(metadata, blk_table, slot_mapping) attn_backend = subclass_attention_backend( name_prefix=prefix, diff --git a/vllm/envs.py b/vllm/envs.py index d0f279809626..7e072a588591 100755 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -207,7 +207,7 @@ VLLM_USE_TRTLLM_RAGGED_DEEPSEEK_PREFILL: bool = False VLLM_ENABLE_CUDAGRAPH_GC: bool = False VLLM_LOOPBACK_IP: str = "" - VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE: bool = False + VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE: bool = True VLLM_ENABLE_RESPONSES_API_STORE: bool = False VLLM_USE_TRTLLM_ATTENTION: str | None = None VLLM_NVFP4_GEMM_BACKEND: str | None = None @@ -1430,7 +1430,7 @@ def get_vllm_port() -> int | None: # kv-cache memory usage and enable longer contexts) # TODO(lucas): Remove this flag once latency regression is resolved. 
"VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE": lambda: bool( - int(os.getenv("VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE", "0")) + int(os.getenv("VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE", "1")) ), # Enables support for the "store" option in the OpenAI Responses API. # When set to 1, vLLM's OpenAI server will retain the input and output diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index f5ad98cf2125..3445e998d637 100755 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -2,6 +2,7 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Attention layer with FlashAttention.""" +import copy from dataclasses import dataclass from typing import ClassVar @@ -250,6 +251,7 @@ class FlashAttentionMetadataBuilder(AttentionMetadataBuilder[FlashAttentionMetad if get_flash_attn_version() == 3 else AttentionCGSupport.UNIFORM_BATCH ) + supports_update_block_table: bool = True def __init__( self, @@ -493,6 +495,17 @@ def schedule( ) return attn_metadata + def update_block_table( + self, + metadata: FlashAttentionMetadata, + blk_table: torch.Tensor, + slot_mapping: torch.Tensor, + ) -> FlashAttentionMetadata: + new_metadata = copy.copy(metadata) + new_metadata.block_table = blk_table + new_metadata.slot_mapping = slot_mapping + return new_metadata + def use_cascade_attention(self, *args, **kwargs) -> bool: return use_cascade_attention(*args, **kwargs) diff --git a/vllm/v1/attention/backends/mamba2_attn.py b/vllm/v1/attention/backends/mamba2_attn.py index bf1d8f09ab0a..f923371283aa 100644 --- a/vllm/v1/attention/backends/mamba2_attn.py +++ b/vllm/v1/attention/backends/mamba2_attn.py @@ -1,5 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import copy import itertools from dataclasses import dataclass @@ -134,6 +135,8 @@ class Mamba2AttentionMetadata: class 
Mamba2AttentionMetadataBuilder( BaseMambaAttentionMetadataBuilder[Mamba2AttentionMetadata] ): + supports_update_block_table: bool = True + def __init__( self, kv_cache_spec: AttentionSpec, @@ -346,3 +349,27 @@ def build( num_computed_tokens_p=num_computed_tokens_p, ) return attn_metadata + + def update_block_table( + self, + metadata: Mamba2AttentionMetadata, + blk_table: torch.Tensor, + slot_mapping: torch.Tensor, + ) -> Mamba2AttentionMetadata: + new_metadata = copy.copy(metadata) + prefix_caching = self.vllm_config.cache_config.enable_prefix_caching + state_indices_t = blk_table if prefix_caching else blk_table[:, 0] + num_reqs = blk_table.shape[0] + + # For CUDA graphs, copy to persistent buffer + if ( + metadata.num_prefills == 0 + and num_reqs <= self.decode_cudagraph_max_bs + and self.compilation_config.cudagraph_mode.has_full_cudagraphs() + ): + persistent_state_indices_t = self.state_indices_tensor[:num_reqs] + persistent_state_indices_t.copy_(state_indices_t, non_blocking=True) + state_indices_t = persistent_state_indices_t + + new_metadata.state_indices_tensor = state_indices_t + return new_metadata diff --git a/vllm/v1/attention/backends/utils.py b/vllm/v1/attention/backends/utils.py index 1cbe929fc57a..56763f4b5253 100644 --- a/vllm/v1/attention/backends/utils.py +++ b/vllm/v1/attention/backends/utils.py @@ -4,6 +4,7 @@ import enum import functools from abc import abstractmethod +from collections.abc import Callable from dataclasses import dataclass, field, fields, make_dataclass from typing import ( TYPE_CHECKING, @@ -317,6 +318,9 @@ class AttentionMetadataBuilder(abc.ABC, Generic[M]): # If not, set this to None. Otherwise set it to the query # length that will be pulled into the front of the batch. 
reorder_batch_threshold: int | None = None + # Does this backend/builder support updating the block table in existing + # metadata + supports_update_block_table: bool = False @abstractmethod def __init__( @@ -387,6 +391,21 @@ def build( """ raise NotImplementedError + def update_block_table( + self, + metadata: M, + blk_table: torch.Tensor, + slot_mapping: torch.Tensor, + ) -> M: + """ + Update the block table for the attention metadata. + Faster when theres multiple kv-cache groups that create virtually the + same metadata but just with different block tables. + + Only needs to be implemented if supports_update_block_table is True. + """ + raise NotImplementedError + def build_for_cudagraph_capture( self, common_attn_metadata: CommonAttentionMetadata ) -> M: @@ -603,7 +622,7 @@ def make_local_attention_virtual_batches( attn_chunk_size: int, common_attn_metadata: CommonAttentionMetadata, block_size: int = 0, -) -> CommonAttentionMetadata: +) -> tuple[CommonAttentionMetadata, Callable[[torch.Tensor], torch.Tensor]]: query_start_loc_np = common_attn_metadata.query_start_loc_cpu.numpy() seq_lens_np = common_attn_metadata.seq_lens_cpu.numpy() block_table = common_attn_metadata.block_table_tensor @@ -715,9 +734,12 @@ def make_local_attention_virtual_batches( # tensor first, which recovers perf. 
batch_indices_torch = torch.from_numpy(batch_indices) block_indices_torch = torch.from_numpy(block_indices) - block_table_local = block_table[batch_indices_torch, block_indices_torch].view( - virtual_batches, -1 - ) + + # Save as a lambda so we can return this for update_block_table + make_block_table = lambda block_table: block_table[ + batch_indices_torch, block_indices_torch + ].view(virtual_batches, -1) + block_table_local = make_block_table(block_table) query_start_loc_cpu = torch.from_numpy(cu_seqlens_q_local) seq_lens_cpu = torch.from_numpy(seqlens_k_local) @@ -736,7 +758,7 @@ def make_local_attention_virtual_batches( causal=True, _seq_lens_cpu=seq_lens_cpu, _num_computed_tokens_cpu=torch.from_numpy(num_computed_tokens_local), - ) + ), make_block_table def make_kv_sharing_fast_prefill_common_attn_metadata( diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 1aa2ec6bb655..179f713c4d86 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1630,6 +1630,15 @@ def _get_block_table_and_slot_mapping(kv_cache_gid: int): logits_indices ) + # Cache attention metadata builds across hybrid KV-cache groups + # The only thing that changes between different hybrid KV-cache groups when the + # same metadata builder and KVCacheSpec is the same is the block table, so we + # can cache the attention metadata builds and just update the block table using + # `builder.update_block_table` if the builder supports it. 
+ cached_attn_metadata: dict[ + tuple[KVCacheSpec, type[AttentionMetadataBuilder]], AttentionMetadata + ] = {} + def _build_attn_group_metadata( kv_cache_gid: int, attn_gid: int, @@ -1637,13 +1646,15 @@ def _build_attn_group_metadata( ubid: int | None = None, ) -> None: attn_group = self.attn_groups[kv_cache_gid][attn_gid] + builder = attn_group.get_metadata_builder(ubid or 0) + cache_key = (kv_cache_groups[kv_cache_gid].kv_cache_spec, type(builder)) + cascade_attn_prefix_len = ( cascade_attn_prefix_lens[kv_cache_gid][attn_gid] if cascade_attn_prefix_lens else 0 ) - builder = attn_group.get_metadata_builder(ubid or 0) extra_attn_metadata_args = {} if use_spec_decode and isinstance(builder, GDNAttentionMetadataBuilder): assert ubid is None, "UBatching not supported with GDN yet" @@ -1658,12 +1669,23 @@ def _build_attn_group_metadata( attn_metadata_i = builder.build_for_cudagraph_capture( common_attn_metadata ) + elif ( + cache_key in cached_attn_metadata + and builder.supports_update_block_table + ): + attn_metadata_i = builder.update_block_table( + cached_attn_metadata[cache_key], + common_attn_metadata.block_table_tensor, + common_attn_metadata.slot_mapping, + ) else: attn_metadata_i = builder.build( common_prefix_len=cascade_attn_prefix_len, common_attn_metadata=common_attn_metadata, **extra_attn_metadata_args, ) + if builder.supports_update_block_table: + cached_attn_metadata[cache_key] = attn_metadata_i if ubid is None: assert isinstance(attn_metadata, dict) From ecf0943cb8fa97a013b97a5d37c7f439a49307dd Mon Sep 17 00:00:00 2001 From: Roger Wang Date: Tue, 16 Dec 2025 14:18:17 -0800 Subject: [PATCH 28/73] [Core][MM] Optimize encoder cache manager by operating with embeddings only (#30475) Signed-off-by: Roger Wang Co-authored-by: Sun Kim --- .../multimodal/processing/test_mllama4.py | 4 +- tests/multimodal/test_utils.py | 92 +++++++++++++++++++ tests/v1/core/test_encoder_cache_manager.py | 79 +++++++++++++++- .../unit/test_ec_example_connector.py | 2 +- 
.../ec_connector/example_connector.py | 2 +- vllm/model_executor/models/qwen3_vl.py | 8 +- vllm/multimodal/inputs.py | 39 +++++++- vllm/multimodal/profiling.py | 32 ++----- vllm/multimodal/registry.py | 2 +- vllm/v1/core/encoder_cache_manager.py | 80 ++++++++-------- vllm/v1/core/sched/scheduler.py | 35 +++++-- vllm/v1/request.py | 6 +- vllm/v1/worker/gpu_model_runner.py | 49 +++------- vllm/v1/worker/utils.py | 6 ++ 14 files changed, 306 insertions(+), 130 deletions(-) diff --git a/tests/models/multimodal/processing/test_mllama4.py b/tests/models/multimodal/processing/test_mllama4.py index e5ff2d1391b6..325159965c80 100644 --- a/tests/models/multimodal/processing/test_mllama4.py +++ b/tests/models/multimodal/processing/test_mllama4.py @@ -60,12 +60,12 @@ def test_profiling(model_id: str, max_model_len: int): total_num_patches.item() + num_tiles.item() + 3 ) # image start, image, image end - profiled_tokens = profiler.get_mm_max_contiguous_tokens( + profiled_tokens = profiler.get_mm_max_tokens( max_model_len, mm_counts=mm_counts, ) - assert total_tokens == profiled_tokens["image"] + assert total_num_patches == profiled_tokens["image"] assert total_tokens == sum( placeholder.length for placeholder in decoder_dummy_data.multi_modal_placeholders["image"] diff --git a/tests/multimodal/test_utils.py b/tests/multimodal/test_utils.py index 636cd0ffd445..02bb1f769baa 100644 --- a/tests/multimodal/test_utils.py +++ b/tests/multimodal/test_utils.py @@ -9,6 +9,7 @@ import numpy as np import pytest +import torch from PIL import Image, ImageChops from vllm.multimodal.image import convert_image_mode @@ -410,6 +411,97 @@ def test_argsort_mm_positions(case): assert modality_idxs == expected_modality_idxs +@pytest.mark.parametrize( + "is_embed,expected", + [ + (None, 5), + (torch.tensor([True, True, True, True, True]), 5), + (torch.tensor([False, False, False, False, False]), 0), + (torch.tensor([True, False, True, False, True]), 3), + (torch.tensor([True]), 1), + ], +) +def 
test_placeholder_range_get_num_embeds(is_embed, expected): + length = len(is_embed) if is_embed is not None else 5 + pr = PlaceholderRange(offset=0, length=length, is_embed=is_embed) + assert pr.get_num_embeds == expected + + +@pytest.mark.parametrize( + "is_embed,expected", + [ + (None, None), + ( + torch.tensor([False, True, False, True, True]), + torch.tensor([0, 1, 1, 2, 3]), + ), + (torch.tensor([True, True, True]), torch.tensor([1, 2, 3])), + ], +) +def test_placeholder_range_embeds_cumsum(is_embed, expected): + length = len(is_embed) if is_embed is not None else 5 + pr = PlaceholderRange(offset=0, length=length, is_embed=is_embed) + + if expected is None: + assert pr.embeds_cumsum is None + return + + assert torch.equal(pr.embeds_cumsum, expected) + # cached_property should return the same object on repeated access + assert pr.embeds_cumsum is pr.embeds_cumsum + + +@pytest.mark.parametrize( + "is_embed,start_idx,end_idx,expected", + [ + (None, 2, 4, (2, 4)), + ( + torch.tensor([False, True, False, True, True]), + 3, + 5, + (1, 3), + ), + ( + torch.tensor([False, True, False, True, True]), + 0, + 2, + (0, 1), + ), + ( + torch.tensor([True, False, True, False]), + 2, + 2, + (1, 1), + ), + ], +) +def test_placeholder_range_get_embeds_indices_in_range( + is_embed, start_idx, end_idx, expected +): + length = len(is_embed) if is_embed is not None else 5 + pr = PlaceholderRange(offset=0, length=length, is_embed=is_embed) + assert pr.get_embeds_indices_in_range(start_idx, end_idx) == expected + + +@pytest.mark.parametrize( + "offset,is_embed,expected", + [ + (0, None, [(0, 4)]), + ( + 2, + torch.tensor([False, True, False, True, True]), + [(3, 3), (5, 6)], + ), + (0, torch.tensor([True, True, True, True]), [(0, 3)]), + (0, torch.tensor([False, False, False, False]), []), + ], +) +def test_placeholder_range_extract_embeds_range(offset, is_embed, expected): + length = len(is_embed) if is_embed is not None else 5 + pr = PlaceholderRange(offset=offset, length=length, 
is_embed=is_embed) + assert pr.extract_embeds_range() == expected + + @pytest.mark.asyncio @pytest.mark.parametrize("video_url", TEST_VIDEO_URLS) @pytest.mark.parametrize("num_frames", [-1, 32, 1800]) diff --git a/tests/v1/core/test_encoder_cache_manager.py b/tests/v1/core/test_encoder_cache_manager.py index 8a52b5bd7897..511ff48c401c 100644 --- a/tests/v1/core/test_encoder_cache_manager.py +++ b/tests/v1/core/test_encoder_cache_manager.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest +import torch from vllm.multimodal.inputs import MultiModalFeatureSpec, PlaceholderRange from vllm.v1.core.encoder_cache_manager import EncoderCacheManager @@ -23,7 +24,7 @@ def __init__(self, request_id, mm_hashes, token_counts): ) self.mm_features.append(feature) - def get_num_encoder_tokens(self, input_id: int) -> int: + def get_num_encoder_embeds(self, input_id: int) -> int: return self._token_counts[input_id] @@ -162,8 +163,8 @@ def test_schedule_request_multi_images_respect_space_limit(): num_tokens_to_schedule = 0 assert manager.can_allocate(req, 0, compute_budget, num_tokens_to_schedule) - num_tokens_to_schedule += req.get_num_encoder_tokens(0) - compute_budget -= req.get_num_encoder_tokens(0) + num_tokens_to_schedule += req.get_num_encoder_embeds(0) + compute_budget -= req.get_num_encoder_embeds(0) assert not manager.can_allocate(req, 1, compute_budget, num_tokens_to_schedule) @@ -174,7 +175,75 @@ def test_schedule_request_multi_images_respect_compute_limit(): compute_budget = 10 num_tokens_to_schedule = 0 assert manager.can_allocate(req, 0, compute_budget, num_tokens_to_schedule) - num_tokens_to_schedule += req.get_num_encoder_tokens(0) - compute_budget -= req.get_num_encoder_tokens(0) + num_tokens_to_schedule += req.get_num_encoder_embeds(0) + compute_budget -= req.get_num_encoder_embeds(0) assert not manager.can_allocate(req, 1, compute_budget, num_tokens_to_schedule) + + +def 
test_encoder_cache_with_is_embed_mask(): + class MockRequestWithMask(MockRequest): + def get_num_encoder_embeds(self, input_id: int) -> int: + return self.mm_features[input_id].mm_position.get_num_embeds + + is_embed = torch.zeros(100, dtype=torch.bool) + is_embed[torch.tensor([5, 15, 25, 35, 45, 55, 65, 75])] = True + + request = MockRequestWithMask("r1", ["img1"], [100]) + request.mm_features[0] = MultiModalFeatureSpec( + data=None, + modality="image", + identifier="img1", + mm_position=PlaceholderRange(offset=0, length=100, is_embed=is_embed), + ) + + manager = EncoderCacheManager(cache_size=100) + manager.allocate(request, 0) + + assert manager.num_free_slots == 92 + assert "img1" in manager.cached + + old_size = 100 + new_size = request.mm_features[0].mm_position.get_num_embeds + assert new_size == 8 + savings_ratio = old_size / new_size + assert savings_ratio == 12.5 + + +def test_encoder_cache_mask_based_retrieval(): + class MockRequestWithMask(MockRequest): + def get_num_encoder_embeds(self, input_id: int) -> int: + return self.mm_features[input_id].mm_position.get_num_embeds + + is_embed = torch.tensor( + [False, False, True, True, False, True, True, True, False, False] + ) + + request = MockRequestWithMask("r1", ["img1"], [10]) + request.mm_features[0] = MultiModalFeatureSpec( + data=None, + modality="image", + identifier="img1", + mm_position=PlaceholderRange(offset=0, length=10, is_embed=is_embed), + ) + + manager = EncoderCacheManager(cache_size=50) + manager.allocate(request, 0) + + assert request.mm_features[0].mm_position.get_num_embeds == 5 + + start_idx = 2 + end_idx = 8 + num_embeds_before = is_embed[:start_idx].sum().item() + num_embeds_in_range = is_embed[start_idx:end_idx].sum().item() + + assert num_embeds_before == 0 + assert num_embeds_in_range == 5 + + start_idx = 0 + end_idx = 5 + num_embeds_before = is_embed[:start_idx].sum().item() if start_idx > 0 else 0 + num_embeds_in_range = is_embed[start_idx:end_idx].sum().item() + + assert 
num_embeds_before == 0 + assert num_embeds_in_range == 2 diff --git a/tests/v1/ec_connector/unit/test_ec_example_connector.py b/tests/v1/ec_connector/unit/test_ec_example_connector.py index 7e9eb2131003..9ed82e1cef82 100644 --- a/tests/v1/ec_connector/unit/test_ec_example_connector.py +++ b/tests/v1/ec_connector/unit/test_ec_example_connector.py @@ -38,7 +38,7 @@ def __init__(self, request_id, mm_hashes: list[str], token_counts: list[int]): ) self.mm_features.append(feature) - def get_num_encoder_tokens(self, input_id: int) -> int: + def get_num_encoder_embeds(self, input_id: int) -> int: assert input_id < len(self._token_counts) return self._token_counts[input_id] diff --git a/vllm/distributed/ec_transfer/ec_connector/example_connector.py b/vllm/distributed/ec_transfer/ec_connector/example_connector.py index 5f2eff5a8e6a..c9aad9e9fc8f 100644 --- a/vllm/distributed/ec_transfer/ec_connector/example_connector.py +++ b/vllm/distributed/ec_transfer/ec_connector/example_connector.py @@ -144,7 +144,7 @@ def update_state_after_alloc( Update ECConnector state after encoder cache allocation. """ mm_hash = request.mm_features[index].identifier - num_encoder_token = request.get_num_encoder_tokens(index) + num_encoder_token = request.get_num_encoder_embeds(index) # Insert mm_hash only if this block has not been recorded yet. 
self._mm_datas_need_loads[mm_hash] = num_encoder_token diff --git a/vllm/model_executor/models/qwen3_vl.py b/vllm/model_executor/models/qwen3_vl.py index c0589986d1fe..4838f68e06f7 100644 --- a/vllm/model_executor/models/qwen3_vl.py +++ b/vllm/model_executor/models/qwen3_vl.py @@ -713,17 +713,13 @@ def get_max_video_tokens( mm_counts: Mapping[str, int], ) -> int: target_width, target_height = self.get_image_size_with_most_features() - video_soft_tokens = self.get_num_video_tokens( + num_video_soft_tokens = self.get_num_video_tokens( image_width=target_width, image_height=target_height, num_frames=self.get_num_frames_with_most_features(seq_len, mm_counts), image_processor=None, ) - - # NOTE: By default in Qwen3-VL, one video token is converted to - # "<{timestamp} seconds>" (on average 9.5 tokens) + vision_start_token + video_token + vision_end_token # noqa: E501 - formatted_video_soft_tokens = video_soft_tokens * 12.5 - return int(formatted_video_soft_tokens) + return num_video_soft_tokens def _calculate_timestamps( self, indices: list[int] | torch.Tensor, video_fps: float, merge_size: int diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 6b1cbbe24e2e..fa69818a7b1f 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -5,7 +5,7 @@ from collections import UserDict, defaultdict from collections.abc import Mapping, Sequence from dataclasses import dataclass -from functools import partial +from functools import cached_property, partial from itertools import accumulate from typing import ( TYPE_CHECKING, @@ -169,11 +169,42 @@ class PlaceholderRange: between `offset` and `offset + length` to assign embeddings to. 
""" - def get_num_embeds(self) -> int: + @cached_property + def embeds_cumsum(self) -> torch.Tensor | None: if self.is_embed is None: + return None + + return self.is_embed.cumsum(dim=0) + + @cached_property + def get_num_embeds(self) -> int: + if self.embeds_cumsum is None: return self.length - return int(self.is_embed.sum().item()) + return int(self.embeds_cumsum[-1]) + + def get_embeds_indices_in_range( + self, start_idx: int, end_idx: int + ) -> tuple[int, int]: + """ + Returns the starting and ending indices of the embeddings of encoder outputs + in the range of [start_idx, end_idx) in the placeholders. + + For example, given: + PlaceholderRange(offset=2, length=5, is_embed=[False, True, False, True, True]) + + If start_idx=3 and end_idx=5, the output is (1, 3) because we want to get + the second and the third embeddings from the encoder output. + """ + if self.embeds_cumsum is None: + return start_idx, end_idx + + embeds_start_idx = ( + int(self.embeds_cumsum[start_idx - 1]) if start_idx > 0 else 0 + ) + embeds_end_idx = int(self.embeds_cumsum[end_idx - 1]) + + return embeds_start_idx, embeds_end_idx def extract_embeds_range(self) -> list[tuple[int, int]]: """Extract the start and end indices of the embedded region in prompt. @@ -188,7 +219,7 @@ def extract_embeds_range(self) -> list[tuple[int, int]]: Returns full placeholder range if `is_embed` is `None`. 
""" if self.is_embed is None: - return [(self.offset, self.offset + self.length)] + return [(self.offset, self.offset + self.length - 1)] mask_i = self.is_embed.int() starts = torch.nonzero( diff --git a/vllm/multimodal/profiling.py b/vllm/multimodal/profiling.py index cb70041e9744..a690948f759e 100644 --- a/vllm/multimodal/profiling.py +++ b/vllm/multimodal/profiling.py @@ -274,15 +274,11 @@ def _get_dummy_mm_inputs( def _get_mm_num_tokens( self, mm_inputs: MultiModalInputs, - mm_embeddings_only: bool = True, ) -> Mapping[str, int]: placeholders_by_modality = mm_inputs["mm_placeholders"] return { - modality: sum( - item.get_num_embeds() if mm_embeddings_only else item.length - for item in placeholders - ) + modality: sum(item.get_num_embeds for item in placeholders) for modality, placeholders in placeholders_by_modality.items() } @@ -328,12 +324,15 @@ def get_decoder_dummy_data( multi_modal_placeholders=mm_inputs["mm_placeholders"], ) - def _get_mm_max_tokens( + def get_mm_max_tokens( self, seq_len: int, mm_counts: Mapping[str, int] | None = None, - mm_embeddings_only: bool = True, ) -> Mapping[str, int]: + """ + Returns the maximum number of embeddings per item of each modality, excluding + any break/text tokens in-between multimodal embeddings/encoder outputs. + """ if mm_counts is None: mm_counts = self.get_mm_limits() @@ -349,21 +348,4 @@ def _get_mm_max_tokens( } mm_inputs = self._get_dummy_mm_inputs(seq_len, mm_counts) - return self._get_mm_num_tokens(mm_inputs, mm_embeddings_only=mm_embeddings_only) - - def get_mm_max_contiguous_tokens( - self, - seq_len: int, - mm_counts: Mapping[str, int] | None = None, - ) -> Mapping[str, int]: - """ - Returns the maximum length of the multimodal (image placeholders+text) - tokens, including any break/text tokens in-between image embeddings. - - ` [IMG] [IMG] [IMG] [IMG] [IMG] [IMG] ` - Returns 9, even when the number of image embeddings is 6. 
- - This is important to take into account when profiling and - initializing the encoder cache size. - """ - return self._get_mm_max_tokens(seq_len, mm_counts, mm_embeddings_only=False) + return self._get_mm_num_tokens(mm_inputs) diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py index 00a84f9dec4f..1e7fe8648ab7 100644 --- a/vllm/multimodal/registry.py +++ b/vllm/multimodal/registry.py @@ -164,7 +164,7 @@ def get_max_tokens_per_item_by_modality( profiler.get_mm_limits() if profiler_limits is None else profiler_limits ) - return profiler.get_mm_max_contiguous_tokens( + return profiler.get_mm_max_tokens( seq_len, {modality: 1 for modality, limit in profiler_limits.items() if limit > 0}, ) diff --git a/vllm/v1/core/encoder_cache_manager.py b/vllm/v1/core/encoder_cache_manager.py index 50f738713590..d73c05d2cf80 100644 --- a/vllm/v1/core/encoder_cache_manager.py +++ b/vllm/v1/core/encoder_cache_manager.py @@ -39,20 +39,26 @@ class EncoderCacheManager: space for new embeddings. Oldest cached embeddings with no request referenced will be first evicted. + NOTE: The EncoderCacheManager operates on the level of multimodal embeddings + instead of encoder tokens (i.e. all tokens that represent the multimodal data + in the input sequence). This means all break/text tokens in-between multimodal + embeddings are not considered with respect to the cache size and the number + of free slots. + Args: cache_size: Limit the size of the cache, measured by the number of - tokens from the input sequence. + encoder embeddings from the input sequence. Attributes: - cache_size: Total cache capacity in encoder tokens. - num_free_slots: Current available cache capacity in encoder tokens. + cache_size: Total cache capacity in encoder embeddings. + num_free_slots: Current available cache capacity in encoder embeddings. num_freeable_slots: Capacity that can be immediately reclaimed by - evicting entries with zero references (in encoder tokens). 
+ evicting entries with zero references (in encoder embeddings). cached: Mapping from mm_hash to a set of request IDs that currently reference the cached entry. If the set is empty, the entry exists but is not referenced by any request and is eligible for reclamation. - freeable: List of tuples (mm_hash, num_tokens) representing entries + freeable: List of tuples (mm_hash, num_encoder_embeds) representing entries whose no current running request is needed and that can be freed to make space when needed. freed: List of mm_hash strings that were actually evicted since the @@ -67,7 +73,7 @@ def __init__(self, cache_size: int): # mm_hash of mm_data => ids of requests that reference the mm_data self.cached: dict[str, set[str]] = {} - # mm_hash of mm_data => num_encoder_tokens of the mm_data + # mm_hash of mm_data => num_encoder_embeds of the mm_data self.freeable: OrderedDict[str, int] = OrderedDict() self.freed: list[str] = [] @@ -93,8 +99,8 @@ def check_and_update_cache(self, request: Request, input_id: int) -> bool: # Cached but currently not referenced by any request if not self.cached[mm_hash]: - num_tokens = self.freeable.pop(mm_hash) - self.num_freeable_slots -= num_tokens + num_encoder_embeds = self.freeable.pop(mm_hash) + self.num_freeable_slots -= num_encoder_embeds self.cached[mm_hash].add(request.request_id) return True @@ -104,7 +110,7 @@ def can_allocate( request: Request, input_id: int, encoder_compute_budget: int, - num_tokens_to_schedule: int, + num_embeds_to_schedule: int, ) -> bool: """Check if there's sufficient cache space for a multimodal input. If there is, return True and update EncoderCacheManager state. @@ -121,9 +127,9 @@ def can_allocate( Args: request: The request containing the multimodal input. input_id: Index of the multimodal input within the request. - encoder_compute_budget: Number of encoder tokens allowed to be + encoder_compute_budget: Number of encoder embeddings allowed to be computed when this method is invoked. 
- num_tokens_to_schedule: Number of tokens already scheduled to be + num_embeds_to_schedule: Number of encoder embeddings already scheduled to be allocated with cache space when this method is invoked. Returns: @@ -134,30 +140,30 @@ def can_allocate( Note: This method does not allocate physical memory for the encoder output but only the state of EncoderCacheManager. """ - num_tokens = request.get_num_encoder_tokens(input_id) + num_embeds = request.get_num_encoder_embeds(input_id) # Not enough compute budget - if num_tokens > encoder_compute_budget: + if num_embeds > encoder_compute_budget: return False - num_tokens += num_tokens_to_schedule + num_embeds += num_embeds_to_schedule # Enough free slots - if num_tokens <= self.num_free_slots: + if num_embeds <= self.num_free_slots: return True # Not enough reclaimable slots - if num_tokens > self.num_freeable_slots: + if num_embeds > self.num_freeable_slots: return False # Not enough free slots but enough reclaimable slots # NOTE: Eviction takes place here, but physical memory is not freed # until model runner is notified by the scheduler output. - while num_tokens > self.num_free_slots: - mm_hash, num_free_token = self.freeable.popitem(last=False) + while num_embeds > self.num_free_slots: + mm_hash, num_free_embeds = self.freeable.popitem(last=False) del self.cached[mm_hash] self.freed.append(mm_hash) - self.num_free_slots += num_free_token + self.num_free_slots += num_free_embeds return True def allocate(self, request: Request, input_id: int) -> None: @@ -176,16 +182,16 @@ def allocate(self, request: Request, input_id: int) -> None: if mm_hash not in self.cached: self.cached[mm_hash] = set() - num_encoder_tokens = request.get_num_encoder_tokens(input_id) + num_encoder_embeds = request.get_num_encoder_embeds(input_id) # NOTE: Encoder cache should always have enough space for encoder inputs # that are scheduled since eviction takes place at can_allocate(). 
- assert self.num_free_slots >= num_encoder_tokens - assert self.num_freeable_slots >= num_encoder_tokens + assert self.num_free_slots >= num_encoder_embeds + assert self.num_freeable_slots >= num_encoder_embeds self.cached[mm_hash].add(request_id) - self.num_free_slots -= num_encoder_tokens - self.num_freeable_slots -= num_encoder_tokens + self.num_free_slots -= num_encoder_embeds + self.num_freeable_slots -= num_encoder_embeds def get_cached_input_ids(self, request: Request) -> set[int]: """Get all cached multimodal input IDs for a request. @@ -206,7 +212,7 @@ def free_encoder_input(self, request: Request, input_id: int) -> None: When the reference set for the corresponding `mm_hash` becomes empty, the entry is appended to `freeable` and `num_freeable_slots` is - increased by the number of encoder tokens for that input. + increased by the number of encoder embeddings for that input. The entry is NOT physically freed until capacity is needed (e.g., by `can_allocate`). @@ -218,9 +224,9 @@ def free_encoder_input(self, request: Request, input_id: int) -> None: return self.cached[mm_hash].discard(req_id) if not self.cached[mm_hash]: - num_tokens = request.get_num_encoder_tokens(input_id) - self.freeable[mm_hash] = num_tokens - self.num_freeable_slots += num_tokens + num_encoder_embeds = request.get_num_encoder_embeds(input_id) + self.freeable[mm_hash] = num_encoder_embeds + self.num_freeable_slots += num_encoder_embeds def free(self, request: Request) -> None: """Free all encoder input cache reference held by *request*. 
@@ -361,20 +367,20 @@ def can_allocate( request: Request, input_id: int, encoder_compute_budget: int, - num_tokens_to_schedule: int, + num_embeds_to_schedule: int, ) -> bool: - num_tokens = request.get_num_encoder_tokens(input_id) + num_encoder_embeds = request.get_num_encoder_embeds(input_id) # Not enough compute budget - if num_tokens > encoder_compute_budget: + if num_encoder_embeds > encoder_compute_budget: return False - num_tokens += num_tokens_to_schedule + num_encoder_embeds += num_embeds_to_schedule # Enough free slots - return num_tokens <= self.num_free_slots + return num_encoder_embeds <= self.num_free_slots def allocate(self, request: Request, input_id: int) -> None: - num_encoder_tokens = request.get_num_encoder_tokens(input_id) - self.num_free_slots -= num_encoder_tokens + num_encoder_embeds = request.get_num_encoder_embeds(input_id) + self.num_free_slots -= num_encoder_embeds mm_hash = request.mm_features[input_id].identifier self.freed.append(mm_hash) @@ -392,5 +398,5 @@ def get_freed_mm_hashes(self) -> list[str]: return freed def free_encoder_input(self, request: Request, input_id: int) -> None: - num_tokens = request.get_num_encoder_tokens(input_id) - self.num_free_slots += num_tokens + num_encoder_embeds = request.get_num_encoder_embeds(input_id) + self.num_free_slots += num_encoder_embeds diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index e9f2c6e6aa40..2ebda1588d44 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -355,11 +355,11 @@ def schedule(self) -> SchedulerOutput: if preempted_encoder_inputs: # Restore encoder compute budget if the preempted # request had encoder inputs scheduled in this step. 
- num_tokens_to_restore = sum( - preempted_req.get_num_encoder_tokens(i) + num_embeds_to_restore = sum( + preempted_req.get_num_encoder_embeds(i) for i in preempted_encoder_inputs ) - encoder_compute_budget += num_tokens_to_restore + encoder_compute_budget += num_embeds_to_restore req_index -= 1 else: preempted_req = self.running.pop() @@ -912,10 +912,11 @@ def _try_schedule_encoder_inputs( # multiple encoder inputs per request), we need to create temporary # trackers for accounting at the encoder input level. mm_hashes_to_schedule = set() - num_tokens_to_schedule = 0 + num_embeds_to_schedule = 0 for i, mm_feature in enumerate(mm_features): start_pos = mm_feature.mm_position.offset num_encoder_tokens = mm_feature.mm_position.length + num_encoder_embeds = mm_feature.mm_position.get_num_embeds # The encoder output is needed if the two ranges overlap: # [num_computed_tokens, num_computed_tokens + num_new_tokens) and @@ -971,9 +972,8 @@ def _try_schedule_encoder_inputs( ): num_new_tokens = start_pos - num_computed_tokens break - if not self.encoder_cache_manager.can_allocate( - request, i, encoder_compute_budget, num_tokens_to_schedule + request, i, encoder_compute_budget, num_embeds_to_schedule ): # The encoder cache is full or the encoder budget is exhausted. # NOTE(woosuk): We assume that the encoder input tokens should @@ -993,14 +993,31 @@ def _try_schedule_encoder_inputs( num_new_tokens = 0 break + # Calculate the number of embeddings to schedule in the current range + # of scheduled encoder placholder tokens. + start_idx_rel = max(0, num_computed_tokens - start_pos) + end_idx_rel = min( + num_encoder_tokens, num_computed_tokens + num_new_tokens - start_pos + ) + curr_embeds_start, curr_embeds_end = ( + mm_feature.mm_position.get_embeds_indices_in_range( + start_idx_rel, + end_idx_rel, + ) + ) + # There's no embeddings in the current range of encoder placeholder tokens + # so we can skip the encoder input. 
+ if curr_embeds_end - curr_embeds_start == 0: + continue + if self.ec_connector is not None and remote_cache_has_item[i]: mm_hashes_to_schedule.add(request.mm_features[i].identifier) external_load_encoder_input.append(i) - num_tokens_to_schedule += num_encoder_tokens + num_embeds_to_schedule += num_encoder_embeds continue - num_tokens_to_schedule += num_encoder_tokens - encoder_compute_budget -= num_encoder_tokens + num_embeds_to_schedule += num_encoder_embeds + encoder_compute_budget -= num_encoder_embeds mm_hashes_to_schedule.add(request.mm_features[i].identifier) encoder_inputs_to_schedule.append(i) diff --git a/vllm/v1/request.py b/vllm/v1/request.py index a775e840e841..f33059b80b89 100644 --- a/vllm/v1/request.py +++ b/vllm/v1/request.py @@ -209,10 +209,10 @@ def is_finished(self) -> bool: def get_finished_reason(self) -> FinishReason | None: return RequestStatus.get_finished_reason(self.status) - def get_num_encoder_tokens(self, input_id: int) -> int: + def get_num_encoder_embeds(self, input_id: int) -> int: assert input_id < len(self.mm_features) - num_tokens = self.mm_features[input_id].mm_position.length - return num_tokens + num_embeds = self.mm_features[input_id].mm_position.get_num_embeds + return num_embeds def record_event( self, diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 179f713c4d86..1db5bc99fff6 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -169,9 +169,7 @@ MultiModalBudget, add_kv_sharing_layers_to_kv_cache_groups, bind_kv_cache, - gather_mm_placeholders, sanity_check_mm_encoder_outputs, - scatter_mm_placeholders, ) if TYPE_CHECKING: @@ -2209,10 +2207,7 @@ def _execute_mm_encoder( # Cache the encoder outputs by mm_hash for (mm_hash, pos_info), output in zip(mm_hashes_pos, encoder_outputs): - self.encoder_cache[mm_hash] = scatter_mm_placeholders( - output, - is_embed=pos_info.is_embed, - ) + self.encoder_cache[mm_hash] = output logger.debug("Finish 
execute for mm hash %s", mm_hash) self.maybe_save_ec_to_connector(self.encoder_cache, mm_hash) @@ -2263,6 +2258,13 @@ def _gather_mm_embeddings( num_encoder_tokens, ) assert start_idx < end_idx + curr_embeds_start, curr_embeds_end = ( + pos_info.get_embeds_indices_in_range(start_idx, end_idx) + ) + # If there are no embeddings in the current range, we skip + # gathering the embeddings. + if curr_embeds_start == curr_embeds_end: + continue mm_hash = mm_feature.identifier encoder_output = self.encoder_cache.get(mm_hash, None) @@ -2270,16 +2272,14 @@ def _gather_mm_embeddings( if (is_embed := pos_info.is_embed) is not None: is_embed = is_embed[start_idx:end_idx] + mm_embeds_item = encoder_output[curr_embeds_start:curr_embeds_end] + else: + mm_embeds_item = encoder_output[start_idx:end_idx] req_start_pos = req_start_idx + start_pos - num_computed_tokens is_mm_embed[req_start_pos + start_idx : req_start_pos + end_idx] = ( True if is_embed is None else is_embed ) - - mm_embeds_item = gather_mm_placeholders( - encoder_output[start_idx:end_idx], - is_embed=is_embed, - ) mm_embeds_req.append(mm_embeds_item) if self.is_multimodal_pruning_enabled and self.uses_mrope: @@ -4508,31 +4508,8 @@ def profile_run(self) -> None: dummy_encoder_outputs, expected_num_items=max_mm_items_per_batch, ) - - # NOTE: This happens when encoder cache needs to store - # the embeddings that encoder outputs are scattered onto. - # In this case we create dummy embeddings of size - # (max_tokens_for_modality, hidden_size) and scatter - # encoder output into it. 
- encoder_output_shape = dummy_encoder_outputs[0].shape - max_mm_tokens_per_item = mm_budget.max_tokens_by_modality[ - dummy_modality - ] - if encoder_output_shape[0] < max_mm_tokens_per_item: - encoder_hidden_size = encoder_output_shape[-1] - expanded_outputs = [] - for output in dummy_encoder_outputs: - expanded = output.new_zeros( - (max_mm_tokens_per_item, encoder_hidden_size) - ) - num_tokens = output.shape[0] - expanded[:num_tokens].copy_(output) - expanded_outputs.append(expanded) - - dummy_encoder_outputs = expanded_outputs - - # Cache the dummy encoder outputs. - self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs)) + for i, output in enumerate(dummy_encoder_outputs): + self.encoder_cache[f"tmp_{i}"] = output # Add `is_profile` here to pre-allocate communication buffers hidden_states, last_hidden_states = self._dummy_run( diff --git a/vllm/v1/worker/utils.py b/vllm/v1/worker/utils.py index e9c48223d58b..2e8afec024ce 100644 --- a/vllm/v1/worker/utils.py +++ b/vllm/v1/worker/utils.py @@ -4,10 +4,12 @@ from dataclasses import dataclass, field import torch +from typing_extensions import deprecated from vllm.attention.backends.abstract import AttentionBackend from vllm.attention.layer import Attention from vllm.config import ModelConfig, SchedulerConfig, VllmConfig +from vllm.logger import init_logger from vllm.model_executor.models.interfaces import MultiModalEmbeddings from vllm.model_executor.models.utils import extract_layer_index from vllm.multimodal.cache import processor_only_cache_from_config @@ -17,6 +19,8 @@ from vllm.v1.core.encoder_cache_manager import compute_mm_encoder_budget from vllm.v1.kv_cache_interface import KVCacheGroupSpec, KVCacheSpec +logger = init_logger(__name__) + class MultiModalBudget: """Helper class to calculate budget information for multi-modal models.""" @@ -198,6 +202,7 @@ def sanity_check_mm_encoder_outputs( ) +@deprecated("`scatter_mm_placeholders` is deprecated and will be removed in v0.15.0.") def 
scatter_mm_placeholders( embeds: torch.Tensor, is_embed: torch.Tensor | None, @@ -226,6 +231,7 @@ def scatter_mm_placeholders( return placeholders +@deprecated("`gather_mm_placeholders` is deprecated and will be removed in v0.15.0.") def gather_mm_placeholders( placeholders: torch.Tensor, is_embed: torch.Tensor | None, From 602c268e27a6ec6a29ee05123448147c22d55f48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20C=C3=A1mpora?= <961215+dcampora@users.noreply.github.com> Date: Tue, 16 Dec 2025 23:21:17 +0100 Subject: [PATCH 29/73] [Bugfix][DSV32] Fix overflow in topk. (#30754) Signed-off-by: Daniel Campora <961215+dcampora@users.noreply.github.com> Signed-off-by: mgoin Co-authored-by: mgoin --- csrc/sampler.cu | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/csrc/sampler.cu b/csrc/sampler.cu index fc2154beff9e..d458f8e4c1d0 100644 --- a/csrc/sampler.cu +++ b/csrc/sampler.cu @@ -550,8 +550,8 @@ static __global__ __launch_bounds__(kNumThreadsPerBlock) void topKPerRowPrefill( int rowEnd = rowEnds[rowIdx]; // Local pointers to this block - outIndices += rowIdx * topK; - logits += rowIdx * stride0; + outIndices += static_cast(rowIdx) * topK; + logits += static_cast(rowIdx) * stride0; topKPerRowJob( nullptr, logits, rowStart, rowEnd, outIndices, nullptr, stride1, topK); @@ -576,19 +576,21 @@ static __global__ __launch_bounds__(kNumThreadsPerBlock) void topKPerRowDecode( // Local pointers to this block if constexpr (!multipleBlocksPerRow && !mergeBlocks) { - outIndices += rowIdx * topK; + outIndices += static_cast(rowIdx) * topK; } else if constexpr (multipleBlocksPerRow) { const auto blockSize = rowEnd / gridDim.y; // 16384 / 2 = 8192 rowStart = blockSize * blockIdx.y; // 8192 * 1 = 8192 rowEnd = gridDim.y == blockIdx.y + 1 ? 
rowEnd : rowStart + blockSize; - outIndices += rowIdx * gridDim.y * topK + blockIdx.y * topK; - outLogits += rowIdx * gridDim.y * topK + blockIdx.y * topK; + outIndices += + static_cast(rowIdx) * gridDim.y * topK + blockIdx.y * topK; + outLogits += + static_cast(rowIdx) * gridDim.y * topK + blockIdx.y * topK; } else if constexpr (mergeBlocks) { rowEnd = numBlocksToMerge * topK; - indices += rowIdx * numBlocksToMerge * topK; - outIndices += rowIdx * topK; + indices += static_cast(rowIdx) * numBlocksToMerge * topK; + outIndices += static_cast(rowIdx) * topK; } - logits += rowIdx * stride0; + logits += static_cast(rowIdx) * stride0; topKPerRowJob( From 4317b41c8b5853a4eb035fcd7d2793c271563bb1 Mon Sep 17 00:00:00 2001 From: Jinzhen Lin Date: Wed, 17 Dec 2025 06:35:28 +0800 Subject: [PATCH 30/73] [Kernel][Quantization][MoE] add marlin kernel support for turing (sm75) (#29901) Signed-off-by: Jinzhen Lin Co-authored-by: Michael Goin --- CMakeLists.txt | 107 +++++-- csrc/moe/marlin_moe_wna16/.gitignore | 1 + csrc/moe/marlin_moe_wna16/generate_kernels.py | 132 +++++---- csrc/moe/marlin_moe_wna16/marlin_template.h | 208 ++++---------- csrc/moe/marlin_moe_wna16/ops.cu | 54 ++-- csrc/quantization/gptq_marlin/.gitignore | 1 + csrc/quantization/gptq_marlin/dequant.h | 2 +- .../gptq_marlin/generate_kernels.py | 132 +++++---- csrc/quantization/gptq_marlin/gptq_marlin.cu | 68 +++-- csrc/quantization/gptq_marlin/marlin.cuh | 74 ++++- csrc/quantization/gptq_marlin/marlin_mma.h | 269 ++++++++++++++++++ .../gptq_marlin/marlin_template.h | 184 +++--------- .../layers/quantization/awq_marlin.py | 2 +- .../model_executor/layers/quantization/fp8.py | 2 +- .../layers/quantization/gptq_marlin.py | 2 +- .../layers/quantization/modelopt.py | 2 +- 16 files changed, 728 insertions(+), 512 deletions(-) create mode 100644 csrc/quantization/gptq_marlin/marlin_mma.h diff --git a/CMakeLists.txt b/CMakeLists.txt index cd52df86e034..5ca71f6ba4df 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ 
-357,6 +357,8 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # marlin arches for fp16 output cuda_archs_loose_intersection(MARLIN_ARCHS "8.0+PTX" "${CUDA_ARCHS}") + # marlin has limited support for turing + cuda_archs_loose_intersection(MARLIN_SM75_ARCHS "7.5" "${CUDA_ARCHS}") # marlin arches for bf16 output (we need 9.0 for bf16 atomicAdd PTX) cuda_archs_loose_intersection(MARLIN_BF16_ARCHS "8.0+PTX;9.0+PTX" "${CUDA_ARCHS}") # marlin arches for fp8 input @@ -364,8 +366,10 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # - sm90 and sm100 don't support QMMA.16832.F32.E4M3.E4M3 SAAS instruction # so we only enable fp8 computation for SM89 (e.g. RTX 40x0) and 12.0 (e.g. RTX 50x0) cuda_archs_loose_intersection(MARLIN_FP8_ARCHS "8.9;12.0" "${CUDA_ARCHS}") + # marlin arches for other files + cuda_archs_loose_intersection(MARLIN_OTHER_ARCHS "7.5;8.0+PTX" "${CUDA_ARCHS}") - if (MARLIN_ARCHS) + if (MARLIN_OTHER_ARCHS) # # For the Marlin kernels we automatically generate sources for various @@ -406,25 +410,39 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") message(STATUS "Marlin generation script has not changed, skipping generation.") endif() - file(GLOB MARLIN_TEMPLATE_KERNEL_SRC "csrc/quantization/gptq_marlin/sm80_kernel_*_float16.cu") - set_gencode_flags_for_srcs( - SRCS "${MARLIN_TEMPLATE_KERNEL_SRC}" - CUDA_ARCHS "${MARLIN_ARCHS}") - if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) - set_source_files_properties(${MARLIN_TEMPLATE_KERNEL_SRC} - PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + if (MARLIN_ARCHS) + file(GLOB MARLIN_TEMPLATE_KERNEL_SRC "csrc/quantization/gptq_marlin/sm80_kernel_*_float16.cu") + set_gencode_flags_for_srcs( + SRCS "${MARLIN_TEMPLATE_KERNEL_SRC}" + CUDA_ARCHS "${MARLIN_ARCHS}") + if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) + set_source_files_properties(${MARLIN_TEMPLATE_KERNEL_SRC} + PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + endif() + list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_KERNEL_SRC}) + + file(GLOB 
MARLIN_TEMPLATE_BF16_KERNEL_SRC "csrc/quantization/gptq_marlin/sm80_kernel_*_bfloat16.cu") + set_gencode_flags_for_srcs( + SRCS "${MARLIN_TEMPLATE_BF16_KERNEL_SRC}" + CUDA_ARCHS "${MARLIN_BF16_ARCHS}") + if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) + set_source_files_properties(${MARLIN_TEMPLATE_BF16_KERNEL_SRC} + PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + endif() + list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_BF16_KERNEL_SRC}) endif() - list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_KERNEL_SRC}) - file(GLOB MARLIN_TEMPLATE_BF16_KERNEL_SRC "csrc/quantization/gptq_marlin/sm80_kernel_*_bfloat16.cu") - set_gencode_flags_for_srcs( - SRCS "${MARLIN_TEMPLATE_BF16_KERNEL_SRC}" - CUDA_ARCHS "${MARLIN_BF16_ARCHS}") - if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) - set_source_files_properties(${MARLIN_TEMPLATE_BF16_KERNEL_SRC} - PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + if (MARLIN_SM75_ARCHS) + file(GLOB MARLIN_TEMPLATE_SM75_KERNEL_SRC "csrc/quantization/gptq_marlin/sm75_kernel_*.cu") + set_gencode_flags_for_srcs( + SRCS "${MARLIN_TEMPLATE_SM75_KERNEL_SRC}" + CUDA_ARCHS "${MARLIN_SM75_ARCHS}") + if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) + set_source_files_properties(${MARLIN_TEMPLATE_SM75_KERNEL_SRC} + PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + endif() + list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_SM75_KERNEL_SRC}) endif() - list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_BF16_KERNEL_SRC}) if (MARLIN_FP8_ARCHS) file(GLOB MARLIN_TEMPLATE_FP8_KERNEL_SRC "csrc/quantization/gptq_marlin/sm89_kernel_*.cu") @@ -446,14 +464,14 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") "csrc/quantization/gptq_marlin/awq_marlin_repack.cu") set_gencode_flags_for_srcs( SRCS "${MARLIN_SRCS}" - CUDA_ARCHS "${MARLIN_ARCHS}") + CUDA_ARCHS "${MARLIN_OTHER_ARCHS}") if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) - set_source_files_properties("csrc/quantization/gptq_marlin/gptq_marlin.cu" + 
set_source_files_properties(${MARLIN_SRCS} PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") endif() list(APPEND VLLM_EXT_SRC "${MARLIN_SRCS}") - message(STATUS "Building Marlin kernels for archs: ${MARLIN_ARCHS}") + message(STATUS "Building Marlin kernels for archs: ${MARLIN_OTHER_ARCHS}") else() message(STATUS "Not building Marlin kernels as no compatible archs found" " in CUDA target architectures") @@ -980,12 +998,16 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # note that we always set `use_atomic_add=False` for moe marlin now, # so we don't need 9.0 for bf16 atomicAdd PTX cuda_archs_loose_intersection(MARLIN_MOE_ARCHS "8.0+PTX" "${CUDA_ARCHS}") + # moe marlin has limited support for turing + cuda_archs_loose_intersection(MARLIN_MOE_SM75_ARCHS "7.5" "${CUDA_ARCHS}") # moe marlin arches for fp8 input # - sm80 doesn't support fp8 computation # - sm90 and sm100 don't support QMMA.16832.F32.E4M3.E4M3 SAAS instruction # so we only enable fp8 computation for SM89 (e.g. RTX 40x0) and 12.0 (e.g. 
RTX 50x0) cuda_archs_loose_intersection(MARLIN_MOE_FP8_ARCHS "8.9;12.0" "${CUDA_ARCHS}") - if (MARLIN_MOE_ARCHS) + # moe marlin arches for other files + cuda_archs_loose_intersection(MARLIN_MOE_OTHER_ARCHS "7.5;8.0+PTX" "${CUDA_ARCHS}") + if (MARLIN_MOE_OTHER_ARCHS) # # For the Marlin MOE kernels we automatically generate sources for various @@ -1026,16 +1048,29 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") message(STATUS "Marlin MOE generation script has not changed, skipping generation.") endif() - file(GLOB MARLIN_MOE_SRC "csrc/moe/marlin_moe_wna16/sm80_kernel_*.cu") - list(APPEND MARLIN_MOE_SRC "csrc/moe/marlin_moe_wna16/ops.cu") - set_gencode_flags_for_srcs( - SRCS "${MARLIN_MOE_SRC}" - CUDA_ARCHS "${MARLIN_MOE_ARCHS}") - if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) - set_source_files_properties(${MARLIN_MOE_SRC} - PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + if (MARLIN_MOE_ARCHS) + file(GLOB MARLIN_MOE_SRC "csrc/moe/marlin_moe_wna16/sm80_kernel_*.cu") + set_gencode_flags_for_srcs( + SRCS "${MARLIN_MOE_SRC}" + CUDA_ARCHS "${MARLIN_MOE_ARCHS}") + if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) + set_source_files_properties(${MARLIN_MOE_SRC} + PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + endif() + list(APPEND VLLM_MOE_EXT_SRC ${MARLIN_MOE_SRC}) + endif() + + if (MARLIN_MOE_SM75_ARCHS) + file(GLOB MARLIN_MOE_SM75_SRC "csrc/moe/marlin_moe_wna16/sm75_kernel_*.cu") + set_gencode_flags_for_srcs( + SRCS "${MARLIN_MOE_SM75_SRC}" + CUDA_ARCHS "${MARLIN_MOE_SM75_ARCHS}") + if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) + set_source_files_properties(${MARLIN_MOE_SM75_SRC} + PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + endif() + list(APPEND VLLM_MOE_EXT_SRC ${MARLIN_MOE_SM75_SRC}) endif() - list(APPEND VLLM_MOE_EXT_SRC ${MARLIN_MOE_SRC}) if (MARLIN_MOE_FP8_ARCHS) file(GLOB MARLIN_MOE_FP8_SRC "csrc/moe/marlin_moe_wna16/sm89_kernel_*.cu") @@ -1049,7 +1084,17 @@ if(VLLM_GPU_LANG 
STREQUAL "CUDA") list(APPEND VLLM_MOE_EXT_SRC ${MARLIN_MOE_FP8_SRC}) endif() - message(STATUS "Building Marlin MOE kernels for archs: ${MARLIN_MOE_ARCHS}") + set(MARLIN_MOE_OTHER_SRC "csrc/moe/marlin_moe_wna16/ops.cu") + set_gencode_flags_for_srcs( + SRCS "${MARLIN_MOE_OTHER_SRC}" + CUDA_ARCHS "${MARLIN_MOE_OTHER_ARCHS}") + if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8) + set_source_files_properties(${MARLIN_MOE_OTHER_SRC} + PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false") + endif() + list(APPEND VLLM_MOE_EXT_SRC "${MARLIN_MOE_OTHER_SRC}") + + message(STATUS "Building Marlin MOE kernels for archs: ${MARLIN_MOE_OTHER_ARCHS}") else() message(STATUS "Not building Marlin MOE kernels as no compatible archs found" " in CUDA target architectures") diff --git a/csrc/moe/marlin_moe_wna16/.gitignore b/csrc/moe/marlin_moe_wna16/.gitignore index ba805f9250ec..7dc482a89466 100644 --- a/csrc/moe/marlin_moe_wna16/.gitignore +++ b/csrc/moe/marlin_moe_wna16/.gitignore @@ -1,2 +1,3 @@ sm*_kernel_*.cu kernel_selector.h +kernel_*.cu diff --git a/csrc/moe/marlin_moe_wna16/generate_kernels.py b/csrc/moe/marlin_moe_wna16/generate_kernels.py index 88f1055337fd..9db03ea149d0 100644 --- a/csrc/moe/marlin_moe_wna16/generate_kernels.py +++ b/csrc/moe/marlin_moe_wna16/generate_kernels.py @@ -10,6 +10,8 @@ ARCHS = [] SUPPORT_FP8 = False +SUPPORT_SM75 = False +SUPPORT_SM80 = False for arch in sys.argv[1].split(","): arch = arch[: arch.index(".") + 2].replace(".", "") arch = int(arch) @@ -19,6 +21,10 @@ # with FP16 MMA, so it cannot achieve any acceleration. 
if arch in [89, 120]: SUPPORT_FP8 = True + if arch >= 80: + SUPPORT_SM80 = True + if arch == 75: + SUPPORT_SM75 = True FILE_HEAD_COMMENT = """ // auto generated by generate_kernels.py @@ -157,6 +163,7 @@ def remove_old_kernels(): def generate_new_kernels(): result_dict = {} + sm_75_result_dict = {} for quant_config in QUANT_CONFIGS: c_types = quant_config.get("c_type", ["kFloat16", "kBFloat16"]) @@ -174,6 +181,8 @@ def generate_new_kernels(): s_type = quant_config.get("s_type", c_type) if (a_type, b_type, c_type) not in result_dict: result_dict[(a_type, b_type, c_type)] = [] + if a_type in ["kFloat16", "kS8"] and c_type == "kFloat16": + sm_75_result_dict[(a_type, b_type, c_type)] = [] for group_blocks, m_blocks, thread_configs in itertools.product( all_group_blocks, all_m_blocks, all_thread_configs @@ -197,78 +206,89 @@ def generate_new_kernels(): "thread_k_blocks": thread_k // 16, "thread_n_blocks": thread_n // 16, "m_block_size_8": "true" if m_blocks == 0.5 else "false", - "stages": "pipe_stages", + "stages": 4, "group_blocks": group_blocks, "is_zp_float": "false", } - result_dict[(a_type, b_type, c_type)].append(config) + if SUPPORT_SM80: + result_dict[(a_type, b_type, c_type)].append(config) + if (a_type, b_type, c_type) in sm_75_result_dict and SUPPORT_SM75: + config_sm75 = config.copy() + config_sm75["stages"] = 2 + sm_75_result_dict[(a_type, b_type, c_type)].append(config_sm75) kernel_selector_str = FILE_HEAD_COMMENT - for (a_type, b_type, c_type), config_list in result_dict.items(): - all_template_str_list = [] - for config in config_list: - s_type = config["s_type"] - template_str = jinja2.Template(TEMPLATE).render( - a_type_id=f"vllm::{a_type}.id()", - b_type_id=f"vllm::{b_type}.id()", - c_type_id=f"vllm::{c_type}.id()", - s_type_id=f"vllm::{s_type}.id()", - **config, - ) - all_template_str_list.append(template_str) - - conditions = [ - f"a_type == vllm::{a_type}", - f"b_type == vllm::{b_type}", - f"c_type == vllm::{c_type}", - f"s_type == 
vllm::{s_type}", - f"threads == {config['threads']}", - f"thread_m_blocks == {config['thread_m_blocks']}", - f"thread_n_blocks == {config['thread_n_blocks']}", - f"thread_k_blocks == {config['thread_k_blocks']}", - f"m_block_size_8 == {config['m_block_size_8']}", - f"group_blocks == {config['group_blocks']}", - f"is_zp_float == {config['is_zp_float']}", - ] - conditions = " && ".join(conditions) - - if kernel_selector_str == FILE_HEAD_COMMENT: - kernel_selector_str += f"if ({conditions})\n kernel = " - else: - kernel_selector_str += f"else if ({conditions})\n kernel = " - - kernel_template2 = ( - "Marlin<{{a_type_id}}, {{b_type_id}}, {{c_type_id}}, " - "{{s_type_id}}, {{threads}}, {{thread_m_blocks}}, " - "{{thread_n_blocks}}, {{thread_k_blocks}}, " - "{{m_block_size_8}}, {{stages}}, {{group_blocks}}, " - "{{is_zp_float}}>;" - ) - - kernel_selector_str += ( - jinja2.Template(kernel_template2).render( + for result_dict_tmp in [result_dict, sm_75_result_dict]: + for (a_type, b_type, c_type), config_list in result_dict_tmp.items(): + all_template_str_list = [] + if not config_list: + continue + for config in config_list: + s_type = config["s_type"] + template_str = jinja2.Template(TEMPLATE).render( a_type_id=f"vllm::{a_type}.id()", b_type_id=f"vllm::{b_type}.id()", c_type_id=f"vllm::{c_type}.id()", s_type_id=f"vllm::{s_type}.id()", **config, ) - + "\n" - ) + all_template_str_list.append(template_str) + + conditions = [ + f"a_type == vllm::{a_type}", + f"b_type == vllm::{b_type}", + f"c_type == vllm::{c_type}", + f"s_type == vllm::{s_type}", + f"threads == {config['threads']}", + f"thread_m_blocks == {config['thread_m_blocks']}", + f"thread_n_blocks == {config['thread_n_blocks']}", + f"thread_k_blocks == {config['thread_k_blocks']}", + f"m_block_size_8 == {config['m_block_size_8']}", + f"stages == {config['stages']}", + f"group_blocks == {config['group_blocks']}", + f"is_zp_float == {config['is_zp_float']}", + ] + conditions = " && ".join(conditions) + + if 
kernel_selector_str == FILE_HEAD_COMMENT: + kernel_selector_str += f"if ({conditions})\n kernel = " + else: + kernel_selector_str += f"else if ({conditions})\n kernel = " + + kernel_template2 = ( + "Marlin<{{a_type_id}}, {{b_type_id}}, {{c_type_id}}, " + "{{s_type_id}}, {{threads}}, {{thread_m_blocks}}, " + "{{thread_n_blocks}}, {{thread_k_blocks}}, " + "{{m_block_size_8}}, {{stages}}, {{group_blocks}}, " + "{{is_zp_float}}>;" + ) - file_content = FILE_HEAD + "\n\n" - file_content += "\n\n".join(all_template_str_list) + "\n\n}\n" - if a_type == "kFE4M3fn": - filename = f"sm89_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" - else: - filename = f"sm80_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" + kernel_selector_str += ( + jinja2.Template(kernel_template2).render( + a_type_id=f"vllm::{a_type}.id()", + b_type_id=f"vllm::{b_type}.id()", + c_type_id=f"vllm::{c_type}.id()", + s_type_id=f"vllm::{s_type}.id()", + **config, + ) + + "\n" + ) + + file_content = FILE_HEAD + "\n\n" + file_content += "\n\n".join(all_template_str_list) + "\n\n}\n" + if a_type == "kFE4M3fn": + filename = f"sm89_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" + elif result_dict_tmp is sm_75_result_dict: + filename = f"sm75_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" + else: + filename = f"sm80_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" - filename = filename.lower() + filename = filename.lower() - with open(os.path.join(os.path.dirname(__file__), filename), "w") as f: - f.write(file_content) + with open(os.path.join(os.path.dirname(__file__), filename), "w") as f: + f.write(file_content) if not SUPPORT_FP8 and kernel_selector_str != FILE_HEAD_COMMENT: kernel_selector_str += ( diff --git a/csrc/moe/marlin_moe_wna16/marlin_template.h b/csrc/moe/marlin_moe_wna16/marlin_template.h index 5b6b2456b411..138197b76f02 100644 --- a/csrc/moe/marlin_moe_wna16/marlin_template.h +++ b/csrc/moe/marlin_moe_wna16/marlin_template.h @@ -26,6 +26,7 @@ #include 
"quantization/gptq_marlin/marlin.cuh" #include "quantization/gptq_marlin/marlin_dtypes.cuh" #include "quantization/gptq_marlin/dequant.h" +#include "quantization/gptq_marlin/marlin_mma.h" #include "core/scalar_type.hpp" #define STATIC_ASSERT_SCALAR_TYPE_VALID(scalar_t) \ @@ -35,7 +36,7 @@ namespace MARLIN_NAMESPACE_NAME { -#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800 +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 750 template -__device__ inline void mma( - const typename MarlinScalarType::FragA& a_frag, - const typename MarlinScalarType::FragB& frag_b, - typename MarlinScalarType::FragC& frag_c, int idx = 0) { - const uint32_t* a = reinterpret_cast(&a_frag); - const uint32_t* b = reinterpret_cast(&frag_b); - using scalar_t = typename MarlinScalarType::scalar_t; - if constexpr (k_size == 16) { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[idx * 2]), "r"(a[idx * 2 + 1]), "r"(b[idx]), "f"(c[0]), - "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - 
int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(a[idx * 2]), "r"(a[idx * 2 + 1]), "r"(b[idx]), "r"(c[0]), - "r"(c[1]), "r"(c[2]), "r"(c[3])); - } - } else if (k_size == 32) { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "r"(c[0]), "r"(c[1]), "r"(c[2]), "r"(c[3])); - } - } -} - -template -__device__ inline void mma_trans( - const typename MarlinScalarType::FragA& a_frag, - const typename MarlinScalarType::FragB& frag_b, - const typename MarlinScalarType::FragB& frag_b2, - typename MarlinScalarType::FragC& frag_c) { - const uint32_t* a = reinterpret_cast(&a_frag); - const uint32_t* b = reinterpret_cast(&frag_b); - const uint32_t* b2 = reinterpret_cast(&frag_b2); - float* c = reinterpret_cast(&frag_c); - using scalar_t = typename MarlinScalarType::scalar_t; - if constexpr (k_size == 16) { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), 
"r"(a[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "f"(c[0]), "f"(c[1]), "f"(c[2]), - "f"(c[3])); - } else if constexpr (std::is_same::value) { - int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "r"(c[0]), "r"(c[1]), "r"(c[2]), - "r"(c[3])); - } - } else { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 1200 - asm volatile( - "mma.sync.aligned.kind::f8f6f4.m16n8k32.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - #else - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - #endif - } else if constexpr (std::is_same::value) { - 
int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "r"(c[0]), "r"(c[1]), "r"(c[2]), "r"(c[3])); - } - } -} - // Instruction for loading a full 16x16 matrix fragment of operand A from shared // memory, directly in tensor core layout. template @@ -439,9 +300,20 @@ __global__ void Marlin( if constexpr (a_type_id == vllm::kFE4M3fn.id()) return; #endif + #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + // Turing TensorCore only supports fp16 and int8 + if constexpr (a_type_id != vllm::kFloat16.id() && a_type_id != vllm::kS8.id()) + return; + #endif + int num_tokens_past_padded = num_tokens_past_padded_ptr[0]; constexpr int moe_block_size = m_block_size_8 ? 8 : (16 * thread_m_blocks); + #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + constexpr bool use_fp16_accum = a_type_id == vllm::kFloat16.id(); + #else + constexpr bool use_fp16_accum = false; + #endif using Adtype = MarlinScalarType; using Cdtype = MarlinScalarType; @@ -618,7 +490,22 @@ __global__ void Marlin( } } + #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + + if constexpr (moe_block_size >= 16) + local_count += __shfl_down_sync(0xFFFFFFFF, local_count, 16); + if constexpr (moe_block_size >= 8) + local_count += __shfl_down_sync(0xFFFFFFFF, local_count, 8); + if constexpr (moe_block_size >= 4) + local_count += __shfl_down_sync(0xFFFFFFFF, local_count, 4); + if constexpr (moe_block_size >= 2) + local_count += __shfl_down_sync(0xFFFFFFFF, local_count, 2); + + local_count += __shfl_down_sync(0xFFFFFFFF, local_count, 1); + block_num_valid_tokens = local_count; + #else block_num_valid_tokens = __reduce_add_sync(0xffffffff, local_count); + #endif if (lane_id == 0) reinterpret_cast(sh_new)[0] = block_num_valid_tokens; @@ -1018,10 +905,6 @@ __global__ 
void Marlin( constexpr int sh_s_size = has_act_order ? (act_s_max_num_groups * s_sh_stride) : (stages * s_sh_stage); int4* sh_s = sh_zp + (stages * zp_sh_stage); - // shared memory reused by reduction should be smaller than - // shared memory used by weight. - static_assert(thread_m_blocks * 16 * thread_n_blocks * 16 / 8 <= - stages * b_sh_stage); int4* sh_a = sh_s + sh_s_size; // Register storage for double buffer of shared memory reads. @@ -1545,11 +1428,13 @@ __global__ void Marlin( #pragma unroll for (int i = 0; i < thread_m_blocks; i++) { if constexpr (m_block_size_8) { - mma_trans(frag_a[k2][i], frag_b0, frag_b1, - frag_c[i][j][0]); + mma_trans(frag_a[k2][i], frag_b0, frag_b1, + frag_c[i][j][0]); } else { - mma(frag_a[k2][i], frag_b0, frag_c[i][j][0]); - mma(frag_a[k2][i], frag_b1, frag_c[i][j][1]); + mma(frag_a[k2][i], frag_b0, + frag_c[i][j][0]); + mma(frag_a[k2][i], frag_b1, + frag_c[i][j][1]); } } } @@ -1583,10 +1468,12 @@ __global__ void Marlin( #pragma unroll for (int i = 0; i < thread_m_blocks; i++) { - mma(frag_a[k2][i], frag_b[0], - (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][0]); - mma(frag_a[k2][i], frag_b[1], - (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][1]); + mma( + frag_a[k2][i], frag_b[0], + (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][0]); + mma( + frag_a[k2][i], frag_b[1], + (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][1]); } if constexpr (group_blocks != -1) { @@ -2132,6 +2019,21 @@ __global__ void Marlin( // While this pattern may not be the most readable, other ways of writing // the loop seemed to noticeably worse performance after compilation. if (slice_iters == 0) { + // convert fp16 accum to fp32 for reduction + if constexpr (use_fp16_accum) { + #pragma unroll + for (int i = 0; i < (thread_m_blocks * (is_a_8bit ? 
2 : 4) * 2); i++) { + float* frag_c_part_float = reinterpret_cast(frag_c) + i * 4; + scalar_t* frag_c_part_half = + reinterpret_cast(frag_c_part_float); + + #pragma unroll + for (int i = 3; i >= 0; i--) { + frag_c_part_float[i] = Cdtype::num2float(frag_c_part_half[i]); + } + } + } + if constexpr (is_a_8bit) { float frag_a_s[2 * thread_m_blocks]; diff --git a/csrc/moe/marlin_moe_wna16/ops.cu b/csrc/moe/marlin_moe_wna16/ops.cu index 4fd8fc5c5420..8ac1691220a6 100644 --- a/csrc/moe/marlin_moe_wna16/ops.cu +++ b/csrc/moe/marlin_moe_wna16/ops.cu @@ -142,7 +142,7 @@ typedef struct { int get_scales_cache_size(thread_config_t const& th_config, int prob_m, int prob_n, int prob_k, int num_bits, int group_size, - bool has_act_order, bool is_k_full) { + bool has_act_order, bool is_k_full, int stages) { bool cache_scales_chunk = has_act_order && !is_k_full; int tb_n = th_config.thread_n; @@ -160,13 +160,13 @@ int get_scales_cache_size(thread_config_t const& th_config, int prob_m, if (cache_scales_chunk) { int load_groups = - tb_groups * pipe_stages * 2; // Chunk size is 2x pipeline over dim K + tb_groups * stages * 2; // Chunk size is 2x pipeline over dim K load_groups = max(load_groups, 32); // We load at least 32 scale groups return load_groups * tb_n * 2; } else { int tb_scales = tb_groups * tb_n * 2; - return tb_scales * pipe_stages; + return tb_scales * stages; } } @@ -174,7 +174,7 @@ int get_kernel_cache_size(thread_config_t const& th_config, bool m_block_size_8, int thread_m_blocks, int prob_m, int prob_n, int prob_k, int num_bits, int group_size, bool has_act_order, bool is_k_full, int has_zp, - int is_zp_float, bool is_a_8bit) { + int is_zp_float, bool is_a_8bit, int stages) { int pack_factor = 32 / num_bits; // Get B size @@ -185,8 +185,8 @@ int get_kernel_cache_size(thread_config_t const& th_config, bool m_block_size_8, // shm size for block_sorted_ids/rd_block_sorted_ids/block_topk_weights // both of them requires tb_m * 4 bytes (tb_m * int32 or tb_m * float32) int 
sh_block_meta_size = tb_m * 16; - int sh_a_size = pipe_stages * (tb_m * tb_k) * (is_a_8bit ? 1 : 2); - int sh_b_size = pipe_stages * (tb_k * tb_n / pack_factor) * 4; + int sh_a_size = stages * (tb_m * tb_k) * (is_a_8bit ? 1 : 2); + int sh_b_size = stages * (tb_k * tb_n / pack_factor) * 4; int sh_red_size = tb_m * (tb_n + 8) * 2; int sh_bias_size = tb_n * 2; int tmp_size = @@ -195,8 +195,8 @@ int get_kernel_cache_size(thread_config_t const& th_config, bool m_block_size_8, int sh_s_size = get_scales_cache_size(th_config, prob_m, prob_n, prob_k, num_bits, - group_size, has_act_order, is_k_full); - int sh_g_idx_size = has_act_order && !is_k_full ? pipe_stages * tb_k / 4 : 0; + group_size, has_act_order, is_k_full, stages); + int sh_g_idx_size = has_act_order && !is_k_full ? stages * tb_k / 4 : 0; int sh_zp_size = 0; if (has_zp) { if (is_zp_float) @@ -217,7 +217,7 @@ bool is_valid_config(thread_config_t const& th_config, bool m_block_size_8, int thread_m_blocks, int prob_m, int prob_n, int prob_k, int num_bits, int group_size, bool has_act_order, bool is_k_full, int has_zp, int is_zp_float, - int max_shared_mem, bool is_a_8bit) { + bool is_a_8bit, int stages, int max_shared_mem) { // Sanity if (th_config.thread_k == -1 || th_config.thread_n == -1 || th_config.num_threads == -1) { @@ -243,7 +243,7 @@ bool is_valid_config(thread_config_t const& th_config, bool m_block_size_8, int cache_size = get_kernel_cache_size(th_config, m_block_size_8, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, group_size, has_act_order, - is_k_full, has_zp, is_zp_float, is_a_8bit); + is_k_full, has_zp, is_zp_float, is_a_8bit, stages); return cache_size <= max_shared_mem; } @@ -252,7 +252,7 @@ MarlinFuncPtr get_marlin_kernel( const vllm::ScalarType c_type, const vllm::ScalarType s_type, int thread_m_blocks, int thread_n_blocks, int thread_k_blocks, bool m_block_size_8, bool has_act_order, bool has_zp, int group_blocks, - int threads, bool is_zp_float) { + int threads, bool is_zp_float, int 
stages) { int num_bits = b_type.size_bits(); auto kernel = MarlinDefault; @@ -266,8 +266,8 @@ exec_config_t determine_exec_config( const vllm::ScalarType& c_type, const vllm::ScalarType& s_type, int prob_m, int prob_n, int prob_k, int num_experts, int top_k, int thread_m_blocks, bool m_block_size_8, int num_bits, int group_size, bool has_act_order, - bool is_k_full, bool has_zp, bool is_zp_float, int max_shared_mem, int sms, - bool is_a_8bit) { + bool is_k_full, bool has_zp, bool is_zp_float, bool is_a_8bit, int stages, + int max_shared_mem, int sms) { exec_config_t exec_cfg = exec_config_t{1, thread_config_t{-1, -1, -1}}; thread_config_t* thread_configs = thread_m_blocks > 1 ? large_batch_thread_configs @@ -284,15 +284,15 @@ exec_config_t determine_exec_config( if (!is_valid_config(th_config, m_block_size_8, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, group_size, has_act_order, - is_k_full, has_zp, is_zp_float, max_shared_mem - 512, - is_a_8bit)) { + is_k_full, has_zp, is_zp_float, is_a_8bit, stages, + max_shared_mem - 512)) { continue; } int cache_size = get_kernel_cache_size( th_config, m_block_size_8, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, group_size, has_act_order, is_k_full, has_zp, is_zp_float, - is_a_8bit); + is_a_8bit, stages); int group_blocks = 0; if (!has_act_order) { @@ -303,7 +303,7 @@ exec_config_t determine_exec_config( get_marlin_kernel(a_type, b_type, c_type, s_type, thread_m_blocks, th_config.thread_n / 16, th_config.thread_k / 16, m_block_size_8, has_act_order, has_zp, group_blocks, - th_config.num_threads, is_zp_float); + th_config.num_threads, is_zp_float, stages); if (kernel == MarlinDefault) continue; @@ -433,8 +433,14 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, dev); cudaDeviceGetAttribute(&minor_capability, cudaDevAttrComputeCapabilityMinor, dev); - TORCH_CHECK(major_capability * 10 + minor_capability >= 80, - "marlin kernel only support Ampere or newer GPUs."); + 
TORCH_CHECK(major_capability * 10 + minor_capability >= 75, + "marlin kernel only support Turing or newer GPUs."); + int stages = 4; + if (major_capability == 7 && minor_capability == 5) { + stages = 2; + TORCH_CHECK(a_type == vllm::kFloat16 || a_type == vllm::kS8, + "Turing only support FP16 or INT8 activation."); + } if (a_type == vllm::kFE4M3fn) { TORCH_CHECK(major_capability * 10 + minor_capability >= 89, "FP8 only support Ada Lovelace or newer GPUs."); @@ -461,8 +467,8 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, exec_cfg = determine_exec_config( a_type, b_type, c_type, s_type, prob_m, prob_n, prob_k, num_experts, top_k, thread_m_blocks, m_block_size_8, num_bits, group_size, - has_act_order, is_k_full, has_zp, is_zp_float, max_shared_mem, sms, - is_a_8bit); + has_act_order, is_k_full, has_zp, is_zp_float, is_a_8bit, stages, + max_shared_mem, sms); thread_tfg = exec_cfg.tb_cfg; } @@ -479,7 +485,7 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, TORCH_CHECK(is_valid_config(thread_tfg, m_block_size_8, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, group_size, has_act_order, is_k_full, has_zp, is_zp_float, - max_shared_mem, is_a_8bit), + is_a_8bit, stages, max_shared_mem), "Invalid thread config: thread_m_blocks = ", thread_m_blocks, ", thread_k = ", thread_tfg.thread_k, ", thread_n = ", thread_tfg.thread_n, @@ -493,12 +499,12 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, int sh_cache_size = get_kernel_cache_size(thread_tfg, m_block_size_8, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, group_size, has_act_order, - is_k_full, has_zp, is_zp_float, is_a_8bit); + is_k_full, has_zp, is_zp_float, is_a_8bit, stages); auto kernel = get_marlin_kernel( a_type, b_type, c_type, s_type, thread_m_blocks, thread_n_blocks, thread_k_blocks, m_block_size_8, has_act_order, has_zp, group_blocks, - num_threads, is_zp_float); + num_threads, is_zp_float, stages); if 
(kernel == MarlinDefault) { TORCH_CHECK(false, "Unsupported shapes: MNK = [", prob_m, ", ", prob_n, diff --git a/csrc/quantization/gptq_marlin/.gitignore b/csrc/quantization/gptq_marlin/.gitignore index ba805f9250ec..7dc482a89466 100644 --- a/csrc/quantization/gptq_marlin/.gitignore +++ b/csrc/quantization/gptq_marlin/.gitignore @@ -1,2 +1,3 @@ sm*_kernel_*.cu kernel_selector.h +kernel_*.cu diff --git a/csrc/quantization/gptq_marlin/dequant.h b/csrc/quantization/gptq_marlin/dequant.h index 26b8d40368aa..edd97dbfcd8e 100644 --- a/csrc/quantization/gptq_marlin/dequant.h +++ b/csrc/quantization/gptq_marlin/dequant.h @@ -67,7 +67,7 @@ where `scale_factor * multiplier` can be computed at weight loading. namespace MARLIN_NAMESPACE_NAME { -#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 750 // Lookup-table based 3-input logical operation; explicitly used for // dequantization as the compiler does not seem to automatically recognize it in // all cases. diff --git a/csrc/quantization/gptq_marlin/generate_kernels.py b/csrc/quantization/gptq_marlin/generate_kernels.py index 27ef7271ba41..24866fc5cd54 100644 --- a/csrc/quantization/gptq_marlin/generate_kernels.py +++ b/csrc/quantization/gptq_marlin/generate_kernels.py @@ -10,6 +10,8 @@ ARCHS = [] SUPPORT_FP8 = False +SUPPORT_SM75 = False +SUPPORT_SM80 = False for arch in sys.argv[1].split(","): arch = arch[: arch.index(".") + 2].replace(".", "") arch = int(arch) @@ -19,6 +21,10 @@ # with FP16 MMA, so it cannot achieve any acceleration. 
if arch in [89, 120]: SUPPORT_FP8 = True + if arch >= 80: + SUPPORT_SM80 = True + if arch == 75: + SUPPORT_SM75 = True FILE_HEAD_COMMENT = """ // auto generated by generate_kernels.py @@ -166,6 +172,7 @@ def remove_old_kernels(): def generate_new_kernels(): result_dict = {} + sm_75_result_dict = {} for quant_config in QUANT_CONFIGS: c_types = quant_config.get("c_type", ["kFloat16", "kBFloat16"]) @@ -184,6 +191,8 @@ def generate_new_kernels(): s_type = quant_config.get("s_type", c_type) if (a_type, b_type, c_type) not in result_dict: result_dict[(a_type, b_type, c_type)] = [] + if a_type in ["kFloat16", "kS8"] and c_type == "kFloat16": + sm_75_result_dict[(a_type, b_type, c_type)] = [] for group_blocks, m_blocks, thread_configs in itertools.product( all_group_blocks, all_m_blocks, all_thread_configs @@ -207,78 +216,89 @@ def generate_new_kernels(): "thread_k_blocks": thread_k // 16, "thread_n_blocks": thread_n // 16, "m_block_size_8": "true" if m_blocks == 0.5 else "false", - "stages": "pipe_stages", + "stages": 4, "group_blocks": group_blocks, "is_zp_float": "true" if is_zp_float else "false", } - result_dict[(a_type, b_type, c_type)].append(config) + if SUPPORT_SM80: + result_dict[(a_type, b_type, c_type)].append(config) + if (a_type, b_type, c_type) in sm_75_result_dict and SUPPORT_SM75: + config_sm75 = config.copy() + config_sm75["stages"] = 2 + sm_75_result_dict[(a_type, b_type, c_type)].append(config_sm75) kernel_selector_str = FILE_HEAD_COMMENT - for (a_type, b_type, c_type), config_list in result_dict.items(): - all_template_str_list = [] - for config in config_list: - s_type = config["s_type"] - template_str = jinja2.Template(TEMPLATE).render( - a_type_id=f"vllm::{a_type}.id()", - b_type_id=f"vllm::{b_type}.id()", - c_type_id=f"vllm::{c_type}.id()", - s_type_id=f"vllm::{s_type}.id()", - **config, - ) - all_template_str_list.append(template_str) - - conditions = [ - f"a_type == vllm::{a_type}", - f"b_type == vllm::{b_type}", - f"c_type == vllm::{c_type}", - 
f"s_type == vllm::{s_type}", - f"threads == {config['threads']}", - f"thread_m_blocks == {config['thread_m_blocks']}", - f"thread_n_blocks == {config['thread_n_blocks']}", - f"thread_k_blocks == {config['thread_k_blocks']}", - f"m_block_size_8 == {config['m_block_size_8']}", - f"group_blocks == {config['group_blocks']}", - f"is_zp_float == {config['is_zp_float']}", - ] - conditions = " && ".join(conditions) - - if kernel_selector_str == FILE_HEAD_COMMENT: - kernel_selector_str += f"if ({conditions})\n kernel = " - else: - kernel_selector_str += f"else if ({conditions})\n kernel = " - - kernel_template2 = ( - "Marlin<{{a_type_id}}, {{b_type_id}}, {{c_type_id}}, " - "{{s_type_id}}, {{threads}}, {{thread_m_blocks}}, " - "{{thread_n_blocks}}, {{thread_k_blocks}}, " - "{{m_block_size_8}}, {{stages}}, {{group_blocks}}, " - "{{is_zp_float}}>;" - ) - - kernel_selector_str += ( - jinja2.Template(kernel_template2).render( + for result_dict_tmp in [result_dict, sm_75_result_dict]: + for (a_type, b_type, c_type), config_list in result_dict_tmp.items(): + all_template_str_list = [] + if not config_list: + continue + for config in config_list: + s_type = config["s_type"] + template_str = jinja2.Template(TEMPLATE).render( a_type_id=f"vllm::{a_type}.id()", b_type_id=f"vllm::{b_type}.id()", c_type_id=f"vllm::{c_type}.id()", s_type_id=f"vllm::{s_type}.id()", **config, ) - + "\n" - ) + all_template_str_list.append(template_str) + + conditions = [ + f"a_type == vllm::{a_type}", + f"b_type == vllm::{b_type}", + f"c_type == vllm::{c_type}", + f"s_type == vllm::{s_type}", + f"threads == {config['threads']}", + f"thread_m_blocks == {config['thread_m_blocks']}", + f"thread_n_blocks == {config['thread_n_blocks']}", + f"thread_k_blocks == {config['thread_k_blocks']}", + f"m_block_size_8 == {config['m_block_size_8']}", + f"stages == {config['stages']}", + f"group_blocks == {config['group_blocks']}", + f"is_zp_float == {config['is_zp_float']}", + ] + conditions = " && ".join(conditions) + + if 
kernel_selector_str == FILE_HEAD_COMMENT: + kernel_selector_str += f"if ({conditions})\n kernel = " + else: + kernel_selector_str += f"else if ({conditions})\n kernel = " + + kernel_template2 = ( + "Marlin<{{a_type_id}}, {{b_type_id}}, {{c_type_id}}, " + "{{s_type_id}}, {{threads}}, {{thread_m_blocks}}, " + "{{thread_n_blocks}}, {{thread_k_blocks}}, " + "{{m_block_size_8}}, {{stages}}, {{group_blocks}}, " + "{{is_zp_float}}>;" + ) - file_content = FILE_HEAD + "\n\n" - file_content += "\n\n".join(all_template_str_list) + "\n\n}\n" - if a_type == "kFE4M3fn": - filename = f"sm89_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" - else: - filename = f"sm80_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" + kernel_selector_str += ( + jinja2.Template(kernel_template2).render( + a_type_id=f"vllm::{a_type}.id()", + b_type_id=f"vllm::{b_type}.id()", + c_type_id=f"vllm::{c_type}.id()", + s_type_id=f"vllm::{s_type}.id()", + **config, + ) + + "\n" + ) + + file_content = FILE_HEAD + "\n\n" + file_content += "\n\n".join(all_template_str_list) + "\n\n}\n" + if a_type == "kFE4M3fn": + filename = f"sm89_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" + elif result_dict_tmp is sm_75_result_dict: + filename = f"sm75_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" + else: + filename = f"sm80_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu" - filename = filename.lower() + filename = filename.lower() - with open(os.path.join(os.path.dirname(__file__), filename), "w") as f: - f.write(file_content) + with open(os.path.join(os.path.dirname(__file__), filename), "w") as f: + f.write(file_content) if not SUPPORT_FP8 and kernel_selector_str != FILE_HEAD_COMMENT: kernel_selector_str += ( diff --git a/csrc/quantization/gptq_marlin/gptq_marlin.cu b/csrc/quantization/gptq_marlin/gptq_marlin.cu index 28ff06559a98..77f319d53bc5 100644 --- a/csrc/quantization/gptq_marlin/gptq_marlin.cu +++ b/csrc/quantization/gptq_marlin/gptq_marlin.cu @@ -37,7 +37,7 @@ __global__ void 
MarlinDefault(MARLIN_KERNEL_PARAMS){}; using MarlinFuncPtr = void (*)(MARLIN_KERNEL_PARAMS); -#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800 +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 750 __global__ void permute_cols_kernel(int4 const* __restrict__ a_int4_ptr, int const* __restrict__ perm_int_ptr, @@ -148,7 +148,7 @@ typedef struct { int get_scales_cache_size(thread_config_t const& th_config, int prob_m, int prob_n, int prob_k, int num_bits, int group_size, - bool has_act_order, bool is_k_full) { + bool has_act_order, bool is_k_full, int stages) { bool cache_scales_chunk = has_act_order && !is_k_full; int tb_n = th_config.thread_n; @@ -166,28 +166,29 @@ int get_scales_cache_size(thread_config_t const& th_config, int prob_m, if (cache_scales_chunk) { int load_groups = - tb_groups * pipe_stages * 2; // Chunk size is 2x pipeline over dim K + tb_groups * stages * 2; // Chunk size is 2x pipeline over dim K load_groups = max(load_groups, 32); // We load at least 32 scale groups return load_groups * tb_n * 2; } else { int tb_scales = tb_groups * tb_n * 2; - return tb_scales * pipe_stages; + return tb_scales * stages; } } int get_kernel_cache_size(thread_config_t const& th_config, int thread_m_blocks, int prob_m, int prob_n, int prob_k, int num_bits, int group_size, bool has_act_order, bool is_k_full, - int has_zp, int is_zp_float) { + int has_zp, bool is_zp_float, bool is_a_8bit, + int stages) { int pack_factor = 32 / num_bits; // Get B size int tb_k = th_config.thread_k; int tb_n = th_config.thread_n; int tb_m = thread_m_blocks * 16; - int sh_a_size = pipe_stages * (tb_m * tb_k) * 2; - int sh_b_size = pipe_stages * (tb_k * tb_n / pack_factor) * 4; + int sh_a_size = stages * (tb_m * tb_k) * (is_a_8bit ? 
1 : 2); + int sh_b_size = stages * (tb_k * tb_n / pack_factor) * 4; int sh_red_size = tb_m * (tb_n + 8) * 2; int sh_bias_size = tb_n * 2; int tmp_size = @@ -196,8 +197,8 @@ int get_kernel_cache_size(thread_config_t const& th_config, int thread_m_blocks, int sh_s_size = get_scales_cache_size(th_config, prob_m, prob_n, prob_k, num_bits, - group_size, has_act_order, is_k_full); - int sh_g_idx_size = has_act_order && !is_k_full ? pipe_stages * tb_k / 4 : 0; + group_size, has_act_order, is_k_full, stages); + int sh_g_idx_size = has_act_order && !is_k_full ? stages * tb_k / 4 : 0; int sh_zp_size = 0; if (has_zp) { if (is_zp_float) @@ -217,7 +218,8 @@ int get_kernel_cache_size(thread_config_t const& th_config, int thread_m_blocks, bool is_valid_config(thread_config_t const& th_config, int thread_m_blocks, int prob_m, int prob_n, int prob_k, int num_bits, int group_size, bool has_act_order, bool is_k_full, - int has_zp, int is_zp_float, int max_shared_mem) { + int has_zp, bool is_zp_float, bool is_a_8bit, int stages, + int max_shared_mem) { // Sanity if (th_config.thread_k == -1 || th_config.thread_n == -1 || th_config.num_threads == -1) { @@ -242,7 +244,7 @@ bool is_valid_config(thread_config_t const& th_config, int thread_m_blocks, // Check that pipeline fits into cache int cache_size = get_kernel_cache_size( th_config, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, group_size, - has_act_order, is_k_full, has_zp, is_zp_float); + has_act_order, is_k_full, has_zp, is_zp_float, is_a_8bit, stages); return cache_size <= max_shared_mem; } @@ -251,7 +253,7 @@ MarlinFuncPtr get_marlin_kernel( const vllm::ScalarType c_type, const vllm::ScalarType s_type, int thread_m_blocks, int thread_n_blocks, int thread_k_blocks, bool m_block_size_8, bool has_act_order, bool has_zp, int group_blocks, - int threads, bool is_zp_float) { + int threads, bool is_zp_float, int stages) { int num_bits = b_type.size_bits(); auto kernel = MarlinDefault; @@ -265,7 +267,8 @@ exec_config_t 
determine_exec_config( const vllm::ScalarType& c_type, const vllm::ScalarType& s_type, int prob_m, int prob_n, int prob_k, int thread_m_blocks, bool m_block_size_8, int num_bits, int group_size, bool has_act_order, bool is_k_full, - bool has_zp, bool is_zp_float, int max_shared_mem, int sms) { + bool has_zp, bool is_zp_float, int is_a_8bit, int stages, + int max_shared_mem, int sms) { exec_config_t exec_cfg = exec_config_t{1, thread_config_t{-1, -1, -1}}; thread_config_t* thread_configs = thread_m_blocks > 1 ? large_batch_thread_configs @@ -280,13 +283,15 @@ exec_config_t determine_exec_config( if (!is_valid_config(th_config, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, group_size, has_act_order, is_k_full, has_zp, - is_zp_float, max_shared_mem - 512)) { + is_zp_float, is_a_8bit, stages, + max_shared_mem - 512)) { continue; } - int cache_size = get_kernel_cache_size( - th_config, thread_m_blocks, prob_m, prob_n, prob_k, num_bits, - group_size, has_act_order, is_k_full, has_zp, is_zp_float); + int cache_size = get_kernel_cache_size(th_config, thread_m_blocks, prob_m, + prob_n, prob_k, num_bits, group_size, + has_act_order, is_k_full, has_zp, + is_zp_float, is_a_8bit, stages); int group_blocks = 0; if (!has_act_order) { @@ -297,14 +302,10 @@ exec_config_t determine_exec_config( get_marlin_kernel(a_type, b_type, c_type, s_type, thread_m_blocks, th_config.thread_n / 16, th_config.thread_k / 16, m_block_size_8, has_act_order, has_zp, group_blocks, - th_config.num_threads, is_zp_float); + th_config.num_threads, is_zp_float, stages); if (kernel == MarlinDefault) continue; - // int m_tiles = div_ceil(prob_m, thread_m_blocks * 16); - // int n_tiles = prob_n / th_config.thread_n; - // int k_tiles = prob_k / th_config.thread_k; - return {1, th_config}; } @@ -321,6 +322,7 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, int group_size, int dev, cudaStream_t stream, int thread_k_init, int thread_n_init, int sms, bool use_atomic_add, bool 
use_fp32_reduce, bool is_zp_float) { + bool is_a_8bit = a_type.size_bits() == 8; TORCH_CHECK(prob_m > 0 && prob_n > 0 && prob_k > 0, "Invalid MNK = [", prob_m, ", ", prob_n, ", ", prob_k, "]"); @@ -389,8 +391,14 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, dev); cudaDeviceGetAttribute(&minor_capability, cudaDevAttrComputeCapabilityMinor, dev); - TORCH_CHECK(major_capability * 10 + minor_capability >= 80, - "marlin kernel only support Ampere or newer GPUs."); + TORCH_CHECK(major_capability * 10 + minor_capability >= 75, + "marlin kernel only support Turing or newer GPUs."); + int stages = 4; + if (major_capability == 7 && minor_capability == 5) { + stages = 2; + TORCH_CHECK(a_type == vllm::kFloat16 || a_type == vllm::kS8, + "Turing only support FP16 or INT8 activation."); + } if (a_type == vllm::kFE4M3fn) { TORCH_CHECK( major_capability * 10 + minor_capability == 89 || @@ -431,7 +439,8 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, exec_cfg = determine_exec_config( a_type, b_type, c_type, s_type, prob_m_split, prob_n, prob_k, thread_m_blocks, m_block_size_8, num_bits, group_size, has_act_order, - is_k_full, has_zp, is_zp_float, max_shared_mem, sms); + is_k_full, has_zp, is_zp_float, is_a_8bit, stages, max_shared_mem, + sms); thread_tfg = exec_cfg.tb_cfg; if (thread_tfg.thread_n != -1) { if (prob_n / thread_tfg.thread_n * @@ -440,7 +449,7 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, if (is_valid_config({128, 64, 128}, thread_m_blocks, prob_m_split, prob_n, prob_k, num_bits, group_size, has_act_order, is_k_full, has_zp, is_zp_float, - max_shared_mem_new)) { + is_a_8bit, stages, max_shared_mem_new)) { thread_tfg = {128, 64, 128}; exec_cfg = {1, thread_tfg}; } @@ -466,7 +475,8 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, TORCH_CHECK( is_valid_config(thread_tfg, thread_m_blocks, prob_m_split, prob_n, prob_k, num_bits, 
group_size, has_act_order, is_k_full, - has_zp, is_zp_float, max_shared_mem_new), + has_zp, is_zp_float, is_a_8bit, stages, + max_shared_mem_new), "Invalid thread config: thread_m_blocks = ", thread_m_blocks, ", thread_k = ", thread_tfg.thread_k, ", thread_n = ", thread_tfg.thread_n, @@ -475,12 +485,12 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* b_bias, ", prob_m_split = ", prob_m_split, ", group_size = ", group_size, ", has_act_order = ", has_act_order, ", is_k_full = ", is_k_full, ", has_zp = ", has_zp, ", is_zp_float = ", is_zp_float, - ", max_shared_mem_new = ", max_shared_mem_new); + ", stages = ", stages, ", max_shared_mem_new = ", max_shared_mem_new); auto kernel = get_marlin_kernel( a_type, b_type, c_type, s_type, thread_m_blocks, thread_n_blocks, thread_k_blocks, m_block_size_8, has_act_order, has_zp, group_blocks, - num_threads, is_zp_float); + num_threads, is_zp_float, stages); if (kernel == MarlinDefault) { TORCH_CHECK(false, "Unsupported shapes: MNK = [", prob_m, ", ", prob_n, diff --git a/csrc/quantization/gptq_marlin/marlin.cuh b/csrc/quantization/gptq_marlin/marlin.cuh index 2505e221322d..33fe52f605b4 100644 --- a/csrc/quantization/gptq_marlin/marlin.cuh +++ b/csrc/quantization/gptq_marlin/marlin.cuh @@ -1,17 +1,19 @@ #pragma once -#include +#ifndef _marlin_cuh + #define _marlin_cuh + #include -#include -#include -#include -#include -#include -#include + #include + #include + #include + #include + #include + #include -#ifndef MARLIN_NAMESPACE_NAME - #define MARLIN_NAMESPACE_NAME marlin -#endif + #ifndef MARLIN_NAMESPACE_NAME + #define MARLIN_NAMESPACE_NAME marlin + #endif namespace MARLIN_NAMESPACE_NAME { @@ -51,9 +53,51 @@ using I4 = Vec; constexpr int div_ceil(int a, int b) { return (a + b - 1) / b; } -#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800 -// No support for async -#else + #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800 + +__device__ inline void cp_async1_ca_pred(void* smem_ptr, const void* glob_ptr, + 
bool pred = true) { + if (pred) { + reinterpret_cast(smem_ptr)[0] = + reinterpret_cast(glob_ptr)[0]; + } +} + +__device__ inline void cp_async2_ca_pred(void* smem_ptr, const void* glob_ptr, + bool pred = true) { + if (pred) { + reinterpret_cast(smem_ptr)[0] = + reinterpret_cast(glob_ptr)[0]; + } +} + +__device__ inline void cp_async4_ca_pred(void* smem_ptr, const void* glob_ptr, + bool pred = true) { + if (pred) { + reinterpret_cast(smem_ptr)[0] = + reinterpret_cast(glob_ptr)[0]; + } +} + +__device__ inline void cp_async4_pred(void* smem_ptr, const void* glob_ptr, + bool pred = true) { + if (pred) { + reinterpret_cast(smem_ptr)[0] = + reinterpret_cast(glob_ptr)[0]; + } +} + +__device__ inline void cp_async4(void* smem_ptr, const void* glob_ptr) { + reinterpret_cast(smem_ptr)[0] = + reinterpret_cast(glob_ptr)[0]; +} + +__device__ inline void cp_async_fence() {} + +template +__device__ inline void cp_async_wait() {} + + #else __device__ inline void cp_async1_ca_pred(void* smem_ptr, const void* glob_ptr, bool pred = true) { @@ -126,6 +170,8 @@ __device__ inline void cp_async_wait() { asm volatile("cp.async.wait_group %0;\n" ::"n"(n)); } -#endif + #endif } // namespace MARLIN_NAMESPACE_NAME + +#endif \ No newline at end of file diff --git a/csrc/quantization/gptq_marlin/marlin_mma.h b/csrc/quantization/gptq_marlin/marlin_mma.h new file mode 100644 index 000000000000..6ec2aaafc439 --- /dev/null +++ b/csrc/quantization/gptq_marlin/marlin_mma.h @@ -0,0 +1,269 @@ + +#include "marlin_dtypes.cuh" + +namespace MARLIN_NAMESPACE_NAME { + +// m16n8k16 tensor core mma instruction with fp16 inputs and fp32 +// output/accumulation. 
+template +__device__ inline void mma( + const typename MarlinScalarType::FragA& a_frag, + const typename MarlinScalarType::FragB& frag_b, + typename MarlinScalarType::FragC& frag_c, int idx = 0) { + const uint32_t* a = reinterpret_cast(&a_frag); + const uint32_t* b = reinterpret_cast(&frag_b); + using scalar_t = typename MarlinScalarType::scalar_t; + if constexpr (!std::is_same::value || k_size != 16) { + static_assert(!use_fp16_accum); + } + + if constexpr (k_size == 16) { + if constexpr (std::is_same::value && !use_fp16_accum) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[0]), "r"(a[1]), "r"(b[0]), "f"(c[0]), "f"(c[1]), "f"(c[2]), + "f"(c[3])); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[2]), "r"(a[3]), "r"(b[1]), "f"(c[0]), "f"(c[1]), "f"(c[2]), + "f"(c[3])); +#else + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), + "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); +#endif + } else if constexpr (std::is_same::value && + use_fp16_accum) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + uint32_t* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 " + "{%0,%1}, {%2,%3}, {%4}, {%5,%6};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(a[0]), "r"(a[1]), "r"(b[0]), "r"(c[0]), "r"(c[1])); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 " + "{%0,%1}, {%2,%3}, {%4}, {%5,%6};\n" + : "=r"(c[0]), "=r"(c[1]) + : 
"r"(a[2]), "r"(a[3]), "r"(b[1]), "r"(c[0]), "r"(c[1])); +#else + uint32_t* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 " + "{%0,%1}, {%2,%3,%4,%5}, {%6,%7}, {%8,%9};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), + "r"(c[0]), "r"(c[1])); +#endif + } else if constexpr (std::is_same::value) { + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), + "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); + } else if constexpr (std::is_same::value) { + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.e4m3.e4m3.f32 " + "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[idx * 2]), "r"(a[idx * 2 + 1]), "r"(b[idx]), "f"(c[0]), + "f"(c[1]), "f"(c[2]), "f"(c[3])); + } else if constexpr (std::is_same::value) { + int32_t* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) + : "r"(a[idx * 2]), "r"(a[idx * 2 + 1]), "r"(b[idx]), "r"(c[0]), + "r"(c[1]), "r"(c[2]), "r"(c[3])); + } + } else if (k_size == 32) { + if constexpr (std::is_same::value) { + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), + "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); + } else if constexpr (std::is_same::value) { + int32_t* c = reinterpret_cast(&frag_c); +#if 
defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(a[0]), "r"(b[0]), "r"(c[0]), "r"(c[1])); + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[2]), "=r"(c[3]) + : "r"(a[1]), "r"(b[0]), "r"(c[2]), "r"(c[3])); + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(a[2]), "r"(b[1]), "r"(c[0]), "r"(c[1])); + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[2]), "=r"(c[3]) + : "r"(a[3]), "r"(b[1]), "r"(c[2]), "r"(c[3])); +#else + asm volatile( + "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) + : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), + "r"(c[0]), "r"(c[1]), "r"(c[2]), "r"(c[3])); +#endif + } + } +} + +template +__device__ inline void mma_trans( + const typename MarlinScalarType::FragA& a_frag, + const typename MarlinScalarType::FragB& frag_b, + const typename MarlinScalarType::FragB& frag_b2, + typename MarlinScalarType::FragC& frag_c) { + const uint32_t* a = reinterpret_cast(&a_frag); + const uint32_t* b = reinterpret_cast(&frag_b); + const uint32_t* b2 = reinterpret_cast(&frag_b2); + float* c = reinterpret_cast(&frag_c); + using scalar_t = typename MarlinScalarType::scalar_t; + if constexpr (!std::is_same::value || k_size != 16) { + static_assert(!use_fp16_accum); + } + + if constexpr (k_size == 16) { + if constexpr (std::is_same::value && !use_fp16_accum) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, 
{%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "f"(c[0]), "f"(c[1]), "f"(c[2]), + "f"(c[3])); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(b[1]), "r"(b2[1]), "r"(a[1]), "f"(c[0]), "f"(c[1]), "f"(c[2]), + "f"(c[3])); +#else + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), + "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); +#endif + } else if constexpr (std::is_same::value && + use_fp16_accum) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + uint32_t* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 " + "{%0,%1}, {%2,%3}, {%4}, {%5,%6};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "r"(c[0]), "r"(c[1])); + asm volatile( + "mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 " + "{%0,%1}, {%2,%3}, {%4}, {%5,%6};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(b[1]), "r"(b2[1]), "r"(a[1]), "r"(c[0]), "r"(c[1])); +#else + uint32_t* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 " + "{%0,%1}, {%2,%3,%4,%5}, {%6,%7}, {%8,%9};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), + "r"(c[0]), "r"(c[1])); +#endif + } else if constexpr (std::is_same::value) { + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), + 
"f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); + } else if constexpr (std::is_same::value) { + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.e4m3.e4m3.f32 " + "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "f"(c[0]), "f"(c[1]), "f"(c[2]), + "f"(c[3])); + } else if constexpr (std::is_same::value) { + int32_t* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" + : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) + : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "r"(c[0]), "r"(c[1]), "r"(c[2]), + "r"(c[3])); + } + } else { + if constexpr (std::is_same::value) { + float* c = reinterpret_cast(&frag_c); + asm volatile( + "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) + : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), + "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); + } else if constexpr (std::is_same::value) { + int32_t* c = reinterpret_cast(&frag_c); +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(b[0]), "r"(a[0]), "r"(c[0]), "r"(c[1])); + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[2]), "=r"(c[3]) + : "r"(b2[1]), "r"(a[0]), "r"(c[2]), "r"(c[3])); + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[0]), "=r"(c[1]) + : "r"(b[0]), "r"(a[1]), "r"(c[0]), "r"(c[1])); + asm volatile( + "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1}, {%2}, {%3}, {%4,%5};\n" + : "=r"(c[2]), "=r"(c[3]) 
+ : "r"(b2[1]), "r"(a[1]), "r"(c[2]), "r"(c[3])); +#else + asm volatile( + "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " + "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" + : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) + : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), + "r"(c[0]), "r"(c[1]), "r"(c[2]), "r"(c[3])); +#endif + } + } +} + +} // namespace MARLIN_NAMESPACE_NAME \ No newline at end of file diff --git a/csrc/quantization/gptq_marlin/marlin_template.h b/csrc/quantization/gptq_marlin/marlin_template.h index 22bb71e482ce..c7b53696c122 100644 --- a/csrc/quantization/gptq_marlin/marlin_template.h +++ b/csrc/quantization/gptq_marlin/marlin_template.h @@ -26,6 +26,7 @@ #include "marlin.cuh" #include "marlin_dtypes.cuh" #include "dequant.h" +#include "marlin_mma.h" #include "core/scalar_type.hpp" #define STATIC_ASSERT_SCALAR_TYPE_VALID(scalar_t) \ @@ -35,7 +36,7 @@ namespace MARLIN_NAMESPACE_NAME { -#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800 +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 750 template -__device__ inline void mma( - const typename MarlinScalarType::FragA& a_frag, - const typename MarlinScalarType::FragB& frag_b, - typename MarlinScalarType::FragC& frag_c, int idx = 0) { - const uint32_t* a = reinterpret_cast(&a_frag); - const uint32_t* b = reinterpret_cast(&frag_b); - using scalar_t = typename MarlinScalarType::scalar_t; - if constexpr (k_size == 16) { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " - 
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[idx * 2]), "r"(a[idx * 2 + 1]), "r"(b[idx]), "f"(c[0]), - "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(a[idx * 2]), "r"(a[idx * 2 + 1]), "r"(b[idx]), "r"(c[0]), - "r"(c[1]), "r"(c[2]), "r"(c[3])); - } - } else if (k_size == 32) { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]), "r"(b[0]), "r"(b[1]), - "r"(c[0]), "r"(c[1]), "r"(c[2]), "r"(c[3])); - } - } -} - -template -__device__ inline void mma_trans( - const typename MarlinScalarType::FragA& a_frag, - const typename MarlinScalarType::FragB& frag_b, - const typename MarlinScalarType::FragB& frag_b2, - 
typename MarlinScalarType::FragC& frag_c) { - const uint32_t* a = reinterpret_cast(&a_frag); - const uint32_t* b = reinterpret_cast(&frag_b); - const uint32_t* b2 = reinterpret_cast(&frag_b2); - float* c = reinterpret_cast(&frag_c); - using scalar_t = typename MarlinScalarType::scalar_t; - if constexpr (k_size == 16) { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "f"(c[0]), "f"(c[1]), "f"(c[2]), - "f"(c[3])); - } else if constexpr (std::is_same::value) { - int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5}, {%6}, {%7,%8,%9,%10};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(a[0]), "r"(c[0]), "r"(c[1]), "r"(c[2]), - "r"(c[3])); - } - } else { - if constexpr (std::is_same::value) { - float* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32 " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, 
{%8,%9}, {%10,%11,%12,%13};\n" - : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "f"(c[0]), "f"(c[1]), "f"(c[2]), "f"(c[3])); - } else if constexpr (std::is_same::value) { - int32_t* c = reinterpret_cast(&frag_c); - asm volatile( - "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " - "{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9}, {%10,%11,%12,%13};\n" - : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) - : "r"(b[0]), "r"(b2[0]), "r"(b[1]), "r"(b2[1]), "r"(a[0]), "r"(a[1]), - "r"(c[0]), "r"(c[1]), "r"(c[2]), "r"(c[3])); - } - } -} - // Instruction for loading a full 16x16 matrix fragment of operand A from shared // memory, directly in tensor core layout. template @@ -415,6 +285,17 @@ __global__ void Marlin( if constexpr (a_type_id == vllm::kFE4M3fn.id()) return; #endif + #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + // Turing TensorCore only supports fp16 and int8 + if constexpr (a_type_id != vllm::kFloat16.id() && a_type_id != vllm::kS8.id()) + return; + #endif + + #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ == 750 + constexpr bool use_fp16_accum = a_type_id == vllm::kFloat16.id(); + #else + constexpr bool use_fp16_accum = false; + #endif using Adtype = MarlinScalarType; using Cdtype = MarlinScalarType; const int4* A = A0; @@ -873,10 +754,6 @@ __global__ void Marlin( constexpr int sh_s_size = has_act_order ? (act_s_max_num_groups * s_sh_stride) : (stages * s_sh_stage); int4* sh_s = sh_zp + (stages * zp_sh_stage); - // shared memory reused by reduction should be smaller than - // shared memory used by weight. - static_assert(thread_m_blocks * 16 * thread_n_blocks * 16 / 8 <= - stages * b_sh_stage); int4* sh_a = sh_s + sh_s_size; // Register storage for double buffer of shared memory reads. 
@@ -1395,11 +1272,13 @@ __global__ void Marlin( #pragma unroll for (int i = 0; i < thread_m_blocks; i++) { if constexpr (m_block_size_8) { - mma_trans(frag_a[k2][i], frag_b0, frag_b1, - frag_c[i][j][0]); + mma_trans(frag_a[k2][i], frag_b0, frag_b1, + frag_c[i][j][0]); } else { - mma(frag_a[k2][i], frag_b0, frag_c[i][j][0]); - mma(frag_a[k2][i], frag_b1, frag_c[i][j][1]); + mma(frag_a[k2][i], frag_b0, + frag_c[i][j][0]); + mma(frag_a[k2][i], frag_b1, + frag_c[i][j][1]); } } } @@ -1433,10 +1312,12 @@ __global__ void Marlin( #pragma unroll for (int i = 0; i < thread_m_blocks; i++) { - mma(frag_a[k2][i], frag_b[0], - (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][0]); - mma(frag_a[k2][i], frag_b[1], - (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][1]); + mma( + frag_a[k2][i], frag_b[0], + (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][0]); + mma( + frag_a[k2][i], frag_b[1], + (group_blocks == -1 ? frag_c : frag_c_tmp)[i][j][1]); } if constexpr (group_blocks != -1) { @@ -1956,6 +1837,21 @@ __global__ void Marlin( // While this pattern may not be the most readable, other ways of writing // the loop seemed to noticeably worse performance after compilation. if (slice_iters == 0) { + // convert fp16 accum to fp32 for reduction + if constexpr (use_fp16_accum) { + #pragma unroll + for (int i = 0; i < (thread_m_blocks * (is_a_8bit ? 
2 : 4) * 2); i++) { + float* frag_c_part_float = reinterpret_cast(frag_c) + i * 4; + scalar_t* frag_c_part_half = + reinterpret_cast(frag_c_part_float); + + #pragma unroll + for (int i = 3; i >= 0; i--) { + frag_c_part_float[i] = Cdtype::num2float(frag_c_part_half[i]); + } + } + } + if constexpr (is_a_8bit) { float frag_a_s[2 * thread_m_blocks]; diff --git a/vllm/model_executor/layers/quantization/awq_marlin.py b/vllm/model_executor/layers/quantization/awq_marlin.py index 3ed15ed7dd42..314848721a80 100644 --- a/vllm/model_executor/layers/quantization/awq_marlin.py +++ b/vllm/model_executor/layers/quantization/awq_marlin.py @@ -121,7 +121,7 @@ def get_supported_act_dtypes(cls) -> list[torch.dtype]: @classmethod def get_min_capability(cls) -> int: - return 80 + return 75 @classmethod def get_config_filenames(cls) -> list[str]: diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index f2b66a2beb6d..800340ed6043 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -253,7 +253,7 @@ def get_supported_act_dtypes(cls) -> list[torch.dtype]: @classmethod def get_min_capability(cls) -> int: - return 80 + return 75 @classmethod def get_config_filenames(cls) -> list[str]: diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py index 6e5dcfe59b2f..347c7b2008d1 100644 --- a/vllm/model_executor/layers/quantization/gptq_marlin.py +++ b/vllm/model_executor/layers/quantization/gptq_marlin.py @@ -181,7 +181,7 @@ def get_supported_act_dtypes(cls) -> list[torch.dtype]: @classmethod def get_min_capability(cls) -> int: - return 80 + return 75 @classmethod def get_config_filenames(cls) -> list[str]: diff --git a/vllm/model_executor/layers/quantization/modelopt.py b/vllm/model_executor/layers/quantization/modelopt.py index d5d7e7bfaae7..aa3937d4c03f 100644 --- 
a/vllm/model_executor/layers/quantization/modelopt.py +++ b/vllm/model_executor/layers/quantization/modelopt.py @@ -871,7 +871,7 @@ def get_supported_act_dtypes(self) -> list[torch.dtype]: @classmethod def get_min_capability(cls) -> int: - return 80 + return 75 @classmethod def override_quantization_method( From b19c650d93f9ecfb81c73305a643555bdee68262 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Tue, 16 Dec 2025 17:47:53 -0500 Subject: [PATCH 31/73] [CI] Skip ci failure test (#30804) Signed-off-by: yewentao256 --- tests/compile/distributed/test_fusions_e2e.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/compile/distributed/test_fusions_e2e.py b/tests/compile/distributed/test_fusions_e2e.py index bd326f1157d8..80086c4e03a9 100644 --- a/tests/compile/distributed/test_fusions_e2e.py +++ b/tests/compile/distributed/test_fusions_e2e.py @@ -523,6 +523,8 @@ def run_model(compile_config: int | CompilationConfig, model: str, **model_kwarg list[tuple[Any, ...]](flat_product(MODELS_GROUP_FP8, CUSTOM_OPS_QUANT_RMS_NORM)), ) @pytest.mark.parametrize("inductor_graph_partition", [True, False]) +# TODO: remove skip after we fix the fusion thoroughly +@pytest.mark.skipif(is_blackwell(), reason="Temporarily disabled on Blackwell") def test_rms_group_quant( model_name: str, model_kwargs: dict[str, Any], @@ -562,7 +564,7 @@ def test_rms_group_quant( splitting_ops=splitting_ops, # Common mode=CompilationMode.VLLM_COMPILE, - pass_config=PassConfig(eliminate_noops=True, enable_fusion=True), + pass_config=PassConfig(eliminate_noops=True, fuse_norm_quant=True), # Inductor caches custom passes by default as well via uuid inductor_compile_config={"force_disable_caches": True}, ) From 5177d061b4850ce2b2631378717390ae64c873af Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Tue, 16 Dec 2025 17:56:02 -0500 Subject: [PATCH 32/73] [Perf][Kernels] Vectorize `csrc/activations_kernels.cu` (#29512) Signed-off-by: mgoin 
--- benchmarks/kernels/benchmark_activation.py | 4 +- csrc/activation_kernels.cu | 210 +++++++++++++++++---- 2 files changed, 176 insertions(+), 38 deletions(-) diff --git a/benchmarks/kernels/benchmark_activation.py b/benchmarks/kernels/benchmark_activation.py index 66268b71b3de..d31e67057d8f 100644 --- a/benchmarks/kernels/benchmark_activation.py +++ b/benchmarks/kernels/benchmark_activation.py @@ -13,8 +13,8 @@ from vllm.utils.argparse_utils import FlexibleArgumentParser from vllm.utils.torch_utils import STR_DTYPE_TO_TORCH_DTYPE -batch_size_range = [1, 16, 32, 64, 128] -seq_len_range = [1, 16, 64, 128, 256, 512, 1024, 2048, 4096] +batch_size_range = [1, 16, 128] +seq_len_range = [1, 16, 64, 1024, 4096] intermediate_size = [3072, 9728, 12288] configs = list(itertools.product(batch_size_range, seq_len_range, intermediate_size)) diff --git a/csrc/activation_kernels.cu b/csrc/activation_kernels.cu index a4a880f13cf7..8268065ef02c 100644 --- a/csrc/activation_kernels.cu +++ b/csrc/activation_kernels.cu @@ -15,19 +15,61 @@ __device__ __forceinline__ scalar_t compute(const scalar_t& x, const scalar_t& y) { return act_first ? ACT_FN(x) * y : x * ACT_FN(y); } -// Activation and gating kernel template. +// Check if all pointers are 16-byte aligned for int4 vectorized access +__device__ __forceinline__ bool is_16byte_aligned(const void* ptr) { + return (reinterpret_cast(ptr) & 15) == 0; +} + +// Activation and gating kernel template. 
template __global__ void act_and_mul_kernel( scalar_t* __restrict__ out, // [..., d] const scalar_t* __restrict__ input, // [..., 2, d] const int d) { + constexpr int VEC_SIZE = 16 / sizeof(scalar_t); const int64_t token_idx = blockIdx.x; - for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { - const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]); - const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]); - out[token_idx * d + idx] = compute(x, y); + const scalar_t* x_ptr = input + token_idx * 2 * d; + const scalar_t* y_ptr = x_ptr + d; + scalar_t* out_ptr = out + token_idx * d; + + // Check alignment for 128-bit vectorized access. + // All three pointers must be 16-byte aligned for safe int4 operations. + const bool aligned = is_16byte_aligned(x_ptr) && is_16byte_aligned(y_ptr) && + is_16byte_aligned(out_ptr); + + if (aligned && d >= VEC_SIZE) { + // Fast path: 128-bit vectorized loop + const int4* x_vec = reinterpret_cast(x_ptr); + const int4* y_vec = reinterpret_cast(y_ptr); + int4* out_vec = reinterpret_cast(out_ptr); + const int num_vecs = d / VEC_SIZE; + const int vec_end = num_vecs * VEC_SIZE; + + for (int i = threadIdx.x; i < num_vecs; i += blockDim.x) { + int4 x = VLLM_LDG(&x_vec[i]), y = VLLM_LDG(&y_vec[i]), r; + auto* xp = reinterpret_cast(&x); + auto* yp = reinterpret_cast(&y); + auto* rp = reinterpret_cast(&r); +#pragma unroll + for (int j = 0; j < VEC_SIZE; j++) { + rp[j] = compute(xp[j], yp[j]); + } + out_vec[i] = r; + } + // Scalar cleanup for remaining elements + for (int i = vec_end + threadIdx.x; i < d; i += blockDim.x) { + out_ptr[i] = compute(VLLM_LDG(&x_ptr[i]), + VLLM_LDG(&y_ptr[i])); + } + } else { + // Scalar fallback for unaligned data or small d + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&x_ptr[idx]); + const scalar_t y = VLLM_LDG(&y_ptr[idx]); + out_ptr[idx] = compute(x, y); + } } } @@ -120,50 +162,115 @@ template __global__ void act_and_mul_kernel_with_param( 
scalar_t* __restrict__ out, const scalar_t* __restrict__ input, const int d, const float param) { + constexpr int VEC_SIZE = 16 / sizeof(scalar_t); const int64_t token_idx = blockIdx.x; - for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { - const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]); - const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]); - out[token_idx * d + idx] = ACT_FN(x, param) * y; + const scalar_t* x_ptr = input + token_idx * 2 * d; + const scalar_t* y_ptr = x_ptr + d; + scalar_t* out_ptr = out + token_idx * d; + + // Check alignment for 128-bit vectorized access + const bool aligned = is_16byte_aligned(x_ptr) && is_16byte_aligned(y_ptr) && + is_16byte_aligned(out_ptr); + + if (aligned && d >= VEC_SIZE) { + // Fast path: 128-bit vectorized loop + const int4* x_vec = reinterpret_cast(x_ptr); + const int4* y_vec = reinterpret_cast(y_ptr); + int4* out_vec = reinterpret_cast(out_ptr); + const int num_vecs = d / VEC_SIZE; + const int vec_end = num_vecs * VEC_SIZE; + + for (int i = threadIdx.x; i < num_vecs; i += blockDim.x) { + int4 x = VLLM_LDG(&x_vec[i]), y = VLLM_LDG(&y_vec[i]), r; + auto* xp = reinterpret_cast(&x); + auto* yp = reinterpret_cast(&y); + auto* rp = reinterpret_cast(&r); +#pragma unroll + for (int j = 0; j < VEC_SIZE; j++) { + rp[j] = ACT_FN(xp[j], param) * yp[j]; + } + out_vec[i] = r; + } + // Scalar cleanup for remaining elements + for (int i = vec_end + threadIdx.x; i < d; i += blockDim.x) { + out_ptr[i] = ACT_FN(VLLM_LDG(&x_ptr[i]), param) * VLLM_LDG(&y_ptr[i]); + } + } else { + // Scalar fallback for unaligned data or small d + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&x_ptr[idx]); + const scalar_t y = VLLM_LDG(&y_ptr[idx]); + out_ptr[idx] = ACT_FN(x, param) * y; + } } } template __device__ __forceinline__ T swigluoai_and_mul(const T& gate, const T& up, float alpha, float limit) { - // clamp gate: min=None, max=limit - const float gate_f = 
(float)gate; - const float clamped_gate = gate_f > limit ? limit : gate_f; - - // clamp up: min=-limit, max=limit - const float up_f = (float)up; - const float clamped_up = - up_f > limit ? limit : (up_f < -limit ? -limit : up_f); - - // glu = gate * sigmoid(gate * alpha) - const float sigmoid_val = 1.0f / (1.0f + expf(-clamped_gate * alpha)); - const float glu = clamped_gate * sigmoid_val; - - // (up + 1) * glu - return (T)((clamped_up + 1.0f) * glu); + // Clamp gate to (-inf, limit] and up to [-limit, limit] + const float g = fminf((float)gate, limit); + const float u = fmaxf(fminf((float)up, limit), -limit); + // glu = gate * sigmoid(gate * alpha), then return (up + 1) * glu + return (T)((u + 1.0f) * g / (1.0f + expf(-g * alpha))); } +// Interleaved gate/up: input has [gate0, up0, gate1, up1, ...]. template __global__ void swigluoai_and_mul_kernel( scalar_t* __restrict__ out, // [..., d] - const scalar_t* __restrict__ input, // [..., 2, d] + const scalar_t* __restrict__ input, // [..., 2 * d] (interleaved) const int d, const float alpha, const float limit) { + // For interleaved data: input has 2*d elements per token (gate/up pairs) + // output has d elements per token + constexpr int VEC_SIZE = 16 / sizeof(scalar_t); + constexpr int PAIRS = VEC_SIZE / 2; // Number of gate/up pairs per int4 load const int64_t token_idx = blockIdx.x; - // TODO: Vectorize loads and stores. - for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { - // gate = x[..., ::2] (even indices) - const scalar_t gate = VLLM_LDG(&input[token_idx * 2 * d + 2 * idx]); - // up = x[..., 1::2] (odd indices) - const scalar_t up = VLLM_LDG(&input[token_idx * 2 * d + 2 * idx + 1]); - - out[token_idx * d + idx] = ACT_FN(gate, up, alpha, limit); + const scalar_t* in_ptr = input + token_idx * 2 * d; + scalar_t* out_ptr = out + token_idx * d; + + // Check alignment for 128-bit vectorized access on input. + // For output we use int2 (64-bit) which has 8-byte alignment requirement. 
+ const bool in_aligned = is_16byte_aligned(in_ptr); + const bool out_aligned = + (reinterpret_cast(out_ptr) & 7) == 0; // 8-byte for int2 + + if (in_aligned && out_aligned && d >= PAIRS) { + // Fast path: vectorized loop + // Each int4 load gives VEC_SIZE elements = PAIRS gate/up pairs + // Each int2 store writes PAIRS output elements + const int4* in_vec = reinterpret_cast(in_ptr); + int2* out_vec = reinterpret_cast(out_ptr); + const int num_vecs = d / PAIRS; + const int vec_end = num_vecs * PAIRS; + + for (int i = threadIdx.x; i < num_vecs; i += blockDim.x) { + int4 v = VLLM_LDG(&in_vec[i]); + int2 r; + auto* vp = reinterpret_cast(&v); + auto* rp = reinterpret_cast(&r); +#pragma unroll + for (int j = 0; j < PAIRS; j++) { + rp[j] = ACT_FN(vp[2 * j], vp[2 * j + 1], alpha, limit); + } + out_vec[i] = r; + } + // Scalar cleanup for remaining elements + for (int i = vec_end + threadIdx.x; i < d; i += blockDim.x) { + out_ptr[i] = ACT_FN(VLLM_LDG(&in_ptr[2 * i]), + VLLM_LDG(&in_ptr[2 * i + 1]), alpha, limit); + } + } else { + // Scalar fallback for unaligned data or small d + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + // gate = x[..., ::2] (even indices) + const scalar_t gate = VLLM_LDG(&in_ptr[2 * idx]); + // up = x[..., 1::2] (odd indices) + const scalar_t up = VLLM_LDG(&in_ptr[2 * idx + 1]); + out_ptr[idx] = ACT_FN(gate, up, alpha, limit); + } } } @@ -217,10 +324,41 @@ __global__ void activation_kernel( scalar_t* __restrict__ out, // [..., d] const scalar_t* __restrict__ input, // [..., d] const int d) { + constexpr int VEC_SIZE = 16 / sizeof(scalar_t); const int64_t token_idx = blockIdx.x; - for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { - const scalar_t x = VLLM_LDG(&input[token_idx * d + idx]); - out[token_idx * d + idx] = ACT_FN(x); + const scalar_t* in_ptr = input + token_idx * d; + scalar_t* out_ptr = out + token_idx * d; + + // Check alignment for 128-bit vectorized access + const bool aligned = is_16byte_aligned(in_ptr) && 
is_16byte_aligned(out_ptr); + + if (aligned && d >= VEC_SIZE) { + // Fast path: 128-bit vectorized loop + const int4* in_vec = reinterpret_cast(in_ptr); + int4* out_vec = reinterpret_cast(out_ptr); + const int num_vecs = d / VEC_SIZE; + const int vec_end = num_vecs * VEC_SIZE; + + for (int i = threadIdx.x; i < num_vecs; i += blockDim.x) { + int4 v = VLLM_LDG(&in_vec[i]), r; + auto* vp = reinterpret_cast(&v); + auto* rp = reinterpret_cast(&r); +#pragma unroll + for (int j = 0; j < VEC_SIZE; j++) { + rp[j] = ACT_FN(vp[j]); + } + out_vec[i] = r; + } + // Scalar cleanup for remaining elements + for (int i = vec_end + threadIdx.x; i < d; i += blockDim.x) { + out_ptr[i] = ACT_FN(VLLM_LDG(&in_ptr[i])); + } + } else { + // Scalar fallback for unaligned data or small d + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&in_ptr[idx]); + out_ptr[idx] = ACT_FN(x); + } } } From d51899612d4a6e5f2afad5f433eac8eaded52fb6 Mon Sep 17 00:00:00 2001 From: TJian Date: Wed, 17 Dec 2025 07:32:43 +0800 Subject: [PATCH 33/73] [ROCm] [Bugfix] Fix torch sdpa hallucination (#30789) Signed-off-by: tjtanaa --- vllm/attention/ops/vit_attn_wrappers.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/vllm/attention/ops/vit_attn_wrappers.py b/vllm/attention/ops/vit_attn_wrappers.py index 46c7d83dfa5c..892c4209c01e 100644 --- a/vllm/attention/ops/vit_attn_wrappers.py +++ b/vllm/attention/ops/vit_attn_wrappers.py @@ -16,6 +16,7 @@ import torch import torch.nn.functional as F +from vllm.platforms import current_platform from vllm.utils.torch_utils import direct_register_custom_op @@ -89,6 +90,13 @@ def torch_sdpa_wrapper( v: torch.Tensor, cu_seqlens: torch.Tensor, ) -> torch.Tensor: + # Never remove the contiguous logic for ROCm + # Without it, hallucinations occur with the backend + if current_platform.is_rocm(): + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + outputs = [] lens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist() From 
8d71371cd9a6479a978c193322614abcdc5d0a02 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Tue, 16 Dec 2025 18:40:47 -0500 Subject: [PATCH 34/73] Replace deprecated enable_fusion with fuse_norm_quant in test_rms_group_quant (#30817) Signed-off-by: mgoin From f9068d33220467c7f59ae60f74378bde5f975a39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Wed, 17 Dec 2025 00:54:45 +0100 Subject: [PATCH 35/73] [MM] Pass FA version in ViT Attn (#30756) Signed-off-by: NickLucche Co-authored-by: Cyrus Leung --- vllm/attention/layers/mm_encoder_attention.py | 6 ++++++ vllm/attention/ops/vit_attn_wrappers.py | 9 ++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/vllm/attention/layers/mm_encoder_attention.py b/vllm/attention/layers/mm_encoder_attention.py index c9107ebcab85..8b3dee1340b9 100644 --- a/vllm/attention/layers/mm_encoder_attention.py +++ b/vllm/attention/layers/mm_encoder_attention.py @@ -10,6 +10,7 @@ vit_flash_attn_wrapper, vit_torch_sdpa_wrapper, ) +from vllm.attention.utils.fa_utils import get_flash_attn_version from vllm.config import MultiModalConfig from vllm.logger import init_logger from vllm.model_executor.custom_op import CustomOp @@ -101,6 +102,10 @@ def __init__( self.attn_backend, ) + if self.is_flash_attn_backend: + assert self.flash_attn_varlen_func is not None + self._fa_version = get_flash_attn_version() + logger.info_once(f"Using {self.attn_backend} for MMEncoderAttention.") @classmethod @@ -204,6 +209,7 @@ def _forward_fa( max_seqlen=max_seqlen, batch_size=bsz, is_rocm_aiter=(self.attn_backend == AttentionBackendEnum.ROCM_AITER_FA), + fa_version=self._fa_version, ) return output diff --git a/vllm/attention/ops/vit_attn_wrappers.py b/vllm/attention/ops/vit_attn_wrappers.py index 892c4209c01e..5a74e1310133 100644 --- a/vllm/attention/ops/vit_attn_wrappers.py +++ b/vllm/attention/ops/vit_attn_wrappers.py @@ -28,11 +28,15 @@ def flash_attn_maxseqlen_wrapper( max_seqlen: torch.Tensor, batch_size: int, 
is_rocm_aiter: bool, + fa_version: int, ) -> torch.Tensor: + kwargs = {} if is_rocm_aiter: from aiter import flash_attn_varlen_func else: from vllm.attention.utils.fa_utils import flash_attn_varlen_func + + kwargs["fa_version"] = fa_version q, k, v = (einops.rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]) output = flash_attn_varlen_func( q, @@ -44,6 +48,7 @@ def flash_attn_maxseqlen_wrapper( max_seqlen_k=max_seqlen.item(), dropout_p=0.0, causal=False, + **kwargs, ) context_layer = einops.rearrange(output, "(b s) h d -> b s h d", b=batch_size) return context_layer @@ -57,6 +62,7 @@ def flash_attn_maxseqlen_wrapper_fake( max_seqlen: torch.Tensor, batch_size: int, is_rocm_aiter: bool, + fa_version: int, ) -> torch.Tensor: return torch.empty_like(q) @@ -76,9 +82,10 @@ def vit_flash_attn_wrapper( max_seqlen: torch.Tensor, batch_size: int, is_rocm_aiter: bool, + fa_version: int, ) -> torch.Tensor: return torch.ops.vllm.flash_attn_maxseqlen_wrapper( - q, k, v, cu_seqlens, max_seqlen, batch_size, is_rocm_aiter + q, k, v, cu_seqlens, max_seqlen, batch_size, is_rocm_aiter, fa_version ) From 481d63f2a9f88efed4c353dd21d806db348092a6 Mon Sep 17 00:00:00 2001 From: Amr Mahdi Date: Wed, 17 Dec 2025 02:41:57 +0200 Subject: [PATCH 36/73] [docker] Allow kv_connectors install to fail on arm64 (#30806) Signed-off-by: Amr Mahdi --- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index ae2624ace67b..e61021b6eeb8 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -621,7 +621,7 @@ ENV UV_HTTP_TIMEOUT=500 RUN --mount=type=cache,target=/root/.cache/uv \ --mount=type=bind,source=requirements/kv_connectors.txt,target=/tmp/kv_connectors.txt,ro \ if [ "$INSTALL_KV_CONNECTORS" = "true" ]; then \ - uv pip install --system -r /tmp/kv_connectors.txt; \ + uv pip install --system -r /tmp/kv_connectors.txt || true; \ fi ENV VLLM_USAGE_SOURCE production-docker-image From c4abe59b1e6b7b113973a91868ef4e713e551af0 
Mon Sep 17 00:00:00 2001 From: "Grzegorz K. Karch" Date: Wed, 17 Dec 2025 02:06:28 +0100 Subject: [PATCH 37/73] Fix nemotron_nas intermediate_size computation (#30795) Signed-off-by: Grzegorz Karch --- vllm/model_executor/models/nemotron_nas.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/models/nemotron_nas.py b/vllm/model_executor/models/nemotron_nas.py index 19a942a5277c..83ef5e7e1282 100644 --- a/vllm/model_executor/models/nemotron_nas.py +++ b/vllm/model_executor/models/nemotron_nas.py @@ -169,10 +169,13 @@ def __init__( self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) if not self._is_no_op_ffn: - ffn_mult = block_config.ffn.ffn_mult - intermediate_size = _ffn_mult_to_intermediate_size( - ffn_mult, config.hidden_size - ) + if hasattr(block_config.ffn, "ffn_mult"): + ffn_mult = block_config.ffn.ffn_mult + intermediate_size = _ffn_mult_to_intermediate_size( + ffn_mult, config.hidden_size + ) + else: + intermediate_size = block_config.ffn.intermediate_size self.mlp = LlamaMLP( hidden_size=self.hidden_size, From ff0c04bcc3693e1e90dd3a0bd6bc9b3394f0a657 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Tue, 16 Dec 2025 20:52:14 -0500 Subject: [PATCH 38/73] Update model-hosting-container-standards to 0.1.10 (#30815) Signed-off-by: Michael Goin --- requirements/common.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/common.txt b/requirements/common.txt index 31c8fb404f63..426d281c2670 100644 --- a/requirements/common.txt +++ b/requirements/common.txt @@ -50,5 +50,5 @@ ijson # Required for mistral streaming tool parser setproctitle # Used to set process names for better debugging and monitoring openai-harmony >= 0.0.3 # Required for gpt-oss anthropic == 0.71.0 -model-hosting-container-standards >= 0.1.9, < 1.0.0 -mcp \ No newline at end of file +model-hosting-container-standards >= 0.1.10, < 1.0.0 +mcp From 1ec8d583efc041bebbc9319fefca58f99b8b399a Mon 
Sep 17 00:00:00 2001 From: Fadi Arafeh <115173828+fadara01@users.noreply.github.com> Date: Wed, 17 Dec 2025 04:21:07 +0000 Subject: [PATCH 39/73] [CPU] Add action to automatically label CPU related PRs (#30678) Signed-off-by: Fadi Arafeh --- .github/mergify.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/mergify.yml b/.github/mergify.yml index 3ad79f93bc7a..3e4e21efe39d 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -235,6 +235,20 @@ pull_request_rules: add: - rocm +- name: label-cpu + description: Automatically apply cpu label + conditions: + - label != stale + - files~=^(?!.*kv_offload)(?!.*cpu_offload).*\bcpu.* + actions: + label: + add: + - cpu + assign: + users: + - "fadara01" + - "aditew01" + - name: label-structured-output description: Automatically apply structured-output label conditions: From 35f5f6fdcdc31bff3cf1971aa786b203c982fefe Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 17 Dec 2025 12:21:19 +0800 Subject: [PATCH 40/73] [CI/Build] Fix compatibility between #30244 and #30396 (#30787) Signed-off-by: DarkLight1337 --- tests/compile/distributed/test_fusions_e2e.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/compile/distributed/test_fusions_e2e.py b/tests/compile/distributed/test_fusions_e2e.py index 80086c4e03a9..960b5b4bd7ad 100644 --- a/tests/compile/distributed/test_fusions_e2e.py +++ b/tests/compile/distributed/test_fusions_e2e.py @@ -564,7 +564,9 @@ def test_rms_group_quant( splitting_ops=splitting_ops, # Common mode=CompilationMode.VLLM_COMPILE, - pass_config=PassConfig(eliminate_noops=True, fuse_norm_quant=True), + pass_config=PassConfig( + fuse_norm_quant=True, fuse_act_quant=True, eliminate_noops=True + ), # Inductor caches custom passes by default as well via uuid inductor_compile_config={"force_disable_caches": True}, ) From 63a120b354cfd27a812f48e20e5d9363b8efc460 Mon Sep 17 00:00:00 2001 From: shanjiaz <43143795+shanjiaz@users.noreply.github.com> Date: Wed, 
17 Dec 2025 00:01:04 -0500 Subject: [PATCH 41/73] bump up compressed tensors version to 0.13.0 (#30799) Signed-off-by: shanjiaz Co-authored-by: Dipika Sikka --- requirements/common.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/common.txt b/requirements/common.txt index 426d281c2670..7c89385da6ba 100644 --- a/requirements/common.txt +++ b/requirements/common.txt @@ -37,7 +37,7 @@ pyyaml six>=1.16.0; python_version > '3.11' # transitive dependency of pandas that needs to be the latest version for python 3.12 setuptools>=77.0.3,<81.0.0; python_version > '3.11' # Setuptools is used by triton, we need to ensure a modern version is installed for 3.12+ so that it does not try to import distutils, which was removed in 3.12 einops # Required for Qwen2-VL. -compressed-tensors == 0.12.2 # required for compressed-tensors +compressed-tensors == 0.13.0 # required for compressed-tensors depyf==0.20.0 # required for profiling and debugging with compilation config cloudpickle # allows pickling lambda functions in model_executor/models/registry.py watchfiles # required for http server to monitor the updates of TLS files From 4c7a8bc2423f1b56a1792e14f494c79426c07827 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Wed, 17 Dec 2025 00:29:03 -0500 Subject: [PATCH 42/73] Update note comment for flashinfer attention warmup (#30711) Signed-off-by: mgoin --- vllm/model_executor/warmup/kernel_warmup.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/warmup/kernel_warmup.py b/vllm/model_executor/warmup/kernel_warmup.py index 95f5982bc8c7..98b28d3e5292 100644 --- a/vllm/model_executor/warmup/kernel_warmup.py +++ b/vllm/model_executor/warmup/kernel_warmup.py @@ -49,13 +49,12 @@ def _is_flashinfer_backend(backend): except NotImplementedError: return False - # NOTE: we add check for empty attn_groups to avoid errors when - # deploying models such as E instances and encoder-only models. 
- # As for those models, worker.model_runner.attn_groups is empty. - # This change is made during EPD feature development. if ( not worker.model_runner.is_pooling_model and worker.model_runner.attn_groups + # NOTE: This should be `any` instead of `all` but other hybrid attention + # backends don't support this dummy run. Once we remove + # `build_for_cudagraph_capture`, we can change it to `any`. and all( _is_flashinfer_backend(group.backend) for groups in worker.model_runner.attn_groups From b1997df1cb5b3cc52dbe5ed4eb34df44e25b5255 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Wed, 17 Dec 2025 15:25:12 +0800 Subject: [PATCH 43/73] [Bugfix][CPU] Fix CPU backend ROPE dispatch for VL models (#30829) Signed-off-by: jiang1.li Signed-off-by: Li, Jiang Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- vllm/model_executor/layers/rotary_embedding/common.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/vllm/model_executor/layers/rotary_embedding/common.py b/vllm/model_executor/layers/rotary_embedding/common.py index 3e6584dbc3da..50660c6ecc22 100644 --- a/vllm/model_executor/layers/rotary_embedding/common.py +++ b/vllm/model_executor/layers/rotary_embedding/common.py @@ -264,6 +264,15 @@ def forward_hip( return output + def forward_cpu( + self, + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + ) -> torch.Tensor: + # TODO (bigPYJ1151): need to enable fused CPU ROPE here + return self.forward_native(x, cos, sin) + def extra_repr(self) -> str: s = f"is_neox_style={self.is_neox_style}" s += f"enable_fp32_compute={self.enable_fp32_compute}" From 254881d94da1554aa20d798c0890234923d04899 Mon Sep 17 00:00:00 2001 From: Yan Ma Date: Wed, 17 Dec 2025 16:28:13 +0800 Subject: [PATCH 44/73] [XPU] fix broken fp8 online quantization for XPU platform (#30831) Signed-off-by: Yan Ma --- .../layers/quantization/ipex_quant.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git 
a/vllm/model_executor/layers/quantization/ipex_quant.py b/vllm/model_executor/layers/quantization/ipex_quant.py index 463c74c1c148..f33ee43727f1 100644 --- a/vllm/model_executor/layers/quantization/ipex_quant.py +++ b/vllm/model_executor/layers/quantization/ipex_quant.py @@ -27,6 +27,10 @@ from vllm.model_executor.layers.quantization.fp8 import Fp8Config, Fp8LinearMethod from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod from vllm.model_executor.layers.quantization.utils.quant_utils import is_layer_skipped +from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( + maybe_create_device_identity, +) +from vllm.model_executor.parameter import ModelWeightParameter from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform @@ -305,6 +309,37 @@ class XPUFp8LinearMethod(Fp8LinearMethod): def __init__(self, quant_config: Fp8Config): super().__init__(quant_config) + def create_weights( + self, + layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: list[int], + input_size: int, + output_size: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ): + maybe_create_device_identity() + + output_size_per_partition = sum(output_partition_sizes) + weight_loader = extra_weight_attrs.get("weight_loader") + layer.logical_widths = output_partition_sizes + layer.input_size_per_partition = input_size_per_partition + layer.output_size_per_partition = output_size_per_partition + layer.orig_dtype = params_dtype + layer.weight_block_size = None + weight = ModelWeightParameter( + data=torch.empty( + output_size_per_partition, + input_size_per_partition, + dtype=params_dtype, + ), + input_dim=1, + output_dim=0, + weight_loader=weight_loader, + ) + layer.register_parameter("weight", weight) + def process_weights_after_loading(self, layer: Module) -> None: # If checkpoint not serialized fp8, quantize the weights. 
if not self.quant_config.is_checkpoint_fp8_serialized: From a4e36fb0dfb5f6ae294277afd297f534a5c59585 Mon Sep 17 00:00:00 2001 From: Robin <863579016@qq.com> Date: Wed, 17 Dec 2025 16:37:57 +0800 Subject: [PATCH 45/73] [Bugfix][Frontend] Prevent IndexError in MiniMax M2 tool parser during streaming extraction (#30555) Signed-off-by: WangErXiao <863579016@qq.com> --- tests/tool_use/test_minimax_m2_tool_parser.py | 119 ++++++++++++++++++ vllm/tool_parsers/minimax_m2_tool_parser.py | 22 +++- 2 files changed, 137 insertions(+), 4 deletions(-) create mode 100644 tests/tool_use/test_minimax_m2_tool_parser.py diff --git a/tests/tool_use/test_minimax_m2_tool_parser.py b/tests/tool_use/test_minimax_m2_tool_parser.py new file mode 100644 index 000000000000..cf1835b1928b --- /dev/null +++ b/tests/tool_use/test_minimax_m2_tool_parser.py @@ -0,0 +1,119 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import json + +import pytest + +from vllm.tool_parsers.minimax_m2_tool_parser import ( + MinimaxM2ToolParser, +) + +pytestmark = pytest.mark.cpu_test + + +class FakeTokenizer: + """Minimal fake tokenizer that exposes the attributes used by the + parser: a truthy model_tokenizer marker and a vocab mapping for the + special tokens. 
+ """ + + def __init__(self): + self.model_tokenizer = True + # The parser will look up start/end tokens by their literal strings + self.vocab = { + "": 1, + "": 2, + } + + def get_vocab(self): + return self.vocab + + +@pytest.fixture +def minimax_m2_tool_parser(): + return MinimaxM2ToolParser(FakeTokenizer()) + + +def test_extract_tool_calls_streaming_incremental(minimax_m2_tool_parser): + parser = minimax_m2_tool_parser + parser._reset_streaming_state() + chunks = [ + "", + '', + '', + "Seattle", + "", + ] + previous = "" + for chunk in chunks: + current = previous + chunk + delta = chunk + parser.extract_tool_calls_streaming( + previous_text=previous, + current_text=current, + delta_text=delta, + previous_token_ids=[], + current_token_ids=[], + delta_token_ids=[], + request=None, + ) + previous = current + + assert len(parser.prev_tool_call_arr) == 1 + entry = parser.prev_tool_call_arr[0] + + assert entry["name"] == "get_weather" + args = entry["arguments"] + assert args["city"] == "Seattle" + + +def test_streaming_minimax_m2_multiple_invokes(minimax_m2_tool_parser): + parser = minimax_m2_tool_parser + parser._reset_streaming_state() + + chunks = [ + "", + '', + '', + '["technology", "events"]', + '', + '["OpenAI", "latest", "release"]', + "", + '', + '', + '["technology", "events"]', + '', + '["Gemini", "latest", "release"]', + "", + "", + ] + previous = "" + for chunk in chunks: + current = previous + chunk + delta = chunk + parser.extract_tool_calls_streaming( + previous_text=previous, + current_text=current, + delta_text=delta, + previous_token_ids=[], + current_token_ids=[], + delta_token_ids=[], + request=None, + ) + previous = current + + assert len(parser.prev_tool_call_arr) == 2 + + for entry, expect_model in zip(parser.prev_tool_call_arr, ["OpenAI", "Gemini"]): + assert entry["name"] == "search_web" + args = json.dumps(entry["arguments"]) + assert "technology" in args and "events" in args + assert expect_model in args + + # check streamed_args_for_tool 
for serving_chat.py + for index in range(2): + expected_call = parser.prev_tool_call_arr[index].get("arguments", {}) + expected_call = json.dumps(expected_call) + actual_call = parser.streamed_args_for_tool[index] + assert expected_call == actual_call diff --git a/vllm/tool_parsers/minimax_m2_tool_parser.py b/vllm/tool_parsers/minimax_m2_tool_parser.py index dcb2b64f6e73..a1ab75f548bf 100644 --- a/vllm/tool_parsers/minimax_m2_tool_parser.py +++ b/vllm/tool_parsers/minimax_m2_tool_parser.py @@ -122,6 +122,8 @@ def _reset_streaming_state(self): self.streaming_request = None # Clear previous tool call history to avoid state pollution self.prev_tool_call_arr.clear() + # Reset streamed args tracking + self.streamed_args_for_tool.clear() def _extract_name(self, name_str: str) -> str: """Extract name from quoted string.""" @@ -421,9 +423,12 @@ def extract_tool_calls_streaming( self.prev_tool_call_arr.append( { "name": self.current_function_name, - "arguments": "{}", # Placeholder, will be updated later + "arguments": {}, # Placeholder, will be updated later } ) + # Initialize streamed_args_for_tool for this tool call + if len(self.streamed_args_for_tool) <= self.current_tool_index: + self.streamed_args_for_tool.append("") # Send header with function info return DeltaMessage( @@ -445,6 +450,9 @@ def extract_tool_calls_streaming( # Send opening brace if not sent yet if self.in_function and not self.json_started: self.json_started = True + # Update streamed_args_for_tool for opening brace + if self.current_tool_index < len(self.streamed_args_for_tool): + self.streamed_args_for_tool[self.current_tool_index] += "{" return DeltaMessage( tool_calls=[ DeltaToolCall( @@ -493,7 +501,7 @@ def extract_tool_calls_streaming( args = parsed_tool.function.arguments self.prev_tool_call_arr[self.current_tool_index][ "arguments" - ] = args + ] = json.loads(args) except Exception: pass # Ignore parsing errors during streaming @@ -505,7 +513,9 @@ def extract_tool_calls_streaming( ) ] ) - + # 
Update streamed_args_for_tool for closing brace + if self.current_tool_index < len(self.streamed_args_for_tool): + self.streamed_args_for_tool[self.current_tool_index] += "}" # Reset state for next tool self.json_closed = True self.in_function = False @@ -630,7 +640,11 @@ def extract_tool_calls_streaming( ) self.param_count += 1 - + # Update streamed_args_for_tool for this tool call + if self.current_tool_index < len(self.streamed_args_for_tool): + self.streamed_args_for_tool[self.current_tool_index] += ( + json_fragment + ) return DeltaMessage( tool_calls=[ DeltaToolCall( From 8862b215c6e469d35f921764c1a030866d001ba5 Mon Sep 17 00:00:00 2001 From: Asaf Joseph Gardin <39553475+Josephasafg@users.noreply.github.com> Date: Wed, 17 Dec 2025 10:48:53 +0200 Subject: [PATCH 46/73] [Mamba] Removed disable cascade attn in MambaModelConfig (#30712) Signed-off-by: asafg <39553475+Josephasafg@users.noreply.github.com> --- vllm/model_executor/models/config.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/vllm/model_executor/models/config.py b/vllm/model_executor/models/config.py index 4b08472538db..a3624b1cfa5f 100644 --- a/vllm/model_executor/models/config.py +++ b/vllm/model_executor/models/config.py @@ -308,12 +308,6 @@ def verify_and_update_config(cls, vllm_config: "VllmConfig") -> None: if cache_config.mamba_block_size is None: cache_config.mamba_block_size = model_config.max_model_len - # TODO(tdoublep): remove once cascade attention is supported - logger.info( - "Disabling cascade attention since it is not supported for hybrid models." 
- ) - model_config.disable_cascade_attn = True - class HybridAttentionMambaModelConfig(VerifyAndUpdateConfig): @classmethod From d3ebe344dc871ef72fb93d75a6cd73b02336036f Mon Sep 17 00:00:00 2001 From: Xinyu Chen Date: Wed, 17 Dec 2025 17:43:00 +0800 Subject: [PATCH 47/73] CustomOp: grouped topk (#29575) Signed-off-by: Xinyu Chen --- tests/kernels/moe/test_grouped_topk.py | 10 ++-- .../layers/fused_moe/__init__.py | 4 +- .../layers/fused_moe/fused_moe.py | 52 +++++++++++++++++++ vllm/model_executor/layers/fused_moe/layer.py | 23 +++++--- 4 files changed, 75 insertions(+), 14 deletions(-) diff --git a/tests/kernels/moe/test_grouped_topk.py b/tests/kernels/moe/test_grouped_topk.py index 662e0723b758..d26fe50b815b 100644 --- a/tests/kernels/moe/test_grouped_topk.py +++ b/tests/kernels/moe/test_grouped_topk.py @@ -9,8 +9,8 @@ import torch from vllm.model_executor.layers.fused_moe.fused_moe import ( + GroupedTopk, fused_grouped_topk, - grouped_topk, ) from vllm.platforms import current_platform @@ -50,15 +50,17 @@ def test_grouped_topk( with monkeypatch.context() as m: m.setenv("VLLM_USE_FUSED_MOE_GROUPED_TOPK", "0") - baseline_topk_weights, baseline_topk_ids = grouped_topk( - hidden_states=hidden_states, - gating_output=gating_output, + grouped_topk = GroupedTopk( topk=topk, renormalize=renormalize, num_expert_group=num_expert_group, topk_group=topk_group, scoring_func=scoring_func, routed_scaling_factor=routed_scaling_factor, + ) + baseline_topk_weights, baseline_topk_ids = grouped_topk( + hidden_states=hidden_states, + gating_output=gating_output, e_score_correction_bias=e_score_correction_bias, ) diff --git a/vllm/model_executor/layers/fused_moe/__init__.py b/vllm/model_executor/layers/fused_moe/__init__.py index d71cfc5ad820..8fee4038b60b 100644 --- a/vllm/model_executor/layers/fused_moe/__init__.py +++ b/vllm/model_executor/layers/fused_moe/__init__.py @@ -77,11 +77,11 @@ def get_config() -> dict[str, Any] | None: BatchedTritonExperts, ) from 
vllm.model_executor.layers.fused_moe.fused_moe import ( + GroupedTopk, TritonExperts, fused_experts, fused_topk, get_config_file_name, - grouped_topk, ) from vllm.model_executor.layers.fused_moe.triton_deep_gemm_moe import ( TritonOrDeepGemmExperts, @@ -91,7 +91,7 @@ def get_config() -> dict[str, Any] | None: "fused_topk", "fused_experts", "get_config_file_name", - "grouped_topk", + "GroupedTopk", "cutlass_moe_fp8", "cutlass_moe_fp4", "cutlass_moe_w4a8_fp8", diff --git a/vllm/model_executor/layers/fused_moe/fused_moe.py b/vllm/model_executor/layers/fused_moe/fused_moe.py index b286c3bc6fc0..20782e2712f2 100644 --- a/vllm/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/fused_moe.py @@ -16,6 +16,7 @@ from vllm import _custom_ops as ops from vllm._aiter_ops import rocm_aiter_ops from vllm.logger import init_logger +from vllm.model_executor.custom_op import CustomOp from vllm.model_executor.layers.batch_invariant import ( vllm_is_batch_invariant, ) @@ -1286,6 +1287,57 @@ def grouped_topk( return topk_weights.to(torch.float32), topk_ids.to(torch.int32) +@CustomOp.register("grouped_topk") +class GroupedTopk(CustomOp): + """GroupedTopk used by the Deepseek-V2 and Deepseek-V3 model.""" + + def __init__( + self, + topk: int, + renormalize: bool, + num_expert_group: int = 0, + topk_group: int = 0, + scoring_func: str = "softmax", + routed_scaling_factor: float = 1.0, + ) -> None: + super().__init__() + self.native_impl = grouped_topk + self.topk = topk + self.renormalize = renormalize + self.num_expert_group = num_expert_group + self.topk_group = topk_group + self.scoring_func = scoring_func + self.routed_scaling_factor = routed_scaling_factor + + def forward_native( + self, + hidden_states: torch.Tensor, + gating_output: torch.Tensor, + e_score_correction_bias: torch.Tensor | None = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + return self.native_impl( + hidden_states, + gating_output, + self.topk, + self.renormalize, + 
self.num_expert_group, + self.topk_group, + self.scoring_func, + self.routed_scaling_factor, + e_score_correction_bias, + ) + + def forward_cuda( + self, + hidden_states: torch.Tensor, + gating_output: torch.Tensor, + e_score_correction_bias: torch.Tensor | None = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + return self.forward_native( + hidden_states, gating_output, e_score_correction_bias + ) + + @torch.compile(dynamic=True, backend=current_platform.simple_compile_backend) def eplb_map_to_physical_and_record( topk_ids: torch.Tensor, diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index b39ce415a0f8..db97d6eb88ea 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -67,7 +67,7 @@ def _eplb_map_to_physical_and_record( return topk_ids eplb_map_to_physical_and_record = _eplb_map_to_physical_and_record -from vllm.model_executor.layers.fused_moe.fused_moe import grouped_topk +from vllm.model_executor.layers.fused_moe.fused_moe import GroupedTopk from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( # noqa: E501 rocm_aiter_grouped_topk, ) @@ -1594,19 +1594,26 @@ def valid_grouping() -> bool: grouped_topk_impl = partial( rocm_aiter_grouped_topk, num_fused_shared_experts=self.num_fused_shared_experts, + topk=self.top_k, + renormalize=self.renormalize, + num_expert_group=self.num_expert_group, + topk_group=self.topk_group, + scoring_func=self.scoring_func, + routed_scaling_factor=self.routed_scaling_factor, ) else: - grouped_topk_impl = grouped_topk + grouped_topk_impl = GroupedTopk( + topk=self.top_k, + renormalize=self.renormalize, + num_expert_group=self.num_expert_group, + topk_group=self.topk_group, + scoring_func=self.scoring_func, + routed_scaling_factor=self.routed_scaling_factor, + ) topk_weights, topk_ids = grouped_topk_impl( hidden_states=hidden_states, gating_output=router_logits, - topk=self.top_k, - 
renormalize=self.renormalize, - num_expert_group=self.num_expert_group, - topk_group=self.topk_group, - scoring_func=self.scoring_func, - routed_scaling_factor=self.routed_scaling_factor, e_score_correction_bias=self.e_score_correction_bias, ) elif self.e_score_correction_bias is not None: From 42c55ea3df4b08a5a6b8cb02ca2f875718dd1eeb Mon Sep 17 00:00:00 2001 From: Sheng Lin Date: Wed, 17 Dec 2025 17:52:58 +0800 Subject: [PATCH 48/73] [NIXL][Bugfix] Fix NIXL/RDMA registration failure over CuMemAllocator (#29569) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Somoku Co-authored-by: Nicolò Lucchesi --- csrc/cumem_allocator.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/csrc/cumem_allocator.cpp b/csrc/cumem_allocator.cpp index 78dc840a98b6..6c2c18a6602d 100644 --- a/csrc/cumem_allocator.cpp +++ b/csrc/cumem_allocator.cpp @@ -107,6 +107,16 @@ void create_and_map(unsigned long long device, ssize_t size, CUdeviceptr d_mem, prop.location.id = device; prop.allocFlags.compressionType = CU_MEM_ALLOCATION_COMP_NONE; +#ifndef USE_ROCM + int flag = 0; + CUDA_CHECK(cuDeviceGetAttribute( + &flag, CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED, + device)); + if (flag) { // support GPUDirect RDMA if possible + prop.allocFlags.gpuDirectRDMACapable = 1; + } +#endif + #ifndef USE_ROCM // Allocate memory using cuMemCreate CUDA_CHECK(cuMemCreate(p_memHandle, size, &prop, 0)); From 89173de54833c05a40f3a6082a35145699d3aeca Mon Sep 17 00:00:00 2001 From: Andrew Xia Date: Wed, 17 Dec 2025 17:53:02 +0800 Subject: [PATCH 49/73] [Doc][ResponsesAPI] add documentation (#30840) Signed-off-by: Andrew Xia Co-authored-by: Andrew Xia --- docs/serving/openai_compatible_server.md | 27 ++++++++++++++++++++++++ vllm/entrypoints/openai/protocol.py | 18 ++++++++++++---- 2 files changed, 41 insertions(+), 4 deletions(-) diff --git a/docs/serving/openai_compatible_server.md b/docs/serving/openai_compatible_server.md 
index 0e29204f8947..6a08f872def1 100644 --- a/docs/serving/openai_compatible_server.md +++ b/docs/serving/openai_compatible_server.md @@ -47,6 +47,8 @@ We currently support the following OpenAI APIs: - [Completions API](#completions-api) (`/v1/completions`) - Only applicable to [text generation models](../models/generative_models.md). - *Note: `suffix` parameter is not supported.* +- [Responses API](#responses-api) (`/v1/responses`) + - Only applicable to [text generation models](../models/generative_models.md). - [Chat Completions API](#chat-api) (`/v1/chat/completions`) - Only applicable to [text generation models](../models/generative_models.md) with a [chat template](../serving/openai_compatible_server.md#chat-template). - *Note: `user` parameter is ignored.* @@ -229,6 +231,31 @@ The following extra parameters are supported: --8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-extra-params" ``` +### Responses API + +Our Responses API is compatible with [OpenAI's Responses API](https://platform.openai.com/docs/api-reference/responses); +you can use the [official OpenAI Python client](https://github.com/openai/openai-python) to interact with it. + +Code example: [examples/online_serving/openai_responses_client_with_tools.py](../../examples/online_serving/openai_responses_client_with_tools.py) + +#### Extra parameters + +The following extra parameters in the request object are supported: + +??? code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:responses-extra-params" + ``` + +The following extra parameters in the response object are supported: + +??? 
code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:responses-response-extra-params" + ``` + ### Embeddings API Our Embeddings API is compatible with [OpenAI's Embeddings API](https://platform.openai.com/docs/api-reference/embeddings); diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 94dde4564ea0..a3c347cb1bd3 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -1654,13 +1654,23 @@ class ResponsesResponse(OpenAIBaseModel): usage: ResponseUsage | None = None user: str | None = None - # --8<-- [start:responses-extra-params] + # --8<-- [start:responses-response-extra-params] # These are populated when enable_response_messages is set to True # NOTE: custom serialization is needed # see serialize_input_messages and serialize_output_messages - input_messages: ResponseInputOutputMessage | None = None - output_messages: ResponseInputOutputMessage | None = None - # --8<-- [end:responses-extra-params] + input_messages: ResponseInputOutputMessage | None = Field( + default=None, + description=( + "If enable_response_messages, we can show raw token input to model." + ), + ) + output_messages: ResponseInputOutputMessage | None = Field( + default=None, + description=( + "If enable_response_messages, we can show raw token output of model." 
+ ), + ) + # --8<-- [end:responses-response-extra-params] # NOTE: openAI harmony doesn't serialize TextContent properly, # TODO: this fixes for TextContent, but need to verify for tools etc From 095b9814213a2d6492adbe60f5db68972af29ea0 Mon Sep 17 00:00:00 2001 From: "Ye (Charlotte) Qi" Date: Wed, 17 Dec 2025 01:54:21 -0800 Subject: [PATCH 50/73] [Kernels][FI] Skip trtllm attention when num_kv_heads=1 (#30842) Signed-off-by: Ye (Charlotte) Qi --- .../test_flashinfer_trtllm_attention.py | 35 +++++++++++++++++++ vllm/utils/flashinfer.py | 22 +++++++++++- 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/tests/kernels/attention/test_flashinfer_trtllm_attention.py b/tests/kernels/attention/test_flashinfer_trtllm_attention.py index 06a7085a82ba..220d827b9d5f 100644 --- a/tests/kernels/attention/test_flashinfer_trtllm_attention.py +++ b/tests/kernels/attention/test_flashinfer_trtllm_attention.py @@ -455,3 +455,38 @@ def test_flashinfer_trtllm_prefill_with_baseline( torch.testing.assert_close(output, output_trtllm, atol=atol, rtol=rtol), f"{torch.max(torch.abs(output - output_trtllm))}", ) + + +def test_trtllm_attention_rejects_num_kv_heads_1() -> None: + """Test that TRTLLM attention correctly rejects num_kv_heads=1. + + When num_kv_heads=1 (MQA), the KV cache strides become degenerate + (stride_heads == stride_batch), which causes CUDA's cuTensorMapEncodeTiled + to fail because TMA descriptors cannot handle degenerate 4D tensors with + singleton dimensions. + + This test verifies that can_use_trtllm_attention returns False for + num_kv_heads=1 configurations. 
+ """ + from vllm.utils.flashinfer import can_use_trtllm_attention + + # num_kv_heads=1 should be rejected + assert not can_use_trtllm_attention(num_qo_heads=64, num_kv_heads=1), ( + "can_use_trtllm_attention should return False for num_kv_heads=1" + ) + assert not can_use_trtllm_attention(num_qo_heads=32, num_kv_heads=1), ( + "can_use_trtllm_attention should return False for num_kv_heads=1" + ) + + # num_kv_heads > 1 should be accepted (if platform supports it) + # Note: This may return False on non-Blackwell platforms, which is fine + result_kv8 = can_use_trtllm_attention(num_qo_heads=64, num_kv_heads=8) + result_kv1 = can_use_trtllm_attention(num_qo_heads=64, num_kv_heads=1) + + # Even if platform doesn't support TRTLLM, num_kv_heads=1 should never + # return True when num_kv_heads > 1 returns True + if result_kv8: + assert not result_kv1, ( + "If TRTLLM is supported for num_kv_heads=8, " + "it must be rejected for num_kv_heads=1" + ) diff --git a/vllm/utils/flashinfer.py b/vllm/utils/flashinfer.py index 1c2710be3173..6bbe02348eaf 100644 --- a/vllm/utils/flashinfer.py +++ b/vllm/utils/flashinfer.py @@ -305,7 +305,18 @@ def can_use_trtllm_attention(num_qo_heads: int, num_kv_heads: int) -> bool: if force_use_trtllm_attention() is False: return False has_trtllm = supports_trtllm_attention() - return has_trtllm and (num_qo_heads % num_kv_heads == 0) + # num_kv_heads=1 is not supported due to TMA descriptor building limitations. + # When num_kv_heads=1, the KV cache strides become degenerate (stride_heads == + # stride_batch), which causes CUDA's cuTensorMapEncodeTiled to fail because + # TMA descriptors cannot handle degenerate 4D tensors with singleton dimensions. + # See: https://fburl.com/352mrydz + if has_trtllm and num_kv_heads == 1: + logger.warning_once( + "TRTLLM attention does not support num_kv_heads=1. " + "This configuration causes TMA descriptor building to fail due to " + "degenerate tensor strides. Falling back to FlashInfer attention." 
+ ) + return has_trtllm and (num_qo_heads % num_kv_heads == 0) and (num_kv_heads != 1) def use_trtllm_attention( @@ -355,6 +366,15 @@ def use_trtllm_attention( ) return False + # num_kv_heads=1 is not supported + if num_kv_heads == 1: + if force_use_trtllm: + logger.warning_once( + "TRTLLM attention does not support num_kv_heads=1, " + "but --attention-config.use_trtllm_attention is set to 1" + ) + return False + if has_spec and not is_prefill: # Speculative decoding requires TRTLLM attention for decodes logger.info_once("Using TRTLLM attention (enabled for speculative decoding).") From 28a2c91e3d94f1b6998c0f3cf75f80affca14f77 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Wed, 17 Dec 2025 04:55:30 -0500 Subject: [PATCH 51/73] [UX] Make `vllm bench serve` discover model by default and use --input-len (#30816) Signed-off-by: mgoin --- tests/benchmarks/test_serve_cli.py | 9 ++-- vllm/benchmarks/serve.py | 83 +++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 13 deletions(-) diff --git a/tests/benchmarks/test_serve_cli.py b/tests/benchmarks/test_serve_cli.py index 90d685c966d3..c579b3806986 100644 --- a/tests/benchmarks/test_serve_cli.py +++ b/tests/benchmarks/test_serve_cli.py @@ -19,21 +19,18 @@ def server(): @pytest.mark.benchmark def test_bench_serve(server): + # Test default model detection and input/output len command = [ "vllm", "bench", "serve", - "--model", - MODEL_NAME, "--host", server.host, "--port", str(server.port), - "--dataset-name", - "random", - "--random-input-len", + "--input-len", "32", - "--random-output-len", + "--output-len", "4", "--num-prompts", "5", diff --git a/vllm/benchmarks/serve.py b/vllm/benchmarks/serve.py index f5d8ea5a975a..12756d1700c9 100644 --- a/vllm/benchmarks/serve.py +++ b/vllm/benchmarks/serve.py @@ -10,8 +10,10 @@ vllm bench serve \ --backend \ --label \ - --model \ + --model \ --dataset-name \ + --input-len \ + --output-len \ --request-rate \ --num-prompts """ @@ -57,6 +59,33 @@ ) +async def 
get_first_model_from_server( + base_url: str, headers: dict | None = None +) -> str: + """Fetch the first model from the server's /v1/models endpoint.""" + models_url = f"{base_url}/v1/models" + async with aiohttp.ClientSession() as session: + try: + async with session.get(models_url, headers=headers) as response: + response.raise_for_status() + data = await response.json() + if "data" in data and len(data["data"]) > 0: + return data["data"][0]["id"] + else: + raise ValueError( + f"No models found on the server at {base_url}. " + "Make sure the server is running and has models loaded." + ) + except (aiohttp.ClientError, json.JSONDecodeError) as e: + raise RuntimeError( + f"Failed to fetch models from server at {models_url}. " + "Check that:\n" + "1. The server is running\n" + "2. The server URL is correct\n" + f"Error: {e}" + ) from e + + class TaskType(Enum): GENERATION = "generation" POOLING = "pooling" @@ -1025,8 +1054,26 @@ def add_cli_args(parser: argparse.ArgumentParser): parser.add_argument( "--model", type=str, - required=True, - help="Name of the model.", + required=False, + default=None, + help="Name of the model. If not specified, will fetch the first model " + "from the server's /v1/models endpoint.", + ) + parser.add_argument( + "--input-len", + type=int, + default=None, + help="General input length for datasets. Maps to dataset-specific " + "input length arguments (e.g., --random-input-len, --sonnet-input-len). " + "If not specified, uses dataset defaults.", + ) + parser.add_argument( + "--output-len", + type=int, + default=None, + help="General output length for datasets. Maps to dataset-specific " + "output length arguments (e.g., --random-output-len, --sonnet-output-len). 
" + "If not specified, uses dataset defaults.", ) parser.add_argument( "--tokenizer", @@ -1332,10 +1379,6 @@ async def main_async(args: argparse.Namespace) -> dict[str, Any]: raise ValueError("For exponential ramp-up, the start RPS cannot be 0.") label = args.label - model_id = args.model - model_name = args.served_model_name - tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model - tokenizer_mode = args.tokenizer_mode if args.base_url is not None: api_url = f"{args.base_url}{args.endpoint}" @@ -1356,6 +1399,18 @@ async def main_async(args: argparse.Namespace) -> dict[str, Any]: else: raise ValueError("Invalid header format. Please use KEY=VALUE format.") + # Fetch model from server if not specified + if args.model is None: + print("Model not specified, fetching first model from server...") + model_id = await get_first_model_from_server(base_url, headers) + print(f"Using model: {model_id}") + else: + model_id = args.model + + model_name = args.served_model_name + tokenizer_id = args.tokenizer if args.tokenizer is not None else model_id + tokenizer_mode = args.tokenizer_mode + tokenizer = get_tokenizer( tokenizer_id, tokenizer_mode=tokenizer_mode, @@ -1368,6 +1423,20 @@ async def main_async(args: argparse.Namespace) -> dict[str, Any]: "'--dataset-path' if required." 
) + # Map general --input-len and --output-len to all dataset-specific arguments + if args.input_len is not None: + args.random_input_len = args.input_len + args.sonnet_input_len = args.input_len + + if args.output_len is not None: + args.random_output_len = args.output_len + args.sonnet_output_len = args.output_len + args.sharegpt_output_len = args.output_len + args.custom_output_len = args.output_len + args.hf_output_len = args.output_len + args.spec_bench_output_len = args.output_len + args.prefix_repetition_output_len = args.output_len + # when using random datasets, default to ignoring EOS # so generation runs to the requested length if ( From bc7110b8bc630f9c20e9acb8bd8267c3c4fd0aa1 Mon Sep 17 00:00:00 2001 From: Zhengxu Chen Date: Wed, 17 Dec 2025 04:55:56 -0500 Subject: [PATCH 52/73] [compile] Disable aot when eager backend is used. (#30810) Signed-off-by: zhxchen17 --- vllm/compilation/decorators.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index d1ee995ee895..40bde97ac61d 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -435,7 +435,10 @@ def __call__(self, *args, **kwargs): return self.aot_compiled_fn(self, *args, **kwargs) if self.compiled: - assert not envs.VLLM_USE_AOT_COMPILE + assert ( + not envs.VLLM_USE_AOT_COMPILE + or self.vllm_config.compilation_config.backend == "eager" + ) return TorchCompileWithNoGuardsWrapper.__call__(self, *args, **kwargs) # This is the path for the first compilation. 
@@ -508,7 +511,11 @@ def patched_inline_call(self_): _torch27_patch_tensor_subclasses(), torch._inductor.config.patch(**inductor_config_patches), ): - if envs.VLLM_USE_AOT_COMPILE: + use_aot_compile = envs.VLLM_USE_AOT_COMPILE + if self.vllm_config.compilation_config.backend == "eager": + logger.warning("Detected eager backend, disabling AOT compile.") + use_aot_compile = False + if use_aot_compile: self.aot_compiled_fn = self.aot_compile(*args, **kwargs) output = self.aot_compiled_fn(self, *args, **kwargs) assert aot_compilation_path is not None From 15a8514d010421c9451d10a5ea06847cb74e3376 Mon Sep 17 00:00:00 2001 From: Zhengxu Chen Date: Wed, 17 Dec 2025 04:56:24 -0500 Subject: [PATCH 53/73] [compile] Ignore VLLM_FORCE_AOT_LOAD from cache factors (#30809) Signed-off-by: zhxchen17 --- vllm/envs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/envs.py b/vllm/envs.py index 7e072a588591..2f8158d88d6c 100755 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -1654,6 +1654,7 @@ def compile_factors() -> dict[str, object]: "VLLM_CI_USE_S3", "VLLM_MODEL_REDIRECT_PATH", "VLLM_HOST_IP", + "VLLM_FORCE_AOT_LOAD", "S3_ACCESS_KEY_ID", "S3_SECRET_ACCESS_KEY", "S3_ENDPOINT_URL", From 87c1dd986eee8104302bd54fe5e40d93c6f832e4 Mon Sep 17 00:00:00 2001 From: danielafrimi <45691845+danielafrimi@users.noreply.github.com> Date: Wed, 17 Dec 2025 11:56:38 +0200 Subject: [PATCH 54/73] [Fix]Load kv-cache dtype from hf_quant_config.json automatically (fix for reverted PR) (#30785) Signed-off-by: <> Co-authored-by: root --- vllm/engine/arg_utils.py | 9 ++++- vllm/utils/torch_utils.py | 75 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index ca19e468914c..03720bd2516d 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -93,6 +93,7 @@ from vllm.utils.argparse_utils import FlexibleArgumentParser from vllm.utils.mem_constants import GiB_bytes from vllm.utils.network_utils 
import get_ip +from vllm.utils.torch_utils import resolve_kv_cache_dtype_string from vllm.v1.sample.logits_processor import LogitsProcessor if TYPE_CHECKING: @@ -106,6 +107,7 @@ LoadFormats = Any UsageContext = Any + logger = init_logger(__name__) # object is used to allow for special typing forms @@ -1361,12 +1363,17 @@ def create_engine_config( f"dcp_size={self.decode_context_parallel_size}." ) + # Resolve "auto" kv_cache_dtype to actual value from model config + resolved_cache_dtype = resolve_kv_cache_dtype_string( + self.kv_cache_dtype, model_config + ) + cache_config = CacheConfig( block_size=self.block_size, gpu_memory_utilization=self.gpu_memory_utilization, kv_cache_memory_bytes=self.kv_cache_memory_bytes, swap_space=self.swap_space, - cache_dtype=self.kv_cache_dtype, + cache_dtype=resolved_cache_dtype, is_attention_free=model_config.is_attention_free, num_gpu_blocks_override=self.num_gpu_blocks_override, sliding_window=sliding_window, diff --git a/vllm/utils/torch_utils.py b/vllm/utils/torch_utils.py index c97efce312b5..b82e0171b7f7 100644 --- a/vllm/utils/torch_utils.py +++ b/vllm/utils/torch_utils.py @@ -24,6 +24,10 @@ ModelConfig = object IntermediateTensors = object +import logging + +logger = logging.getLogger(__name__) + STR_DTYPE_TO_TORCH_DTYPE = { "float32": torch.float32, @@ -49,6 +53,13 @@ } +MODELOPT_TO_VLLM_KV_CACHE_DTYPE_MAP = { + # TODO: Add more modelopt kv cache dtype + # mappings here when it supported by some attention backend + # (for example supports nvfp4). + "fp8": "fp8_e4m3", +} + T = TypeVar("T") @@ -194,6 +205,70 @@ def get_kv_cache_torch_dtype( return torch_dtype +def get_kv_cache_quant_algo_string(quant_cfg: dict[str, Any]) -> str | None: + """Get the KV cache quantization algorithm string from the quantization config. + + Maps various FP8 format names to vLLM's standard cache dtype strings. + Returns None if no kv_cache_quant_algo is specified. + Returns "auto" if the value is not recognized/supported. 
+ """ + # Mapping from model config values to vLLM cache_dtype strings + + quant_method = quant_cfg.get("quant_method", "") + if quant_method.startswith("modelopt"): + quantization_inner = quant_cfg.get("quantization", quant_cfg) + # Check if quant config is specified and use kv cache quant algo + kv_algo = quantization_inner.get("kv_cache_quant_algo") or quant_cfg.get( + "kv_cache_quant_algo" + ) + if isinstance(kv_algo, str): + kv_algo_lower = kv_algo.lower() + + # Try to map to vLLM's standard format + if kv_algo_lower in MODELOPT_TO_VLLM_KV_CACHE_DTYPE_MAP: + return MODELOPT_TO_VLLM_KV_CACHE_DTYPE_MAP[kv_algo_lower] + else: + # Unknown/unsupported format - return "auto" as safe fallback + logger.warning( + "WARNING: Unknown kv_cache_quant_algo '%s' in model " + "config. Supported values: %s. Falling back to 'auto'.", + kv_algo, + list(MODELOPT_TO_VLLM_KV_CACHE_DTYPE_MAP.keys()), + ) + return "auto" + return None + + +def get_kv_cache_quant_algo_dtype(quant_cfg: dict[str, Any]) -> torch.dtype | None: + """Get the KV cache quantization algorithm dtype from the quantization config.""" + kv_algo_str = get_kv_cache_quant_algo_string(quant_cfg) + if kv_algo_str is not None and kv_algo_str != "auto": + # Only convert if we have a valid dtype string (not "auto" fallback) + return STR_DTYPE_TO_TORCH_DTYPE[kv_algo_str] + return None + + +def resolve_kv_cache_dtype_string( + kv_cache_dtype: str, model_config: ModelConfig +) -> str: + """Resolve 'auto' kv_cache_dtype to the actual string value from model config. + Returns the resolved cache_dtype string. 
+ """ + if kv_cache_dtype != "auto": + return kv_cache_dtype + + hf_cfg = getattr(model_config, "hf_config", None) + if hf_cfg is not None: + quant_cfg = getattr(hf_cfg, "quantization_config", None) + if quant_cfg is not None: + kv_algo_str = get_kv_cache_quant_algo_string(quant_cfg) + if kv_algo_str is not None: + return kv_algo_str + + # Default to auto (will be handled by downstream code) + return "auto" + + def kv_cache_dtype_str_to_dtype( kv_cache_dtype: str, model_config: ModelConfig ) -> torch.dtype: From 1cc37cf507f9f90ba11a0bb1a63faf585f9987fa Mon Sep 17 00:00:00 2001 From: Zhengxu Chen Date: Wed, 17 Dec 2025 05:00:12 -0500 Subject: [PATCH 55/73] [compile] Recompile graph module during Dynamo cache loading. (#30743) Signed-off-by: Zhengxu Chen --- vllm/compilation/caching.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/compilation/caching.py b/vllm/compilation/caching.py index ce482572b401..fc02a08f7426 100644 --- a/vllm/compilation/caching.py +++ b/vllm/compilation/caching.py @@ -104,6 +104,7 @@ def deserialize_compile_artifacts(cls, data: bytes) -> "VllmSerializableFunction state = pickle.loads(data) fake_mode = FakeTensorMode(shape_env=ShapeEnv()) state["graph_module"] = GraphPickler.loads(state["graph_module"], fake_mode) + state["graph_module"].recompile() state["example_inputs"] = GraphPickler.loads(state["example_inputs"], fake_mode) vllm_backend = VllmBackend(get_current_vllm_config(), state["prefix"]) From 5b17c00c2ce7750888ab98dc20b144a08cdb74e9 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Wed, 17 Dec 2025 05:00:35 -0500 Subject: [PATCH 56/73] [Bug] Fix AttributeError: 'ColumnParallelLinear' object has no attribute `weight_scale_inv` (#30823) Signed-off-by: yewentao256 --- vllm/model_executor/layers/quantization/utils/fp8_utils.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/layers/quantization/utils/fp8_utils.py 
b/vllm/model_executor/layers/quantization/utils/fp8_utils.py index ea6874558516..bdc3d1fc7232 100644 --- a/vllm/model_executor/layers/quantization/utils/fp8_utils.py +++ b/vllm/model_executor/layers/quantization/utils/fp8_utils.py @@ -1437,14 +1437,17 @@ def maybe_post_process_fp8_weight_block(layer: torch.nn.Module): layer.orig_dtype, layer.weight ) if should_use_deepgemm: + scale_attr = ( + "weight_scale_inv" if hasattr(layer, "weight_scale_inv") else "weight_scale" + ) dg_weight, dg_weight_scale = deepgemm_post_process_fp8_weight_block( wq=layer.weight.data, - ws=layer.weight_scale_inv.data, + ws=getattr(layer, scale_attr).data, quant_block_shape=tuple(layer.weight_block_size), use_e8m0=is_deep_gemm_e8m0_used(), ) replace_parameter(layer, "weight", dg_weight) - replace_parameter(layer, "weight_scale_inv", dg_weight_scale) + replace_parameter(layer, scale_attr, dg_weight_scale) def expert_weight_is_col_major(x: torch.Tensor) -> bool: From 55e260a9e7ed5a043ab3474060fa97113e72b530 Mon Sep 17 00:00:00 2001 From: Chauncey Date: Wed, 17 Dec 2025 18:27:30 +0800 Subject: [PATCH 57/73] [Refactor] [4/N] Move VLLM_SERVER_DEV endpoints into the serve directory (#30749) Signed-off-by: chaunceyjiang --- .../scripts/hardware_ci/run-amd-test.sh | 1 - .buildkite/test-amd.yaml | 37 ++++--- .buildkite/test-pipeline.yaml | 34 ++++--- .buildkite/test_areas/entrypoints.yaml | 23 ++++- .buildkite/test_areas/tool_use.yaml | 13 --- tests/entrypoints/instrumentator/__init__.py | 0 .../test_metrics.py | 5 +- tests/entrypoints/rpc/__init__.py | 0 .../{openai => rpc}/test_collective_rpc.py | 2 +- tests/entrypoints/sleep/__init__.py | 0 .../{openai => sleep}/test_sleep.py | 2 +- vllm/entrypoints/openai/api_server.py | 98 +------------------ vllm/entrypoints/serve/__init__.py | 29 ++++++ vllm/entrypoints/serve/cache/__init__.py | 0 vllm/entrypoints/serve/cache/api_router.py | 61 ++++++++++++ .../serve/instrumentator/server_info.py | 40 ++++++++ vllm/entrypoints/serve/rpc/__init__.py | 0 
vllm/entrypoints/serve/rpc/api_router.py | 61 ++++++++++++ vllm/entrypoints/serve/sleep/api_router.py | 4 - 19 files changed, 259 insertions(+), 151 deletions(-) delete mode 100644 .buildkite/test_areas/tool_use.yaml create mode 100644 tests/entrypoints/instrumentator/__init__.py rename tests/entrypoints/{openai => instrumentator}/test_metrics.py (99%) create mode 100644 tests/entrypoints/rpc/__init__.py rename tests/entrypoints/{openai => rpc}/test_collective_rpc.py (96%) create mode 100644 tests/entrypoints/sleep/__init__.py rename tests/entrypoints/{openai => sleep}/test_sleep.py (98%) create mode 100644 vllm/entrypoints/serve/cache/__init__.py create mode 100644 vllm/entrypoints/serve/cache/api_router.py create mode 100644 vllm/entrypoints/serve/instrumentator/server_info.py create mode 100644 vllm/entrypoints/serve/rpc/__init__.py create mode 100644 vllm/entrypoints/serve/rpc/api_router.py diff --git a/.buildkite/scripts/hardware_ci/run-amd-test.sh b/.buildkite/scripts/hardware_ci/run-amd-test.sh index 864eb470bb0a..08da34d81d11 100755 --- a/.buildkite/scripts/hardware_ci/run-amd-test.sh +++ b/.buildkite/scripts/hardware_ci/run-amd-test.sh @@ -141,7 +141,6 @@ if [[ $commands == *" entrypoints/openai "* ]]; then --ignore=entrypoints/openai/test_audio.py \ --ignore=entrypoints/openai/test_shutdown.py \ --ignore=entrypoints/openai/test_completion.py \ - --ignore=entrypoints/openai/test_sleep.py \ --ignore=entrypoints/openai/test_models.py \ --ignore=entrypoints/openai/test_lora_adapters.py \ --ignore=entrypoints/openai/test_return_tokens_as_ids.py \ diff --git a/.buildkite/test-amd.yaml b/.buildkite/test-amd.yaml index 3c9b8cbedcf0..e8f99100a8de 100644 --- a/.buildkite/test-amd.yaml +++ b/.buildkite/test-amd.yaml @@ -128,7 +128,7 @@ steps: - tests/entrypoints/ commands: - pytest -v -s entrypoints/openai/tool_parsers - - pytest -v -s entrypoints/ --ignore=entrypoints/llm --ignore=entrypoints/openai --ignore=entrypoints/offline_mode 
--ignore=entrypoints/test_chat_utils.py --ignore=entrypoints/pooling + - pytest -v -s entrypoints/ --ignore=entrypoints/llm --ignore=entrypoints/openai --ignore=entrypoints/rpc --ignore=entrypoints/sleep --ignore=entrypoints/instrumentator --ignore=entrypoints/offline_mode --ignore=entrypoints/test_chat_utils.py --ignore=entrypoints/pooling - label: Entrypoints Integration Test (LLM) # 30min timeout_in_minutes: 40 @@ -148,7 +148,7 @@ steps: - pytest -v -s entrypoints/llm/test_generate.py # it needs a clean process - pytest -v -s entrypoints/offline_mode # Needs to avoid interference with other tests -- label: Entrypoints Integration Test (API Server) # 100min +- label: Entrypoints Integration Test (API Server 1) # 100min timeout_in_minutes: 130 mirror_hardwares: [amdexperimental] agent_pool: mi325_1 @@ -162,10 +162,28 @@ steps: - tests/entrypoints/test_chat_utils commands: - export VLLM_WORKER_MULTIPROC_METHOD=spawn - - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/openai/test_collective_rpc.py # PYTHONPATH is needed to import custom Worker extension - - pytest -v -s entrypoints/openai --ignore=entrypoints/openai/test_chat_with_tool_reasoning.py --ignore=entrypoints/openai/test_oot_registration.py --ignore=entrypoints/openai/test_tensorizer_entrypoint.py --ignore=entrypoints/openai/correctness/ --ignore=entrypoints/openai/test_collective_rpc.py --ignore=entrypoints/openai/tool_parsers/ + - pytest -v -s entrypoints/openai --ignore=entrypoints/openai/test_chat_with_tool_reasoning.py --ignore=entrypoints/openai/test_oot_registration.py --ignore=entrypoints/openai/test_tensorizer_entrypoint.py --ignore=entrypoints/openai/correctness/ --ignore=entrypoints/openai/tool_parsers/ - pytest -v -s entrypoints/test_chat_utils.py +- label: Entrypoints Integration Test (API Server 2) + timeout_in_minutes: 50 + mirror_hardwares: [amdexperimental] + agent_pool: mi325_1 + # grade: Blocking + working_dir: "/vllm-workspace/tests" + fast_check: true + torch_nightly: true + 
source_file_dependencies: + - vllm/ + - tests/entrypoints/sleep + - tests/entrypoints/rpc + - tests/tool_use + commands: + - export VLLM_WORKER_MULTIPROC_METHOD=spawn + - pytest -v -s entrypoints/sleep + - pytest -v -s tool_use + - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/rpc + - label: Entrypoints Integration Test (Pooling) timeout_in_minutes: 50 mirror_hardwares: [amdexperimental] @@ -751,17 +769,6 @@ steps: # Transcription WER check is skipped because encoder-decoder models are not supported on ROCm, see https://github.com/vllm-project/vllm/issues/27442 - pytest -s entrypoints/openai/correctness/ -- label: OpenAI-Compatible Tool Use # 23 min - timeout_in_minutes: 35 - mirror_hardwares: [amdexperimental, amdproduction] - agent_pool: mi325_1 - # grade: Blocking - fast_check: false - source_file_dependencies: - - vllm/ - - tests/tool_use - commands: - - pytest -v -s tool_use ##### models test ##### diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 8e6d32f71f22..b4de630b0941 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -114,7 +114,7 @@ steps: - tests/entrypoints/ commands: - pytest -v -s entrypoints/openai/tool_parsers - - pytest -v -s entrypoints/ --ignore=entrypoints/llm --ignore=entrypoints/openai --ignore=entrypoints/offline_mode --ignore=entrypoints/test_chat_utils.py --ignore=entrypoints/pooling + - pytest -v -s entrypoints/ --ignore=entrypoints/llm --ignore=entrypoints/rpc --ignore=entrypoints/sleep --ignore=entrypoints/instrumentator --ignore=entrypoints/openai --ignore=entrypoints/offline_mode --ignore=entrypoints/test_chat_utils.py --ignore=entrypoints/pooling - label: Entrypoints Integration Test (LLM) # 30min timeout_in_minutes: 40 @@ -132,7 +132,7 @@ steps: - pytest -v -s entrypoints/llm/test_generate.py # it needs a clean process - pytest -v -s entrypoints/offline_mode # Needs to avoid interference with other tests -- label: Entrypoints Integration Test (API Server) # 100min 
+- label: Entrypoints Integration Test (API Server 1) # 100min timeout_in_minutes: 130 mirror_hardwares: [amdexperimental] working_dir: "/vllm-workspace/tests" @@ -144,10 +144,26 @@ steps: - tests/entrypoints/test_chat_utils commands: - export VLLM_WORKER_MULTIPROC_METHOD=spawn - - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/openai/test_collective_rpc.py # PYTHONPATH is needed to import custom Worker extension - - pytest -v -s entrypoints/openai --ignore=entrypoints/openai/test_chat_with_tool_reasoning.py --ignore=entrypoints/openai/test_oot_registration.py --ignore=entrypoints/openai/test_tensorizer_entrypoint.py --ignore=entrypoints/openai/correctness/ --ignore=entrypoints/openai/test_collective_rpc.py --ignore=entrypoints/openai/tool_parsers/ + - pytest -v -s entrypoints/openai --ignore=entrypoints/openai/test_chat_with_tool_reasoning.py --ignore=entrypoints/openai/test_oot_registration.py --ignore=entrypoints/openai/test_tensorizer_entrypoint.py --ignore=entrypoints/openai/correctness/ --ignore=entrypoints/openai/tool_parsers/ - pytest -v -s entrypoints/test_chat_utils.py +- label: Entrypoints Integration Test (API Server 2) + timeout_in_minutes: 50 + mirror_hardwares: [amdexperimental] + working_dir: "/vllm-workspace/tests" + fast_check: true + torch_nightly: true + source_file_dependencies: + - vllm/ + - tests/entrypoints/sleep + - tests/entrypoints/rpc + - tests/tool_use + commands: + - export VLLM_WORKER_MULTIPROC_METHOD=spawn + - pytest -v -s entrypoints/sleep + - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/rpc + - pytest -v -s tool_use + - label: Entrypoints Integration Test (Pooling) timeout_in_minutes: 50 mirror_hardwares: [amdexperimental] @@ -666,16 +682,6 @@ steps: commands: # LMEval+Transcription WER check - pytest -s entrypoints/openai/correctness/ -- label: OpenAI-Compatible Tool Use # 23 min - timeout_in_minutes: 35 - mirror_hardwares: [amdexperimental] - fast_check: false - source_file_dependencies: - - vllm/ - - tests/tool_use - 
commands: - - pytest -v -s tool_use - ##### models test ##### - label: Basic Models Tests (Initialization) diff --git a/.buildkite/test_areas/entrypoints.yaml b/.buildkite/test_areas/entrypoints.yaml index 0a789be943f3..5b16ea9c1ad0 100644 --- a/.buildkite/test_areas/entrypoints.yaml +++ b/.buildkite/test_areas/entrypoints.yaml @@ -10,7 +10,7 @@ steps: - tests/entrypoints/ commands: - pytest -v -s entrypoints/openai/tool_parsers - - pytest -v -s entrypoints/ --ignore=entrypoints/llm --ignore=entrypoints/openai --ignore=entrypoints/offline_mode --ignore=entrypoints/test_chat_utils.py --ignore=entrypoints/pooling + - pytest -v -s entrypoints/ --ignore=entrypoints/llm --ignore=entrypoints/rpc --ignore=entrypoints/sleep --ignore=entrypoints/instrumentator --ignore=entrypoints/openai --ignore=entrypoints/offline_mode --ignore=entrypoints/test_chat_utils.py --ignore=entrypoints/pooling - label: Entrypoints Integration (LLM) timeout_in_minutes: 40 @@ -25,7 +25,7 @@ steps: - pytest -v -s entrypoints/llm/test_generate.py # it needs a clean process - pytest -v -s entrypoints/offline_mode # Needs to avoid interference with other tests -- label: Entrypoints Integration (API Server) +- label: Entrypoints Integration (API Server 1) timeout_in_minutes: 130 working_dir: "/vllm-workspace/tests" source_file_dependencies: @@ -34,11 +34,26 @@ steps: - tests/entrypoints/test_chat_utils commands: - export VLLM_WORKER_MULTIPROC_METHOD=spawn - - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/openai/test_collective_rpc.py # PYTHONPATH is needed to import custom Worker extension - - pytest -v -s entrypoints/openai --ignore=entrypoints/openai/test_chat_with_tool_reasoning.py --ignore=entrypoints/openai/test_oot_registration.py --ignore=entrypoints/openai/test_tensorizer_entrypoint.py --ignore=entrypoints/openai/correctness/ --ignore=entrypoints/openai/test_collective_rpc.py --ignore=entrypoints/openai/tool_parsers/ + - pytest -v -s entrypoints/openai 
--ignore=entrypoints/openai/test_chat_with_tool_reasoning.py --ignore=entrypoints/openai/test_oot_registration.py --ignore=entrypoints/openai/test_tensorizer_entrypoint.py --ignore=entrypoints/openai/correctness/ --ignore=entrypoints/openai/tool_parsers/ - pytest -v -s entrypoints/test_chat_utils.py +- label: Entrypoints Integration (API Server 2) + timeout_in_minutes: 130 + working_dir: "/vllm-workspace/tests" + source_file_dependencies: + - vllm/ + - tests/tool_use + - tests/entrypoints/sleep + - tests/entrypoints/instrumentator + - tests/entrypoints/rpc + commands: + - export VLLM_WORKER_MULTIPROC_METHOD=spawn + - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/rpc + - pytest -v -s entrypoints/instrumentator + - pytest -v -s entrypoints/sleep + - pytest -v -s tool_use + - label: Entrypoints Integration (Pooling) timeout_in_minutes: 50 working_dir: "/vllm-workspace/tests" diff --git a/.buildkite/test_areas/tool_use.yaml b/.buildkite/test_areas/tool_use.yaml deleted file mode 100644 index 69527a121422..000000000000 --- a/.buildkite/test_areas/tool_use.yaml +++ /dev/null @@ -1,13 +0,0 @@ -group: Tool use -depends_on: - - image-build -steps: -- label: OpenAI-Compatible Tool Use - timeout_in_minutes: 35 - mirror_hardwares: [amdexperimental] - fast_check: false - source_file_dependencies: - - vllm/ - - tests/tool_use - commands: - - pytest -v -s tool_use diff --git a/tests/entrypoints/instrumentator/__init__.py b/tests/entrypoints/instrumentator/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/entrypoints/openai/test_metrics.py b/tests/entrypoints/instrumentator/test_metrics.py similarity index 99% rename from tests/entrypoints/openai/test_metrics.py rename to tests/entrypoints/instrumentator/test_metrics.py index 65a6fd20bd0d..9f2ad105a380 100644 --- a/tests/entrypoints/openai/test_metrics.py +++ b/tests/entrypoints/instrumentator/test_metrics.py @@ -14,11 +14,10 @@ from prometheus_client.parser import 
text_string_to_metric_families from transformers import AutoTokenizer +from tests.conftest import LocalAssetServer +from tests.utils import RemoteOpenAIServer from vllm import version -from ...conftest import LocalAssetServer -from ...utils import RemoteOpenAIServer - MODELS = { "text": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "multimodal": "HuggingFaceTB/SmolVLM-256M-Instruct", diff --git a/tests/entrypoints/rpc/__init__.py b/tests/entrypoints/rpc/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/entrypoints/openai/test_collective_rpc.py b/tests/entrypoints/rpc/test_collective_rpc.py similarity index 96% rename from tests/entrypoints/openai/test_collective_rpc.py rename to tests/entrypoints/rpc/test_collective_rpc.py index cbd6b02f05dc..56d93a427315 100644 --- a/tests/entrypoints/openai/test_collective_rpc.py +++ b/tests/entrypoints/rpc/test_collective_rpc.py @@ -37,7 +37,7 @@ def server(): "--max-num-seqs", "128", "--worker-extension-cls", - "tests.entrypoints.openai.test_collective_rpc.TestWorkerExtension", + "tests.entrypoints.rpc.test_collective_rpc.TestWorkerExtension", ] with RemoteOpenAIServer( MODEL_NAME, diff --git a/tests/entrypoints/sleep/__init__.py b/tests/entrypoints/sleep/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/entrypoints/openai/test_sleep.py b/tests/entrypoints/sleep/test_sleep.py similarity index 98% rename from tests/entrypoints/openai/test_sleep.py rename to tests/entrypoints/sleep/test_sleep.py index 5f94ac6da2c2..260dcd00bae9 100644 --- a/tests/entrypoints/openai/test_sleep.py +++ b/tests/entrypoints/sleep/test_sleep.py @@ -4,7 +4,7 @@ import requests from prometheus_client.parser import text_string_to_metric_families -from ...utils import RemoteOpenAIServer +from tests.utils import RemoteOpenAIServer MODEL_NAME = "meta-llama/Llama-3.2-1B" diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 5d0eacae34dd..bca9571e3934 
100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -17,21 +17,20 @@ from collections.abc import AsyncGenerator, AsyncIterator, Awaitable from contextlib import asynccontextmanager from http import HTTPStatus -from typing import Annotated, Any, Literal +from typing import Annotated, Any import model_hosting_container_standards.sagemaker as sagemaker_standards import pydantic import uvloop -from fastapi import APIRouter, Depends, FastAPI, Form, HTTPException, Query, Request +from fastapi import APIRouter, Depends, FastAPI, Form, HTTPException, Request from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import JSONResponse, Response, StreamingResponse +from fastapi.responses import JSONResponse, StreamingResponse from starlette.concurrency import iterate_in_threadpool from starlette.datastructures import URL, Headers, MutableHeaders, State from starlette.types import ASGIApp, Message, Receive, Scope, Send import vllm.envs as envs -from vllm.config import VllmConfig from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.protocol import EngineClient from vllm.entrypoints.anthropic.protocol import ( @@ -639,97 +638,6 @@ async def create_translations( return StreamingResponse(content=generator, media_type="text/event-stream") -if envs.VLLM_SERVER_DEV_MODE: - logger.warning( - "SECURITY WARNING: Development endpoints are enabled! " - "This should NOT be used in production!" 
- ) - - PydanticVllmConfig = pydantic.TypeAdapter(VllmConfig) - - @router.get("/server_info") - async def show_server_info( - raw_request: Request, - config_format: Annotated[Literal["text", "json"], Query()] = "text", - ): - vllm_config: VllmConfig = raw_request.app.state.vllm_config - server_info = { - "vllm_config": str(vllm_config) - if config_format == "text" - else PydanticVllmConfig.dump_python(vllm_config, mode="json", fallback=str) - # fallback=str is needed to handle e.g. torch.dtype - } - return JSONResponse(content=server_info) - - @router.post("/reset_prefix_cache") - async def reset_prefix_cache( - raw_request: Request, - reset_running_requests: bool = Query(default=False), - reset_external: bool = Query(default=False), - ): - """ - Reset the local prefix cache. - - Optionally, if the query parameter `reset_external=true` - also resets the external (connector-managed) prefix cache. - - Note that we currently do not check if the prefix cache - is successfully reset in the API server. - - Example: - POST /reset_prefix_cache?reset_external=true - """ - logger.info("Resetting prefix cache...") - - await engine_client(raw_request).reset_prefix_cache( - reset_running_requests, reset_external - ) - return Response(status_code=200) - - @router.post("/reset_mm_cache") - async def reset_mm_cache(raw_request: Request): - """ - Reset the multi-modal cache. Note that we currently do not check if the - multi-modal cache is successfully reset in the API server. 
- """ - logger.info("Resetting multi-modal cache...") - await engine_client(raw_request).reset_mm_cache() - return Response(status_code=200) - - @router.post("/collective_rpc") - async def collective_rpc(raw_request: Request): - try: - body = await raw_request.json() - except json.JSONDecodeError as e: - raise HTTPException( - status_code=HTTPStatus.BAD_REQUEST.value, - detail=f"JSON decode error: {e}", - ) from e - method = body.get("method") - if method is None: - raise HTTPException( - status_code=HTTPStatus.BAD_REQUEST.value, - detail="Missing 'method' in request body", - ) - # For security reason, only serialized string args/kwargs are passed. - # User-defined `method` is responsible for deserialization if needed. - args: list[str] = body.get("args", []) - kwargs: dict[str, str] = body.get("kwargs", {}) - timeout: float | None = body.get("timeout") - results = await engine_client(raw_request).collective_rpc( - method=method, timeout=timeout, args=tuple(args), kwargs=kwargs - ) - if results is None: - return Response(status_code=200) - response: list[Any] = [] - for result in results: - if result is None or isinstance(result, dict | list): - response.append(result) - else: - response.append(str(result)) - return JSONResponse(content={"results": response}) - - def load_log_config(log_config_file: str | None) -> dict | None: if not log_config_file: return None diff --git a/vllm/entrypoints/serve/__init__.py b/vllm/entrypoints/serve/__init__.py index c4fcc92db931..260fd44a02cc 100644 --- a/vllm/entrypoints/serve/__init__.py +++ b/vllm/entrypoints/serve/__init__.py @@ -4,8 +4,19 @@ from fastapi import FastAPI +import vllm.envs as envs +from vllm.logger import init_logger + +logger = init_logger(__name__) + def register_vllm_serve_api_routers(app: FastAPI): + if envs.VLLM_SERVER_DEV_MODE: + logger.warning( + "SECURITY WARNING: Development endpoints are enabled! " + "This should NOT be used in production!" 
+ ) + from vllm.entrypoints.serve.lora.api_router import ( attach_router as attach_lora_router, ) @@ -29,6 +40,18 @@ def register_vllm_serve_api_routers(app: FastAPI): attach_sleep_router(app) + from vllm.entrypoints.serve.rpc.api_router import ( + attach_router as attach_rpc_router, + ) + + attach_rpc_router(app) + + from vllm.entrypoints.serve.cache.api_router import ( + attach_router as attach_cache_router, + ) + + attach_cache_router(app) + from vllm.entrypoints.serve.tokenize.api_router import ( attach_router as attach_tokenize_router, ) @@ -58,3 +81,9 @@ def register_vllm_serve_api_routers(app: FastAPI): ) attach_health_router(app) + + from vllm.entrypoints.serve.instrumentator.server_info import ( + attach_router as attach_server_info_router, + ) + + attach_server_info_router(app) diff --git a/vllm/entrypoints/serve/cache/__init__.py b/vllm/entrypoints/serve/cache/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vllm/entrypoints/serve/cache/api_router.py b/vllm/entrypoints/serve/cache/api_router.py new file mode 100644 index 000000000000..d65989546327 --- /dev/null +++ b/vllm/entrypoints/serve/cache/api_router.py @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + + +from fastapi import APIRouter, FastAPI, Query, Request +from fastapi.responses import Response + +import vllm.envs as envs +from vllm.engine.protocol import EngineClient +from vllm.logger import init_logger + +logger = init_logger(__name__) + +router = APIRouter() + + +def engine_client(request: Request) -> EngineClient: + return request.app.state.engine_client + + +@router.post("/reset_prefix_cache") +async def reset_prefix_cache( + raw_request: Request, + reset_running_requests: bool = Query(default=False), + reset_external: bool = Query(default=False), +): + """ + Reset the local prefix cache. 
+ + Optionally, if the query parameter `reset_external=true` + also resets the external (connector-managed) prefix cache. + + Note that we currently do not check if the prefix cache + is successfully reset in the API server. + + Example: + POST /reset_prefix_cache?reset_external=true + """ + logger.info("Resetting prefix cache...") + + await engine_client(raw_request).reset_prefix_cache( + reset_running_requests, reset_external + ) + return Response(status_code=200) + + +@router.post("/reset_mm_cache") +async def reset_mm_cache(raw_request: Request): + """ + Reset the multi-modal cache. Note that we currently do not check if the + multi-modal cache is successfully reset in the API server. + """ + logger.info("Resetting multi-modal cache...") + await engine_client(raw_request).reset_mm_cache() + return Response(status_code=200) + + +def attach_router(app: FastAPI): + if not envs.VLLM_SERVER_DEV_MODE: + return + app.include_router(router) diff --git a/vllm/entrypoints/serve/instrumentator/server_info.py b/vllm/entrypoints/serve/instrumentator/server_info.py new file mode 100644 index 000000000000..1a69dfacae1c --- /dev/null +++ b/vllm/entrypoints/serve/instrumentator/server_info.py @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + + +from typing import Annotated, Literal + +import pydantic +from fastapi import APIRouter, FastAPI, Query, Request +from fastapi.responses import JSONResponse + +import vllm.envs as envs +from vllm.config import VllmConfig +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +router = APIRouter() +PydanticVllmConfig = pydantic.TypeAdapter(VllmConfig) + + +@router.get("/server_info") +async def show_server_info( + raw_request: Request, + config_format: Annotated[Literal["text", "json"], Query()] = "text", +): + vllm_config: VllmConfig = raw_request.app.state.vllm_config + server_info = { + "vllm_config": str(vllm_config) + if config_format == 
"text" + else PydanticVllmConfig.dump_python(vllm_config, mode="json", fallback=str) + # fallback=str is needed to handle e.g. torch.dtype + } + return JSONResponse(content=server_info) + + +def attach_router(app: FastAPI): + if not envs.VLLM_SERVER_DEV_MODE: + return + app.include_router(router) diff --git a/vllm/entrypoints/serve/rpc/__init__.py b/vllm/entrypoints/serve/rpc/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vllm/entrypoints/serve/rpc/api_router.py b/vllm/entrypoints/serve/rpc/api_router.py new file mode 100644 index 000000000000..54f582c408d5 --- /dev/null +++ b/vllm/entrypoints/serve/rpc/api_router.py @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import json +from http import HTTPStatus +from typing import Any + +from fastapi import APIRouter, FastAPI, HTTPException, Request +from fastapi.responses import JSONResponse, Response + +import vllm.envs as envs +from vllm.engine.protocol import EngineClient +from vllm.logger import init_logger + +logger = init_logger(__name__) + +router = APIRouter() + + +def engine_client(request: Request) -> EngineClient: + return request.app.state.engine_client + + +@router.post("/collective_rpc") +async def collective_rpc(raw_request: Request): + try: + body = await raw_request.json() + except json.JSONDecodeError as e: + raise HTTPException( + status_code=HTTPStatus.BAD_REQUEST.value, + detail=f"JSON decode error: {e}", + ) from e + method = body.get("method") + if method is None: + raise HTTPException( + status_code=HTTPStatus.BAD_REQUEST.value, + detail="Missing 'method' in request body", + ) + # For security reason, only serialized string args/kwargs are passed. + # User-defined `method` is responsible for deserialization if needed. 
+ args: list[str] = body.get("args", []) + kwargs: dict[str, str] = body.get("kwargs", {}) + timeout: float | None = body.get("timeout") + results = await engine_client(raw_request).collective_rpc( + method=method, timeout=timeout, args=tuple(args), kwargs=kwargs + ) + if results is None: + return Response(status_code=200) + response: list[Any] = [] + for result in results: + if result is None or isinstance(result, dict | list): + response.append(result) + else: + response.append(str(result)) + return JSONResponse(content={"results": response}) + + +def attach_router(app: FastAPI): + if not envs.VLLM_SERVER_DEV_MODE: + return + app.include_router(router) diff --git a/vllm/entrypoints/serve/sleep/api_router.py b/vllm/entrypoints/serve/sleep/api_router.py index bc01e185315c..c0e4c3028b2e 100644 --- a/vllm/entrypoints/serve/sleep/api_router.py +++ b/vllm/entrypoints/serve/sleep/api_router.py @@ -52,9 +52,5 @@ async def is_sleeping(raw_request: Request): def attach_router(app: FastAPI): if not envs.VLLM_SERVER_DEV_MODE: return - logger.warning( - "SECURITY WARNING: Development endpoints are enabled! " - "This should NOT be used in production!" - ) app.include_router(router) From b0a5c93787fe095fde236b9523a7dfc92f962057 Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Wed, 17 Dec 2025 02:30:56 -0800 Subject: [PATCH 58/73] [ci] Sync test areas yaml file with test-pipeline (#30862) Signed-off-by: Kevin H. 
Luu --- .buildkite/test_areas/e2e_integration.yaml | 19 +------------------ .buildkite/test_areas/lm_eval.yaml | 4 ++-- .buildkite/test_areas/lora.yaml | 2 ++ .buildkite/test_areas/models_basic.yaml | 2 ++ .buildkite/test_areas/pytorch.yaml | 4 +++- 5 files changed, 10 insertions(+), 21 deletions(-) diff --git a/.buildkite/test_areas/e2e_integration.yaml b/.buildkite/test_areas/e2e_integration.yaml index 93d389815eda..2e0857986c3f 100644 --- a/.buildkite/test_areas/e2e_integration.yaml +++ b/.buildkite/test_areas/e2e_integration.yaml @@ -32,6 +32,7 @@ steps: - label: Prime-RL Integration (2 GPUs) timeout_in_minutes: 30 optional: true + soft_fail: true num_gpus: 2 working_dir: "/vllm-workspace" source_file_dependencies: @@ -39,21 +40,3 @@ steps: - .buildkite/scripts/run-prime-rl-test.sh commands: - bash .buildkite/scripts/run-prime-rl-test.sh - -- label: DeepSeek V2-Lite Async EPLB Accuracy - timeout_in_minutes: 60 - gpu: h100 - optional: true - num_gpus: 4 - working_dir: "/vllm-workspace" - commands: - - bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_async_eplb.sh 0.25 1319 8030 - -- label: Qwen3-Next-80B-A3B-Instruct MTP Async EPLB Accuracy - timeout_in_minutes: 60 - gpu: h100 - optional: true - num_gpus: 4 - working_dir: "/vllm-workspace" - commands: - - bash .buildkite/scripts/scheduled_integration_test/qwen3_next_mtp_async_eplb.sh 0.8 1319 8040 diff --git a/.buildkite/test_areas/lm_eval.yaml b/.buildkite/test_areas/lm_eval.yaml index 9af43e0c375a..e2498512bdef 100644 --- a/.buildkite/test_areas/lm_eval.yaml +++ b/.buildkite/test_areas/lm_eval.yaml @@ -9,7 +9,7 @@ steps: - vllm/model_executor/layers/quantization autorun_on_main: true commands: - - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt --tp-size=1 + - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt - label: LM Eval Large Models (4 GPUs)(A100) gpu: a100 @@ -43,4 +43,4 @@ steps: - csrc/ - 
vllm/model_executor/layers/quantization commands: - - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-blackwell.txt --tp-size=1 + - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-blackwell.txt diff --git a/.buildkite/test_areas/lora.yaml b/.buildkite/test_areas/lora.yaml index 809b4138f44b..59ade40cc8f5 100644 --- a/.buildkite/test_areas/lora.yaml +++ b/.buildkite/test_areas/lora.yaml @@ -22,6 +22,8 @@ steps: # FIXIT: find out which code initialize cuda before running the test # before the fix, we need to use spawn to test it - export VLLM_WORKER_MULTIPROC_METHOD=spawn + # Alot of these tests are on the edge of OOMing + - export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True # There is some Tensor Parallelism related processing logic in LoRA that # requires multi-GPU testing for validation. - pytest -v -s -x lora/test_chatglm3_tp.py diff --git a/.buildkite/test_areas/models_basic.yaml b/.buildkite/test_areas/models_basic.yaml index 39a5d51c4883..2a86596a6d60 100644 --- a/.buildkite/test_areas/models_basic.yaml +++ b/.buildkite/test_areas/models_basic.yaml @@ -9,6 +9,7 @@ steps: source_file_dependencies: - vllm/ - tests/models/test_initialization.py + - tests/models/registry.py commands: # Run a subset of model initialization tests - pytest -v -s models/test_initialization.py::test_can_initialize_small_subset @@ -20,6 +21,7 @@ steps: source_file_dependencies: - vllm/model_executor/models/ - tests/models/test_initialization.py + - tests/models/registry.py commands: # Only when vLLM model source is modified - test initialization of a large # subset of supported models (the complement of the small subset in the above diff --git a/.buildkite/test_areas/pytorch.yaml b/.buildkite/test_areas/pytorch.yaml index 703c82eb1a91..332d5202d833 100644 --- a/.buildkite/test_areas/pytorch.yaml +++ b/.buildkite/test_areas/pytorch.yaml @@ -13,7 +13,9 @@ steps: # tests covered elsewhere. 
# Use `find` to launch multiple instances of pytest so that # they do not suffer from https://github.com/vllm-project/vllm/issues/28965 - - "find compile/ -maxdepth 1 -name 'test_*.py' -exec pytest -s -v {} \\;" + # However, find does not normally propagate error codes, so we combine it with xargs + # (using -0 for proper path handling) + - "find compile/ -maxdepth 1 -name 'test_*.py' -print0 | xargs -0 -n1 -I{} pytest -s -v '{}'" - label: PyTorch Fullgraph Smoke Test timeout_in_minutes: 30 From 0919e258dd867499a9cf21bec825b6cc5ed4acbc Mon Sep 17 00:00:00 2001 From: baoqian426 <1354987947@qq.com> Date: Wed, 17 Dec 2025 19:32:34 +0800 Subject: [PATCH 59/73] [Bugfix] deepseek-V3.2 self.weights_proj has no bias (#30841) Signed-off-by: baoqian <1354987947@qq.com> Signed-off-by: baoqian426 <1354987947@qq.com> --- vllm/model_executor/models/deepseek_v2.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index 146124153c79..6670143cda25 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -835,7 +835,11 @@ def __init__( ) self.k_norm = LayerNorm(self.head_dim, eps=1e-6) self.weights_proj = ReplicatedLinear( - hidden_size, self.n_head, quant_config=None, prefix=f"{prefix}.weights_proj" + hidden_size, + self.n_head, + bias=False, + quant_config=None, + prefix=f"{prefix}.weights_proj", ) self.softmax_scale = self.head_dim**-0.5 From e59ce3b35e96e07b64d6bf396f3a7b9e9cf13f23 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Wed, 17 Dec 2025 11:33:50 +0000 Subject: [PATCH 60/73] Fix lazy import (#30858) Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/v1/structured_output/utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/vllm/v1/structured_output/utils.py b/vllm/v1/structured_output/utils.py index 
cb5ad99cfbdf..74df0fa06767 100644 --- a/vllm/v1/structured_output/utils.py +++ b/vllm/v1/structured_output/utils.py @@ -20,9 +20,9 @@ if TYPE_CHECKING: import outlines_core as oc + import transformers.convert_slow_tokenizer as convert_slow_tokenizer import transformers.file_utils as file_utils import xgrammar as xgr - from transformers.convert_slow_tokenizer import bytes_to_unicode from vllm.tokenizers import TokenizerLike from vllm.v1.worker.gpu_input_batch import InputBatch @@ -30,8 +30,8 @@ xgr = LazyLoader("xgr", globals(), "xgrammar") oc = LazyLoader("oc", globals(), "outlines_core") file_utils = LazyLoader("file_utils", globals(), "transformers.file_utils") - bytes_to_unicode = LazyLoader( - "bytes_to_unicode", globals(), "transformers.convert_slow_tokenizer" + convert_slow_tokenizer = LazyLoader( + "convert_slow_tokenizer", globals(), "transformers.convert_slow_tokenizer" ) TokenizerLike = object @@ -202,7 +202,9 @@ def _reduced_vocabulary( A Dict of token string -> equivalent token ids """ - unicode_to_bytes = {v: k for k, v in bytes_to_unicode().items()} + unicode_to_bytes = { + v: k for k, v in convert_slow_tokenizer.bytes_to_unicode().items() + } def convert_token_to_string(token: str) -> str: string = tokenizer.convert_tokens_to_string([token]) From c8d9bd8b608d922406c33ad8df376baf6c790e8f Mon Sep 17 00:00:00 2001 From: Hank_ <37239608+ILikeIneine@users.noreply.github.com> Date: Wed, 17 Dec 2025 19:58:16 +0800 Subject: [PATCH 61/73] chores: adjust the attn register param order (#30688) Signed-off-by: Hank --- vllm/attention/backends/registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/attention/backends/registry.py b/vllm/attention/backends/registry.py index eaa0fa1d5db3..ed0021db204a 100644 --- a/vllm/attention/backends/registry.py +++ b/vllm/attention/backends/registry.py @@ -201,8 +201,8 @@ def clear_override(self) -> None: def register_backend( backend: AttentionBackendEnum | MambaAttentionBackendEnum, - is_mamba: bool 
= False, class_path: str | None = None, + is_mamba: bool = False, ) -> Callable[[type], type]: """Register or override a backend implementation. From 674ce4d9758e9f12840b75727521d2f26967c442 Mon Sep 17 00:00:00 2001 From: Jialin Ouyang Date: Wed, 17 Dec 2025 03:58:43 -0800 Subject: [PATCH 62/73] [Fix] uniform decode batch check (#30747) Signed-off-by: Jialin Ouyang --- tests/v1/worker/test_gpu_model_runner.py | 84 ++++++++++++++++++++++++ vllm/v1/worker/gpu_model_runner.py | 45 ++++++++++--- 2 files changed, 121 insertions(+), 8 deletions(-) diff --git a/tests/v1/worker/test_gpu_model_runner.py b/tests/v1/worker/test_gpu_model_runner.py index 7b8c4268a523..59f1ac705829 100644 --- a/tests/v1/worker/test_gpu_model_runner.py +++ b/tests/v1/worker/test_gpu_model_runner.py @@ -1110,3 +1110,87 @@ def test_hybrid_cache_integration(model_runner, dist_init): runner._update_states(scheduler_output) assert _is_req_scheduled(runner, req_id) assert _is_req_state_block_table_match(runner, req_id) + + +def test_is_uniform_decode() -> None: + # Normal + assert GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=1, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=16, + ) + assert not GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=2, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=16, + ) + assert not GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=1, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=15, + ) + # Spec decoding + assert GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=5, + uniform_decode_query_len=5, + num_tokens=30, + num_reqs=6, + ) + assert not GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=5, + uniform_decode_query_len=4, + num_tokens=30, + num_reqs=6, + ) + assert not GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=5, + uniform_decode_query_len=5, + num_tokens=30, + num_reqs=7, + ) + # Force uniform decode + assert GPUModelRunner._is_uniform_decode( + 
max_num_scheduled_tokens=1, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=16, + force_uniform_decode=True, + ) + assert GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=2, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=16, + force_uniform_decode=True, + ) + assert GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=1, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=15, + force_uniform_decode=True, + ) + assert not GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=1, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=16, + force_uniform_decode=False, + ) + assert not GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=2, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=16, + force_uniform_decode=False, + ) + assert not GPUModelRunner._is_uniform_decode( + max_num_scheduled_tokens=1, + uniform_decode_query_len=1, + num_tokens=16, + num_reqs=15, + force_uniform_decode=False, + ) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 1db5bc99fff6..a44150432434 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -2777,6 +2777,27 @@ def _model_forward( **model_kwargs, ) + @staticmethod + def _is_uniform_decode( + max_num_scheduled_tokens: int, + uniform_decode_query_len: int, + num_tokens: int, + num_reqs: int, + force_uniform_decode: bool | None = None, + ) -> bool: + """ + Checks if it's a decode batch with same amount scheduled tokens + across all requests. 
+ """ + return ( + ( + (max_num_scheduled_tokens == uniform_decode_query_len) + and (num_tokens == max_num_scheduled_tokens * num_reqs) + ) + if force_uniform_decode is None + else force_uniform_decode + ) + def _determine_batch_execution_and_padding( self, num_tokens: int, @@ -2798,14 +2819,12 @@ def _determine_batch_execution_and_padding( torch.Tensor | None, CUDAGraphStat | None, ]: - num_tokens_padded = self._pad_for_sequence_parallelism(num_tokens) - uniform_decode = ( - ( - (max_num_scheduled_tokens == self.uniform_decode_query_len) - and (num_tokens_padded == max_num_scheduled_tokens * num_reqs) - ) - if force_uniform_decode is None - else force_uniform_decode + uniform_decode = self._is_uniform_decode( + max_num_scheduled_tokens=max_num_scheduled_tokens, + uniform_decode_query_len=self.uniform_decode_query_len, + num_tokens=num_tokens, + num_reqs=num_reqs, + force_uniform_decode=force_uniform_decode, ) # Encoder-decoder models only support CG for decoder_step > 0 (no enc_output # is present). Also, chunked-prefill is disabled, so batch are uniform. 
@@ -2819,6 +2838,7 @@ def _determine_batch_execution_and_padding( else force_has_lora ) + num_tokens_padded = self._pad_for_sequence_parallelism(num_tokens) dispatch_cudagraph = ( lambda num_tokens, disable_full: self.cudagraph_dispatcher.dispatch( num_tokens=num_tokens, @@ -2834,6 +2854,15 @@ def _determine_batch_execution_and_padding( num_tokens_padded, use_cascade_attn or has_encoder_output ) num_tokens_padded = batch_descriptor.num_tokens + if self.compilation_config.pass_config.enable_sp: + assert ( + batch_descriptor.num_tokens + % self.vllm_config.parallel_config.tensor_parallel_size + == 0 + ), ( + "Sequence parallelism requires num_tokens to be " + "a multiple of tensor parallel size" + ) # Extra coordination when running data-parallel since we need to coordinate # across ranks From 2069fe8e6968148075ade0c92c240a768ec5eede Mon Sep 17 00:00:00 2001 From: "rongfu.leng" Date: Wed, 17 Dec 2025 20:14:45 +0800 Subject: [PATCH 63/73] [Docs] fix function name (#30748) Signed-off-by: rongfu.leng --- docs/design/plugin_system.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/plugin_system.md b/docs/design/plugin_system.md index b0ca2dad23d5..0fd448c2153c 100644 --- a/docs/design/plugin_system.md +++ b/docs/design/plugin_system.md @@ -109,7 +109,7 @@ Every plugin has three parts: - `init_device`: This function is called to set up the device for the worker. - `initialize_cache`: This function is called to set cache config for the worker. - `load_model`: This function is called to load the model weights to device. - - `get_kv_cache_spaces`: This function is called to generate the kv cache spaces for the model. + - `get_kv_cache_spec`: This function is called to generate the kv cache spec for the model. - `determine_available_memory`: This function is called to profiles the peak memory usage of the model to determine how much memory can be used for KV cache without OOMs. 
- `initialize_from_config`: This function is called to allocate device KV cache with the specified kv_cache_config - `execute_model`: This function is called every step to inference the model. From 507e81644a2f68a7937714c9130a31c531f730d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=AB=98=E9=91=AB=E5=B4=A7?= <50285788+SongDI911@users.noreply.github.com> Date: Wed, 17 Dec 2025 23:10:59 +0800 Subject: [PATCH 64/73] Adapt the old parameter enable_thinking in chat_template_kwargs (#30852) Signed-off-by: xinsong.gao <1418762819@qq.com> Co-authored-by: Chauncey --- vllm/reasoning/deepseek_v3_reasoning_parser.py | 2 ++ vllm/tokenizers/deepseek_v32.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/vllm/reasoning/deepseek_v3_reasoning_parser.py b/vllm/reasoning/deepseek_v3_reasoning_parser.py index 6604f70badbc..4e6758586bf4 100644 --- a/vllm/reasoning/deepseek_v3_reasoning_parser.py +++ b/vllm/reasoning/deepseek_v3_reasoning_parser.py @@ -26,6 +26,8 @@ def __init__(self, tokenizer: PreTrainedTokenizerBase, *args, **kwargs): chat_kwargs = kwargs.pop("chat_template_kwargs", {}) or {} thinking = bool(chat_kwargs.pop("thinking", False)) + enable_thinking = bool(chat_kwargs.pop("enable_thinking", False)) + thinking = thinking or enable_thinking if thinking: self._parser = DeepSeekR1ReasoningParser(tokenizer, *args, **kwargs) diff --git a/vllm/tokenizers/deepseek_v32.py b/vllm/tokenizers/deepseek_v32.py index bf279a5cf67c..d519b61ddb76 100644 --- a/vllm/tokenizers/deepseek_v32.py +++ b/vllm/tokenizers/deepseek_v32.py @@ -50,6 +50,8 @@ def apply_chat_template( **kwargs, ) -> str | list[int]: thinking = kwargs.get("thinking", False) + enable_thinking = kwargs.get("enable_thinking", False) + thinking = thinking or enable_thinking thinking_mode = "thinking" if not thinking: thinking_mode = "chat" From a9c0e32bc09d80be08e0e390f70f5c1244cebdad Mon Sep 17 00:00:00 2001 From: KimHyemin <102578109+www-spam@users.noreply.github.com> Date: Thu, 18 Dec 2025 00:11:18 +0900 Subject: 
[PATCH 65/73] [Model] Gemma3: Support untied word embeddings (#30827) Signed-off-by: www-spam --- vllm/model_executor/models/gemma3.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/models/gemma3.py b/vllm/model_executor/models/gemma3.py index 40f6d100c767..70f72b5cb9be 100644 --- a/vllm/model_executor/models/gemma3.py +++ b/vllm/model_executor/models/gemma3.py @@ -39,7 +39,10 @@ from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, + VocabParallelEmbedding, +) from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, maybe_remap_kv_scale_name, @@ -532,12 +535,20 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() self.config = config - # currently all existing Gemma models have `tie_word_embeddings` enabled - assert config.tie_word_embeddings self.quant_config = quant_config self.model = Gemma3Model( vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model") ) + + self.lm_head = ParallelLMHead( + config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=maybe_prefix(prefix, "lm_head"), + ) + if config.tie_word_embeddings: + self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens) + self.logits_processor = LogitsProcessor( config.vocab_size, soft_cap=config.final_logit_softcapping ) @@ -565,7 +576,7 @@ def compute_logits( self, hidden_states: torch.Tensor, ) -> torch.Tensor | None: - logits = self.logits_processor(self.model.embed_tokens, hidden_states) + logits = self.logits_processor(self.lm_head, hidden_states) return logits def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) 
-> set[str]: From d35b32ae07425d236b3040ad81ecb8ac3b9deb7b Mon Sep 17 00:00:00 2001 From: NickLucche Date: Thu, 27 Nov 2025 11:10:59 +0000 Subject: [PATCH 66/73] wip Signed-off-by: NickLucche --- .../kv_transfer/kv_connector/v1/base.py | 7 + .../kv_connector/v1/nixl_connector.py | 132 +++++++++++++++--- vllm/v1/core/kv_cache_manager.py | 4 + vllm/v1/core/sched/scheduler.py | 15 +- vllm/v1/kv_cache_interface.py | 2 +- 5 files changed, 140 insertions(+), 20 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/base.py b/vllm/distributed/kv_transfer/kv_connector/v1/base.py index c05e5485a835..74a0f8f830c9 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/base.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/base.py @@ -105,6 +105,13 @@ def request_finished_all_groups( Optional KVTransferParams to be included in the request outputs returned by the engine. """ + # TODO FOR ASYNC USE THSI ONE + # theres issue about overallocating memory as if all layers weer not sliding windo + # shoudl I turn it on experimentally HMA? something like an env var for turning it on + # so that nixl doesnt turn it on by default + # and keep disabling HMA for all other connectors + # what model should I test with, gemma3? 
+ raise NotImplementedError diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index fb4b8ac391af..81656b08fdc2 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -24,6 +24,7 @@ from vllm.attention.selector import get_attn_backend from vllm.config import VllmConfig from vllm.distributed.kv_transfer.kv_connector.utils import TpKVTopology +from vllm.distributed.kv_transfer.kv_connector.v1 import SupportsHMA from vllm.distributed.kv_transfer.kv_connector.v1.base import ( CopyBlocksOp, KVConnectorBase_V1, @@ -204,7 +205,7 @@ def compute_nixl_compatibility_hash( @dataclass class RemoteMeta: - block_ids: list[int] + block_ids: tuple[list[int], ...] host: str port: int engine_id: str @@ -213,9 +214,9 @@ class RemoteMeta: @dataclass class ReqMeta: - local_block_ids: list[int] + local_block_ids: tuple[list[int], ...] # To be used when logical block size does not match the kernel block size - local_physical_block_ids: list[int] + local_physical_block_ids: tuple[list[int], ...] 
tp_size: int remote: RemoteMeta | None = None @@ -243,7 +244,7 @@ def _add_new_req( def add_new_req_to_save( self, request_id: ReqId, - local_block_ids: list[int], + local_block_ids: tuple[list[int], ...], kv_transfer_params: dict[str, Any], ): self.reqs_to_save[request_id] = self._add_new_req( @@ -267,7 +268,7 @@ def add_new_req_to_recv( self.reqs_to_recv[request_id] = req -class NixlConnector(KVConnectorBase_V1): +class NixlConnector(KVConnectorBase_V1, SupportsHMA): def __init__( self, vllm_config: VllmConfig, @@ -275,7 +276,9 @@ def __init__( kv_cache_config: Optional["KVCacheConfig"] = None, ): super().__init__(vllm_config, role, kv_cache_config) + print("NixlConnector init", kv_cache_config.kv_cache_groups, "\n", flush=True) + # USe config to figure out which layers have less blocks assert vllm_config.kv_transfer_config is not None assert vllm_config.kv_transfer_config.engine_id is not None self.engine_id: EngineId = vllm_config.kv_transfer_config.engine_id @@ -287,7 +290,9 @@ def __init__( self.connector_worker: NixlConnectorWorker | None = None elif role == KVConnectorRole.WORKER: self.connector_scheduler = None - self.connector_worker = NixlConnectorWorker(vllm_config, self.engine_id) + self.connector_worker = NixlConnectorWorker( + vllm_config, self.engine_id, kv_cache_config + ) ############################################################ # Class Methods @@ -346,6 +351,17 @@ def request_finished( assert self.connector_scheduler is not None return self.connector_scheduler.request_finished(request, block_ids) + def request_finished_all_groups( + self, + request: "Request", + block_ids: tuple[list[int], ...], + ) -> tuple[bool, dict[str, Any] | None]: + print( + f"request_finished_all_groups: {request.request_id}, {block_ids}", + flush=True, + ) + return self.connector_scheduler.request_finished(request, block_ids) + def set_xfer_handshake_metadata( self, metadata: dict[int, KVConnectorHandshakeMetadata] ) -> None: @@ -612,7 +628,9 @@ def 
get_num_new_matched_tokens( def update_state_after_alloc( self, request: "Request", blocks: "KVCacheBlocks", num_external_tokens: int ): + # TODO ALREADY OK WITH HMA BUT ITS SYNC ONLY SO NOT FOR US params = request.kv_transfer_params + print("update_state_after_alloc", params, "\n\n", flush=True) logger.debug( "NIXLConnector update_state_after_alloc: " "num_external_tokens=%s, kv_transfer_params=%s", @@ -630,13 +648,14 @@ def update_state_after_alloc( # prefilled blocks need to be saved to host memory before transfer. # save all blocks - block_ids = blocks.get_block_ids()[0] + block_ids = blocks.get_block_ids(allow_none=True) # TODO: skip the blocks that are already in the host xfer buffer. # Currently, the host xfer buffer block is 1-to-1 mapped to device # kv blocks, so host blocks won't be flushed as long as its device # block is not overwritten; and it will be safe to skip saving them # to host xfer buffer. - if block_ids: + if block_ids is not None: + # FIXME handle tuple of blocks here self._reqs_need_save[request.request_id] = (request, block_ids) elif params.get("do_remote_prefill"): if params.get("remote_block_ids"): @@ -652,11 +671,22 @@ def update_state_after_alloc( # If remote_blocks and num_external_tokens = 0, we have # a full prefix cache hit on the D worker. We need to call # send_notif in _read_blocks to free the memory on the P. + + # blocks that do not yet have a hash hence they're not full..? 
ok yeah + # these are the blocks that must be pulled (partial prefix cache hit) + # TODO sync with Chen on how prefix cache work with HMA + # FIXME remote <> local len blocks mismatch local_block_ids = ( - blocks.get_unhashed_block_ids() + blocks.get_unhashed_block_ids_all_groups() if num_external_tokens > 0 else [] ) + print( + f"update_state_after_alloc local_block_ids unhashed: {local_block_ids}\n", + flush=True, + ) + # ok so if num_external_tokens==0, we just record the request here but dont actually + # read from worker, just send_notif # Get unhashed blocks to pull from remote. self._reqs_need_recv[request.request_id] = ( request, @@ -707,13 +737,15 @@ def build_connector_meta( self._reqs_in_batch = set() self._reqs_not_processed = set() self._reqs_need_send = {} + if len(meta.reqs_to_recv) > 0: + print("build_connector_meta", meta.reqs_to_recv, "\n", flush=True) return meta def request_finished( self, request: "Request", - block_ids: list[int], + block_ids: list[int] | tuple[list[int], ...], ) -> tuple[bool, dict[str, Any] | None]: """ Once a request is finished, determine whether request blocks @@ -753,7 +785,12 @@ def request_finished( # TODO: check whether block_ids actually ever be 0. If not we could # remove the conditional below - delay_free_blocks = len(block_ids) > 0 + # FIXME + print(f"block_ids: {block_ids}\n\n", flush=True) + if isinstance(block_ids, tuple): + delay_free_blocks = any(len(group) > 0 for group in block_ids) + else: + delay_free_blocks = len(block_ids) > 0 if delay_free_blocks: # Prefill request on remote. 
It will be read from D upon completion @@ -782,7 +819,7 @@ def request_finished( class NixlConnectorWorker: """Implementation of Worker side methods""" - def __init__(self, vllm_config: VllmConfig, engine_id: str): + def __init__(self, vllm_config: VllmConfig, engine_id: str, kv_cache_config: Optional["KVCacheConfig"] = None): if NixlWrapper is None: logger.error("NIXL is not available") raise RuntimeError("NIXL is not available") @@ -792,6 +829,7 @@ def __init__(self, vllm_config: VllmConfig, engine_id: str): # Config. self.vllm_config = vllm_config self.block_size = vllm_config.cache_config.block_size + self.kv_cache_config = kv_cache_config if vllm_config.kv_transfer_config is None: raise ValueError("kv_transfer_config must be set for NixlConnector") @@ -1224,12 +1262,28 @@ def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): # Enable different block lengths for different layers when MLA is used. self.block_len_per_layer = list[int]() self.slot_size_per_layer = list[int]() # HD bytes in kv terms + print("SPLIT K AND V", split_k_and_v, "\n", flush=True) for layer_name, cache_or_caches in xfer_buffers.items(): + # These are actually already ~grouped at this point (2384263) + # model.layers.0.self_attn.attn, model.layers.2.self_attn.attn, + # model.layers.4.self_attn.attn, model.layers.6.self_attn.attn..) + # print(layer_name,"\n", flush=True) cache_list = cache_or_caches if split_k_and_v else [cache_or_caches] for cache in cache_list: + print("layer_name", layer_name, cache.shape, "\n", flush=True) base_addr = cache.data_ptr() if base_addr in seen_base_addresses: + # NOTE (NickLucche) HMA employs memory pooling to share tensors + # across groups. This results in skipping all tensors but the ones + # pointed to by group0. Also, generally we will have more blocks + # per tensor but fewer regions. 
+ print( + "base_addr already in seen_base_addresses", + layer_name, + "\n", + flush=True, + ) continue kernel_block_size = cache.shape[block_size_position] @@ -1253,6 +1307,7 @@ def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): if tensor_size_bytes is None: tensor_size_bytes = curr_tensor_size_bytes self.num_blocks = cache.shape[0] + print("NUM OF BLOCKS", self.num_blocks, "\n", flush=True) assert cache.shape[0] == self.num_blocks, ( "All kv cache tensors must have the same number of blocks" @@ -1276,7 +1331,6 @@ def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): caches_data.append( (base_addr, curr_tensor_size_bytes, self.device_id, "") ) - logger.debug( "Different block lengths collected: %s", set(self.block_len_per_layer) ) @@ -1372,7 +1426,24 @@ def register_local_xfer_handler( data copy correctness. """ block_size_ratio = self.block_size // block_size + print( + "register_local_xfer_handler block_size_ratio", + block_size_ratio, + block_size, + self.block_size, + "\n", + flush=True, + ) + assert block_size_ratio == 1 blocks_data = [] + print( + "seen_base_addresses/num_blocks", + len(self.seen_base_addresses), + self.num_blocks * block_size_ratio, + "\n", + flush=True, + ) + print("kv topo", self.kv_topo.is_kv_layout_blocks_first, "\n", flush=True) for i, base_addr in enumerate(self.seen_base_addresses): # The new block_len is using prefill block_len; # and num_blocks is multiple with N @@ -1406,6 +1477,12 @@ def register_local_xfer_handler( ) descs = self.nixl_wrapper.get_xfer_descs(blocks_data, self.nixl_memory_type) + print( + "register_local_xfer_handler NUM OF descs", + len(blocks_data), + "\n", + flush=True, + ) # NIXL_INIT_AGENT to be used for preparations of local descs. 
return self.nixl_wrapper.prep_xfer_dlist("NIXL_INIT_AGENT", descs) @@ -1987,6 +2064,13 @@ def _read_blocks_for_req(self, req_id: str, meta: ReqMeta): meta.remote.engine_id, req_id, ) + print( + "read_blocks_for_req", + meta.local_physical_block_ids, + meta.remote_block_ids, + "\n", + flush=True, + ) self._read_blocks( request_id=req_id, dst_engine_id=meta.remote.engine_id, @@ -2089,6 +2173,7 @@ def _read_blocks( block_size_ratio=block_size_ratio, ) else: + # FIXME verify that we can remove this case # TODO(mgoin): remove this once we have hybrid memory allocator # Optimization for models with local attention (Llama 4) local_descs_list = [] @@ -2151,6 +2236,7 @@ def _read_blocks( ) # mark all (logical) blocks for this request as invalid if meta := self._recving_metadata.get(request_id): + # FIXME should mark blocks per group here too! self._invalid_block_ids.update(meta.local_block_ids) self.xfer_stats.record_failed_transfer() if handle is not None: @@ -2179,7 +2265,7 @@ def get_mapped_blocks(self, block_ids, block_size_ratio): def _get_block_descs_ids( self, engine_id: str, - block_ids: list[int], + block_ids: tuple[list[int], ...], layer_idx: int | None = None, block_size_ratio: float | None = None, ) -> np.ndarray: @@ -2202,18 +2288,29 @@ def _get_block_descs_ids( # Otherwise, we assume we have MLA and select i-th layer assert self.num_layers == self.num_regions region_ids = np.arange(layer_idx, layer_idx + 1) - + # NOTE (NickLucche) With HMA, every kv group has the same number of layers and + # layers from different groups share the same kv tensor. + # eg block_ids=[[1, 2], [3]]->blocks [1, 2] need to be read across all regions, + # same for [3], but group0-group1 blocks will always differ (different areas). + # Therefore we can just flatten the block_ids and compute the descs ids for all + # groups at once. 
+ print("get_block_descs_ids", block_ids, "\n", flush=True) num_blocks = self.dst_num_blocks[engine_id] if block_size_ratio is not None: num_blocks = int(num_blocks * block_size_ratio) # Compute the desc ids for each block. region_ids = region_ids[:, None] - block_ids = np.array(block_ids)[None, :] + block_ids = np.concatenate(block_ids)[None, :] descs_ids = region_ids * num_blocks + block_ids + print( + "get_block_descs_ids num output", len(descs_ids.flatten()), "\n", flush=True + ) return descs_ids.flatten() - def _logical_to_kernel_block_ids(self, block_ids: list[int]) -> list[int]: + def _logical_to_kernel_block_ids( + self, block_ids: tuple[list[int], ...] + ) -> tuple[list[int], ...]: """ Convert logical block ids to kernel physical block ids. This is required when the logical block size (the one set by the user) @@ -2222,6 +2319,7 @@ def _logical_to_kernel_block_ids(self, block_ids: list[int]) -> list[int]: if self._physical_blocks_per_logical_kv_block == 1: # Noop when physical and logical block sizes are the same return block_ids + # FIXME should you just flatten the tuple here? Result should be the same block_ids_np = np.array(block_ids) block_arange = np.arange(0, self._physical_blocks_per_logical_kv_block).reshape( 1, -1 diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index dc08894e4e77..eb53600003f4 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -84,6 +84,10 @@ def get_unhashed_block_ids(self) -> list[int]: assert len(self.blocks) == 1, "Only one group is supported" return [block.block_id for block in self.blocks[0] if block.block_hash is None] + def get_unhashed_block_ids_all_groups(self) -> list[int]: + """Get block_ids of unhashed blocks from KVCacheBlocks instance.""" + return [[block.block_id for block in group if block.block_hash is None] for group in self.blocks] + def new_empty(self) -> "KVCacheBlocks": """ Creates a new KVCacheBlocks instance with no blocks. 
diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 2ebda1588d44..ac6948027158 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -1572,6 +1572,7 @@ def _connector_finished( block_ids = self.kv_cache_manager.get_block_ids(request.request_id) + # TODO change to supports_hma if not isinstance(self.connector, SupportsHMA): # NOTE(Kuntai): We should deprecate this code path after we enforce # all connectors to support HMA. @@ -1612,13 +1613,23 @@ def _update_waiting_for_remote_kv(self, request: Request) -> bool: self.failed_recving_kv_req_ids.remove(request.request_id) else: # Now that the blocks are ready, actually cache them. - (block_ids,) = self.kv_cache_manager.get_block_ids(request.request_id) - num_computed_tokens = len(block_ids) * self.block_size + # FIXME this should only be changed if hma is enabled + # FIXME group with env var changes + # (block_ids,) = self.kv_cache_manager.get_block_ids(request.request_id) + block_ids = self.kv_cache_manager.get_block_ids(request.request_id) + # FIXME Same thing here, these are blocks across layers now!! + print("SCHEDULER block_ids", block_ids,"\n", flush=True) + # Get number of blocks on full attention layer, we can retrieve at most + # this many tokens + num_computed_tokens = max(len(group) for group in block_ids) * self.block_size # Handle the case where num request tokens less than one block. + print("SCHEDULER request.num_tokens", request.num_tokens,"\n", flush=True) + # FIXME I don't understand why we do this and not just req.num_tokens num_computed_tokens = min(num_computed_tokens, request.num_tokens) if num_computed_tokens == request.num_tokens: num_computed_tokens -= 1 # This will cache the blocks iff caching is enabled. + # FIXME I think this should be per-group..? self.kv_cache_manager.cache_blocks(request, num_computed_tokens) # Update the request state for scheduling. 
diff --git a/vllm/v1/kv_cache_interface.py b/vllm/v1/kv_cache_interface.py index 751862aa9c76..69e1e153ce3f 100644 --- a/vllm/v1/kv_cache_interface.py +++ b/vllm/v1/kv_cache_interface.py @@ -399,6 +399,6 @@ class KVCacheConfig: For models with only one type of attention, there is only one group that contains all layers. For models with multiple types of attention, there will be multiple groups, - see `_get_kv_cache_config_uniform_page_size` for more details. + see `_get_kv_cache_groups_uniform_page_size` for more details. """ kv_cache_groups: list[KVCacheGroupSpec] From 13d0e7c1d04b9c83f95a776bc099d367c4aa8851 Mon Sep 17 00:00:00 2001 From: NickLucche Date: Wed, 17 Dec 2025 14:11:31 -0500 Subject: [PATCH 67/73] wip Signed-off-by: NickLucche --- .../kv_transfer/kv_connector/v1/base.py | 7 ------- .../kv_connector/v1/nixl_connector.py | 20 +++++++++++-------- vllm/v1/core/kv_cache_manager.py | 3 ++- vllm/v1/core/sched/scheduler.py | 1 - 4 files changed, 14 insertions(+), 17 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/base.py b/vllm/distributed/kv_transfer/kv_connector/v1/base.py index 74a0f8f830c9..c05e5485a835 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/base.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/base.py @@ -105,13 +105,6 @@ def request_finished_all_groups( Optional KVTransferParams to be included in the request outputs returned by the engine. """ - # TODO FOR ASYNC USE THSI ONE - # theres issue about overallocating memory as if all layers weer not sliding windo - # shoudl I turn it on experimentally HMA? something like an env var for turning it on - # so that nixl doesnt turn it on by default - # and keep disabling HMA for all other connectors - # what model should I test with, gemma3? 
- raise NotImplementedError diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 81656b08fdc2..5067f7fe9bb9 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -738,11 +738,14 @@ def build_connector_meta( self._reqs_not_processed = set() self._reqs_need_send = {} if len(meta.reqs_to_recv) > 0: + # FIXME ReqMeta(local_block_ids=[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 330], + # this looks wrong, you shouldnt write to 0..? or is it like just skip the transfer? print("build_connector_meta", meta.reqs_to_recv, "\n", flush=True) return meta - def request_finished( + def + ( self, request: "Request", block_ids: list[int] | tuple[list[int], ...], @@ -785,7 +788,6 @@ def request_finished( # TODO: check whether block_ids actually ever be 0. If not we could # remove the conditional below - # FIXME print(f"block_ids: {block_ids}\n\n", flush=True) if isinstance(block_ids, tuple): delay_free_blocks = any(len(group) > 0 for group in block_ids) @@ -803,6 +805,11 @@ def request_finished( self._reqs_need_send[request.request_id] = ( time.perf_counter() + envs.VLLM_NIXL_ABORT_REQUEST_TIMEOUT ) + # FIXME HMA will "pad" groups with fewer blocks with 0s (eg SWA ones). + # Here we un-pad blocks to send the actual remote blocks to be read. + # Actually, wait for https://github.com/vllm-project/vllm/pull/30166, on first + # scheduling step blocks are over-allocated as if all layers were FA. 
+ block_ids = tuple([block for block in group if block != 0] for group in block_ids) return delay_free_blocks, dict( do_remote_prefill=True, @@ -1264,10 +1271,6 @@ def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): self.slot_size_per_layer = list[int]() # HD bytes in kv terms print("SPLIT K AND V", split_k_and_v, "\n", flush=True) for layer_name, cache_or_caches in xfer_buffers.items(): - # These are actually already ~grouped at this point (2384263) - # model.layers.0.self_attn.attn, model.layers.2.self_attn.attn, - # model.layers.4.self_attn.attn, model.layers.6.self_attn.attn..) - # print(layer_name,"\n", flush=True) cache_list = cache_or_caches if split_k_and_v else [cache_or_caches] for cache in cache_list: @@ -2065,9 +2068,9 @@ def _read_blocks_for_req(self, req_id: str, meta: ReqMeta): req_id, ) print( - "read_blocks_for_req", + "read_blocks_for_req local and REMOTE blocks", meta.local_physical_block_ids, - meta.remote_block_ids, + meta.remote.block_ids, "\n", flush=True, ) @@ -2209,6 +2212,7 @@ def _read_blocks( local_block_descs_ids = np.concatenate(local_descs_list) remote_block_descs_ids = np.concatenate(remote_descs_list) + # FIXME blows up here assert len(local_block_descs_ids) == len(remote_block_descs_ids) # Prepare transfer with Nixl. diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index eb53600003f4..a175ce0d6d3f 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -86,7 +86,8 @@ def get_unhashed_block_ids(self) -> list[int]: def get_unhashed_block_ids_all_groups(self) -> list[int]: """Get block_ids of unhashed blocks from KVCacheBlocks instance.""" - return [[block.block_id for block in group if block.block_hash is None] for group in self.blocks] + # Skip padding blocks. 
+ return [[block.block_id for block in group if block.block_hash is None and block.block_id != 0] for group in self.blocks] def new_empty(self) -> "KVCacheBlocks": """ diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index ac6948027158..0ba5a53a0872 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -1624,7 +1624,6 @@ def _update_waiting_for_remote_kv(self, request: Request) -> bool: num_computed_tokens = max(len(group) for group in block_ids) * self.block_size # Handle the case where num request tokens less than one block. print("SCHEDULER request.num_tokens", request.num_tokens,"\n", flush=True) - # FIXME I don't understand why we do this and not just req.num_tokens num_computed_tokens = min(num_computed_tokens, request.num_tokens) if num_computed_tokens == request.num_tokens: num_computed_tokens -= 1 From 49b509f6164e256f11519f07f68a11b48ceeb74c Mon Sep 17 00:00:00 2001 From: NickLucche Date: Thu, 18 Dec 2025 08:38:22 -0500 Subject: [PATCH 68/73] wip Signed-off-by: NickLucche --- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 5067f7fe9bb9..7dd8b1ec1fde 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -744,8 +744,7 @@ def build_connector_meta( return meta - def - ( + def request_finished( self, request: "Request", block_ids: list[int] | tuple[list[int], ...], From 99d026807e1e21c68af632f1951c638838c59334 Mon Sep 17 00:00:00 2001 From: Yifan Qiao Date: Fri, 19 Dec 2025 18:37:39 +0000 Subject: [PATCH 69/73] fix: remove skipped blocks before passing them to the connector when a request finishes Signed-off-by: Yifan Qiao --- vllm/v1/core/kv_cache_manager.py | 11 +++++++++++ 
vllm/v1/core/sched/scheduler.py | 7 +++++++ 2 files changed, 18 insertions(+) diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index a175ce0d6d3f..f8eaea1b9a8b 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -382,6 +382,17 @@ def free(self, request: Request) -> None: """ self.coordinator.free(request.request_id) + def remove_skipped_blocks(self, request_id: str, num_tokens_need_slot: int) -> None: + """Remove the blocks that are no longer needed from `blocks` and replace + the removed blocks with null_block. + + Args: + request_id: The request ID. + num_tokens_need_slot: The number of tokens that need a slot, including + tokens already computed and tokens to be computed. + """ + self.coordinator.remove_skipped_blocks(request_id, num_tokens_need_slot) + def evict_blocks(self, block_ids: set[int]) -> None: """evict blocks from the prefix cache by their block IDs. diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 0ba5a53a0872..c4b7c61bc630 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -1570,6 +1570,13 @@ def _connector_finished( if self.connector is None: return False, None + # Free any out-of-window prefix blocks before we hand the block table to + # the connector. 
+ self.kv_cache_manager.remove_skipped_blocks( + request_id=request.request_id, + num_tokens_need_slot=request.num_tokens, + ) + block_ids = self.kv_cache_manager.get_block_ids(request.request_id) # TODO change to supports_hma From ae80edfc9b587e5ee122b334ccbe62bd33eaffe3 Mon Sep 17 00:00:00 2001 From: NickLucche Date: Mon, 22 Dec 2025 11:08:30 -0500 Subject: [PATCH 70/73] is_null instead of 0 check Signed-off-by: NickLucche --- vllm/v1/core/kv_cache_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index f8eaea1b9a8b..696d9bdfe541 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -87,7 +87,7 @@ def get_unhashed_block_ids(self) -> list[int]: def get_unhashed_block_ids_all_groups(self) -> list[int]: """Get block_ids of unhashed blocks from KVCacheBlocks instance.""" # Skip padding blocks. - return [[block.block_id for block in group if block.block_hash is None and block.block_id != 0] for group in self.blocks] + return [[block.block_id for block in group if block.block_hash is None and not block.is_null] for group in self.blocks] def new_empty(self) -> "KVCacheBlocks": """ From 2eb9904f1dc5dec484ae73c8fff094612a146936 Mon Sep 17 00:00:00 2001 From: NickLucche Date: Tue, 6 Jan 2026 05:10:57 -0500 Subject: [PATCH 71/73] get_sw_clippped_blocks to fix over-allocation for swa on D Signed-off-by: NickLucche --- .../kv_connector/v1/nixl_connector.py | 47 ++++++++++++++----- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 7dd8b1ec1fde..4f7f0ef5571f 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -49,6 +49,7 @@ from vllm.utils.network_utils import make_zmq_path, make_zmq_socket from 
vllm.v1.attention.backends.utils import get_kv_cache_layout from vllm.v1.core.sched.output import SchedulerOutput +from vllm.v1.kv_cache_interface import SlidingWindowSpec from vllm.v1.worker.block_table import BlockTable if TYPE_CHECKING: @@ -285,7 +286,7 @@ def __init__( if role == KVConnectorRole.SCHEDULER: self.connector_scheduler: NixlConnectorScheduler | None = ( - NixlConnectorScheduler(vllm_config, self.engine_id) + NixlConnectorScheduler(vllm_config, self.engine_id, kv_cache_config) ) self.connector_worker: NixlConnectorWorker | None = None elif role == KVConnectorRole.WORKER: @@ -470,10 +471,11 @@ def get_handshake_metadata(self) -> KVConnectorHandshakeMetadata | None: class NixlConnectorScheduler: """Implementation of Scheduler side methods""" - def __init__(self, vllm_config: VllmConfig, engine_id: str): + def __init__(self, vllm_config: VllmConfig, engine_id: str, kv_cache_config: Optional["KVCacheConfig"] = None): self.vllm_config = vllm_config self.block_size = vllm_config.cache_config.block_size self.engine_id: EngineId = engine_id + self.kv_cache_config = kv_cache_config self.side_channel_host = envs.VLLM_NIXL_SIDE_CHANNEL_HOST self.side_channel_port = ( envs.VLLM_NIXL_SIDE_CHANNEL_PORT @@ -506,12 +508,35 @@ def __init__(self, vllm_config: VllmConfig, engine_id: str): # remote prefill or aborted. self._reqs_not_processed: set[ReqId] = set() + # Gather Sliding Window sizes for each kv cache group (if any) + # in number of blocks per SW group. 
+ sw_sizes_tokens = [group.kv_cache_spec.sliding_window if isinstance(group.kv_cache_spec, SlidingWindowSpec) else 0 for group in kv_cache_config.kv_cache_groups] + self.sw_sizes = [n_tokens // self.block_size for n_tokens in sw_sizes_tokens] + print(f"sw_sizes: {self.sw_sizes}\n", flush=True) + def shutdown(self): self._stop_event.set() if self._nixl_handshake_listener_t is not None: self._nixl_handshake_listener_t.join() self._nixl_handshake_listener_t = None + + def get_sw_clippped_blocks(self, block_ids: tuple[list[int], ...]) -> tuple[list[int], ...]: + """ + Clip the number of blocks to the sliding window size for each kv cache group + that employs SWA. + This is necessary because the KV Cache manager initially allocates blocks for + the entire sequence length, and successively cleans up blocks that are outside + the window prior to the `request_finished_all_groups` hook. + """ + # NOTE (NickLucche) This logic is currently handled at the connector level + # because offloading connectors might want to receive the whole sequence even + # for SWA groups. 
We will abstract this logic once the interface is more stable + assert len(block_ids) == len(self.sw_sizes), "Number of KV cache groups must match" + print("CLIPPING BLOCKS", block_ids) + print("to ", tuple([blocks[-self.sw_sizes[i]:] for i, blocks in enumerate(block_ids)]), "\n", flush=True) + return tuple([blocks[-self.sw_sizes[i]:] for i, blocks in enumerate(block_ids)]) + def set_xfer_handshake_metadata( self, metadata: dict[int, KVConnectorHandshakeMetadata] ) -> None: @@ -628,9 +653,7 @@ def get_num_new_matched_tokens( def update_state_after_alloc( self, request: "Request", blocks: "KVCacheBlocks", num_external_tokens: int ): - # TODO ALREADY OK WITH HMA BUT ITS SYNC ONLY SO NOT FOR US params = request.kv_transfer_params - print("update_state_after_alloc", params, "\n\n", flush=True) logger.debug( "NIXLConnector update_state_after_alloc: " "num_external_tokens=%s, kv_transfer_params=%s", @@ -655,6 +678,7 @@ def update_state_after_alloc( # block is not overwritten; and it will be safe to skip saving them # to host xfer buffer. if block_ids is not None: + block_ids = self.get_sw_clippped_blocks(block_ids) # FIXME handle tuple of blocks here self._reqs_need_save[request.request_id] = (request, block_ids) elif params.get("do_remote_prefill"): @@ -681,6 +705,9 @@ def update_state_after_alloc( if num_external_tokens > 0 else [] ) + local_block_ids = self.get_sw_clippped_blocks(local_block_ids) + # FIXME we're allocating one more here for the SWA ones, which break len(local)==len(remote)..? + # this is still 17 print( f"update_state_after_alloc local_block_ids unhashed: {local_block_ids}\n", flush=True, @@ -738,8 +765,6 @@ def build_connector_meta( self._reqs_not_processed = set() self._reqs_need_send = {} if len(meta.reqs_to_recv) > 0: - # FIXME ReqMeta(local_block_ids=[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 330], - # this looks wrong, you shouldnt write to 0..? or is it like just skip the transfer? 
print("build_connector_meta", meta.reqs_to_recv, "\n", flush=True) return meta @@ -804,10 +829,11 @@ def request_finished( self._reqs_need_send[request.request_id] = ( time.perf_counter() + envs.VLLM_NIXL_ABORT_REQUEST_TIMEOUT ) - # FIXME HMA will "pad" groups with fewer blocks with 0s (eg SWA ones). - # Here we un-pad blocks to send the actual remote blocks to be read. - # Actually, wait for https://github.com/vllm-project/vllm/pull/30166, on first - # scheduling step blocks are over-allocated as if all layers were FA. + # NOTE HMA will "mark" empty/null blocks in groups with 0s (eg SWA ones), + # trimming down after allocating for the whole sequence length. + # Here we "unpad" blocks to send the actual remote blocks to be read. + # Equal to `get_sw_clippped_blocks` in functionality but for P, after + # manager has cleaned up blocks and marked them as null. block_ids = tuple([block for block in group if block != 0] for group in block_ids) return delay_free_blocks, dict( @@ -2211,7 +2237,6 @@ def _read_blocks( local_block_descs_ids = np.concatenate(local_descs_list) remote_block_descs_ids = np.concatenate(remote_descs_list) - # FIXME blows up here assert len(local_block_descs_ids) == len(remote_block_descs_ids) # Prepare transfer with Nixl. 
From 7576e55097e3a8eab2b2a1f6fd88c36d57a8d711 Mon Sep 17 00:00:00 2001 From: NickLucche Date: Tue, 6 Jan 2026 05:39:45 -0500 Subject: [PATCH 72/73] fix issue with null blocks on P being one extra (17) by clipping Signed-off-by: NickLucche --- .../kv_transfer/kv_connector/v1/nixl_connector.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 4f7f0ef5571f..e4a2edc4499c 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -812,7 +812,7 @@ def request_finished( # TODO: check whether block_ids actually ever be 0. If not we could # remove the conditional below - print(f"block_ids: {block_ids}\n\n", flush=True) + print(f"request_finished block_ids: {block_ids}\n\n", flush=True) if isinstance(block_ids, tuple): delay_free_blocks = any(len(group) > 0 for group in block_ids) else: @@ -830,11 +830,11 @@ def request_finished( time.perf_counter() + envs.VLLM_NIXL_ABORT_REQUEST_TIMEOUT ) # NOTE HMA will "mark" empty/null blocks in groups with 0s (eg SWA ones), - # trimming down after allocating for the whole sequence length. + # trimming down after allocating for the whole sequence length. Empty + # blocks are always at the start of the list. # Here we "unpad" blocks to send the actual remote blocks to be read. - # Equal to `get_sw_clippped_blocks` in functionality but for P, after - # manager has cleaned up blocks and marked them as null. 
- block_ids = tuple([block for block in group if block != 0] for group in block_ids) + block_ids = self.get_sw_clippped_blocks(block_ids) + print(f"request_finished unpadded block_ids: {block_ids}\n\n", flush=True) return delay_free_blocks, dict( do_remote_prefill=True, From 4f17655e20f801e8c143596945cb4bb6d0165591 Mon Sep 17 00:00:00 2001 From: NickLucche Date: Tue, 6 Jan 2026 05:55:58 -0500 Subject: [PATCH 73/73] remove llama4 opt Signed-off-by: NickLucche --- .../kv_connector/v1/nixl_connector.py | 114 +++--------------- 1 file changed, 14 insertions(+), 100 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index e4a2edc4499c..b59c248d93fb 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -698,16 +698,13 @@ def update_state_after_alloc( # blocks that do not yet have a hash hence they're not full..? ok yeah # these are the blocks that must be pulled (partial prefix cache hit) - # TODO sync with Chen on how prefix cache work with HMA - # FIXME remote <> local len blocks mismatch + # TODO sync with Chen on prefix cache + HMA local_block_ids = ( blocks.get_unhashed_block_ids_all_groups() if num_external_tokens > 0 else [] ) local_block_ids = self.get_sw_clippped_blocks(local_block_ids) - # FIXME we're allocating one more here for the SWA ones, which break len(local)==len(remote)..? 
- # this is still 17 print( f"update_state_after_alloc local_block_ids unhashed: {local_block_ids}\n", flush=True, @@ -994,10 +991,6 @@ def __init__(self, vllm_config: VllmConfig, engine_id: str, kv_cache_config: Opt self.model_config = vllm_config.model_config self.cache_config = vllm_config.cache_config - # TODO(mgoin): remove this once we have hybrid memory allocator - # Optimization for models with local attention (Llama 4) - # List of block window sizes for each layer for local attention - self.block_window_per_layer: list[int | None] = [] self.use_mla = self.model_config.use_mla backend = get_attn_backend( @@ -1294,12 +1287,10 @@ def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): # Enable different block lengths for different layers when MLA is used. self.block_len_per_layer = list[int]() self.slot_size_per_layer = list[int]() # HD bytes in kv terms - print("SPLIT K AND V", split_k_and_v, "\n", flush=True) for layer_name, cache_or_caches in xfer_buffers.items(): cache_list = cache_or_caches if split_k_and_v else [cache_or_caches] for cache in cache_list: - print("layer_name", layer_name, cache.shape, "\n", flush=True) base_addr = cache.data_ptr() if base_addr in seen_base_addresses: # NOTE (NickLucche) HMA employs memory pooling to share tensors @@ -1396,28 +1387,6 @@ def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): self.src_xfer_side_handles[self.block_size] = self.src_xfer_side_handle - # TODO(mgoin): Hybrid memory allocator is currently disabled for - # models with local attention (Llama 4). Can remove this once enabled. 
- if self.model_config.hf_config.model_type == "llama4": - from transformers import Llama4TextConfig - - assert isinstance(self.model_config.hf_text_config, Llama4TextConfig) - llama4_config = self.model_config.hf_text_config - no_rope_layers = llama4_config.no_rope_layers - chunk_size = llama4_config.attention_chunk_size - chunk_block_size = math.ceil(chunk_size / self.block_size) - for layer_idx in range(self.num_layers): - # no_rope_layers[layer_idx] == 0 means NoPE (global) - # Any other value means RoPE (local chunked) - is_local_attention = no_rope_layers[layer_idx] != 0 - block_window = chunk_block_size if is_local_attention else None - self.block_window_per_layer.append(block_window) - logger.debug( - "Llama 4 block window per layer mapping: %s", - self.block_window_per_layer, - ) - assert len(self.block_window_per_layer) == self.num_layers - # After KV Caches registered, listen for new connections. agent_metadata = NixlAgentMetadata( engine_id=self.engine_id, @@ -2186,56 +2155,15 @@ def _read_blocks( # workers will issue xfers to parts of the P worker remote kv caches. # Get descs ids. 
- local_block_descs_ids: np.ndarray - remote_block_descs_ids: np.ndarray - - if not self.block_window_per_layer: - # Default case: assume global attention - remote_block_descs_ids = self._get_block_descs_ids( - dst_engine_id, - remote_block_ids, - ) - local_block_descs_ids = self._get_block_descs_ids( - self.engine_id, - local_block_ids, - block_size_ratio=block_size_ratio, - ) - else: - # FIXME verify that we can remove this case - # TODO(mgoin): remove this once we have hybrid memory allocator - # Optimization for models with local attention (Llama 4) - local_descs_list = [] - remote_descs_list = [] - for layer_idx, block_window in enumerate(self.block_window_per_layer): - # For each layer: - if block_window is None: - # If not chunked, we just use the - # full block lists (global attention) - layer_local_block_ids = local_block_ids - layer_remote_block_ids = remote_block_ids - else: - # If chunked, get the last block_window blocks - layer_local_block_ids = local_block_ids[-block_window:] - layer_remote_block_ids = remote_block_ids[-block_window:] - - # Get descs ids for the layer. 
- layer_local_desc_ids = self._get_block_descs_ids( - dst_engine_id, - layer_local_block_ids, - layer_idx, - ) - layer_remote_desc_ids = self._get_block_descs_ids( - self.engine_id, - layer_remote_block_ids, - layer_idx, - block_size_ratio=block_size_ratio, - ) - - local_descs_list.append(layer_local_desc_ids) - remote_descs_list.append(layer_remote_desc_ids) - - local_block_descs_ids = np.concatenate(local_descs_list) - remote_block_descs_ids = np.concatenate(remote_descs_list) + remote_block_descs_ids = self._get_block_descs_ids( + dst_engine_id, + remote_block_ids, + ) + local_block_descs_ids = self._get_block_descs_ids( + self.engine_id, + local_block_ids, + block_size_ratio=block_size_ratio, + ) assert len(local_block_descs_ids) == len(remote_block_descs_ids) @@ -2294,35 +2222,21 @@ def _get_block_descs_ids( self, engine_id: str, block_ids: tuple[list[int], ...], - layer_idx: int | None = None, block_size_ratio: float | None = None, ) -> np.ndarray: """ Get the descs ids for a set of block ids. - If layer_idx is provided, we use the region_ids for the given layer. - Otherwise, we use all regions. + When HMA is enabled number of descriptors across kv cache groups might differ. + A single flattened array is returned for all groups anyway. """ - if layer_idx is None: - region_ids = np.arange(self.num_regions) - else: - assert layer_idx < self.num_layers - if self.num_layers < self.num_regions: - # If we have more regions than layers, we assume that - # the regions are organized as [K0, V0, K1, V1, ...] 
- # and we select K_i and V_i - assert 2 * self.num_layers == self.num_regions - region_ids = np.arange(2 * layer_idx, 2 * layer_idx + 2) - else: - # Otherwise, we assume we have MLA and select i-th layer - assert self.num_layers == self.num_regions - region_ids = np.arange(layer_idx, layer_idx + 1) + region_ids = np.arange(self.num_regions) # NOTE (NickLucche) With HMA, every kv group has the same number of layers and # layers from different groups share the same kv tensor. # eg block_ids=[[1, 2], [3]]->blocks [1, 2] need to be read across all regions, # same for [3], but group0-group1 blocks will always differ (different areas). # Therefore we can just flatten the block_ids and compute the descs ids for all # groups at once. - print("get_block_descs_ids", block_ids, "\n", flush=True) + print("get_block_descs_ids", block_ids, "\n") num_blocks = self.dst_num_blocks[engine_id] if block_size_ratio is not None: num_blocks = int(num_blocks * block_size_ratio)