From 9b219c7030afda163d0756ce4cb8618814710ba4 Mon Sep 17 00:00:00 2001 From: 01267596 Date: Wed, 5 Nov 2025 07:20:11 +0000 Subject: [PATCH 1/7] [feat] add draft_model spec_decode Signed-off-by: 01267596 --- vllm_ascend/attention/utils.py | 31 +++ vllm_ascend/core/schedule_config.py | 5 + vllm_ascend/patch/platform/patch_config.py | 5 - vllm_ascend/spec_decode/__init__.py | 3 + vllm_ascend/spec_decode/draft_proposer.py | 275 +++++++++++++++++++++ vllm_ascend/spec_decode/eagle_proposer.py | 90 +++++-- vllm_ascend/spec_decode/interface.py | 1 + vllm_ascend/worker/model_runner_v1.py | 6 +- 8 files changed, 388 insertions(+), 28 deletions(-) create mode 100644 vllm_ascend/spec_decode/draft_proposer.py diff --git a/vllm_ascend/attention/utils.py b/vllm_ascend/attention/utils.py index ede83f74a5..b52083666a 100644 --- a/vllm_ascend/attention/utils.py +++ b/vllm_ascend/attention/utils.py @@ -106,6 +106,13 @@ class AscendCommonAttentionMetadata: prefill_context_parallel_metadata: Optional[ AscendPrefillContextParallelMetadata] = None + max_seq_len: int = -1 + + def batch_size(self) -> int: + return self.seq_lens_cpu.shape[0] + + def query_lens(self) -> torch.Tensor: + return self.query_start_loc[1:] - self.query_start_loc[:-1] def split_decodes_and_prefills( common_attn_metadata: AscendCommonAttentionMetadata, @@ -212,3 +219,27 @@ def transdata(nd_mat, block_size: tuple = (16, 16)): nz_mat, (nz_mat.shape[0], nz_mat.shape[1] * nz_mat.shape[2], nz_mat.shape[3])) return nz_mat + +def extend_flat_seqs( + seqs: torch.Tensor, + end_locs: torch.Tensor, + new_vals: torch.Tensor + ) -> torch.Tensor: + """ + This function appends a single new value into multiple sequences + that are stored in a flat format. E.g. 
+ [x1, x2, y1] and [x3, y2] become [x1, x2, x3, y1, y2] + """ + new_len = seqs.shape[0] + new_vals.shape[0] + new_seqs = torch.zeros(new_len, device=seqs.device, dtype=seqs.dtype) + # indices for previous seqs + start_locs = end_locs[:-1] + 1 + seqs_new_idxs = torch.ones_like(seqs) + seqs_new_idxs[start_locs] += 1 + seqs_new_idxs = seqs_new_idxs.cumsum(0) - 1 + # indices for new values + new_val_idxs = end_locs + 1 + torch.arange(new_vals.shape[0], device=seqs.device) + # assign seqs and new vals + new_seqs[seqs_new_idxs] = seqs + new_seqs[new_val_idxs] = new_vals + return new_seqs \ No newline at end of file diff --git a/vllm_ascend/core/schedule_config.py b/vllm_ascend/core/schedule_config.py index 32d63cbc40..cbc3f977bd 100644 --- a/vllm_ascend/core/schedule_config.py +++ b/vllm_ascend/core/schedule_config.py @@ -27,6 +27,7 @@ class AscendSchedulerConfig(SchedulerConfig): enable_chunked_prefill: bool = False max_long_partial_prefills: int = 1 + max_num_partial_prefills: int = 1 long_prefill_token_threshold: int = MAX_INT policy: str = "fcfs" scheduler_cls: Union[str, Type[object]] = ( @@ -47,6 +48,7 @@ def initialize_from_config( # Override default values into original SchedulerConfig scheduler_config["enable_chunked_prefill"] = False scheduler_config["max_long_partial_prefills"] = None + scheduler_config["max_num_partial_prefills"] = None scheduler_config["long_prefill_token_threshold"] = None scheduler_config["policy"] = "fcfs" scheduler_config["scheduler_cls"] = ( @@ -78,6 +80,9 @@ def __post_init__(self, *args) -> None: self.max_long_partial_prefills = 1 self.long_prefill_token_threshold = MAX_INT + if self.max_num_partial_prefills is None: + self.max_num_partial_prefills = 1 + if self.long_prefill_token_threshold is None or \ self.long_prefill_token_threshold <= 0: if self.max_model_len is None: diff --git a/vllm_ascend/patch/platform/patch_config.py b/vllm_ascend/patch/platform/patch_config.py index d6150383f0..94f9131001 100644 --- 
a/vllm_ascend/patch/platform/patch_config.py +++ b/vllm_ascend/patch/platform/patch_config.py @@ -155,11 +155,6 @@ def __post_init__(self): ) else: self.method = "draft_model" - raise NotImplementedError( - "Speculative decoding with draft model is not " - "supported yet. Please consider using other " - "speculative decoding methods such as ngram, medusa, " - "eagle, or deepseek_mtp.") # Replace hf_config for EAGLE draft_model if self.method in ("eagle", "eagle3"): diff --git a/vllm_ascend/spec_decode/__init__.py b/vllm_ascend/spec_decode/__init__.py index 3e17944c5c..857066642c 100644 --- a/vllm_ascend/spec_decode/__init__.py +++ b/vllm_ascend/spec_decode/__init__.py @@ -20,6 +20,7 @@ from vllm_ascend.spec_decode.mtp_proposer import MtpProposer from vllm_ascend.spec_decode.ngram_proposer import NgramProposer from vllm_ascend.torchair.torchair_mtp_proposer import TorchairMtpProposer +from vllm_ascend.spec_decode.draft_proposer import DraftModelProposer def get_spec_decode_method(method, @@ -35,6 +36,8 @@ def get_spec_decode_method(method, if is_torchair_graph: return TorchairMtpProposer(vllm_config, device, runner) return MtpProposer(vllm_config, device, runner) + elif method == 'draft_model': + return DraftModelProposer(vllm_config, device, runner) else: raise ValueError("Unknown speculative decoding method: " f"{method}") diff --git a/vllm_ascend/spec_decode/draft_proposer.py b/vllm_ascend/spec_decode/draft_proposer.py new file mode 100644 index 0000000000..e314ab20bb --- /dev/null +++ b/vllm_ascend/spec_decode/draft_proposer.py @@ -0,0 +1,275 @@ +from dataclasses import dataclass, replace +from typing import Any + +import torch + +from vllm.attention.layer import Attention +from vllm.config import VllmConfig, get_layers_from_vllm_config +from vllm.config.speculative import SpeculativeConfig +from vllm.logger import init_logger +from vllm.model_executor.model_loader import get_model +from vllm.v1.core.sched.output import SchedulerOutput +from 
vllm.v1.sample.metadata import SamplingMetadata +from vllm.v1.spec_decode.metadata import SpecDecodeMetadata +from vllm.v1.spec_decode.eagle import PADDING_SLOT_ID + +from vllm_ascend.spec_decode.eagle_proposer import SpecDecodeBaseProposer +from vllm_ascend.attention.attention_v1 import AscendMetadata +from vllm_ascend.attention.utils import extend_flat_seqs + +logger = init_logger(__name__) + + +class DraftModelProposer(SpecDecodeBaseProposer): + def __init__( + self, + vllm_config: VllmConfig, + device: torch.device, + runner=None, + ): + super().__init__( + vllm_config=vllm_config, + device=device, + pass_hidden_states_to_model=False, + runner=runner, + ) + self.draft_model_config = vllm_config.speculative_config.draft_model_config + self._raise_if_mrope() + self._raise_if_padded_drafter_batch() + self._raise_if_vocab_size_mismatch() + self._raise_if_draft_tp_mismatch() + + + def generate_token_ids(self, + valid_sampled_token_ids: list[list[int]], + sampling_metadata: SamplingMetadata = None, + scheduler_output: SchedulerOutput = None, + spec_decode_metadata: SpecDecodeMetadata = None, + positions: torch.Tensor = None, + num_scheduled_tokens: int = 0, + hidden_states: torch.Tensor = None, + attn_metadata=None, + aux_hidden_states: torch.Tensor = None): + + attn_metadata = self._get_atten_dict(scheduler_output) + attn_metadata = attn_metadata[self.attn_layer_name] + next_token_ids: list[int] = [] + for i, token_ids in enumerate(valid_sampled_token_ids): + if token_ids: + # Common case. + next_token_id = token_ids[-1] + else: + # Partial prefill (rare case). + # Get the next token id from the request state. 
+ req_id = self.runner.input_batch.req_ids[i] + req_state = self.runner.requests[req_id] + seq_len = (req_state.num_computed_tokens + + scheduler_output.num_scheduled_tokens[req_id]) + + next_token_id = req_state.get_token_id(seq_len) + next_token_ids.append(next_token_id) + next_token_ids = torch.tensor(next_token_ids, + dtype=torch.int32, + device=self.device) + + if spec_decode_metadata is None: + # input_ids can be None for multimodal models. + target_token_ids = self.runner.input_ids[:num_scheduled_tokens] + target_positions = positions[:num_scheduled_tokens] + cu_num_tokens =attn_metadata.query_start_loc + else: + num_draft_tokens = spec_decode_metadata.num_draft_tokens + num_rejected_tokens = [n + 1 - len(valid_sampled_token_ids[i]) if n > 0 else 0 + for i, n in enumerate(num_draft_tokens) + ] + num_rejected_tokens = torch.tensor( + num_rejected_tokens, + dtype=torch.int32, + device=self.device, + ) + num_tokens = num_scheduled_tokens - sum(num_rejected_tokens) + cu_num_tokens, token_indices = self.prepare_inputs( + attn_metadata.query_start_loc, num_rejected_tokens, + num_tokens) + target_token_ids = self.runner.input_ids[token_indices] + target_positions = positions[token_indices] + + (target_token_ids, target_positions, + target_slot_mapping, cu_num_tokens) = merge_next_token_ids_into_token_ids( + input_token_ids=target_token_ids, + input_positions=target_positions, + cad=attn_metadata, + next_token_ids=next_token_ids, + block_size=self.block_size, + max_model_len=self.vllm_config.model_config.max_model_len, + arange=self.arange, + cu_num_tokens=cu_num_tokens) + + draft_token_ids = self._propose( + target_token_ids=target_token_ids, + target_positions=target_positions, + target_hidden_states=None, + target_slot_mapping=target_slot_mapping.to(torch.int32), + next_token_ids=next_token_ids, + cu_num_tokens=cu_num_tokens, + block_table=attn_metadata.block_tables, + sampling_metadata=sampling_metadata, + ) + spec_token_ids = draft_token_ids.tolist() + + return 
spec_token_ids + + + + def _raise_if_mrope(self): + if self.draft_model_config.uses_mrope: + raise NotImplementedError( + "Speculative Decoding with draft models does not support M-RoPE yet" + ) + + def _raise_if_padded_drafter_batch(self): + if not self.vllm_config.speculative_config.disable_padded_drafter_batch: + raise NotImplementedError( + "Speculative Decoding with draft models does not support " + "padded drafter batch yet. Please pass --disable-padded-drafter-batch " + "in the speculative_config." + ) + + def _raise_if_vocab_size_mismatch(self): + speculative_config = self.vllm_config.speculative_config + if ( + speculative_config.method == "draft_model" + and speculative_config.target_model_config is not None + and speculative_config.draft_model_config is not None + ): + target_vocab_size = speculative_config.target_model_config.get_vocab_size() + draft_vocab_size = speculative_config.draft_model_config.get_vocab_size() + if target_vocab_size != draft_vocab_size: + raise ValueError( + f"Target and draft model should have the same vocabulary size. " + f"Target model vocab_size={target_vocab_size}. " + f"Draft model vocab_size={draft_vocab_size}. " + f"Using models with different tokenizers can cause out-of-bounds " + f"errors during speculative decoding." + ) + + def _raise_if_draft_tp_mismatch(self): + # Note(Tomas Ruiz) If we run the target model with TP > 1 and + # the draft model with TP = 1, then the different TP ranks collide. + # Specifically when all ranks compile the draft model on rank 0 + # (because TP=1), then the torch compile cache is overwritten and corrupted. + # We need a mechanism like this: https://github.com/vllm-project/vllm/pull/5414 + # To prevent this error, we assert that both TP sizes must be the same. 
+ spec_cfg: SpeculativeConfig = self.vllm_config.speculative_config + tgt_tp = spec_cfg.target_parallel_config.tensor_parallel_size + draft_tp = spec_cfg.draft_parallel_config.tensor_parallel_size + if draft_tp != tgt_tp: + raise ValueError( + f"Currently, 'draft_tensor_parallel_size' and 'tensor_parallel_size' " + f"must be the same. Got {draft_tp} and {tgt_tp}. " + "Please pass 'draft_tensor_parallel_size' in the speculative_config." + ) + + def set_input_ids_first_pass( + self, + target_token_ids: torch.Tensor, + next_token_ids: torch.Tensor, + num_tokens: int, + last_token_indices: torch.Tensor, + ) -> None: + self.input_ids[:num_tokens] = target_token_ids + + def load_model(self, target_model: Any) -> None: + """Takes target_model to satisfy the type checker.""" + + # This must be computed before loading the draft model + # because that mutates the forward_context of the vllm_config + target_attn_layer_names = set( + get_layers_from_vllm_config(self.vllm_config, Attention).keys() + ) + + from vllm.compilation.backends import set_model_tag + + draft_vllm_config: VllmConfig = create_vllm_config_for_draft_model( + target_model_vllm_config=self.vllm_config + ) + logger.info( + "Starting to load draft model %s. TP=%d, rank=%d", + draft_vllm_config.model_config.model, + draft_vllm_config.parallel_config.tensor_parallel_size, + draft_vllm_config.parallel_config.rank, + ) + with set_model_tag("draft_model"): + self.model = get_model(vllm_config=draft_vllm_config, prefix="draft_model") + + # This must be computed after loading the draft model + # because that mutates the forward_context of the vllm_config + draft_attn_layer_names = ( + get_layers_from_vllm_config(self.vllm_config, Attention).keys() + - target_attn_layer_names + ) + self.attn_layer_name = next(iter(draft_attn_layer_names)) + +def create_vllm_config_for_draft_model( + target_model_vllm_config: VllmConfig, +) -> VllmConfig: + """The vllm_config is configured for the target model, e.g. 
+ its quant_config and parallel_config. But the draft model is potentially + quantized differently, and has potentially different tensor_parallel_size. + This function creates a new vllm_config configured for the draft model. + The vllm_config is useful when loading the draft model with get_model(). + """ + old = target_model_vllm_config + new_parallel_config = replace(old.speculative_config.draft_parallel_config, + rank=old.parallel_config.rank + ) + + new: VllmConfig = replace(old, + quant_config=None, # quant_config is recomputed in __init__() + model_config=old.speculative_config.draft_model_config, + parallel_config=new_parallel_config, + ) + return new + +def merge_next_token_ids_into_token_ids( + input_token_ids: torch.Tensor, + input_positions: torch.Tensor, + cad: AscendMetadata, + next_token_ids: torch.Tensor, + block_size: int, + max_model_len: int, + arange: torch.Tensor, + cu_num_tokens + ): + """ + Merges the next token ids with the existing token ids into a flat sequence. + Does the same for the positions, computes new slot mapping, + and updates the common_attn_metadata. The inputs are not modified in-place. 
+ """ + query_end_locs = cu_num_tokens[1:] - 1 + new_token_ids = extend_flat_seqs( + seqs=input_token_ids, end_locs=query_end_locs, new_vals=next_token_ids + ) + logger.warning("new_token_ids: {}".format(new_token_ids)) + + # append new positions + positions_to_append = input_positions[query_end_locs] + 1 + new_positions = extend_flat_seqs( + seqs=input_positions, end_locs=query_end_locs, new_vals=positions_to_append + ) + # recompute slot mapping + batch_size, n_blocks_per_req = cad.block_tables.shape + req_indices = torch.arange(batch_size, device=cad.query_start_loc.device) + + query_lens = cu_num_tokens[1:] - cu_num_tokens[:-1] + req_indices = torch.repeat_interleave(req_indices, query_lens.to(cad.query_start_loc.device) + 1) + block_table_indices = req_indices * n_blocks_per_req + new_positions // block_size + block_nums = cad.block_tables.view(-1)[block_table_indices] + block_offsets = new_positions % block_size + new_slot_mapping = block_nums * block_size + block_offsets + # Mask out the position ids that exceed the max model length. 
+ exceeds_max_model_len = new_positions >= max_model_len + new_slot_mapping.masked_fill_(exceeds_max_model_len, PADDING_SLOT_ID) + + cu_num_tokens = cu_num_tokens + arange[: len(cu_num_tokens)] + return (new_token_ids, new_positions, new_slot_mapping, cu_num_tokens) \ No newline at end of file diff --git a/vllm_ascend/spec_decode/eagle_proposer.py b/vllm_ascend/spec_decode/eagle_proposer.py index 74e2917806..4f94eb8639 100644 --- a/vllm_ascend/spec_decode/eagle_proposer.py +++ b/vllm_ascend/spec_decode/eagle_proposer.py @@ -34,16 +34,23 @@ PADDING_SLOT_ID = -1 -class EagleProposer(Proposer): +class SpecDecodeBaseProposer(Proposer): def __init__(self, vllm_config: VllmConfig, device: torch.device, + pass_hidden_states_to_model: bool, runner=None): - self.name = SpecDcodeType.EAGLE if vllm_config.speculative_config.method == "eagle" else SpecDcodeType.EAGLE3 + if vllm_config.speculative_config.method == "eagle": + self.name = SpecDcodeType.EAGLE + elif vllm_config.speculative_config.method == "draft_model": + self.name = SpecDcodeType.DRAFT_MODEL + else: + self.name = SpecDcodeType.EAGLE3 self.vllm_config = vllm_config self.device = device self.runner = runner + self.pass_hidden_states_to_model = pass_hidden_states_to_model self.block_size = vllm_config.cache_config.block_size # We need to get the hidden size from the draft model config because @@ -143,11 +150,13 @@ def dummy_run(self, self.vllm_config, moe_comm_type=moe_comm_type, num_tokens=num_tokens): - self.model( + model_kwargs = dict( input_ids=self.input_ids[:num_tokens], positions=self.positions[:num_tokens], - hidden_states=self.hidden_states[:num_tokens], ) + if self.pass_hidden_states_to_model: + model_kwargs["hidden_states"] = self.hidden_states[:num_tokens] + self.model(**model_kwargs) def generate_token_ids(self, valid_sampled_token_ids: list[list[int]], @@ -160,7 +169,7 @@ def generate_token_ids(self, attn_metadata=None, aux_hidden_states: torch.Tensor = None): - attn_metadata = 
self._get_eagle_atten_dict(scheduler_output) + attn_metadata = self._get_atten_dict(scheduler_output) next_token_ids: list[int] = [] for i, token_ids in enumerate(valid_sampled_token_ids): if token_ids: @@ -228,7 +237,7 @@ def generate_token_ids(self, spec_token_ids = draft_token_ids.tolist() return spec_token_ids - def _get_eagle_atten_dict( + def _get_atten_dict( self, scheduler_output: "SchedulerOutput", ): @@ -431,12 +440,15 @@ def _propose( target_hidden_states) assert target_hidden_states.shape[-1] == self.hidden_size + + # Shift the input ids by one token. - # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3] - self.input_ids[:num_tokens - 1] = target_token_ids[1:] - # Replace the last token with the next token. - # E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4] - self.input_ids[last_token_indices] = next_token_ids + # # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3] + # self.input_ids[:num_tokens - 1] = target_token_ids[1:] + # # Replace the last token with the next token. 
+ # # E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4] + # self.input_ids[last_token_indices] = next_token_ids + self.set_input_ids_first_pass(target_token_ids, next_token_ids, num_tokens, last_token_indices) seq_lens = (target_positions[last_token_indices] + 1).int() query_lens = cu_num_tokens[1:] - cu_num_tokens[:-1] @@ -483,15 +495,24 @@ def _propose( self.positions[:num_tokens] = target_positions.to(device) self.hidden_states[:num_tokens] = target_hidden_states attn_metadata.block_tables = block_table.to(device) + model_kwargs = { + "input_ids": self.input_ids[:num_input_tokens], + "positions": self.positions[:num_input_tokens] + } + if self.pass_hidden_states_to_model: + model_kwargs["hidden_states"] = self.hidden_states[:num_input_tokens] + with set_ascend_forward_context(attn_metadata, self.vllm_config, moe_comm_type=moe_comm_type, num_tokens=num_input_tokens): - last_hidden_states, hidden_states = self.model( - input_ids=self.input_ids[:num_input_tokens], - positions=self.positions[:num_input_tokens], - hidden_states=self.hidden_states[:num_input_tokens], - ) + ret_hidden_states = self.model(**model_kwargs) + if not self.model_returns_tuple(): + last_hidden_states = ret_hidden_states + hidden_states = ret_hidden_states + else: + last_hidden_states, hidden_states = ret_hidden_states + sample_hidden_states = last_hidden_states[last_token_indices] logits = self.model.compute_logits(sample_hidden_states) draft_token_ids = logits.argmax(dim=-1) @@ -586,16 +607,23 @@ def _propose( attn_metadata.attn_mask = attn_mask attn_metadata.block_tables = block_table.to(device) # Run the model. 
+ model_kwargs = { + "input_ids": self.input_ids[:input_batch_size], + "positions": self.positions[:input_batch_size] + } + if self.pass_hidden_states_to_model: + model_kwargs["hidden_states"] = self.hidden_states[:input_batch_size] with set_ascend_forward_context(attn_metadata, self.vllm_config, moe_comm_type=moe_comm_type, num_tokens=input_batch_size): - last_hidden_states, hidden_states = self.model( - input_ids=self.input_ids[:input_batch_size], - positions=self.positions[:input_batch_size], - hidden_states=self.hidden_states[:input_batch_size], - ) + ret_hidden_states = self.model(**model_kwargs) + if not self.model_returns_tuple(): + last_hidden_states = ret_hidden_states + hidden_states = ret_hidden_states + else: + last_hidden_states, hidden_states = ret_hidden_states hidden_states = hidden_states[:batch_size] logits = self.model.compute_logits(last_hidden_states[:batch_size]) @@ -694,3 +722,23 @@ def _prepare_inputs( torch.cumsum(num_tokens_per_req, dim=0, out=cu_num_tokens[1:]) return cu_num_tokens, token_indices + + def set_input_ids_first_pass( + self, + target_token_ids: torch.Tensor, + next_token_ids: torch.Tensor, + num_tokens: int, + last_token_indices: torch.Tensor, + ) -> None: + self.input_ids[: num_tokens - 1] = target_token_ids[1:] + self.input_ids[last_token_indices] = next_token_ids + + def model_returns_tuple(self) -> bool: + return self.name != SpecDcodeType.DRAFT_MODEL + +class EagleProposer(SpecDecodeBaseProposer): + def __init__(self, + vllm_config: VllmConfig, + device: torch.device, + runner=None): + super().__init__(vllm_config, device, pass_hidden_states_to_model=True, runner=runner) \ No newline at end of file diff --git a/vllm_ascend/spec_decode/interface.py b/vllm_ascend/spec_decode/interface.py index 3f0a36b13c..ddbabd17ff 100644 --- a/vllm_ascend/spec_decode/interface.py +++ b/vllm_ascend/spec_decode/interface.py @@ -13,6 +13,7 @@ class SpecDcodeType(enum.Enum): EAGLE = 1 EAGLE3 = 2 MTP = 4 + DRAFT_MODEL = 5 class Proposer: diff 
--git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py index 8e4acdd0ec..163260f3c0 100644 --- a/vllm_ascend/worker/model_runner_v1.py +++ b/vllm_ascend/worker/model_runner_v1.py @@ -133,6 +133,7 @@ from vllm_ascend.sample.rejection_sampler import AscendRejectionSampler from vllm_ascend.spec_decode import get_spec_decode_method from vllm_ascend.spec_decode.eagle_proposer import EagleProposer +from vllm_ascend.spec_decode.draft_proposer import DraftModelProposer from vllm_ascend.spec_decode.interface import SpecDcodeType from vllm_ascend.spec_decode.mtp_proposer import MtpProposer from vllm_ascend.torchair.torchair_mtp_proposer import TorchairMtpProposer @@ -591,7 +592,7 @@ def _set_up_drafter(self): # Set up speculative decoding. self.spec_attn_mask = None self.drafter: Optional[Union[NgramProposer, EagleProposer, MtpProposer, - TorchairMtpProposer]] = None + TorchairMtpProposer, DraftModelProposer]] = None self.actual_seq_lengths_q: list[int] = [] self.decode_token_per_req = 1 if self.speculative_config: @@ -1941,7 +1942,8 @@ def _build_attn_state(self, num_reqs, num_scheduled_tokens, # Speculative decoding. 
elif np.all(num_valid_tokens == 1): if self.drafter and (self.drafter.name == SpecDcodeType.EAGLE - or self.drafter.name == SpecDcodeType.EAGLE3): + or self.drafter.name == SpecDcodeType.EAGLE3 + or self.drafter.name == SpecDcodeType.DRAFT_MODEL): attn_state = AscendAttentionState.ChunkedPrefill else: attn_state = AscendAttentionState.SpecDecoding From 29376fd4d886183e76368e07945d8bc9c5cdf33d Mon Sep 17 00:00:00 2001 From: 01267596 Date: Wed, 5 Nov 2025 08:34:43 +0000 Subject: [PATCH 2/7] [feat] add draft_model spec_decode Signed-off-by: 01267596 --- vllm_ascend/spec_decode/draft_proposer.py | 146 +++++++++++----------- 1 file changed, 76 insertions(+), 70 deletions(-) diff --git a/vllm_ascend/spec_decode/draft_proposer.py b/vllm_ascend/spec_decode/draft_proposer.py index e314ab20bb..ea6bf339ea 100644 --- a/vllm_ascend/spec_decode/draft_proposer.py +++ b/vllm_ascend/spec_decode/draft_proposer.py @@ -1,8 +1,7 @@ -from dataclasses import dataclass, replace +from dataclasses import replace from typing import Any import torch - from vllm.attention.layer import Attention from vllm.config import VllmConfig, get_layers_from_vllm_config from vllm.config.speculative import SpeculativeConfig @@ -10,12 +9,11 @@ from vllm.model_executor.model_loader import get_model from vllm.v1.core.sched.output import SchedulerOutput from vllm.v1.sample.metadata import SamplingMetadata -from vllm.v1.spec_decode.metadata import SpecDecodeMetadata from vllm.v1.spec_decode.eagle import PADDING_SLOT_ID - -from vllm_ascend.spec_decode.eagle_proposer import SpecDecodeBaseProposer +from vllm.v1.spec_decode.metadata import SpecDecodeMetadata from vllm_ascend.attention.attention_v1 import AscendMetadata from vllm_ascend.attention.utils import extend_flat_seqs +from vllm_ascend.spec_decode.eagle_proposer import SpecDecodeBaseProposer logger = init_logger(__name__) @@ -39,18 +37,18 @@ def __init__( self._raise_if_vocab_size_mismatch() self._raise_if_draft_tp_mismatch() - - def generate_token_ids(self, 
- valid_sampled_token_ids: list[list[int]], - sampling_metadata: SamplingMetadata = None, - scheduler_output: SchedulerOutput = None, - spec_decode_metadata: SpecDecodeMetadata = None, - positions: torch.Tensor = None, - num_scheduled_tokens: int = 0, - hidden_states: torch.Tensor = None, - attn_metadata=None, - aux_hidden_states: torch.Tensor = None): - + def generate_token_ids( + self, + valid_sampled_token_ids: list[list[int]], + sampling_metadata: SamplingMetadata = None, + scheduler_output: SchedulerOutput = None, + spec_decode_metadata: SpecDecodeMetadata = None, + positions: torch.Tensor = None, + num_scheduled_tokens: int = 0, + hidden_states: torch.Tensor = None, + attn_metadata=None, + aux_hidden_states: torch.Tensor = None, + ): attn_metadata = self._get_atten_dict(scheduler_output) attn_metadata = attn_metadata[self.attn_layer_name] next_token_ids: list[int] = [] @@ -63,23 +61,26 @@ def generate_token_ids(self, # Get the next token id from the request state. req_id = self.runner.input_batch.req_ids[i] req_state = self.runner.requests[req_id] - seq_len = (req_state.num_computed_tokens + - scheduler_output.num_scheduled_tokens[req_id]) + seq_len = ( + req_state.num_computed_tokens + + scheduler_output.num_scheduled_tokens[req_id] + ) next_token_id = req_state.get_token_id(seq_len) next_token_ids.append(next_token_id) - next_token_ids = torch.tensor(next_token_ids, - dtype=torch.int32, - device=self.device) - + next_token_ids = torch.tensor( + next_token_ids, dtype=torch.int32, device=self.device + ) + if spec_decode_metadata is None: # input_ids can be None for multimodal models. 
target_token_ids = self.runner.input_ids[:num_scheduled_tokens] target_positions = positions[:num_scheduled_tokens] - cu_num_tokens =attn_metadata.query_start_loc + cu_num_tokens = attn_metadata.query_start_loc else: num_draft_tokens = spec_decode_metadata.num_draft_tokens - num_rejected_tokens = [n + 1 - len(valid_sampled_token_ids[i]) if n > 0 else 0 + num_rejected_tokens = [ + n + 1 - len(valid_sampled_token_ids[i]) if n > 0 else 0 for i, n in enumerate(num_draft_tokens) ] num_rejected_tokens = torch.tensor( @@ -88,22 +89,24 @@ def generate_token_ids(self, device=self.device, ) num_tokens = num_scheduled_tokens - sum(num_rejected_tokens) - cu_num_tokens, token_indices = self.prepare_inputs( - attn_metadata.query_start_loc, num_rejected_tokens, - num_tokens) + cu_num_tokens, token_indices = self._prepare_inputs( + attn_metadata.query_start_loc, num_rejected_tokens, num_tokens + ) target_token_ids = self.runner.input_ids[token_indices] - target_positions = positions[token_indices] + target_positions = positions[token_indices] - (target_token_ids, target_positions, - target_slot_mapping, cu_num_tokens) = merge_next_token_ids_into_token_ids( - input_token_ids=target_token_ids, - input_positions=target_positions, - cad=attn_metadata, - next_token_ids=next_token_ids, - block_size=self.block_size, - max_model_len=self.vllm_config.model_config.max_model_len, - arange=self.arange, - cu_num_tokens=cu_num_tokens) + (target_token_ids, target_positions, target_slot_mapping, cu_num_tokens) = ( + merge_next_token_ids_into_token_ids( + input_token_ids=target_token_ids, + input_positions=target_positions, + cad=attn_metadata, + next_token_ids=next_token_ids, + block_size=self.block_size, + max_model_len=self.vllm_config.model_config.max_model_len, + arange=self.arange, + cu_num_tokens=cu_num_tokens, + ) + ) draft_token_ids = self._propose( target_token_ids=target_token_ids, @@ -118,8 +121,6 @@ def generate_token_ids(self, spec_token_ids = draft_token_ids.tolist() return 
spec_token_ids - - def _raise_if_mrope(self): if self.draft_model_config.uses_mrope: @@ -135,23 +136,23 @@ def _raise_if_padded_drafter_batch(self): "in the speculative_config." ) - def _raise_if_vocab_size_mismatch(self): - speculative_config = self.vllm_config.speculative_config - if ( - speculative_config.method == "draft_model" - and speculative_config.target_model_config is not None - and speculative_config.draft_model_config is not None - ): - target_vocab_size = speculative_config.target_model_config.get_vocab_size() - draft_vocab_size = speculative_config.draft_model_config.get_vocab_size() - if target_vocab_size != draft_vocab_size: - raise ValueError( - f"Target and draft model should have the same vocabulary size. " - f"Target model vocab_size={target_vocab_size}. " - f"Draft model vocab_size={draft_vocab_size}. " - f"Using models with different tokenizers can cause out-of-bounds " - f"errors during speculative decoding." - ) + def _raise_if_vocab_size_mismatch(self): + speculative_config = self.vllm_config.speculative_config + if ( + speculative_config.method == "draft_model" + and speculative_config.target_model_config is not None + and speculative_config.draft_model_config is not None + ): + target_vocab_size = speculative_config.target_model_config.get_vocab_size() + draft_vocab_size = speculative_config.draft_model_config.get_vocab_size() + if target_vocab_size != draft_vocab_size: + raise ValueError( + f"Target and draft model should have the same vocabulary size. " + f"Target model vocab_size={target_vocab_size}. " + f"Draft model vocab_size={draft_vocab_size}. " + f"Using models with different tokenizers can cause out-of-bounds " + f"errors during speculative decoding." 
+ ) def _raise_if_draft_tp_mismatch(self): # Note(Tomas Ruiz) If we run the target model with TP > 1 and @@ -210,6 +211,7 @@ def load_model(self, target_model: Any) -> None: ) self.attn_layer_name = next(iter(draft_attn_layer_names)) + def create_vllm_config_for_draft_model( target_model_vllm_config: VllmConfig, ) -> VllmConfig: @@ -220,17 +222,19 @@ def create_vllm_config_for_draft_model( The vllm_config is useful when loading the draft model with get_model(). """ old = target_model_vllm_config - new_parallel_config = replace(old.speculative_config.draft_parallel_config, - rank=old.parallel_config.rank + new_parallel_config = replace( + old.speculative_config.draft_parallel_config, rank=old.parallel_config.rank ) - - new: VllmConfig = replace(old, + + new: VllmConfig = replace( + old, quant_config=None, # quant_config is recomputed in __init__() model_config=old.speculative_config.draft_model_config, parallel_config=new_parallel_config, ) return new + def merge_next_token_ids_into_token_ids( input_token_ids: torch.Tensor, input_positions: torch.Tensor, @@ -239,8 +243,8 @@ def merge_next_token_ids_into_token_ids( block_size: int, max_model_len: int, arange: torch.Tensor, - cu_num_tokens - ): + cu_num_tokens, +): """ Merges the next token ids with the existing token ids into a flat sequence. 
Does the same for the positions, computes new slot mapping, @@ -251,7 +255,7 @@ def merge_next_token_ids_into_token_ids( seqs=input_token_ids, end_locs=query_end_locs, new_vals=next_token_ids ) logger.warning("new_token_ids: {}".format(new_token_ids)) - + # append new positions positions_to_append = input_positions[query_end_locs] + 1 new_positions = extend_flat_seqs( @@ -260,9 +264,11 @@ def merge_next_token_ids_into_token_ids( # recompute slot mapping batch_size, n_blocks_per_req = cad.block_tables.shape req_indices = torch.arange(batch_size, device=cad.query_start_loc.device) - + query_lens = cu_num_tokens[1:] - cu_num_tokens[:-1] - req_indices = torch.repeat_interleave(req_indices, query_lens.to(cad.query_start_loc.device) + 1) + req_indices = torch.repeat_interleave( + req_indices, query_lens.to(cad.query_start_loc.device) + 1 + ) block_table_indices = req_indices * n_blocks_per_req + new_positions // block_size block_nums = cad.block_tables.view(-1)[block_table_indices] block_offsets = new_positions % block_size @@ -270,6 +276,6 @@ def merge_next_token_ids_into_token_ids( # Mask out the position ids that exceed the max model length. 
exceeds_max_model_len = new_positions >= max_model_len new_slot_mapping.masked_fill_(exceeds_max_model_len, PADDING_SLOT_ID) - - cu_num_tokens = cu_num_tokens + arange[: len(cu_num_tokens)] - return (new_token_ids, new_positions, new_slot_mapping, cu_num_tokens) \ No newline at end of file + + cu_num_tokens = cu_num_tokens + arange[: len(cu_num_tokens)] + return (new_token_ids, new_positions, new_slot_mapping, cu_num_tokens) From 458036db31972cd3b0510a36f77d70a625711849 Mon Sep 17 00:00:00 2001 From: 01267596 Date: Wed, 5 Nov 2025 08:37:01 +0000 Subject: [PATCH 3/7] [feat] add draft_model spec_decode Signed-off-by: 01267596 --- vllm_ascend/spec_decode/draft_proposer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/vllm_ascend/spec_decode/draft_proposer.py b/vllm_ascend/spec_decode/draft_proposer.py index ea6bf339ea..b27b2caccb 100644 --- a/vllm_ascend/spec_decode/draft_proposer.py +++ b/vllm_ascend/spec_decode/draft_proposer.py @@ -254,7 +254,6 @@ def merge_next_token_ids_into_token_ids( new_token_ids = extend_flat_seqs( seqs=input_token_ids, end_locs=query_end_locs, new_vals=next_token_ids ) - logger.warning("new_token_ids: {}".format(new_token_ids)) # append new positions positions_to_append = input_positions[query_end_locs] + 1 From e45c2a67b8acca47e3884869d9e4bf5954839336 Mon Sep 17 00:00:00 2001 From: 01267596 Date: Wed, 5 Nov 2025 09:27:01 +0000 Subject: [PATCH 4/7] [feat]add draft_model spec_decode Signed-off-by: 01267596 --- vllm_ascend/spec_decode/draft_proposer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vllm_ascend/spec_decode/draft_proposer.py b/vllm_ascend/spec_decode/draft_proposer.py index b27b2caccb..fbc3ce7eee 100644 --- a/vllm_ascend/spec_decode/draft_proposer.py +++ b/vllm_ascend/spec_decode/draft_proposer.py @@ -88,9 +88,8 @@ def generate_token_ids( dtype=torch.int32, device=self.device, ) - num_tokens = num_scheduled_tokens - sum(num_rejected_tokens) cu_num_tokens, token_indices = self._prepare_inputs( - 
attn_metadata.query_start_loc, num_rejected_tokens, num_tokens + attn_metadata, num_rejected_tokens ) target_token_ids = self.runner.input_ids[token_indices] target_positions = positions[token_indices] From 85ef466471582c495e44607b16a62b836c4307b3 Mon Sep 17 00:00:00 2001 From: 01267596 Date: Wed, 5 Nov 2025 09:46:07 +0000 Subject: [PATCH 5/7] fix format Signed-off-by: 01267596 --- vllm_ascend/attention/utils.py | 72 +- vllm_ascend/spec_decode/__init__.py | 15 +- vllm_ascend/spec_decode/eagle_proposer.py | 12 +- vllm_ascend/worker/model_runner_v1.py | 2596 ++++++++++++--------- 4 files changed, 1551 insertions(+), 1144 deletions(-) diff --git a/vllm_ascend/attention/utils.py b/vllm_ascend/attention/utils.py index b52083666a..82eca81dc9 100644 --- a/vllm_ascend/attention/utils.py +++ b/vllm_ascend/attention/utils.py @@ -16,8 +16,9 @@ class AscendPrefillContextParallelMetadata: num_actual_tokens_pcp_padded: Optional[int] = None - num_computed_tokens_of_pcp_dcp: Optional[list[Optional[list[Optional[ - list[int]]]]]] = None + num_computed_tokens_of_pcp_dcp: Optional[ + list[Optional[list[Optional[list[int]]]]] + ] = None q_head_idx_tensor: torch.Tensor = None @@ -47,7 +48,7 @@ class AscendCommonAttentionMetadata: """ Per-batch attention metadata, shared across layers and backends. AttentionMetadataBuilder instances use it to construct per-layer metadata. - + For many of the tensors we keep both GPU and CPU versions. 
""" @@ -104,16 +105,18 @@ class AscendCommonAttentionMetadata: sin: torch.Tensor = None prefill_context_parallel_metadata: Optional[ - AscendPrefillContextParallelMetadata] = None + AscendPrefillContextParallelMetadata + ] = None max_seq_len: int = -1 - - def batch_size(self) -> int: + + def batch_size(self) -> int: return self.seq_lens_cpu.shape[0] - def query_lens(self) -> torch.Tensor: + def query_lens(self) -> torch.Tensor: return self.query_start_loc[1:] - self.query_start_loc[:-1] + def split_decodes_and_prefills( common_attn_metadata: AscendCommonAttentionMetadata, decode_threshold: int = 1, @@ -197,7 +200,8 @@ def trans_rope_weight(weight, rope_dim): nope_part = weight[..., :-rope_dim, :] rope_part = weight[..., -rope_dim:, :] reordered_rope_part = torch.cat( - (rope_part[..., ::2, :], rope_part[..., 1::2, :]), dim=-2) + (rope_part[..., ::2, :], rope_part[..., 1::2, :]), dim=-2 + ) return torch.cat((nope_part, reordered_rope_part), dim=-2).contiguous() @@ -210,36 +214,34 @@ def transdata(nd_mat, block_size: tuple = (16, 16)): nz_mat = torch.permute( torch.reshape( nd_mat, - (r // block_size[0], block_size[0], c // block_size[1], - block_size[1]), + (r // block_size[0], block_size[0], c // block_size[1], block_size[1]), ), [2, 0, 1, 3], ) nz_mat = torch.reshape( - nz_mat, - (nz_mat.shape[0], nz_mat.shape[1] * nz_mat.shape[2], nz_mat.shape[3])) + nz_mat, (nz_mat.shape[0], nz_mat.shape[1] * nz_mat.shape[2], nz_mat.shape[3]) + ) return nz_mat -def extend_flat_seqs( - seqs: torch.Tensor, - end_locs: torch.Tensor, - new_vals: torch.Tensor - ) -> torch.Tensor: - """ - This function appends a single new value into multiple sequences - that are stored in a flat format. E.g. 
- [x1, x2, y1] and [x3, y2] become [x1, x2, x3, y1, y2] - """ - new_len = seqs.shape[0] + new_vals.shape[0] - new_seqs = torch.zeros(new_len, device=seqs.device, dtype=seqs.dtype) - # indices for previous seqs - start_locs = end_locs[:-1] + 1 - seqs_new_idxs = torch.ones_like(seqs) - seqs_new_idxs[start_locs] += 1 - seqs_new_idxs = seqs_new_idxs.cumsum(0) - 1 - # indices for new values - new_val_idxs = end_locs + 1 + torch.arange(new_vals.shape[0], device=seqs.device) - # assign seqs and new vals - new_seqs[seqs_new_idxs] = seqs - new_seqs[new_val_idxs] = new_vals - return new_seqs \ No newline at end of file + +def extend_flat_seqs( + seqs: torch.Tensor, end_locs: torch.Tensor, new_vals: torch.Tensor +) -> torch.Tensor: + """ + This function appends a single new value into multiple sequences + that are stored in a flat format. E.g. + [x1, x2, y1] and [x3, y2] become [x1, x2, x3, y1, y2] + """ + new_len = seqs.shape[0] + new_vals.shape[0] + new_seqs = torch.zeros(new_len, device=seqs.device, dtype=seqs.dtype) + # indices for previous seqs + start_locs = end_locs[:-1] + 1 + seqs_new_idxs = torch.ones_like(seqs) + seqs_new_idxs[start_locs] += 1 + seqs_new_idxs = seqs_new_idxs.cumsum(0) - 1 + # indices for new values + new_val_idxs = end_locs + 1 + torch.arange(new_vals.shape[0], device=seqs.device) + # assign seqs and new vals + new_seqs[seqs_new_idxs] = seqs + new_seqs[new_val_idxs] = new_vals + return new_seqs diff --git a/vllm_ascend/spec_decode/__init__.py b/vllm_ascend/spec_decode/__init__.py index 857066642c..886957491d 100644 --- a/vllm_ascend/spec_decode/__init__.py +++ b/vllm_ascend/spec_decode/__init__.py @@ -23,21 +23,18 @@ from vllm_ascend.spec_decode.draft_proposer import DraftModelProposer -def get_spec_decode_method(method, - vllm_config, - device, - runner, - is_torchair_graph=False): +def get_spec_decode_method( + method, vllm_config, device, runner, is_torchair_graph=False +): if method == "ngram": return NgramProposer(vllm_config, device, runner) 
elif method in ["eagle", "eagle3"]: return EagleProposer(vllm_config, device, runner) - elif method == 'deepseek_mtp': + elif method == "deepseek_mtp": if is_torchair_graph: return TorchairMtpProposer(vllm_config, device, runner) return MtpProposer(vllm_config, device, runner) - elif method == 'draft_model': + elif method == "draft_model": return DraftModelProposer(vllm_config, device, runner) else: - raise ValueError("Unknown speculative decoding method: " - f"{method}") + raise ValueError(f"Unknown speculative decoding method: {method}") diff --git a/vllm_ascend/spec_decode/eagle_proposer.py b/vllm_ascend/spec_decode/eagle_proposer.py index 4f94eb8639..e5e4a31dde 100644 --- a/vllm_ascend/spec_decode/eagle_proposer.py +++ b/vllm_ascend/spec_decode/eagle_proposer.py @@ -188,7 +188,7 @@ def generate_token_ids(self, next_token_ids = torch.tensor(next_token_ids, dtype=torch.int32, device=self.device) - eagle_attn_metadata = attn_metadata[self.attn_layer_name] + draft_attn_metadata = attn_metadata[self.attn_layer_name] if spec_decode_metadata is None: # input_ids can be None for multimodal models. 
target_token_ids = self.runner.input_ids[:num_scheduled_tokens] @@ -199,8 +199,8 @@ def generate_token_ids(self, dim=-1) else: target_hidden_states = hidden_states[:num_scheduled_tokens] - target_slot_mapping = eagle_attn_metadata.slot_mapping - cu_num_tokens = eagle_attn_metadata.query_start_loc + target_slot_mapping = draft_attn_metadata.slot_mapping + cu_num_tokens = draft_attn_metadata.query_start_loc else: num_draft_tokens = spec_decode_metadata.num_draft_tokens num_rejected_tokens = [ @@ -213,7 +213,7 @@ def generate_token_ids(self, device=self.device, ) cu_num_tokens, token_indices =\ - self._prepare_inputs(eagle_attn_metadata, num_rejected_tokens) + self._prepare_inputs(draft_attn_metadata, num_rejected_tokens) target_token_ids = self.runner.input_ids[token_indices] target_positions = positions[token_indices] if self.name == SpecDcodeType.EAGLE3: @@ -221,7 +221,7 @@ def generate_token_ids(self, [h[token_indices] for h in aux_hidden_states], dim=-1) else: target_hidden_states = hidden_states[token_indices] - target_slot_mapping = eagle_attn_metadata.slot_mapping[ + target_slot_mapping = draft_attn_metadata.slot_mapping[ token_indices] draft_token_ids = self._propose( @@ -231,7 +231,7 @@ def generate_token_ids(self, target_slot_mapping=target_slot_mapping, next_token_ids=next_token_ids, cu_num_tokens=cu_num_tokens, - block_table=eagle_attn_metadata.block_tables, + block_table=draft_attn_metadata.block_tables, sampling_metadata=sampling_metadata, ) spec_token_ids = draft_token_ids.tolist() diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py index 163260f3c0..0e12aa1049 100644 --- a/vllm_ascend/worker/model_runner_v1.py +++ b/vllm_ascend/worker/model_runner_v1.py @@ -38,6 +38,7 @@ import torch._dynamo.cache_size import torch.distributed as dist import torch.nn as nn +import vllm_ascend.envs as envs_ascend from tqdm import tqdm # type: ignore from vllm.attention import AttentionType, get_attn_backend from 
vllm.attention.backends.abstract import AttentionBackend @@ -102,8 +103,6 @@ gather_mm_placeholders, sanity_check_mm_encoder_outputs, scatter_mm_placeholders) - -import vllm_ascend.envs as envs_ascend from vllm_ascend.ascend_config import get_ascend_config from vllm_ascend.ascend_forward_context import (MoECommType, set_ascend_forward_context) @@ -132,8 +131,8 @@ from vllm_ascend.sample.logits_processor import build_logitsprocs from vllm_ascend.sample.rejection_sampler import AscendRejectionSampler from vllm_ascend.spec_decode import get_spec_decode_method -from vllm_ascend.spec_decode.eagle_proposer import EagleProposer from vllm_ascend.spec_decode.draft_proposer import DraftModelProposer +from vllm_ascend.spec_decode.eagle_proposer import EagleProposer from vllm_ascend.spec_decode.interface import SpecDcodeType from vllm_ascend.spec_decode.mtp_proposer import MtpProposer from vllm_ascend.torchair.torchair_mtp_proposer import TorchairMtpProposer @@ -164,7 +163,6 @@ from vllm.attention.layer import Attention from vllm.config import CompilationLevel from vllm.utils import LazyLoader, is_pin_memory_available - from vllm_ascend.models.layers.mla import AscendMultiHeadLatentAttention else: from vllm.attention.layer import MLAAttention @@ -210,8 +208,7 @@ def graph_capture(device: torch.device): in order to explicitly distinguish the kernels to capture from other kernels possibly launched on background in the default stream. """ - graph_capture_context = GraphCaptureContext( - torch.npu.Stream(device=device)) + graph_capture_context = GraphCaptureContext(torch.npu.Stream(device=device)) stream = graph_capture_context.stream # we use nullcontext now @@ -229,7 +226,6 @@ def graph_capture(device: torch.device): # Wrapper for ModelRunnerOutput to support overlapped execution. 
class AsyncNPUModelRunnerOutput(AsyncModelRunnerOutput): - def __init__( self, model_runner_output: ModelRunnerOutput, @@ -252,7 +248,8 @@ def __init__( with torch.npu.stream(async_output_copy_stream): async_output_copy_stream.wait_stream(default_stream) self._sampled_token_ids_cpu = self._sampled_token_ids.to( - 'cpu', non_blocking=True) + "cpu", non_blocking=True + ) self._async_copy_ready_event.record() def get_output(self) -> ModelRunnerOutput: @@ -275,7 +272,6 @@ def get_output(self) -> ModelRunnerOutput: class NPUModelRunner(LoRAModelRunnerMixin): - def __init__(self, vllm_config: VllmConfig, device: torch.device): self.vllm_config = vllm_config self.model_config = vllm_config.model_config @@ -288,19 +284,22 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device): self.scheduler_config = vllm_config.scheduler_config self.speculative_config = vllm_config.speculative_config self.block_size = vllm_config.cache_config.block_size - self.max_num_blocks_per_req = cdiv(self.model_config.max_model_len, - self.block_size) + self.max_num_blocks_per_req = cdiv( + self.model_config.max_model_len, self.block_size + ) self.max_num_tokens = self.scheduler_config.max_num_batched_tokens - decode_max_num_seqs = getattr(self.scheduler_config, - 'decode_max_num_seqs', 0) - self.max_num_reqs = max(self.scheduler_config.max_num_seqs, - decode_max_num_seqs) + decode_max_num_seqs = getattr(self.scheduler_config, "decode_max_num_seqs", 0) + self.max_num_reqs = max(self.scheduler_config.max_num_seqs, decode_max_num_seqs) self.dp_size = vllm_config.parallel_config.data_parallel_size self.dp_rank = vllm_config.parallel_config.data_parallel_rank - self.pcp_size = get_prefill_context_model_parallel_world_size( - ) if prefill_context_parallel_enable() else 1 - self.pcp_rank = get_prefill_context_model_parallel_rank( - ) if self.pcp_size > 1 else 0 + self.pcp_size = ( + get_prefill_context_model_parallel_world_size() + if prefill_context_parallel_enable() + else 1 + ) + 
self.pcp_rank = ( + get_prefill_context_model_parallel_rank() if self.pcp_size > 1 else 0 + ) self.dcp_size = get_dcp_group().world_size self.dcp_rank = get_dcp_group().rank_in_group self.device = device @@ -336,13 +335,15 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device): else: self.chunked_prefill_enabled = True self.weight_prefetch_method = WeightPrefetchMethod( - self.ascend_config.weight_prefetch_config) + self.ascend_config.weight_prefetch_config + ) if self.cache_config.cache_dtype == "auto": self.kv_cache_dtype = self.dtype else: self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[ - self.cache_config.cache_dtype] + self.cache_config.cache_dtype + ] # use_hybrid_blocks: if hybrid blocks is used. self.use_hybrid_blocks: bool = False self.need_accepted_tokens: bool = False @@ -355,25 +356,26 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device): self.max_num_tokens, self.model_config.get_hidden_size(), dtype=self.dtype, - numpy=False) - self.is_token_ids = self._make_buffer(self.max_num_tokens, - dtype=torch.bool) + numpy=False, + ) + self.is_token_ids = self._make_buffer(self.max_num_tokens, dtype=torch.bool) # Set up Attention - self.use_sparse = hasattr(self.vllm_config.model_config.hf_config, - "index_topk") - self.attn_backend = get_attn_backend(0, - self.dtype, - None, - self.block_size, - use_mla=self.model_config.use_mla, - use_sparse=self.use_sparse) + self.use_sparse = hasattr(self.vllm_config.model_config.hf_config, "index_topk") + self.attn_backend = get_attn_backend( + 0, + self.dtype, + None, + self.block_size, + use_mla=self.model_config.use_mla, + use_sparse=self.use_sparse, + ) if self.pcp_size > 1: self.attn_mask_builder = None else: self.attn_mask_builder = AttentionMaskBuilder( - self.scheduler_config.max_num_batched_tokens, self.dtype, - self.device) + self.scheduler_config.max_num_batched_tokens, self.dtype, self.device + ) self._set_up_drafter() @@ -387,36 +389,40 @@ def __init__(self, vllm_config: 
VllmConfig, device: torch.device): self._may_pad_kv_consumer_num_seq() # Persistent batch. - self.input_ids = torch.zeros(self.max_num_tokens, - dtype=torch.int32, - device=self.device) - self.positions = torch.zeros(self.max_num_tokens, - dtype=torch.int64, - device=self.device) - self.query_start_loc = torch.zeros(self.max_num_reqs + 1, - dtype=torch.int32, - device=self.device) - self.seq_lens = torch.zeros(self.max_num_reqs, - dtype=torch.int32, - device=self.device) - - if self.vllm_config.model_config.use_mla and \ - self.compilation_config.cudagraph_mode == CUDAGraphMode.FULL_DECODE_ONLY: + self.input_ids = torch.zeros( + self.max_num_tokens, dtype=torch.int32, device=self.device + ) + self.positions = torch.zeros( + self.max_num_tokens, dtype=torch.int64, device=self.device + ) + self.query_start_loc = torch.zeros( + self.max_num_reqs + 1, dtype=torch.int32, device=self.device + ) + self.seq_lens = torch.zeros( + self.max_num_reqs, dtype=torch.int32, device=self.device + ) + + if ( + self.vllm_config.model_config.use_mla + and self.compilation_config.cudagraph_mode == CUDAGraphMode.FULL_DECODE_ONLY + ): rope_dim = self.model_config.hf_text_config.qk_rope_head_dim - self.cos = torch.ones(self.max_num_reqs * - self.decode_token_per_req, - 1, - 1, - rope_dim, - dtype=self.dtype, - device=self.device) - self.sin = torch.zeros(self.max_num_reqs * - self.decode_token_per_req, - 1, - 1, - rope_dim, - dtype=self.dtype, - device=self.device) + self.cos = torch.ones( + self.max_num_reqs * self.decode_token_per_req, + 1, + 1, + rope_dim, + dtype=self.dtype, + device=self.device, + ) + self.sin = torch.zeros( + self.max_num_reqs * self.decode_token_per_req, + 1, + 1, + rope_dim, + dtype=self.dtype, + device=self.device, + ) else: self.cos = None self.sin = None @@ -434,80 +440,80 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device): # identical position IDs, making M-RoPE functionally equivalent to # 1D-RoPE. 
# See page 5 of https://arxiv.org/abs/2409.12191 - self.mrope_positions = torch.zeros((3, self.max_num_tokens + 1), - dtype=torch.int64, - device=self.device) + self.mrope_positions = torch.zeros( + (3, self.max_num_tokens + 1), dtype=torch.int64, device=self.device + ) self.mrope_positions_cpu = torch.zeros( (3, self.max_num_tokens + 1), dtype=torch.int64, device="cpu", - pin_memory=True) + pin_memory=True, + ) self.mrope_positions_np = self.mrope_positions_cpu.numpy() # OPTIMIZATION: Cache the tensors rather than creating them every step. - self.arange_np: npt.NDArray[np.int32] = np.arange(max( - self.max_num_reqs + 1, self.model_config.max_model_len, - self.max_num_tokens), - dtype=np.int32) + self.arange_np: npt.NDArray[np.int32] = np.arange( + max( + self.max_num_reqs + 1, + self.model_config.max_model_len, + self.max_num_tokens, + ), + dtype=np.int32, + ) # NOTE(woosuk): These tensors are "stateless", i.e., they are literally # a faster version of creating a new tensor every time. Thus, we should # not make any assumptions about the values in these tensors. 
- self.input_ids_cpu = torch.zeros(self.max_num_tokens, - dtype=torch.int32, - device="cpu", - pin_memory=True) - self.positions_cpu = torch.zeros(self.max_num_tokens, - dtype=torch.int64, - device="cpu", - pin_memory=True) + self.input_ids_cpu = torch.zeros( + self.max_num_tokens, dtype=torch.int32, device="cpu", pin_memory=True + ) + self.positions_cpu = torch.zeros( + self.max_num_tokens, dtype=torch.int64, device="cpu", pin_memory=True + ) self.positions_np = self.positions_cpu.numpy() - self.query_start_loc_cpu = torch.zeros(self.max_num_reqs + 1, - dtype=torch.int32, - device="cpu", - pin_memory=True) + self.query_start_loc_cpu = torch.zeros( + self.max_num_reqs + 1, dtype=torch.int32, device="cpu", pin_memory=True + ) self.query_start_loc_np = self.query_start_loc_cpu.numpy() - self.seq_lens_cpu = torch.zeros(self.max_num_reqs, - dtype=torch.int32, - device="cpu", - pin_memory=True) + self.seq_lens_cpu = torch.zeros( + self.max_num_reqs, dtype=torch.int32, device="cpu", pin_memory=True + ) self.seq_lens_np = self.seq_lens_cpu.numpy() - self.pcp_allgather_restore_idx = torch.zeros(self.max_num_tokens, - dtype=torch.int32, - device=self.device) + self.pcp_allgather_restore_idx = torch.zeros( + self.max_num_tokens, dtype=torch.int32, device=self.device + ) self.num_pcp_pads = torch.zeros(self.max_num_reqs, dtype=torch.int32) - self.pcp_padded_slot_mapping = torch.zeros(self.max_num_tokens, - dtype=torch.int32, - device=self.device) + self.pcp_padded_slot_mapping = torch.zeros( + self.max_num_tokens, dtype=torch.int32, device=self.device + ) self.num_actual_tokens_pcp_padded = 0 if self.speculative_config and self.pcp_size > 1: - self.input_ids_pcp_full = torch.zeros(self.max_num_tokens, - dtype=torch.int32, - device="cpu", - pin_memory=True) - self.query_start_loc_pcp_full = torch.zeros(self.max_num_reqs + 1, - dtype=torch.int32, - device="cpu", - pin_memory=True) - self.query_start_loc_pcp_full_np = self.query_start_loc_pcp_full.numpy( + 
self.input_ids_pcp_full = torch.zeros( + self.max_num_tokens, dtype=torch.int32, device="cpu", pin_memory=True + ) + self.query_start_loc_pcp_full = torch.zeros( + self.max_num_reqs + 1, dtype=torch.int32, device="cpu", pin_memory=True + ) + self.query_start_loc_pcp_full_np = self.query_start_loc_pcp_full.numpy() + self.positions_pcp_full = torch.zeros( + self.max_num_tokens, dtype=torch.int64, device="cpu", pin_memory=True ) - self.positions_pcp_full = torch.zeros(self.max_num_tokens, - dtype=torch.int64, - device="cpu", - pin_memory=True) self.positions_np_pcp_full = self.positions_pcp_full.numpy() self.use_aclgraph = self._use_aclgraph() self.aclgraph_batch_sizes = list( - reversed(self.compilation_config.cudagraph_capture_sizes)) + reversed(self.compilation_config.cudagraph_capture_sizes) + ) - self.uniform_decode_query_len = 1 if not self.speculative_config else \ - 1 + self.speculative_config.num_speculative_tokens + self.uniform_decode_query_len = ( + 1 + if not self.speculative_config + else 1 + self.speculative_config.num_speculative_tokens + ) # aclgraph dispatcher for runtime aclgraph dispatching. self.aclgraph_dispatcher = CudagraphDispatcher(self.vllm_config) # Cached outputs. 
- self._draft_token_ids: Optional[Union[list[list[int]], - torch.Tensor]] = None + self._draft_token_ids: Optional[Union[list[list[int]], torch.Tensor]] = None # NOTE: we need to use `in_profile_run` to determine whether `enable_force_load_balance` is True self.in_profile_run = False @@ -521,31 +527,36 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device): ) else: self.reserved_mc2_mask = None - self.dynamic_eplb = self.ascend_config.dynamic_eplb or self.ascend_config.expert_map_record_path + self.dynamic_eplb = ( + self.ascend_config.dynamic_eplb or self.ascend_config.expert_map_record_path + ) if self.dynamic_eplb: EPLBParamUtils.check_dynamic_eplb(self.ascend_config.dynamic_eplb) EPLBParamUtils.check_expert_map_record_path( - self.ascend_config.expert_map_record_path) + self.ascend_config.expert_map_record_path + ) self.is_eplb_warmuped = False self.policy_type = self.ascend_config.eplb_policy_type self.eplb_loader = D2DExpertWeightLoader() self.manager = Manager() - self.shared_dict = self.manager.dict({ - "expert_map": None, - "moe_load": None, - "expert_maps": None - }) - self.eplb_process = EplbProcess(shared_dict=self.shared_dict, - policy_type=self.policy_type, - enable_d2d=True) + self.shared_dict = self.manager.dict( + {"expert_map": None, "moe_load": None, "expert_maps": None} + ) + self.eplb_process = EplbProcess( + shared_dict=self.shared_dict, + policy_type=self.policy_type, + enable_d2d=True, + ) self.process = self.eplb_process._launch_process() ascend_config = get_ascend_config() - self.eplb_updator = EplbUpdator(ascend_config, self.eplb_loader, - self.eplb_process, self.process) + self.eplb_updator = EplbUpdator( + ascend_config, self.eplb_loader, self.eplb_process, self.process + ) self.use_async_scheduling = self.scheduler_config.async_scheduling - self.async_output_copy_stream = torch.npu.Stream() if \ - self.use_async_scheduling else None + self.async_output_copy_stream = ( + torch.npu.Stream() if self.use_async_scheduling else 
None + ) # Input Batch # NOTE(Chen): Ideally, we should initialize the input batch inside # `initialize_kv_cache` based on the kv cache config. However, as in @@ -565,61 +576,75 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device): block_sizes=[self.block_size], is_spec_decode=bool(self.vllm_config.speculative_config), logitsprocs=build_logitsprocs( - self.vllm_config, self.device, self.pin_memory, + self.vllm_config, + self.device, + self.pin_memory, self.is_pooling_model, - self.vllm_config.model_config.logits_processors), + self.vllm_config.model_config.logits_processors, + ), is_pooling_model=self.is_pooling_model, kernel_block_sizes=[[self.vllm_config.cache_config.block_size]], - cp_kv_cache_interleave_size=self.parallel_config. - cp_kv_cache_interleave_size - if prefill_context_parallel_enable() else 1, + cp_kv_cache_interleave_size=self.parallel_config.cp_kv_cache_interleave_size + if prefill_context_parallel_enable() + else 1, + ) + self.num_accepted_tokens = self._make_buffer( + self.max_num_reqs, dtype=torch.int64 ) - self.num_accepted_tokens = self._make_buffer(self.max_num_reqs, - dtype=torch.int64) - self.num_draft_tokens = self._make_buffer(self.max_num_reqs, - dtype=torch.int32) + self.num_draft_tokens = self._make_buffer(self.max_num_reqs, dtype=torch.int32) # Only relevant for multimodal models self.mm_registry = MULTIMODAL_REGISTRY self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs( - self.model_config) + self.model_config + ) if self.supports_mm_inputs: - self.is_mm_embed = self._make_buffer(self.max_num_tokens, - dtype=torch.bool) + self.is_mm_embed = self._make_buffer(self.max_num_tokens, dtype=torch.bool) # TODO: EVS Support (Video tokens pruning) (see vllm#22980) self.is_multimodal_pruning_enabled = False def _set_up_drafter(self): # Set up speculative decoding. 
self.spec_attn_mask = None - self.drafter: Optional[Union[NgramProposer, EagleProposer, MtpProposer, - TorchairMtpProposer, DraftModelProposer]] = None + self.drafter: Optional[ + Union[ + NgramProposer, + EagleProposer, + MtpProposer, + TorchairMtpProposer, + DraftModelProposer, + ] + ] = None self.actual_seq_lengths_q: list[int] = [] self.decode_token_per_req = 1 if self.speculative_config: spec_token_num = self.speculative_config.num_speculative_tokens assert spec_token_num > 0 self.decode_token_per_req = 1 + spec_token_num - self.spec_attn_mask = torch.triu(torch.ones(2048, - 2048, - dtype=torch.bool), - diagonal=1).to(self.device) + self.spec_attn_mask = torch.triu( + torch.ones(2048, 2048, dtype=torch.bool), diagonal=1 + ).to(self.device) if get_pp_group().is_last_rank: self.drafter = self._get_drafter() if vllm_version_is("0.11.0"): self.rejection_sampler = AscendRejectionSampler() else: - self.rejection_sampler = AscendRejectionSampler( - self.sampler) + self.rejection_sampler = AscendRejectionSampler(self.sampler) self.actual_seq_lengths_q = list( - range(self.decode_token_per_req, self.max_num_tokens + 1, - self.decode_token_per_req)) - self.discard_request_indices = self._make_buffer(self.max_num_reqs, - dtype=torch.int64) + range( + self.decode_token_per_req, + self.max_num_tokens + 1, + self.decode_token_per_req, + ) + ) + self.discard_request_indices = self._make_buffer( + self.max_num_reqs, dtype=torch.int64 + ) self.num_discarded_requests = 0 def _get_drafter(self): - return get_spec_decode_method(self.speculative_config.method, - self.vllm_config, self.device, self) + return get_spec_decode_method( + self.speculative_config.method, self.vllm_config, self.device, self + ) def _may_pad_kv_consumer_num_seq(self): # For Full Graph + MTP in a PD (Prefill/Decode) disaggregation scenario, @@ -635,28 +660,29 @@ def _init_mc2_tokens_capacity(self): max_num_tokens = self.compilation_config.cudagraph_capture_sizes[0] else: # NOTE: To save memory, we cap the 
max number of tokens to 512. - max_num_tokens = min( - self.max_num_reqs * self.uniform_decode_query_len, 512) + max_num_tokens = min(self.max_num_reqs * self.uniform_decode_query_len, 512) tp_size = self.parallel_config.tensor_parallel_size # Use integer arithmetic for ceiling division. num_tokens_per_tp_rank = (max_num_tokens + tp_size - 1) // tp_size self.mc2_tokens_capacity: int = num_tokens_per_tp_rank * tp_size - def _make_buffer(self, - *size: Union[int, torch.SymInt], - dtype: torch.dtype, - numpy: bool = True) -> CpuGpuBuffer: + def _make_buffer( + self, *size: Union[int, torch.SymInt], dtype: torch.dtype, numpy: bool = True + ) -> CpuGpuBuffer: # Bfloat16 torch tensors cannot be directly cast to a numpy array, so # if a bfloat16 buffer is needed without a corresponding numpy array, # don't bother instantiating the numpy array. - return CpuGpuBuffer(*size, - dtype=dtype, - device=self.device, - pin_memory=self.pin_memory, - with_numpy=numpy) + return CpuGpuBuffer( + *size, + dtype=dtype, + device=self.device, + pin_memory=self.pin_memory, + with_numpy=numpy, + ) def _update_states_after_model_execute( - self, output_token_ids: torch.Tensor) -> None: + self, output_token_ids: torch.Tensor + ) -> None: """Update the cached states after model execution. This is used for MTP/EAGLE for hybrid models, as in linear attention, @@ -669,22 +695,42 @@ def _update_states_after_model_execute( return # Find the number of accepted tokens for each sequence. 
- num_accepted_tokens = (torch.cat( - [ - output_token_ids, - torch.full((output_token_ids.size(0), 1), - -1, - device=output_token_ids.device), - ], - dim=1) == -1).int().argmax(-1).cpu().numpy() + num_accepted_tokens = ( + ( + torch.cat( + [ + output_token_ids, + torch.full( + (output_token_ids.size(0), 1), + -1, + device=output_token_ids.device, + ), + ], + dim=1, + ) + == -1 + ) + .int() + .argmax(-1) + .cpu() + .numpy() + ) for i, num_tokens in enumerate(num_accepted_tokens): self.input_batch.num_accepted_tokens_cpu[i] = num_tokens def _use_aclgraph(self) -> bool: if vllm_version_is("0.11.0"): - return self.compilation_config.cudagraph_mode != CUDAGraphMode.NONE and self.compilation_config.level == CompilationLevel.PIECEWISE and not self.model_config.enforce_eager + return ( + self.compilation_config.cudagraph_mode != CUDAGraphMode.NONE + and self.compilation_config.level == CompilationLevel.PIECEWISE + and not self.model_config.enforce_eager + ) else: - return self.compilation_config.cudagraph_mode != CUDAGraphMode.NONE and self.compilation_config.mode == CompilationMode.VLLM_COMPILE and not self.model_config.enforce_eager + return ( + self.compilation_config.cudagraph_mode != CUDAGraphMode.NONE + and self.compilation_config.mode == CompilationMode.VLLM_COMPILE + and not self.model_config.enforce_eager + ) def _update_states(self, scheduler_output: "SchedulerOutput") -> None: # Remove finished requests from the cached states. 
@@ -723,8 +769,10 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: sampling_params = new_req_data.sampling_params pooling_params = new_req_data.pooling_params - if sampling_params and \ - sampling_params.sampling_type == SamplingType.RANDOM_SEED: + if ( + sampling_params + and sampling_params.sampling_type == SamplingType.RANDOM_SEED + ): generator = torch.Generator(device=self.device) generator.manual_seed(sampling_params.seed) else: @@ -732,7 +780,8 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: if pooling_params: assert (task := pooling_params.task) is not None, ( - "You did not set `task` in the API") + "You did not set `task` in the API" + ) model = cast(VllmModelForPooling, self.get_model()) to_update = model.pooler.get_pooling_updates(task) to_update.apply(pooling_params) @@ -779,21 +828,20 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: new_token_ids = req_data.new_token_ids[i] # Add the sampled token(s) from the previous step (if any). # This doesn't include "unverified" tokens like spec tokens. - num_new_tokens = (num_computed_tokens + len(new_token_ids) - - req_state.num_tokens) + num_new_tokens = ( + num_computed_tokens + len(new_token_ids) - req_state.num_tokens + ) if num_new_tokens == 1: # Avoid slicing list in most common case. req_state.output_token_ids.append(new_token_ids[-1]) elif num_new_tokens > 0: - req_state.output_token_ids.extend( - new_token_ids[-num_new_tokens:]) + req_state.output_token_ids.extend(new_token_ids[-num_new_tokens:]) # Update the block IDs. if not resumed_from_preemption: if new_block_ids is not None: # Append the new blocks to the existing block IDs. 
- for block_ids, new_ids in zip(req_state.block_ids, - new_block_ids): + for block_ids, new_ids in zip(req_state.block_ids, new_block_ids): block_ids.extend(new_ids) else: assert new_block_ids is not None @@ -810,11 +858,9 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: continue # Update the persistent batch. - self.input_batch.num_computed_tokens_cpu[req_index] = ( - num_computed_tokens) + self.input_batch.num_computed_tokens_cpu[req_index] = num_computed_tokens if new_block_ids is not None: - self.input_batch.block_table.append_row( - new_block_ids, req_index) + self.input_batch.block_table.append_row(new_block_ids, req_index) # For the last rank, we don't need to update the token_ids_cpu # because the sampled tokens are already cached. @@ -823,21 +869,22 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: start_token_index = num_computed_tokens end_token_index = num_computed_tokens + len(new_token_ids) self.input_batch.token_ids_cpu[ - req_index, - start_token_index:end_token_index] = new_token_ids - self.input_batch.num_tokens_no_spec[ - req_index] = end_token_index + req_index, start_token_index:end_token_index + ] = new_token_ids + self.input_batch.num_tokens_no_spec[req_index] = end_token_index self.input_batch.num_tokens[req_index] = end_token_index # Add spec_token_ids to token_ids_cpu. - spec_token_ids = ( - scheduler_output.scheduled_spec_decode_tokens.get(req_id, ())) + spec_token_ids = scheduler_output.scheduled_spec_decode_tokens.get( + req_id, () + ) if spec_token_ids: num_spec_tokens = len(spec_token_ids) start_index = self.input_batch.num_tokens_no_spec[req_index] end_token_index = start_index + num_spec_tokens self.input_batch.token_ids_cpu[ - req_index, start_index:end_token_index] = spec_token_ids + req_index, start_index:end_token_index + ] = spec_token_ids # NOTE(woosuk): `num_tokens` here may include spec tokens. 
self.input_batch.num_tokens[req_index] += num_spec_tokens @@ -878,7 +925,7 @@ def _init_mrope_positions(self, req_state: CachedRequestState): use_audio_in_video = True if vllm_version_is("0.11.0"): - req_state.mrope_positions, req_state.mrope_position_delta = \ + req_state.mrope_positions, req_state.mrope_position_delta = ( MRotaryEmbedding.get_input_positions_tensor( req_state.prompt_token_ids, hf_config=self.model_config.hf_config, @@ -888,9 +935,10 @@ def _init_mrope_positions(self, req_state: CachedRequestState): audio_feature_lengths=audio_feature_lengths, use_audio_in_video=use_audio_in_video, ) + ) else: if supports_mrope(self.model): - req_state.mrope_positions, req_state.mrope_position_delta = \ + req_state.mrope_positions, req_state.mrope_position_delta = ( self.model.get_mrope_input_positions( req_state.prompt_token_ids, hf_config=self.model_config.hf_config, @@ -900,10 +948,11 @@ def _init_mrope_positions(self, req_state: CachedRequestState): audio_feature_lengths=audio_feature_lengths, use_audio_in_video=use_audio_in_video, ) + ) def _sync_metadata_across_dp( - self, num_tokens: int, - with_prefill: bool) -> tuple[int, Optional[torch.Tensor], bool]: + self, num_tokens: int, with_prefill: bool + ) -> tuple[int, Optional[torch.Tensor], bool]: # TODO: In vLLM, the only thing that needs to be synced is num_tokens, but in # our case, we still need to sync the other two flags as well. 
So we need to # include them in the all_reduce operation, and more over, we CANNOT skip it @@ -914,15 +963,15 @@ def _sync_metadata_across_dp( return num_tokens, None, with_prefill # Sync num_tokens, with_prefill across dp ranks - num_tokens_tensor = torch.tensor([ - num_tokens if i == self.dp_rank else 0 for i in range(self.dp_size) - ], - dtype=torch.int32, - device="npu") + num_tokens_tensor = torch.tensor( + [num_tokens if i == self.dp_rank else 0 for i in range(self.dp_size)], + dtype=torch.int32, + device="npu", + ) - flags_tensor = torch.tensor([int(with_prefill)], - dtype=torch.int32, - device="npu") + flags_tensor = torch.tensor( + [int(with_prefill)], dtype=torch.int32, device="npu" + ) packed_tensor = torch.cat([num_tokens_tensor, flags_tensor]) @@ -936,10 +985,9 @@ def _sync_metadata_across_dp( global_with_prefill = bool(synced_flags[0]) # Create a tensor for num_tokens_after_padding - num_tokens_after_padding = torch.tensor([max_tokens_across_dp] * - self.dp_size, - device="cpu", - dtype=torch.int32) + num_tokens_after_padding = torch.tensor( + [max_tokens_across_dp] * self.dp_size, device="cpu", dtype=torch.int32 + ) return max_tokens_across_dp, num_tokens_after_padding, global_with_prefill @@ -974,21 +1022,28 @@ def get_supported_tasks(self) -> "tuple[SupportedTask, ...]": return tuple(tasks) - def _make_attention_mask(self, seq_lens, position, - attn_state) -> torch.Tensor: + def _make_attention_mask(self, seq_lens, position, attn_state) -> torch.Tensor: if self.pcp_size > 1: return None if self.attn_mask_builder is None: raise ValueError("Attn mask builder is None") # Pooling situation. - if self.model_config.runner_type == "pooling" and self.model_config.pooler_config.pooling_type == "CLS": + if ( + self.model_config.runner_type == "pooling" + and self.model_config.pooler_config.pooling_type == "CLS" + ): return self.attn_mask_builder.get_pooling_mask(self.device) # Chunk Prefill situation. 
- elif attn_state == AscendAttentionState.ChunkedPrefill and not self.vllm_config.model_config.use_mla and not self.use_sparse: + elif ( + attn_state == AscendAttentionState.ChunkedPrefill + and not self.vllm_config.model_config.use_mla + and not self.use_sparse + ): if self.dcp_size > 1: max_seq_len = max(seq_lens.max().item(), 0) return self.attn_mask_builder.get_attn_mask( - max_seq_len, self.dtype, self.device) + max_seq_len, self.dtype, self.device + ) else: return self.attn_mask_builder.get_splitfuse_attn_mask() @@ -996,11 +1051,11 @@ def _make_attention_mask(self, seq_lens, position, elif attn_state == AscendAttentionState.PrefillNoCache: max_seq_len = max(seq_lens.max().item(), 0) return self.attn_mask_builder.get_attn_mask( - max_seq_len, self.dtype, self.device) + max_seq_len, self.dtype, self.device + ) # Prefill with cache hit. elif attn_state == AscendAttentionState.PrefillCacheHit: - return self.attn_mask_builder.get_attn_mask( - 2048, self.dtype, self.device) + return self.attn_mask_builder.get_attn_mask(2048, self.dtype, self.device) # Decode-only situation. 
else: return None @@ -1011,18 +1066,15 @@ def _calc_mrope_positions(self, scheduler_output: "SchedulerOutput"): req = self.requests[req_id] assert req.mrope_positions is not None - num_computed_tokens = \ - self.input_batch.num_computed_tokens_cpu[index] - num_scheduled_tokens = \ - scheduler_output.num_scheduled_tokens[req_id] + num_computed_tokens = self.input_batch.num_computed_tokens_cpu[index] + num_scheduled_tokens = scheduler_output.num_scheduled_tokens[req_id] num_prompt_tokens = length_from_prompt_token_ids_or_embeds( - req.prompt_token_ids, req.prompt_embeds) + req.prompt_token_ids, req.prompt_embeds + ) if num_computed_tokens + num_scheduled_tokens > num_prompt_tokens: - prompt_part_len = max(0, - num_prompt_tokens - num_computed_tokens) - completion_part_len = max( - 0, num_scheduled_tokens - prompt_part_len) + prompt_part_len = max(0, num_prompt_tokens - num_computed_tokens) + completion_part_len = max(0, num_scheduled_tokens - prompt_part_len) else: prompt_part_len = num_scheduled_tokens completion_part_len = 0 @@ -1036,8 +1088,9 @@ def _calc_mrope_positions(self, scheduler_output: "SchedulerOutput"): src_start = num_computed_tokens src_end = num_computed_tokens + prompt_part_len - self.mrope_positions_cpu[:, dst_start:dst_end] = \ - req.mrope_positions[:, src_start:src_end] + self.mrope_positions_cpu[:, dst_start:dst_end] = req.mrope_positions[ + :, src_start:src_end + ] mrope_pos_ptr += prompt_part_len @@ -1062,7 +1115,8 @@ def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"): # Batch the multi-modal inputs. mm_kwargs, mm_hashes_pos = self._batch_mm_kwargs_from_scheduler( - scheduler_output) + scheduler_output + ) encoder_outputs = [] if vllm_version_is("0.11.0"): @@ -1087,8 +1141,7 @@ def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"): # 2. A list or tuple (length: num_items) of tensors, each of shape # (feature_size, hidden_size) in case the feature size is dynamic # depending on the input multimodal items. 
- curr_group_outputs = self.model.get_multimodal_embeddings( - **mm_kwargs_group) + curr_group_outputs = self.model.get_multimodal_embeddings(**mm_kwargs_group) sanity_check_mm_encoder_outputs( curr_group_outputs, @@ -1141,19 +1194,20 @@ def _gather_mm_embeddings_0110( self, scheduler_output: "SchedulerOutput", ) -> list[torch.Tensor]: - def _iter_mm_features(req_state: CachedRequestState): assert req_state.mm_features is not None for mm_feature in req_state.mm_features: pos_info = mm_feature.mm_position - yield mm_feature.identifier, pos_info, getattr( - pos_info, "is_embed", None) + yield ( + mm_feature.identifier, + pos_info, + getattr(pos_info, "is_embed", None), + ) mm_embeds: list[torch.Tensor] = [] for req_id in self.input_batch.req_ids: - num_scheduled_tokens = scheduler_output.num_scheduled_tokens[ - req_id] + num_scheduled_tokens = scheduler_output.num_scheduled_tokens[req_id] req_state = self.requests[req_id] num_computed_tokens = req_state.num_computed_tokens @@ -1174,8 +1228,7 @@ def _iter_mm_features(req_state: CachedRequestState): assert start_idx < end_idx encoder_output = self.encoder_cache.get(mm_hash, None) - assert encoder_output is not None, \ - f"Encoder cache miss for {mm_hash}." + assert encoder_output is not None, f"Encoder cache miss for {mm_hash}." 
if is_embed is not None: is_embed = is_embed[start_idx:end_idx] @@ -1203,11 +1256,9 @@ def _gather_mm_embeddings( for req_id in self.input_batch.req_ids: mm_embeds_req: list[torch.Tensor] = [] - num_scheduled_tokens = scheduler_output.num_scheduled_tokens[ - req_id] + num_scheduled_tokens = scheduler_output.num_scheduled_tokens[req_id] req_state = self.requests[req_id] - num_computed_tokens = \ - req_state.num_computed_tokens + shift_computed_tokens + num_computed_tokens = req_state.num_computed_tokens + shift_computed_tokens for mm_feature in req_state.mm_features: # type: ignore pos_info = mm_feature.mm_position @@ -1235,15 +1286,15 @@ def _gather_mm_embeddings( mm_hash = mm_feature.identifier encoder_output = self.encoder_cache.get(mm_hash, None) - assert encoder_output is not None,\ - f"Encoder cache miss for {mm_hash}." + assert encoder_output is not None, f"Encoder cache miss for {mm_hash}." if (is_embed := pos_info.is_embed) is not None: is_embed = is_embed[start_idx:end_idx] req_start_pos = req_start_idx + start_pos - num_computed_tokens - is_mm_embed[req_start_pos+start_idx:req_start_pos + end_idx] \ - = True if is_embed is None else is_embed + is_mm_embed[req_start_pos + start_idx : req_start_pos + end_idx] = ( + True if is_embed is None else is_embed + ) mm_embeds_item = gather_mm_placeholders( encoder_output[start_idx:end_idx], @@ -1278,8 +1329,9 @@ def _get_cumsum_and_arange( return cu_num_tokens, arange - def _prepare_input_ids(self, total_num_scheduled_tokens: int, - cu_num_tokens: np.ndarray) -> None: + def _prepare_input_ids( + self, total_num_scheduled_tokens: int, cu_num_tokens: np.ndarray + ) -> None: """Prepare the input IDs for the current batch. 
Carefully handles the `prev_sampled_token_ids` which can be cached @@ -1289,8 +1341,8 @@ def _prepare_input_ids(self, total_num_scheduled_tokens: int, if self.input_batch.prev_sampled_token_ids is None: # Normal scheduling case self.input_ids[:total_num_scheduled_tokens].copy_( - self.input_ids_cpu[:total_num_scheduled_tokens], - non_blocking=True) + self.input_ids_cpu[:total_num_scheduled_tokens], non_blocking=True + ) if self.is_multimodal_model or self.enable_prompt_embeds: self.inputs_embeds.copy_to_gpu(total_num_scheduled_tokens) self.is_token_ids.copy_to_gpu(total_num_scheduled_tokens) @@ -1312,15 +1364,15 @@ def _prepare_input_ids(self, total_num_scheduled_tokens: int, # last token in each common request. flattened_index = cu_num_tokens[cur_index].item() - 1 flattened_indices.append(flattened_index) - indices_match &= (prev_index == flattened_index) + indices_match &= prev_index == flattened_index max_flattened_index = max(max_flattened_index, flattened_index) num_commmon_tokens = len(flattened_indices) if num_commmon_tokens < total_num_scheduled_tokens: # If not all requests are decodes from the last iteration, # We need to copy the input_ids_cpu to the NPU first. self.input_ids[:total_num_scheduled_tokens].copy_( - self.input_ids_cpu[:total_num_scheduled_tokens], - non_blocking=True) + self.input_ids_cpu[:total_num_scheduled_tokens], non_blocking=True + ) if self.is_multimodal_model or self.enable_prompt_embeds: self.inputs_embeds.copy_to_gpu(total_num_scheduled_tokens) self.is_token_ids.copy_to_gpu(total_num_scheduled_tokens) @@ -1334,26 +1386,26 @@ def _prepare_input_ids(self, total_num_scheduled_tokens: int, # The indices are both the same permutation of 0..N-1 so # we can copy directly using a single slice. 
self.input_ids[:num_commmon_tokens].copy_( - self.input_batch.prev_sampled_token_ids[:num_commmon_tokens, - 0], - non_blocking=True) + self.input_batch.prev_sampled_token_ids[:num_commmon_tokens, 0], + non_blocking=True, + ) self.is_token_ids.gpu[:num_commmon_tokens] = True return # Upload the index tensors asynchronously # so the scatter can be non-blocking. - input_ids_index_tensor = torch.tensor(flattened_indices, - dtype=torch.int64, - pin_memory=self.pin_memory).to( - self.device, - non_blocking=True) + input_ids_index_tensor = torch.tensor( + flattened_indices, dtype=torch.int64, pin_memory=self.pin_memory + ).to(self.device, non_blocking=True) prev_common_req_indices_tensor = torch.tensor( - prev_common_req_indices, - dtype=torch.int64, - pin_memory=self.pin_memory).to(self.device, non_blocking=True) - self.input_ids.scatter_(dim=0, - index=input_ids_index_tensor, - src=self.input_batch.prev_sampled_token_ids[ - prev_common_req_indices_tensor, 0]) + prev_common_req_indices, dtype=torch.int64, pin_memory=self.pin_memory + ).to(self.device, non_blocking=True) + self.input_ids.scatter_( + dim=0, + index=input_ids_index_tensor, + src=self.input_batch.prev_sampled_token_ids[ + prev_common_req_indices_tensor, 0 + ], + ) def _may_reorder_batch(self, scheduler_output: "SchedulerOutput") -> None: """ @@ -1377,15 +1429,27 @@ def _may_reorder_batch(self, scheduler_output: "SchedulerOutput") -> None: reorder_batch_to_split_decodes_and_prefills( self.input_batch, scheduler_output, - decode_threshold=self.reorder_batch_threshold) + decode_threshold=self.reorder_batch_threshold, + ) def _prepare_inputs( self, scheduler_output: "SchedulerOutput", intermediate_tensors: Optional[IntermediateTensors] = None, - ) -> tuple[dict[str, Any], torch.Tensor, np.ndarray, int, torch.Tensor, - int, torch.Tensor, SpecDecodeMetadata, Optional[torch.Tensor], - Optional[torch.Tensor], Optional[torch.Tensor], int]: + ) -> tuple[ + dict[str, Any], + torch.Tensor, + np.ndarray, + int, + 
torch.Tensor, + int, + torch.Tensor, + SpecDecodeMetadata, + Optional[torch.Tensor], + Optional[torch.Tensor], + Optional[torch.Tensor], + int, + ]: total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens assert total_num_scheduled_tokens > 0 num_reqs = self.input_batch.num_reqs @@ -1400,58 +1464,60 @@ def _prepare_inputs( tokens = [scheduler_output.num_scheduled_tokens[i] for i in req_ids] num_scheduled_tokens = np.array(tokens, dtype=np.int32) - req_indices = np.repeat(self.arange_np[:num_reqs], - num_scheduled_tokens) + req_indices = np.repeat(self.arange_np[:num_reqs], num_scheduled_tokens) _, arange = self._get_cumsum_and_arange(num_scheduled_tokens) positions_np = np.add( self.input_batch.num_computed_tokens_cpu[req_indices], arange, ) - self.input_batch.block_table.compute_slot_mapping( - req_indices, positions_np) - self.input_batch.block_table.commit_slot_mapping( - total_num_scheduled_tokens) - tokens, position_pcp, pcp_unpad_mask = self._update_tokens_for_pcp( - tokens) + self.input_batch.block_table.compute_slot_mapping(req_indices, positions_np) + self.input_batch.block_table.commit_slot_mapping(total_num_scheduled_tokens) + tokens, position_pcp, pcp_unpad_mask = self._update_tokens_for_pcp(tokens) num_scheduled_tokens = np.array(tokens, dtype=np.int32) # update total_num_scheduled_tokens total_num_scheduled_tokens = sum(num_scheduled_tokens[:num_reqs]) total_num_pcp_pads = sum(self.num_pcp_pads) max_num_scheduled_tokens = max(tokens) - num_valid_tokens = np.array([ - num_tokens - - len(scheduler_output.scheduled_spec_decode_tokens.get(i, [])) - for num_tokens, i in zip(tokens, req_ids) - ], - dtype=np.int32) - - if (self.use_aclgraph and total_num_scheduled_tokens - <= self.aclgraph_batch_sizes[-1]): + num_valid_tokens = np.array( + [ + num_tokens + - len(scheduler_output.scheduled_spec_decode_tokens.get(i, [])) + for num_tokens, i in zip(tokens, req_ids) + ], + dtype=np.int32, + ) + + if ( + self.use_aclgraph + and 
total_num_scheduled_tokens <= self.aclgraph_batch_sizes[-1] + ): # Add padding to the batch size. num_input_tokens = self.vllm_config.pad_for_cudagraph( - total_num_scheduled_tokens) + total_num_scheduled_tokens + ) elif self.use_aclgraph and enable_sp(self.vllm_config): # When using aclgraph, if total_num_scheduled_tokens exceeds the maximum graph size, # the model will fall back to running its FX graph in eager mode. # In this case, when sequence parallelism is enabled, we need to pad tokens to align # with tp_size because pad_size cannot be captured by the FX graph tp_size = self.vllm_config.parallel_config.tensor_parallel_size - num_input_tokens = math.ceil( - total_num_scheduled_tokens / tp_size) * tp_size + num_input_tokens = math.ceil(total_num_scheduled_tokens / tp_size) * tp_size else: # Eager mode. num_input_tokens = total_num_scheduled_tokens # Get the attention state. - attn_state = self._build_attn_state(num_reqs, num_scheduled_tokens, - num_valid_tokens) + attn_state = self._build_attn_state( + num_reqs, num_scheduled_tokens, num_valid_tokens + ) self.attn_state = attn_state # type: ignore # Determine if it's a splitfuse batch with_prefill = attn_state not in [ - AscendAttentionState.DecodeOnly, AscendAttentionState.SpecDecoding + AscendAttentionState.DecodeOnly, + AscendAttentionState.SpecDecoding, ] self.query_lens = torch.from_numpy(num_scheduled_tokens) @@ -1459,9 +1525,9 @@ def _prepare_inputs( # Get info across DP ranks. 
# NOTE: maybe_padded_num_tokens is only used when using TorchAir with DP, # Otherwise, it's just max_tokens_across_dp_cpu - (maybe_padded_num_tokens, num_tokens_across_dp, - with_prefill) = self._sync_metadata_across_dp(num_input_tokens, - with_prefill) + (maybe_padded_num_tokens, num_tokens_across_dp, with_prefill) = ( + self._sync_metadata_across_dp(num_input_tokens, with_prefill) + ) # TODO: Now that num_input_tokens is basically identical with maybe_padded_num_tokens # We should consider removing maybe_padded_num_tokens later @@ -1473,19 +1539,19 @@ def _prepare_inputs( # Get request indices. # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2] - req_indices = np.repeat(self.arange_np[:num_reqs], - num_scheduled_tokens) + req_indices = np.repeat(self.arange_np[:num_reqs], num_scheduled_tokens) # cu_num_tokens: [2, 5, 3] -> [2, 7, 10] # arange: [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] - cu_num_tokens, arange = self._get_cumsum_and_arange( - num_scheduled_tokens) + cu_num_tokens, arange = self._get_cumsum_and_arange(num_scheduled_tokens) if self.pcp_size > 1: positions_np = self.positions_np[:total_num_scheduled_tokens] - np.add(self.input_batch.num_computed_tokens_cpu[req_indices], - position_pcp[:total_num_scheduled_tokens], - out=positions_np) + np.add( + self.input_batch.num_computed_tokens_cpu[req_indices], + position_pcp[:total_num_scheduled_tokens], + out=positions_np, + ) else: self.positions_np[:total_num_scheduled_tokens] = positions_np @@ -1497,35 +1563,41 @@ def _prepare_inputs( # Only relevant for models using M-RoPE (e.g, Qwen2-VL) self.mrope_positions[:, :total_num_scheduled_tokens].copy_( self.mrope_positions_cpu[:, :total_num_scheduled_tokens], - non_blocking=True) + non_blocking=True, + ) # Get token indices. # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2] # where M is the max_model_len. 
- token_indices = (positions_np + - req_indices * self.input_batch.token_ids_cpu.shape[1]) + token_indices = ( + positions_np + req_indices * self.input_batch.token_ids_cpu.shape[1] + ) token_indices_tensor = torch.from_numpy(token_indices) # Prepare input_ids. # NOTE(woosuk): We use torch.index_select instead of np.take here # because torch.index_select is much faster than np.take for large # tensors. - torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(), - 0, - token_indices_tensor, - out=self.input_ids_cpu[:total_num_scheduled_tokens]) + torch.index_select( + self.input_batch.token_ids_cpu_tensor.flatten(), + 0, + token_indices_tensor, + out=self.input_ids_cpu[:total_num_scheduled_tokens], + ) is_token_ids = self.input_batch.is_token_ids.flatten() torch.index_select( is_token_ids, 0, token_indices_tensor, - out=self.is_token_ids.cpu[:total_num_scheduled_tokens]) + out=self.is_token_ids.cpu[:total_num_scheduled_tokens], + ) # Because we did not pre-allocate a massive prompt_embeds CPU tensor on # the InputBatch, we need to fill in the prompt embeds into the expected # spots in the GpuModelRunner's pre-allocated prompt_embeds tensor. 
- if self.input_batch.req_prompt_embeds and (self.is_multimodal_model or - self.enable_prompt_embeds): + if self.input_batch.req_prompt_embeds and ( + self.is_multimodal_model or self.enable_prompt_embeds + ): output_idx = 0 for req_idx in range(num_reqs): num_sched = num_scheduled_tokens[req_idx] @@ -1554,26 +1626,25 @@ def _prepare_inputs( actual_num_sched = actual_end - start_pos if actual_num_sched > 0: - self.inputs_embeds.cpu[output_idx:output_idx + - actual_num_sched].copy_( - req_embeds[start_pos:actual_end] - ) + self.inputs_embeds.cpu[ + output_idx : output_idx + actual_num_sched + ].copy_(req_embeds[start_pos:actual_end]) output_idx += num_sched self.query_start_loc_np[0] = 0 - self.query_start_loc_np[1:num_reqs + 1] = cu_num_tokens - self.query_start_loc[:num_reqs + 1].copy_( - self.query_start_loc_cpu[:num_reqs + 1], non_blocking=True) + self.query_start_loc_np[1 : num_reqs + 1] = cu_num_tokens + self.query_start_loc[: num_reqs + 1].copy_( + self.query_start_loc_cpu[: num_reqs + 1], non_blocking=True + ) self.seq_lens_np[:num_reqs] = ( - self.input_batch.num_computed_tokens_cpu[:num_reqs] + - num_scheduled_tokens) - self.seq_lens[:num_reqs].copy_(self.seq_lens_cpu[:num_reqs], - non_blocking=True) + self.input_batch.num_computed_tokens_cpu[:num_reqs] + num_scheduled_tokens + ) + self.seq_lens[:num_reqs].copy_(self.seq_lens_cpu[:num_reqs], non_blocking=True) # Fill unused with -1. 
Needed for reshape_and_cache - self.query_start_loc[num_reqs + 1:].fill_(-1) + self.query_start_loc[num_reqs + 1 :].fill_(-1) self.seq_lens[num_reqs:].fill_(0) self.query_lens = torch.from_numpy(num_scheduled_tokens) @@ -1582,18 +1653,20 @@ def _prepare_inputs( self._prepare_input_ids(total_num_scheduled_tokens, cu_num_tokens) self.positions_cpu[total_num_scheduled_tokens:num_input_tokens].zero_() self.positions[:num_input_tokens].copy_( - self.positions_cpu[:num_input_tokens], non_blocking=True) + self.positions_cpu[:num_input_tokens], non_blocking=True + ) # Make Attention metadata positions_cpu = self.positions_cpu[:num_input_tokens] positions = self.positions[:num_input_tokens] seq_lens_cpu = self.seq_lens_cpu[:num_reqs] - attn_state = self._build_attn_state(num_reqs, num_scheduled_tokens, - num_valid_tokens) - self.attn_mask = self._make_attention_mask(seq_lens=seq_lens_cpu, - position=positions_cpu, - attn_state=attn_state) + attn_state = self._build_attn_state( + num_reqs, num_scheduled_tokens, num_valid_tokens + ) + self.attn_mask = self._make_attention_mask( + seq_lens=seq_lens_cpu, position=positions_cpu, attn_state=attn_state + ) self.attn_state = attn_state # type: ignore self.with_prefill = with_prefill @@ -1603,9 +1676,7 @@ def _prepare_inputs( # Record the index of requests that should not be sampled, # so that we could clear the sampled tokens before returning - num_tokens = [ - self.requests[r].num_tokens for r in self.input_batch.req_ids - ] + num_tokens = [self.requests[r].num_tokens for r in self.input_batch.req_ids] num_tokens_np = np.array(num_tokens, dtype=np.int32) num_reqs = self.input_batch.num_reqs if self.pcp_size == 1: @@ -1613,14 +1684,15 @@ def _prepare_inputs( else: # while pcp > 1, we need the original num_scheduled_tokens before split # to calculate discard_requests_mask - original_seq_lens_np = ( - self.input_batch.num_computed_tokens_cpu[:num_reqs] + - np.array(list(scheduler_output.num_scheduled_tokens.values()))) + 
original_seq_lens_np = self.input_batch.num_computed_tokens_cpu[ + :num_reqs + ] + np.array(list(scheduler_output.num_scheduled_tokens.values())) discard_requests_mask = original_seq_lens_np < num_tokens_np discard_request_indices = np.nonzero(discard_requests_mask)[0] self.num_discarded_requests = len(discard_request_indices) - self.discard_request_indices.np[:self.num_discarded_requests] = ( - discard_request_indices) + self.discard_request_indices.np[: self.num_discarded_requests] = ( + discard_request_indices + ) self.discard_request_indices.copy_to_gpu(self.num_discarded_requests) # _prepare_inputs may reorder the batch, so we must gather @@ -1637,12 +1709,12 @@ def _prepare_inputs( mm_embeds = self._gather_mm_embeddings_0110(scheduler_output) if mm_embeds: inputs_embeds = self.model.get_input_embeddings( - input_ids, mm_embeds) + input_ids, mm_embeds + ) else: inputs_embeds = self.model.get_input_embeddings(input_ids) else: - mm_embeds, is_mm_embed = self._gather_mm_embeddings( - scheduler_output) + mm_embeds, is_mm_embed = self._gather_mm_embeddings(scheduler_output) inputs_embeds = self.model.get_input_embeddings( input_ids, @@ -1651,8 +1723,7 @@ def _prepare_inputs( ) # TODO(woosuk): Avoid the copy. Optimize. - self.inputs_embeds.gpu[:total_num_scheduled_tokens].copy_( - inputs_embeds) + self.inputs_embeds.gpu[:total_num_scheduled_tokens].copy_(inputs_embeds) inputs_embeds = self.inputs_embeds.gpu[:num_input_tokens] input_ids = None elif self.enable_prompt_embeds and get_pp_group().is_first_rank: @@ -1668,14 +1739,15 @@ def _prepare_inputs( # If a batch only has token ids, then including the embedding layer # in the acl graph will be more performant (like in the else case # below). 
- token_ids_idx = self.is_token_ids.gpu[:total_num_scheduled_tokens] \ - .nonzero(as_tuple=False) \ + token_ids_idx = ( + self.is_token_ids.gpu[:total_num_scheduled_tokens] + .nonzero(as_tuple=False) .squeeze(1) + ) # Some tokens ids may need to become embeds if token_ids_idx.numel() > 0: token_ids = self.input_ids[token_ids_idx] - tokens_to_embeds = self.model.get_input_embeddings( - input_ids=token_ids) + tokens_to_embeds = self.model.get_input_embeddings(input_ids=token_ids) self.inputs_embeds.gpu[token_ids_idx] = tokens_to_embeds inputs_embeds = self.inputs_embeds.gpu[:num_input_tokens] @@ -1689,8 +1761,12 @@ def _prepare_inputs( inputs_embeds = None positions = self.positions[:num_input_tokens] input_ids, positions = self._update_input_ids_and_positions( - input_ids, positions, num_input_tokens, with_prefill, - maybe_padded_num_tokens) + input_ids, + positions, + num_input_tokens, + with_prefill, + maybe_padded_num_tokens, + ) if get_pp_group().is_first_rank: intermediate_tensors = None @@ -1699,14 +1775,13 @@ def _prepare_inputs( assert self.intermediate_tensors is not None for k, v in intermediate_tensors.items(): self.intermediate_tensors[k][:num_input_tokens].copy_( - v[:num_input_tokens], non_blocking=True) - intermediate_tensors = IntermediateTensors({ - k: v[:num_input_tokens] - for k, v in self.intermediate_tensors.items() - }) - - use_spec_decode = len( - scheduler_output.scheduled_spec_decode_tokens) > 0 + v[:num_input_tokens], non_blocking=True + ) + intermediate_tensors = IntermediateTensors( + {k: v[:num_input_tokens] for k, v in self.intermediate_tensors.items()} + ) + + use_spec_decode = len(scheduler_output.scheduled_spec_decode_tokens) > 0 if not use_spec_decode: # NOTE(woosuk): Due to chunked prefills, the batch may contain # partial requests. While we should not sample any token @@ -1714,9 +1789,11 @@ def _prepare_inputs( # We will ignore the sampled tokens from the partial requests. # TODO: Support prompt logprobs. 
spec_decode_metadata = None - logits_indices = torch.from_numpy( - cu_num_tokens - ) * self.pcp_size - self.num_pcp_pads[:num_reqs] - 1 + logits_indices = ( + torch.from_numpy(cu_num_tokens) * self.pcp_size + - self.num_pcp_pads[:num_reqs] + - 1 + ) logits_indices = logits_indices.to(self.device, non_blocking=True) else: # pcp not supported now @@ -1725,13 +1802,16 @@ def _prepare_inputs( # Iterate over the dictionary rather than all requests since not all # requests have draft tokens. num_draft_tokens = np.zeros(num_reqs, dtype=np.int32) - for req_id, draft_token_ids in ( - scheduler_output.scheduled_spec_decode_tokens.items()): + for ( + req_id, + draft_token_ids, + ) in scheduler_output.scheduled_spec_decode_tokens.items(): req_idx = self.input_batch.req_id_to_index[req_id] num_draft_tokens[req_idx] = len(draft_token_ids) spec_decode_metadata = self._calc_spec_decode_metadata( - num_draft_tokens, cu_num_tokens) + num_draft_tokens, cu_num_tokens + ) logits_indices = spec_decode_metadata.logits_indices self.num_draft_tokens.np[:num_reqs] = num_draft_tokens self.num_draft_tokens.np[num_reqs:].fill(0) @@ -1739,34 +1819,40 @@ def _prepare_inputs( # Used in the below loop. 
# query_start_loc_cpu = self.query_start_loc.cpu[:num_reqs + 1] - num_computed_tokens_cpu = ( - self.input_batch.num_computed_tokens_cpu_tensor[:num_reqs]) + num_computed_tokens_cpu = self.input_batch.num_computed_tokens_cpu_tensor[ + :num_reqs + ] self.spec_decode_common_attn_metadata = None if use_spec_decode and self.need_accepted_tokens: self.num_accepted_tokens.np[:num_reqs] = ( - self.input_batch.num_accepted_tokens_cpu[:num_reqs]) + self.input_batch.num_accepted_tokens_cpu[:num_reqs] + ) self.num_accepted_tokens.np[num_reqs:].fill(1) self.num_accepted_tokens.copy_to_gpu() is_prefill = len(scheduler_output.scheduled_new_reqs) > 0 if self.speculative_config and self.pcp_size > 1 and is_prefill: self._generate_pcp_mtp_input( - num_reqs, scheduler_output.total_num_scheduled_tokens, - scheduler_output.num_scheduled_tokens) + num_reqs, + scheduler_output.total_num_scheduled_tokens, + scheduler_output.num_scheduled_tokens, + ) # prepare pcp meta data long_seq_metadata = self._generate_pcp_metadata( - total_num_scheduled_tokens, seq_lens_cpu) + total_num_scheduled_tokens, seq_lens_cpu + ) # Prepare the attention metadata for each KV cache group and make layers # in the same group share the same metadata. for kv_cache_group_id, kv_cache_group_spec in enumerate( - self.kv_cache_config.kv_cache_groups): - slot_mapping_size = (total_num_scheduled_tokens - if self.pcp_size == 1 else - total_num_scheduled_tokens * self.pcp_size - - total_num_pcp_pads) - if isinstance(kv_cache_group_spec.kv_cache_spec, - EncoderOnlyAttentionSpec): + self.kv_cache_config.kv_cache_groups + ): + slot_mapping_size = ( + total_num_scheduled_tokens + if self.pcp_size == 1 + else total_num_scheduled_tokens * self.pcp_size - total_num_pcp_pads + ) + if isinstance(kv_cache_group_spec.kv_cache_spec, EncoderOnlyAttentionSpec): # Encoder-only layers do not have KV cache, so we need to # create a dummy block table and slot mapping for them. 
blk_table_tensor = torch.zeros( @@ -1775,7 +1861,7 @@ def _prepare_inputs( device=self.device, ) slot_mapping = torch.zeros( - (total_num_scheduled_tokens, ), + (total_num_scheduled_tokens,), dtype=torch.int64, device=self.device, ) @@ -1785,29 +1871,27 @@ def _prepare_inputs( slot_mapping = blk_table.slot_mapping[:slot_mapping_size] blk_table.slot_mapping[slot_mapping_size:].fill_(0) if self.pcp_size > 1: - slot_mapping_for_pcp = blk_table.slot_mapping[: - long_seq_metadata - . - num_actual_tokens_pcp_padded] + slot_mapping_for_pcp = blk_table.slot_mapping[ + : long_seq_metadata.num_actual_tokens_pcp_padded + ] slot_mapping_for_pcp[slot_mapping_size:].fill_(-1) assert pcp_unpad_mask is not None - pcp_padded_slot_mapping = self.pcp_padded_slot_mapping[: - pcp_unpad_mask - . - shape[ - 0]] + pcp_padded_slot_mapping = self.pcp_padded_slot_mapping[ + : pcp_unpad_mask.shape[0] + ] pcp_padded_slot_mapping.fill_(-1) - pcp_padded_slot_mapping[ - pcp_unpad_mask] = slot_mapping_for_pcp[: - slot_mapping_size] - slot_mapping_for_pcp[:long_seq_metadata. 
- num_actual_tokens_pcp_padded] = pcp_padded_slot_mapping + pcp_padded_slot_mapping[pcp_unpad_mask] = slot_mapping_for_pcp[ + :slot_mapping_size + ] + slot_mapping_for_pcp[ + : long_seq_metadata.num_actual_tokens_pcp_padded + ] = pcp_padded_slot_mapping slot_mapping = slot_mapping_for_pcp # Make AscendCommonAttentionMetadata common_attn_metadata = AscendCommonAttentionMetadata( - query_start_loc=self.query_start_loc[:num_reqs + 1], - query_start_loc_cpu=self.query_start_loc_cpu[:num_reqs + 1], + query_start_loc=self.query_start_loc[: num_reqs + 1], + query_start_loc_cpu=self.query_start_loc_cpu[: num_reqs + 1], seq_lens_cpu=self.seq_lens_cpu[:num_reqs], seq_lens=self.seq_lens_cpu[:num_reqs], num_reqs=num_reqs, @@ -1831,54 +1915,74 @@ def _prepare_inputs( prefill_context_parallel_metadata=long_seq_metadata, ) - if self.speculative_config and \ - self.spec_decode_common_attn_metadata is None: + if ( + self.speculative_config + and self.spec_decode_common_attn_metadata is None + ): self.spec_decode_common_attn_metadata = common_attn_metadata for attn_group in self.attn_groups[kv_cache_group_id]: common_prefix_len = 0 extra_attn_metadata_args = {} builder = attn_group.get_metadata_builder() - if isinstance(builder, GDNAttentionMetadataBuilder - ) or self.model_config.runner_type == "pooling": + if ( + isinstance(builder, GDNAttentionMetadataBuilder) + or self.model_config.runner_type == "pooling" + ): if use_spec_decode: extra_attn_metadata_args = dict( - num_accepted_tokens=self.num_accepted_tokens. - gpu[:num_reqs], - num_draft_tokens=self.num_draft_tokens. 
- gpu[:num_reqs], + num_accepted_tokens=self.num_accepted_tokens.gpu[:num_reqs], + num_draft_tokens=self.num_draft_tokens.gpu[:num_reqs], ) attn_metadata_i = builder.build( common_prefix_len=common_prefix_len, common_attn_metadata=common_attn_metadata, - **extra_attn_metadata_args) + **extra_attn_metadata_args, + ) else: attn_metadata_i = builder.build( common_prefix_len=common_prefix_len, common_attn_metadata=common_attn_metadata, model=self.get_model(), - **extra_attn_metadata_args) + **extra_attn_metadata_args, + ) for layer_name in attn_group.layer_names: attn_metadata[layer_name] = attn_metadata_i if lmhead_tp_enable(): - max_num_reqs_across_dp = maybe_padded_num_tokens if not with_prefill else self.max_num_reqs + max_num_reqs_across_dp = ( + maybe_padded_num_tokens if not with_prefill else self.max_num_reqs + ) logits_indices = nn.functional.pad( - logits_indices, - (0, max_num_reqs_across_dp - logits_indices.shape[0])) - - return (attn_metadata, positions, num_scheduled_tokens, - num_input_tokens, num_tokens_across_dp, - maybe_padded_num_tokens, logits_indices, spec_decode_metadata, - input_ids, inputs_embeds, intermediate_tensors, - max_num_scheduled_tokens) - - def _generate_process_reqs_hidden_states(self, attn_metadata, with_prefill, - maybe_padded_num_tokens, - input_ids, positions, - intermediate_tensors, - inputs_embeds): + logits_indices, (0, max_num_reqs_across_dp - logits_indices.shape[0]) + ) + + return ( + attn_metadata, + positions, + num_scheduled_tokens, + num_input_tokens, + num_tokens_across_dp, + maybe_padded_num_tokens, + logits_indices, + spec_decode_metadata, + input_ids, + inputs_embeds, + intermediate_tensors, + max_num_scheduled_tokens, + ) + + def _generate_process_reqs_hidden_states( + self, + attn_metadata, + with_prefill, + maybe_padded_num_tokens, + input_ids, + positions, + intermediate_tensors, + inputs_embeds, + ): assert self.model is not None hidden_states = self.model( input_ids=input_ids, @@ -1888,29 +1992,37 @@ def 
_generate_process_reqs_hidden_states(self, attn_metadata, with_prefill, ) forward_context = get_forward_context() - if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL \ - and not self.use_sparse: + if ( + forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL + and not self.use_sparse + ): # TODO: maybe_padded_num_tokens will be removed, use num_input_tokens instead if self.vllm_config.model_config.use_mla: if self.pcp_size * self.dcp_size > 1: # FIXME: Try using `auto_dispatch_capture=True` - update_mla_attn_dcp_pcp_params(self.update_stream, - forward_context, - maybe_padded_num_tokens, - self.speculative_config) + update_mla_attn_dcp_pcp_params( + self.update_stream, + forward_context, + maybe_padded_num_tokens, + self.speculative_config, + ) else: # FIXME: Try using `auto_dispatch_capture=True` - update_mla_attn_params(self.update_stream, forward_context, - maybe_padded_num_tokens, - self.speculative_config) + update_mla_attn_params( + self.update_stream, + forward_context, + maybe_padded_num_tokens, + self.speculative_config, + ) else: if self.pcp_size * self.dcp_size > 1: - update_attn_dcp_pcp_params(self.update_stream, - forward_context, - maybe_padded_num_tokens) + update_attn_dcp_pcp_params( + self.update_stream, forward_context, maybe_padded_num_tokens + ) else: - update_attn_params(self.update_stream, forward_context, - maybe_padded_num_tokens) + update_attn_params( + self.update_stream, forward_context, maybe_padded_num_tokens + ) if get_forward_context().sp_enabled: hidden_states = tensor_model_parallel_all_gather(hidden_states, 0) @@ -1920,35 +2032,44 @@ def _generate_process_reqs_hidden_states(self, attn_metadata, with_prefill, if self.pcp_size > 1: hidden_states = get_pcp_group().all_gather( - hidden_states[:self.num_actual_tokens_pcp_padded // - self.pcp_size], 0) + hidden_states[: self.num_actual_tokens_pcp_padded // self.pcp_size], 0 + ) hidden_states = torch.index_select( - hidden_states, 0, - 
self.pcp_allgather_restore_idx[:hidden_states.shape[0]]) + hidden_states, + 0, + self.pcp_allgather_restore_idx[: hidden_states.shape[0]], + ) return hidden_states - def _build_attn_state(self, num_reqs, num_scheduled_tokens, - num_valid_tokens): + def _build_attn_state(self, num_reqs, num_scheduled_tokens, num_valid_tokens): ascend_config = get_ascend_config() if np.array_equal(self.seq_lens_np[:num_reqs], num_scheduled_tokens): attn_state = AscendAttentionState.PrefillNoCache # We assume it is the decode stage, where prefill occurs but only one token is not hit in cache. elif np.all(num_scheduled_tokens == 1): attn_state = AscendAttentionState.DecodeOnly - if self.speculative_config and self.speculative_config.method == 'deepseek_mtp': + if ( + self.speculative_config + and self.speculative_config.method == "deepseek_mtp" + ): # SpecDecoding now supports seq_len=1 and seq_len=2 # In Prefilling Decoding Disaggregation scenario, SpecDecoding need to supports seq_len=1 attn_state = AscendAttentionState.SpecDecoding # Speculative decoding. 
elif np.all(num_valid_tokens == 1): - if self.drafter and (self.drafter.name == SpecDcodeType.EAGLE - or self.drafter.name == SpecDcodeType.EAGLE3 - or self.drafter.name == SpecDcodeType.DRAFT_MODEL): + if self.drafter and ( + self.drafter.name == SpecDcodeType.EAGLE + or self.drafter.name == SpecDcodeType.EAGLE3 + or self.drafter.name == SpecDcodeType.DRAFT_MODEL + ): attn_state = AscendAttentionState.ChunkedPrefill else: attn_state = AscendAttentionState.SpecDecoding # splitfuse - elif not ascend_config.ascend_scheduler_config.enabled or self.chunked_prefill_enabled: + elif ( + not ascend_config.ascend_scheduler_config.enabled + or self.chunked_prefill_enabled + ): attn_state = AscendAttentionState.ChunkedPrefill else: attn_state = AscendAttentionState.PrefillCacheHit @@ -1957,9 +2078,14 @@ def _build_attn_state(self, num_reqs, num_scheduled_tokens, def _update_graph_pad_size(self, with_prefill, graph_pad_size): self.graph_pad_size = -1 - def _update_input_ids_and_positions(self, input_ids, positions, - num_input_tokens, with_prefill, - maybe_padded_num_tokens): + def _update_input_ids_and_positions( + self, + input_ids, + positions, + num_input_tokens, + with_prefill, + maybe_padded_num_tokens, + ): if self.uses_mrope: positions = self.mrope_positions[:, :num_input_tokens] return input_ids, positions @@ -1986,13 +2112,15 @@ def _calc_spec_decode_metadata( cu_num_sampled_tokens = np.cumsum(num_sampled_tokens, dtype=np.int32) total_num_sampled_tokens = cu_num_sampled_tokens[-1] # Step 2. [0, 0, 0, 0, 4, 5, 5, 5, 8, 9, 9] - cumsums_offsets = np.repeat(cu_num_sampled_tokens - num_sampled_tokens, - num_sampled_tokens) + cumsums_offsets = np.repeat( + cu_num_sampled_tokens - num_sampled_tokens, num_sampled_tokens + ) # Step 3. [0, 1, 2, 3, 0, 0, 1, 2, 0, 0, 1] arange = self.arange_np[:total_num_sampled_tokens] - cumsums_offsets # Step 4. 
[0, 0, 0, 0, 103, 104, 104, 104, 206, 207, 207] logits_indices = np.repeat( - cu_num_scheduled_tokens - num_sampled_tokens, num_sampled_tokens) + cu_num_scheduled_tokens - num_sampled_tokens, num_sampled_tokens + ) # Step 5. [0, 1, 2, 3, 103, 104, 105, 106, 206, 207, 208] logits_indices += arange @@ -2004,28 +2132,35 @@ def _calc_spec_decode_metadata( cu_num_draft_tokens = np.cumsum(num_draft_tokens, dtype=np.int32) total_num_draft_tokens = cu_num_draft_tokens[-1] # [0, 0, 0, 3, 3, 5] - cumsums_offsets = np.repeat(cu_num_draft_tokens - num_draft_tokens, - num_draft_tokens) + cumsums_offsets = np.repeat( + cu_num_draft_tokens - num_draft_tokens, num_draft_tokens + ) # [0, 1, 2, 0, 1, 0] arange = self.arange_np[:total_num_draft_tokens] - cumsums_offsets # [0, 0, 0, 5, 5, 9] target_logits_indices = np.repeat( - cu_num_sampled_tokens - num_sampled_tokens, num_draft_tokens) + cu_num_sampled_tokens - num_sampled_tokens, num_draft_tokens + ) # [0, 1, 2, 5, 6, 9] target_logits_indices += arange # TODO: Optimize the CPU -> NPU copy. cu_num_draft_tokens = torch.from_numpy(cu_num_draft_tokens).to( - self.device, non_blocking=True) + self.device, non_blocking=True + ) if not vllm_version_is("0.11.0"): cu_num_sampled_tokens = torch.from_numpy(cu_num_sampled_tokens).to( - self.device, non_blocking=True) - logits_indices = torch.from_numpy(logits_indices).to(self.device, - non_blocking=True) + self.device, non_blocking=True + ) + logits_indices = torch.from_numpy(logits_indices).to( + self.device, non_blocking=True + ) target_logits_indices = torch.from_numpy(target_logits_indices).to( - self.device, non_blocking=True) + self.device, non_blocking=True + ) bonus_logits_indices = torch.from_numpy(bonus_logits_indices).to( - self.device, non_blocking=True) + self.device, non_blocking=True + ) # Compute the draft token ids. 
# draft_token_indices: [ 1, 2, 3, 105, 106, 208] @@ -2070,47 +2205,49 @@ def apply_grammar_bitmask( # request in the batch, as the logit indices are offset by this amount. struct_out_req_batch_indices: dict[str, int] = {} cumulative_offset = 0 - seq = sorted(self.input_batch.req_id_to_index.items(), - key=lambda x: x[1]) + seq = sorted(self.input_batch.req_id_to_index.items(), key=lambda x: x[1]) for req_id, batch_index in seq: logit_index = batch_index + cumulative_offset cumulative_offset += len( - scheduler_output.scheduled_spec_decode_tokens.get(req_id, [])) + scheduler_output.scheduled_spec_decode_tokens.get(req_id, []) + ) if req_id in scheduler_output.structured_output_request_ids: struct_out_req_batch_indices[req_id] = logit_index out_indices = [] # Reorder the bitmask to match the order of the requests in the batch. - sorted_bitmask = np.zeros_like(grammar_bitmask, - shape=(logits.shape[0], - grammar_bitmask.shape[1])) + sorted_bitmask = np.zeros_like( + grammar_bitmask, shape=(logits.shape[0], grammar_bitmask.shape[1]) + ) cumulative_index = 0 if vllm_version_is("0.11.0"): seq = sorted( scheduler_output.structured_output_request_ids.items(), - key=lambda x: x[1]) + key=lambda x: x[1], + ) for req_id, _ in seq: logit_index = struct_out_req_batch_indices[req_id] num_spec_tokens = len( - scheduler_output.scheduled_spec_decode_tokens.get( - req_id, [])) + scheduler_output.scheduled_spec_decode_tokens.get(req_id, []) + ) for i in range(1 + num_spec_tokens): - sorted_bitmask[logit_index + i] = \ - grammar_bitmask[cumulative_index + i] + sorted_bitmask[logit_index + i] = grammar_bitmask[ + cumulative_index + i + ] out_indices.append(logit_index + i) cumulative_index += 1 + num_spec_tokens else: for req_id in scheduler_output.structured_output_request_ids: num_spec_tokens = len( - scheduler_output.scheduled_spec_decode_tokens.get( - req_id, [])) + scheduler_output.scheduled_spec_decode_tokens.get(req_id, []) + ) if req_id in struct_out_req_batch_indices: 
logit_index = struct_out_req_batch_indices[req_id] for i in range(1 + num_spec_tokens): - sorted_bitmask[logit_index + - i] = grammar_bitmask[cumulative_index + - i] + sorted_bitmask[logit_index + i] = grammar_bitmask[ + cumulative_index + i + ] out_indices.append(logit_index + i) cumulative_index += 1 + num_spec_tokens grammar_bitmask = sorted_bitmask @@ -2149,9 +2286,16 @@ def propose_draft_token_ids( draft_token_ids = None else: draft_token_ids = self.drafter.generate_token_ids( - valid_sampled_token_ids, sampling_metadata, scheduler_output, - spec_decode_metadata, positions, num_scheduled_tokens, - hidden_states, attn_metadata, aux_hidden_states) + valid_sampled_token_ids, + sampling_metadata, + scheduler_output, + spec_decode_metadata, + positions, + num_scheduled_tokens, + hidden_states, + attn_metadata, + aux_hidden_states, + ) return draft_token_ids def _pool( @@ -2163,16 +2307,16 @@ def _pool( finished_recving: Optional[set[str]] = None, kv_connector_output: Optional["KVConnectorOutput"] = None, ) -> ModelRunnerOutput: - assert self.input_batch.num_reqs ==\ - len(self.input_batch.pooling_params), \ - "Either all or none of the requests in" \ - " a batch must be pooling request" + assert self.input_batch.num_reqs == len(self.input_batch.pooling_params), ( + "Either all or none of the requests in a batch must be pooling request" + ) hidden_states = hidden_states[:num_scheduled_tokens] pooling_metadata = self.input_batch.pooling_metadata - pooling_metadata.build_pooling_cursor(num_scheduled_tokens_np.tolist(), - device=hidden_states.device) - seq_lens_cpu = self.seq_lens_cpu[:self.input_batch.num_reqs] + pooling_metadata.build_pooling_cursor( + num_scheduled_tokens_np.tolist(), device=hidden_states.device + ) + seq_lens_cpu = self.seq_lens_cpu[: self.input_batch.num_reqs] model = cast(VllmModelForPooling, self.model) raw_pooler_output = model.pooler( @@ -2187,7 +2331,8 @@ def _pool( pooler_output: list[Optional[torch.Tensor]] = [] for raw_output, seq_len, 
prompt_len in zip( - raw_pooler_output, seq_lens_cpu, pooling_metadata.prompt_lens): + raw_pooler_output, seq_lens_cpu, pooling_metadata.prompt_lens + ): output = raw_output if seq_len == prompt_len else None pooler_output.append(output) @@ -2201,8 +2346,9 @@ def _pool( kv_connector_output=kv_connector_output, ) - def _select_moe_comm_method(self, num_tokens: int, - with_prefill: bool) -> Optional[MoECommType]: + def _select_moe_comm_method( + self, num_tokens: int, with_prefill: bool + ) -> Optional[MoECommType]: """1. If expert parallel is not enabled, we use all-gather since MC2 and all-to-all are designed for expert parallelism. 2. If expert parallel is enabled, we need to consider the soc version and the @@ -2229,15 +2375,18 @@ def _select_moe_comm_method(self, num_tokens: int, return None soc_version = get_ascend_soc_version() - quant_type = getattr(self.vllm_config.model_config.hf_config, - 'moe_quantize', None) + quant_type = getattr( + self.vllm_config.model_config.hf_config, "moe_quantize", None + ) model_type = self.vllm_config.model_config.hf_config.model_type if not self.parallel_config.enable_expert_parallel: moe_comm_type = MoECommType.ALLGATHER elif soc_version in {AscendSocVersion.A2}: - if (num_tokens <= self.mc2_tokens_capacity - and self.parallel_config.world_size_across_dp >= 16): + if ( + num_tokens <= self.mc2_tokens_capacity + and self.parallel_config.world_size_across_dp >= 16 + ): moe_comm_type = MoECommType.MC2 else: # Currently, w4a8_dynamic does not support allgatherep @@ -2247,9 +2396,11 @@ def _select_moe_comm_method(self, num_tokens: int, moe_comm_type = MoECommType.ALLGATHER elif soc_version in {AscendSocVersion.A3}: - moe_comm_type = (MoECommType.MC2 - if num_tokens <= self.mc2_tokens_capacity else - MoECommType.ALLTOALL) + moe_comm_type = ( + MoECommType.MC2 + if num_tokens <= self.mc2_tokens_capacity + else MoECommType.ALLTOALL + ) else: raise ValueError(f"Unsupported soc_version: {soc_version}") @@ -2264,8 +2415,7 @@ def 
_select_moe_comm_method(self, num_tokens: int, moe_comm_type = MoECommType.ALLGATHER if is_global_first_rank(): - logger.debug(f"num_tokens: {num_tokens}, " - f"moe_comm_type: {moe_comm_type}") + logger.debug(f"num_tokens: {num_tokens}, moe_comm_type: {moe_comm_type}") return moe_comm_type @torch.inference_mode() @@ -2288,61 +2438,80 @@ def execute_model( if self.dynamic_eplb: self.eplb_updator.forward_before() - (attn_metadata, positions, num_scheduled_tokens_np, - num_input_tokens, num_tokens_across_dp, maybe_padded_num_tokens, - logits_indices, spec_decode_metadata, input_ids, inputs_embeds, - intermediate_tensors, - max_query_len) = (self._prepare_inputs(scheduler_output, - intermediate_tensors)) + ( + attn_metadata, + positions, + num_scheduled_tokens_np, + num_input_tokens, + num_tokens_across_dp, + maybe_padded_num_tokens, + logits_indices, + spec_decode_metadata, + input_ids, + inputs_embeds, + intermediate_tensors, + max_query_len, + ) = self._prepare_inputs(scheduler_output, intermediate_tensors) if self.dynamic_eplb: self.eplb_updator.take_update_info_from_eplb_process() - moe_comm_type = self._select_moe_comm_method(num_input_tokens, - self.with_prefill) + moe_comm_type = self._select_moe_comm_method( + num_input_tokens, self.with_prefill + ) uniform_decode = (max_query_len == self.uniform_decode_query_len) and ( scheduler_output.total_num_scheduled_tokens - == self.input_batch.num_reqs * max_query_len) - batch_descriptor = BatchDescriptor(num_tokens=num_input_tokens, - uniform_decode=uniform_decode) - aclgraph_runtime_mode, batch_descriptor = \ - self.aclgraph_dispatcher.dispatch(batch_descriptor) + == self.input_batch.num_reqs * max_query_len + ) + batch_descriptor = BatchDescriptor( + num_tokens=num_input_tokens, uniform_decode=uniform_decode + ) + aclgraph_runtime_mode, batch_descriptor = self.aclgraph_dispatcher.dispatch( + batch_descriptor + ) # Run forward pass with ProfileExecuteDuration().capture_async("forward"): with 
set_ascend_forward_context( - attn_metadata, - self.vllm_config, - num_tokens=num_input_tokens, - num_tokens_across_dp=num_tokens_across_dp, - with_prefill=self.with_prefill, - reserved_mc2_mask=self.reserved_mc2_mask, - moe_comm_type=moe_comm_type, - aclgraph_runtime_mode=aclgraph_runtime_mode, - batch_descriptor=batch_descriptor, - num_actual_tokens=scheduler_output. - total_num_scheduled_tokens, - prefetch_stream=self.prefetch_stream, - model_instance=self.model, - weight_prefetch_method=self.weight_prefetch_method): + attn_metadata, + self.vllm_config, + num_tokens=num_input_tokens, + num_tokens_across_dp=num_tokens_across_dp, + with_prefill=self.with_prefill, + reserved_mc2_mask=self.reserved_mc2_mask, + moe_comm_type=moe_comm_type, + aclgraph_runtime_mode=aclgraph_runtime_mode, + batch_descriptor=batch_descriptor, + num_actual_tokens=scheduler_output.total_num_scheduled_tokens, + prefetch_stream=self.prefetch_stream, + model_instance=self.model, + weight_prefetch_method=self.weight_prefetch_method, + ): self.maybe_setup_kv_connector(scheduler_output) hidden_states = self._generate_process_reqs_hidden_states( - attn_metadata, self.with_prefill, maybe_padded_num_tokens, - input_ids, positions, intermediate_tensors, inputs_embeds) + attn_metadata, + self.with_prefill, + maybe_padded_num_tokens, + input_ids, + positions, + intermediate_tensors, + inputs_embeds, + ) self.maybe_wait_for_kv_save() finished_sending, finished_recving = self.get_finished_kv_transfer( - scheduler_output) + scheduler_output + ) aux_hidden_states = None if self.drafter and self.drafter.name == SpecDcodeType.EAGLE3: hidden_states, aux_hidden_states = hidden_states kv_connector_output = KVConnectorOutput( - finished_sending=finished_sending, - finished_recving=finished_recving) + finished_sending=finished_sending, finished_recving=finished_recving + ) finished_sending = None finished_recving = None with ProfileExecuteDuration().capture_async("post process"): @@ -2350,9 +2519,10 @@ def 
execute_model( # to make sure we are synced across pp ranks # TODO: Support overlapping mirco-batches # https://github.com/vllm-project/vllm/issues/18019 - broadcast_pp_output = \ - self.parallel_config.distributed_executor_backend \ - == "external_launcher" and len(get_pp_group().ranks) > 0 + broadcast_pp_output = ( + self.parallel_config.distributed_executor_backend == "external_launcher" + and len(get_pp_group().ranks) > 0 + ) if not get_pp_group().is_last_rank: # For mid-pipeline stages, return the hidden states. if not broadcast_pp_output: @@ -2360,57 +2530,62 @@ def execute_model( return hidden_states assert isinstance(hidden_states, IntermediateTensors) get_pp_group().send_tensor_dict( - hidden_states.tensors, all_gather_group=get_tp_group()) + hidden_states.tensors, all_gather_group=get_tp_group() + ) logits = None else: if self.input_batch.pooling_params: return self._pool( hidden_states, scheduler_output.total_num_scheduled_tokens, - num_scheduled_tokens_np, finished_sending, - finished_recving, kv_connector_output) + num_scheduled_tokens_np, + finished_sending, + finished_recving, + kv_connector_output, + ) sample_hidden_states = hidden_states[logits_indices] logits = self.model.compute_logits(sample_hidden_states) if broadcast_pp_output: - model_output_broadcast_data = { - "logits": logits.contiguous(), - } if logits is not None else {} - model_output_broadcast_data = get_pp_group( - ).broadcast_tensor_dict(model_output_broadcast_data, - src=len(get_pp_group().ranks) - 1) + model_output_broadcast_data = ( + { + "logits": logits.contiguous(), + } + if logits is not None + else {} + ) + model_output_broadcast_data = get_pp_group().broadcast_tensor_dict( + model_output_broadcast_data, src=len(get_pp_group().ranks) - 1 + ) assert model_output_broadcast_data is not None logits = model_output_broadcast_data["logits"] # Apply structured output bitmasks if present if vllm_version_is("0.11.0"): if scheduler_output.grammar_bitmask is not None: - logits = 
self.apply_grammar_bitmask( - scheduler_output, logits) + logits = self.apply_grammar_bitmask(scheduler_output, logits) else: if scheduler_output.structured_output_request_ids: - logits = self.apply_grammar_bitmask( - scheduler_output, logits) + logits = self.apply_grammar_bitmask(scheduler_output, logits) with ProfileExecuteDuration().capture_async("Sample"): # Sample the next token and get logprobs if needed. sampling_metadata = self.input_batch.sampling_metadata if spec_decode_metadata is None: if lmhead_tp_enable() and logits is not None: - logits = logits[:self.input_batch.num_reqs] + logits = logits[: self.input_batch.num_reqs] sampler_output = self.sampler( logits=logits, sampling_metadata=sampling_metadata, ) else: if lmhead_tp_enable() and logits is not None: - logits = logits[:len(spec_decode_metadata.logits_indices)] + logits = logits[: len(spec_decode_metadata.logits_indices)] # When indexing with a tensor (bonus_logits_indices), PyTorch # creates a new tensor with separate storage from the original # logits tensor. This means any in-place operations on bonus_logits # won't affect the original logits tensor. assert logits is not None - bonus_logits = logits[ - spec_decode_metadata.bonus_logits_indices] + bonus_logits = logits[spec_decode_metadata.bonus_logits_indices] sampler_output = self.sampler( logits=bonus_logits, sampling_metadata=sampling_metadata, @@ -2420,8 +2595,7 @@ def execute_model( # Just like `bonus_logits`, `target_logits` is a new tensor with # separate storage from the original `logits` tensor. Therefore, # it is safe to update `target_logits` in place. 
- target_logits = logits[ - spec_decode_metadata.target_logits_indices] + target_logits = logits[spec_decode_metadata.target_logits_indices] output_token_ids = self.rejection_sampler( spec_decode_metadata, None, # draft_probs @@ -2433,8 +2607,9 @@ def execute_model( if self.need_accepted_tokens: self._update_states_after_model_execute(output_token_ids) - discard_sampled_tokens_req_indices = \ - self.discard_request_indices.np[:self.num_discarded_requests] + discard_sampled_tokens_req_indices = self.discard_request_indices.np[ + : self.num_discarded_requests + ] for i in discard_sampled_tokens_req_indices: generator = self.input_batch.generators.get(int(i)) if generator is not None: @@ -2443,18 +2618,18 @@ def execute_model( # Copy some objects so they don't get modified after returning. # This is important when using async scheduling. req_ids_output_copy = self.input_batch.req_ids.copy() - req_id_to_index_output_copy = \ - self.input_batch.req_id_to_index.copy() + req_id_to_index_output_copy = self.input_batch.req_id_to_index.copy() # NOTE: NPU -> CPU Sync happens here. # Move as many CPU operations as possible before this sync point. logprobs_tensors = sampler_output.logprobs_tensors - logprobs_lists = logprobs_tensors.tolists() \ - if logprobs_tensors is not None else None + logprobs_lists = ( + logprobs_tensors.tolists() if logprobs_tensors is not None else None + ) # Compute prompt logprobs if needed. 
prompt_logprobs_dict = self._get_prompt_logprobs_dict( - hidden_states[:scheduler_output.total_num_scheduled_tokens], + hidden_states[: scheduler_output.total_num_scheduled_tokens], scheduler_output, ) @@ -2477,18 +2652,17 @@ def execute_model( valid_sampled_token_ids[int(i)].clear() else: valid_sampled_token_ids = [] - invalid_req_indices = discard_sampled_tokens_req_indices.tolist( - ) + invalid_req_indices = discard_sampled_tokens_req_indices.tolist() invalid_req_indices_set = set(invalid_req_indices) assert sampled_token_ids.shape[-1] == 1 # Cache the sampled tokens on the NPU and avoid CPU sync. # These will be copied into input_ids in the next step # when preparing inputs. - self.input_batch.prev_sampled_token_ids = \ - sampled_token_ids - self.input_batch.prev_sampled_token_ids_invalid_indices = \ + self.input_batch.prev_sampled_token_ids = sampled_token_ids + self.input_batch.prev_sampled_token_ids_invalid_indices = ( invalid_req_indices_set + ) self.input_batch.prev_req_id_to_index = { req_id: i for i, req_id in enumerate(self.input_batch.req_ids) @@ -2501,8 +2675,9 @@ def execute_model( # between the first-stage worker and the last-stage worker. for req_idx in range(num_sampled_tokens): if self.use_async_scheduling: - sampled_ids = [-1] * 1 if \ - req_idx not in invalid_req_indices_set else None + sampled_ids = ( + [-1] * 1 if req_idx not in invalid_req_indices_set else None + ) else: sampled_ids = valid_sampled_token_ids[req_idx] if not sampled_ids: @@ -2513,12 +2688,11 @@ def execute_model( assert end_idx <= self.model_config.max_model_len, ( "Sampled token IDs exceed the max model length. 
" f"Total number of tokens: {end_idx} > max_model_len: " - f"{self.model_config.max_model_len}") + f"{self.model_config.max_model_len}" + ) - self.input_batch.token_ids_cpu[req_idx, - start_idx:end_idx] = sampled_ids - self.input_batch.is_token_ids[req_idx, - start_idx:end_idx] = True + self.input_batch.token_ids_cpu[req_idx, start_idx:end_idx] = sampled_ids + self.input_batch.is_token_ids[req_idx, start_idx:end_idx] = True self.input_batch.num_tokens_no_spec[req_idx] = end_idx self.input_batch.num_tokens[req_idx] = end_idx req_id = self.input_batch.req_ids[req_idx] @@ -2541,9 +2715,11 @@ def propose_draft_token_ids(sampled_token_ids): with ProfileExecuteDuration().capture_async("Draft"): if self.speculative_config: - use_padded_batch_for_eagle = self.speculative_config and \ - self.speculative_config.method == "deepseek_mtp" and \ - not self.speculative_config.disable_padded_drafter_batch + use_padded_batch_for_eagle = ( + self.speculative_config + and self.speculative_config.method == "deepseek_mtp" + and not self.speculative_config.disable_padded_drafter_batch + ) if use_padded_batch_for_eagle: # EAGLE speculative decoding can use the GPU sampled tokens # as inputs, and does not need to wait for bookkeeping to finish. 
@@ -2556,7 +2732,7 @@ def propose_draft_token_ids(sampled_token_ids): if has_kv_transfer_group(): get_kv_transfer_group().clear_connector_metadata() - extra_args = ({"kv_connector_output": kv_connector_output}) + extra_args = {"kv_connector_output": kv_connector_output} model_runner_output = ModelRunnerOutput( req_ids=req_ids_output_copy, @@ -2571,12 +2747,16 @@ def propose_draft_token_ids(sampled_token_ids): durations = ProfileExecuteDuration().pop_captured_sync() if durations: dr_str = [ - f"[{tag}]:{duration:.2f}ms" - for tag, duration in durations.items() + f"[{tag}]:{duration:.2f}ms" for tag, duration in durations.items() ] - captured_name = "Decode" if self.attn_state == AscendAttentionState.DecodeOnly else "Prefill" - logger.info("Profile execute duration [%s]:%s", captured_name, - " ".join(dr_str)) + captured_name = ( + "Decode" + if self.attn_state == AscendAttentionState.DecodeOnly + else "Prefill" + ) + logger.info( + "Profile execute duration [%s]:%s", captured_name, " ".join(dr_str) + ) if self.dynamic_eplb: self.eplb_updator.forward_end() if not self.use_async_scheduling: @@ -2601,19 +2781,21 @@ def take_draft_token_ids(self) -> Optional[DraftTokenIds]: return DraftTokenIds(req_ids, draft_token_ids) def kv_connector_no_forward( - self, scheduler_output: "SchedulerOutput") -> ModelRunnerOutput: + self, scheduler_output: "SchedulerOutput" + ) -> ModelRunnerOutput: with set_ascend_forward_context(None, self.vllm_config): self.maybe_setup_kv_connector(scheduler_output) - finished_sending, finished_recving = ( - self.get_finished_kv_transfer(scheduler_output)) + finished_sending, finished_recving = self.get_finished_kv_transfer( + scheduler_output + ) # For the case of no forward caused by receiving remote kv, # one round of dummy inference is necessary # to prevent hang over the collective calls. 
output = copy.copy(EMPTY_MODEL_RUNNER_OUTPUT) output.kv_connector_output = KVConnectorOutput( - finished_sending=finished_sending, - finished_recving=finished_recving) + finished_sending=finished_sending, finished_recving=finished_recving + ) return output @staticmethod @@ -2623,8 +2805,7 @@ def maybe_setup_kv_connector(scheduler_output: "SchedulerOutput"): kv_connector = get_kv_transfer_group() assert isinstance(kv_connector, KVConnectorBase_V1) assert scheduler_output.kv_connector_metadata is not None - kv_connector.bind_connector_metadata( - scheduler_output.kv_connector_metadata) + kv_connector.bind_connector_metadata(scheduler_output.kv_connector_metadata) kv_connector.start_load_kv(get_forward_context()) @@ -2639,7 +2820,8 @@ def get_finished_kv_transfer( ) -> tuple[Optional[set[str]], Optional[set[str]]]: if has_kv_transfer_group(): return get_kv_transfer_group().get_finished( - scheduler_output.finished_req_ids) + scheduler_output.finished_req_ids + ) return None, None def _build_dummy_attn_metadata( @@ -2655,8 +2837,9 @@ def _build_dummy_attn_metadata( attn_metadata: Optional[dict[str, Any]] = None if force_attention or aclgraph_runtime_mode == CUDAGraphMode.FULL: - assert with_prefill is False, \ + assert with_prefill is False, ( "Full decode graph only supports uniform batch now." 
+ ) attn_metadata = {} @@ -2664,47 +2847,57 @@ def _build_dummy_attn_metadata( self.seq_lens_np[:num_reqs] = seq_lens self.seq_lens_np[num_reqs:] = 0 - cu_num_tokens, arange = self._get_cumsum_and_arange( - num_scheduled_tokens) - query_start_loc_tensor = torch.Tensor(cu_num_tokens).to( - self.device).to(torch.int32) - self.query_start_loc[1:num_reqs + 1] = query_start_loc_tensor - self.query_start_loc_cpu[1:num_reqs + - 1] = torch.Tensor(cu_num_tokens) + cu_num_tokens, arange = self._get_cumsum_and_arange(num_scheduled_tokens) + query_start_loc_tensor = ( + torch.Tensor(cu_num_tokens).to(self.device).to(torch.int32) + ) + self.query_start_loc[1 : num_reqs + 1] = query_start_loc_tensor + self.query_start_loc_cpu[1 : num_reqs + 1] = torch.Tensor(cu_num_tokens) - num_computed_tokens_cpu = ( - self.input_batch.num_computed_tokens_cpu_tensor[:num_reqs]) + num_computed_tokens_cpu = self.input_batch.num_computed_tokens_cpu_tensor[ + :num_reqs + ] for kv_cache_group_id, kv_cache_group_spec in enumerate( - self.kv_cache_config.kv_cache_groups): + self.kv_cache_config.kv_cache_groups + ): block_table_tensor = self.input_batch.block_table[ - kv_cache_group_id].get_device_tensor() + kv_cache_group_id + ].get_device_tensor() slot_mapping = self.input_batch.block_table[ - kv_cache_group_id].slot_mapping - self.cp_kv_recover_idx = torch.zeros(self.max_num_tokens, - dtype=torch.int32, - device=self.device) + kv_cache_group_id + ].slot_mapping + self.cp_kv_recover_idx = torch.zeros( + self.max_num_tokens, dtype=torch.int32, device=self.device + ) long_seq_metadata = self._generate_pcp_metadata( - num_tokens, self.seq_lens_cpu) + num_tokens, self.seq_lens_cpu + ) if long_seq_metadata is not None: - pcp_world_size = get_pcp_group( - ).world_size if prefill_context_parallel_enable() else 1 + pcp_world_size = ( + get_pcp_group().world_size + if prefill_context_parallel_enable() + else 1 + ) dcp_world_size = get_dcp_group().world_size - num_computed_tokens_of_pcp_dcp = [[ - [0] * 
dcp_world_size for _ in range(pcp_world_size) - ] for _ in range(num_tokens)] - long_seq_metadata.num_computed_tokens_of_pcp_dcp = num_computed_tokens_of_pcp_dcp + num_computed_tokens_of_pcp_dcp = [ + [[0] * dcp_world_size for _ in range(pcp_world_size)] + for _ in range(num_tokens) + ] + long_seq_metadata.num_computed_tokens_of_pcp_dcp = ( + num_computed_tokens_of_pcp_dcp + ) if self.speculative_config: query_start_loc = torch.tensor( [0] + self.actual_seq_lengths_q[:num_reqs], device=self.device, - dtype=torch.int32) + dtype=torch.int32, + ) else: - query_start_loc = self.query_start_loc[:num_reqs + 1] + query_start_loc = self.query_start_loc[: num_reqs + 1] common_attn_metadata = AscendCommonAttentionMetadata( query_start_loc=query_start_loc, - query_start_loc_cpu=self.query_start_loc_cpu[:num_reqs + - 1], + query_start_loc_cpu=self.query_start_loc_cpu[: num_reqs + 1], seq_lens_cpu=self.seq_lens_cpu, seq_lens=self.seq_lens_cpu[:num_reqs], num_reqs=num_reqs, @@ -2724,14 +2917,15 @@ def _build_dummy_attn_metadata( prefill_context_parallel_metadata=long_seq_metadata, ) attn_state = AscendAttentionState.DecodeOnly - if self.speculative_config and \ - self.speculative_config.method == "deepseek_mtp": + if ( + self.speculative_config + and self.speculative_config.method == "deepseek_mtp" + ): attn_state = AscendAttentionState.SpecDecoding common_metadata = CommonAttentionMetadata( - query_start_loc=self.query_start_loc[:num_reqs + 1], - query_start_loc_cpu=self.query_start_loc_cpu[:num_reqs + - 1], + query_start_loc=self.query_start_loc[: num_reqs + 1], + query_start_loc_cpu=self.query_start_loc_cpu[: num_reqs + 1], seq_lens_cpu=self.seq_lens_cpu[:num_reqs], seq_lens=self.seq_lens_cpu[:num_reqs], num_reqs=num_reqs, @@ -2740,59 +2934,78 @@ def _build_dummy_attn_metadata( slot_mapping=slot_mapping, num_computed_tokens_cpu=num_computed_tokens_cpu, max_query_len=max_query_len, - max_seq_len=seq_lens) + max_seq_len=seq_lens, + ) for attn_group in 
self.attn_groups[kv_cache_group_id]: builder = attn_group.get_metadata_builder() if isinstance(builder, AscendAttentionMetadataBuilder): attn_metadata_full_attention = builder.build_for_graph_capture( - common_attn_metadata, attn_state, self.get_model()) + common_attn_metadata, attn_state, self.get_model() + ) elif isinstance(builder, GDNAttentionMetadataBuilder): - attn_metadata_gdn_attention = builder.build_for_cudagraph_capture( - common_metadata) + attn_metadata_gdn_attention = ( + builder.build_for_cudagraph_capture(common_metadata) + ) for layer_name in kv_cache_group_spec.layer_names: if "linear_attn" in layer_name: - attn_metadata[ - layer_name] = attn_metadata_gdn_attention + attn_metadata[layer_name] = attn_metadata_gdn_attention else: - attn_metadata[ - layer_name] = attn_metadata_full_attention + attn_metadata[layer_name] = attn_metadata_full_attention return attn_metadata - def _generate_dummy_run_hidden_states(self, with_prefill, - is_torchair_compile, input_ids, - positions, attn_metadata, num_tokens, - intermediate_tensors, inputs_embeds): - hidden_states = self.model(input_ids=input_ids, - positions=positions, - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds) + def _generate_dummy_run_hidden_states( + self, + with_prefill, + is_torchair_compile, + input_ids, + positions, + attn_metadata, + num_tokens, + intermediate_tensors, + inputs_embeds, + ): + hidden_states = self.model( + input_ids=input_ids, + positions=positions, + intermediate_tensors=intermediate_tensors, + inputs_embeds=inputs_embeds, + ) forward_context = get_forward_context() assert forward_context is not None - if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL and \ - not forward_context.capturing and not self.use_sparse: + if ( + forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL + and not forward_context.capturing + and not self.use_sparse + ): if self.vllm_config.model_config.use_mla: # FIXME: Try using `auto_dispatch_capture=True` 
if self.pcp_size * self.dcp_size > 1: # FIXME: Try using `auto_dispatch_capture=True` - update_mla_attn_dcp_pcp_params(self.update_stream, - forward_context, - positions.shape[0], - self.speculative_config) + update_mla_attn_dcp_pcp_params( + self.update_stream, + forward_context, + positions.shape[0], + self.speculative_config, + ) else: # FIXME: Try using `auto_dispatch_capture=True` - update_mla_attn_params(self.update_stream, forward_context, - positions.shape[0], - self.speculative_config) + update_mla_attn_params( + self.update_stream, + forward_context, + positions.shape[0], + self.speculative_config, + ) else: if self.pcp_size * self.dcp_size > 1: - update_attn_dcp_pcp_params(self.update_stream, - forward_context, - positions.shape[0]) + update_attn_dcp_pcp_params( + self.update_stream, forward_context, positions.shape[0] + ) else: - update_attn_params(self.update_stream, forward_context, - positions.shape[0]) + update_attn_params( + self.update_stream, forward_context, positions.shape[0] + ) if self.drafter and self.drafter.name == SpecDcodeType.EAGLE3: hidden_states, _ = hidden_states @@ -2812,7 +3025,9 @@ def _dummy_run( ) -> torch.Tensor: # only support eager mode and piecewise graph now assert aclgraph_runtime_mode is None or aclgraph_runtime_mode in { - CUDAGraphMode.NONE, CUDAGraphMode.PIECEWISE, CUDAGraphMode.FULL + CUDAGraphMode.NONE, + CUDAGraphMode.PIECEWISE, + CUDAGraphMode.FULL, } # In multi-DP scenarios, there may be situations where all DP groups are executing dummy runs. 
@@ -2826,9 +3041,9 @@ def _dummy_run( with_prefill = True # Padding for DP - (num_tokens, num_tokens_across_dp, - with_prefill) = self._sync_metadata_across_dp(num_tokens, - with_prefill) + (num_tokens, num_tokens_across_dp, with_prefill) = ( + self._sync_metadata_across_dp(num_tokens, with_prefill) + ) moe_comm_type = self._select_moe_comm_method(num_tokens, with_prefill) @@ -2845,8 +3060,7 @@ def _dummy_run( # When setting max_query_len = 1, we switch to and capture the optimized # routine of FA2 for pure decode, i.e., Flashdecode + an optimization # for GQA/MQA. - max_query_len = self.uniform_decode_query_len if uniform_decode else \ - num_tokens + max_query_len = self.uniform_decode_query_len if uniform_decode else num_tokens # Set num_scheduled_tokens based on num_tokens and max_num_seqs # for dummy run with LoRA so that the num_reqs collectively @@ -2862,22 +3076,21 @@ def _dummy_run( if with_prefill: num_reqs = num_tokens else: - num_reqs = (num_tokens + self.decode_token_per_req - - 1) // self.decode_token_per_req + num_reqs = ( + num_tokens + self.decode_token_per_req - 1 + ) // self.decode_token_per_req num_reqs = min(num_reqs, max_num_reqs) min_tokens_per_req = num_tokens // num_reqs num_scheduled_tokens_list = [min_tokens_per_req] * num_reqs num_scheduled_tokens_list[-1] += num_tokens % num_reqs assert sum(num_scheduled_tokens_list) == num_tokens assert len(num_scheduled_tokens_list) == num_reqs - num_scheduled_tokens = np.array(num_scheduled_tokens_list, - dtype=np.int32) + num_scheduled_tokens = np.array(num_scheduled_tokens_list, dtype=np.int32) if not self.in_profile_run and self.dynamic_eplb: self.eplb_updator.forward_before() - with self.maybe_dummy_run_with_lora(self.lora_config, - num_scheduled_tokens): + with self.maybe_dummy_run_with_lora(self.lora_config, num_scheduled_tokens): if self.is_multimodal_model: input_ids = None inputs_embeds = self.inputs_embeds.gpu[:num_tokens] @@ -2899,26 +3112,27 @@ def _dummy_run( if self.intermediate_tensors 
is None: self.intermediate_tensors = ( self.model.make_empty_intermediate_tensors( - batch_size=num_tokens, - dtype=self.dtype, - device=self.device)) - intermediate_tensors = IntermediateTensors({ - k: v[:num_tokens] - for k, v in self.intermediate_tensors.items() - }) + batch_size=num_tokens, dtype=self.dtype, device=self.device + ) + ) + intermediate_tensors = IntermediateTensors( + {k: v[:num_tokens] for k, v in self.intermediate_tensors.items()} + ) # filter out the valid batch descriptor - _ag_mode, batch_descriptor = \ - self.aclgraph_dispatcher.dispatch( - BatchDescriptor(num_tokens=num_tokens, - uniform_decode=uniform_decode)) + _ag_mode, batch_descriptor = self.aclgraph_dispatcher.dispatch( + BatchDescriptor(num_tokens=num_tokens, uniform_decode=uniform_decode) + ) if aclgraph_runtime_mode is not None: # we allow forcing NONE when the dispatcher disagrees to support # warm ups for aclgraph capture - assert aclgraph_runtime_mode == CUDAGraphMode.NONE or \ - aclgraph_runtime_mode == _ag_mode, ( + assert ( + aclgraph_runtime_mode == CUDAGraphMode.NONE + or aclgraph_runtime_mode == _ag_mode + ), ( f"Aclgraph runtime mode mismatch at dummy_run. " - f"Expected {_ag_mode}, but got {aclgraph_runtime_mode}.") + f"Expected {_ag_mode}, but got {aclgraph_runtime_mode}." 
+ ) else: aclgraph_runtime_mode = _ag_mode @@ -2934,37 +3148,43 @@ def _dummy_run( num_scheduled_tokens=num_scheduled_tokens, ) - need_dummy_logits = (not self.in_profile_run - and lmhead_tp_enable()) + need_dummy_logits = not self.in_profile_run and lmhead_tp_enable() if need_dummy_logits: - max_num_reqs_across_dp = num_tokens if not with_prefill else max_num_reqs - dummy_indices = torch.zeros(max_num_reqs_across_dp, - dtype=torch.int32) + max_num_reqs_across_dp = ( + num_tokens if not with_prefill else max_num_reqs + ) + dummy_indices = torch.zeros(max_num_reqs_across_dp, dtype=torch.int32) def dummy_compute_logits(hidden_states): - return self.model.compute_logits( - hidden_states[dummy_indices]) + return self.model.compute_logits(hidden_states[dummy_indices]) with set_ascend_forward_context( - attn_metadata, - self.vllm_config, - num_tokens=num_tokens, - num_tokens_across_dp=num_tokens_across_dp, - with_prefill=with_prefill, - in_profile_run=self.in_profile_run, - reserved_mc2_mask=self.reserved_mc2_mask, - moe_comm_type=moe_comm_type, - num_actual_tokens=0, - aclgraph_runtime_mode=aclgraph_runtime_mode, - batch_descriptor=batch_descriptor, - prefetch_stream=self.prefetch_stream, - model_instance=self.model, - weight_prefetch_method=self.weight_prefetch_method): + attn_metadata, + self.vllm_config, + num_tokens=num_tokens, + num_tokens_across_dp=num_tokens_across_dp, + with_prefill=with_prefill, + in_profile_run=self.in_profile_run, + reserved_mc2_mask=self.reserved_mc2_mask, + moe_comm_type=moe_comm_type, + num_actual_tokens=0, + aclgraph_runtime_mode=aclgraph_runtime_mode, + batch_descriptor=batch_descriptor, + prefetch_stream=self.prefetch_stream, + model_instance=self.model, + weight_prefetch_method=self.weight_prefetch_method, + ): hidden_states = self._generate_dummy_run_hidden_states( - with_prefill, is_torchair_compile, input_ids, positions, - attn_metadata, num_tokens, intermediate_tensors, - inputs_embeds) + with_prefill, + is_torchair_compile, + 
input_ids, + positions, + attn_metadata, + num_tokens, + intermediate_tensors, + inputs_embeds, + ) if need_dummy_logits: dummy_compute_logits(hidden_states) @@ -2976,10 +3196,10 @@ def dummy_compute_logits(hidden_states): num_reqs=num_reqs, num_tokens_across_dp=num_tokens_across_dp, aclgraph_runtime_mode=aclgraph_runtime_mode, - batch_descriptor=batch_descriptor) + batch_descriptor=batch_descriptor, + ) if need_dummy_logits: - self.drafter.model.compute_logits( - hidden_states[dummy_indices]) + self.drafter.model.compute_logits(hidden_states[dummy_indices]) if self.in_profile_run and self.dynamic_eplb: self.model.clear_all_moe_loads() if not self.in_profile_run and self.dynamic_eplb: @@ -2999,16 +3219,21 @@ def profile_run(self) -> None: # Trigger compilation for general shape. with self.set_in_profile_run(): hidden_states = self._dummy_run( - self.max_num_tokens // - self.pcp_size if self.pcp_size > 1 else self.max_num_tokens, - with_prefill=True) + self.max_num_tokens // self.pcp_size + if self.pcp_size > 1 + else self.max_num_tokens, + with_prefill=True, + ) # MC2 will consume additional NPU memory. # Therefore, we need to run the MC2 path once here to complete its initialization, # allowing vLLM to correctly estimate the maximum memory required. - if self.max_num_tokens > self.mc2_tokens_capacity and \ - self._select_moe_comm_method( - self.mc2_tokens_capacity, - with_prefill=True) == MoECommType.MC2: + if ( + self.max_num_tokens > self.mc2_tokens_capacity + and self._select_moe_comm_method( + self.mc2_tokens_capacity, with_prefill=True + ) + == MoECommType.MC2 + ): self._dummy_run(self.mc2_tokens_capacity, with_prefill=True) output = None @@ -3019,12 +3244,11 @@ def profile_run(self) -> None: # For profile, have maximum num_reqs and that collectively have # maximum num_tokens. 
min_tokens_per_req = self.max_num_tokens // self.max_num_reqs - num_scheduled_tokens_list = [min_tokens_per_req - ] * self.max_num_reqs - num_scheduled_tokens_list[ - -1] += self.max_num_tokens % self.max_num_reqs - num_scheduled_tokens = np.array(num_scheduled_tokens_list, - dtype=np.int32) + num_scheduled_tokens_list = [min_tokens_per_req] * self.max_num_reqs + num_scheduled_tokens_list[-1] += self.max_num_tokens % self.max_num_reqs + num_scheduled_tokens = np.array( + num_scheduled_tokens_list, dtype=np.int32 + ) logit_indices = np.cumsum(num_scheduled_tokens) - 1 # TODO: need to rum a dummy sampler for generate task hidden_states = hidden_states[logit_indices] @@ -3051,9 +3275,9 @@ def _dummy_pooler_run_task( req_num_tokens = num_tokens // num_reqs - dummy_token_ids = torch.zeros((num_reqs, req_num_tokens), - dtype=torch.int32, - device=self.device) + dummy_token_ids = torch.zeros( + (num_reqs, req_num_tokens), dtype=torch.int32, device=self.device + ) model = cast(VllmModelForPooling, self.get_model()) dummy_pooling_params = PoolingParams(task=task) @@ -3070,19 +3294,22 @@ def _dummy_pooler_run_task( pooling_params=[dummy_pooling_params] * num_reqs, ) - dummy_metadata.build_pooling_cursor(num_scheduled_tokens_list, - device=hidden_states.device) + dummy_metadata.build_pooling_cursor( + num_scheduled_tokens_list, device=hidden_states.device + ) try: - return model.pooler(hidden_states=hidden_states, - pooling_metadata=dummy_metadata) + return model.pooler( + hidden_states=hidden_states, pooling_metadata=dummy_metadata + ) except RuntimeError as e: - if 'out of memory' in str(e): + if "out of memory" in str(e): raise RuntimeError( "CUDA out of memory occurred when warming up pooler " f"({task=}) with {num_reqs} dummy requests. Please try " "lowering `max_num_seqs` or `gpu_memory_utilization` when " - "initializing the engine.") from e + "initializing the engine." 
+ ) from e else: raise e @@ -3121,36 +3348,45 @@ def load_model(self) -> None: from vllm.model_executor.layers.linear import ( MergedColumnParallelLinear, QKVParallelLinear, RowParallelLinear) + for module in self.model.modules(): - if isinstance(module, - (MergedColumnParallelLinear, - QKVParallelLinear, RowParallelLinear)): + if isinstance( + module, + ( + MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear, + ), + ): module.weight.data = self._convert_torch_format( - module.weight.data) + module.weight.data + ) if self.drafter: logger.info("Loading drafter model...") self.drafter.load_model(self.model) if self.drafter.name == SpecDcodeType.EAGLE3: self.model.set_aux_hidden_state_layers( - self.model.get_eagle3_aux_hidden_state_layers()) + self.model.get_eagle3_aux_hidden_state_layers() + ) if self.lora_config: - self.model = self.load_lora_model(self.model, self.vllm_config, - self.device) - logger.info("Loading model weights took %.4f GB", - m.consumed_memory / float(2**30)) + self.model = self.load_lora_model( + self.model, self.vllm_config, self.device + ) + logger.info( + "Loading model weights took %.4f GB", m.consumed_memory / float(2**30) + ) # wrap the model with full graph wrapper if needed. 
if self.compilation_config.cudagraph_mode.has_full_cudagraphs(): self.update_stream: torch.npu.Stream = torch.npu.Stream() set_graph_params(self.compilation_config.cudagraph_capture_sizes) - self.model = ACLGraphWrapper(self.model, - self.vllm_config, - runtime_mode=CUDAGraphMode.FULL) + self.model = ACLGraphWrapper( + self.model, self.vllm_config, runtime_mode=CUDAGraphMode.FULL + ) def _convert_torch_format(self, tensor): - if ACL_FORMAT == ACL_FORMAT_FRACTAL_NZ \ - and not is_enable_nz(): + if ACL_FORMAT == ACL_FORMAT_FRACTAL_NZ and not is_enable_nz(): return tensor tensor = torch_npu.npu_format_cast(tensor, ACL_FORMAT) return tensor @@ -3167,12 +3403,14 @@ def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None: self.may_add_encoder_only_layers_to_kv_cache_config() # NOTE(cmq): initialize_attn_backend must before using self.attn_groups self.initialize_attn_backend(kv_cache_config) - self.use_hybrid_blocks = (len(self.attn_groups) > 1) + self.use_hybrid_blocks = len(self.attn_groups) > 1 # NOTE: Currently, we determine whether we need `num_accepted_tokens` through `MambaSpec`. 
- self.need_accepted_tokens = any([ - isinstance(attn_group[0].kv_cache_spec, MambaSpec) - for attn_group in self.attn_groups - ]) + self.need_accepted_tokens = any( + [ + isinstance(attn_group[0].kv_cache_spec, MambaSpec) + for attn_group in self.attn_groups + ] + ) self.may_reinitialize_input_batch(kv_cache_config) kv_caches = self.initialize_kv_cache_tensors(kv_cache_config) @@ -3180,15 +3418,15 @@ def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None: if has_kv_transfer_group(): get_kv_transfer_group().register_kv_caches(kv_caches) - def _align_memory(self, tensor: torch.Tensor, - alignment: int) -> torch.Tensor: + def _align_memory(self, tensor: torch.Tensor, alignment: int) -> torch.Tensor: data_ptr = tensor.data_ptr() aligned_addr = (data_ptr + alignment - 1) // alignment * alignment offset = (aligned_addr - data_ptr) // tensor.element_size() - return tensor[int(offset):] + return tensor[int(offset) :] def initialize_kv_cache_tensors( - self, kv_cache_config: KVCacheConfig) -> dict[str, torch.Tensor]: + self, kv_cache_config: KVCacheConfig + ) -> dict[str, torch.Tensor]: """ Initialize the memory buffer for KV cache. @@ -3201,16 +3439,18 @@ def initialize_kv_cache_tensors( # Initialize the memory buffer for KV cache kv_cache_raw_tensors = self._allocate_kv_cache_tensors(kv_cache_config) # Change the memory buffer to the desired shape - kv_caches = self._reshape_kv_cache_tensors(kv_cache_config, - kv_cache_raw_tensors) + kv_caches = self._reshape_kv_cache_tensors( + kv_cache_config, kv_cache_raw_tensors + ) - bind_kv_cache(kv_caches, - self.compilation_config.static_forward_context, - self.kv_caches) + bind_kv_cache( + kv_caches, self.compilation_config.static_forward_context, self.kv_caches + ) return kv_caches def _allocate_kv_cache_tensors( - self, kv_cache_config: KVCacheConfig) -> dict[str, torch.Tensor]: + self, kv_cache_config: KVCacheConfig + ) -> dict[str, torch.Tensor]: """ Initializes the KV cache buffer with the correct size. 
The buffer needs to be reshaped to the desired shape before being used by the models. @@ -3225,36 +3465,42 @@ def _allocate_kv_cache_tensors( corresponding memory buffer for KV cache. dict[str, tuple(torch.Tensor, torch.Tensor)] A map between layer names to their corresponding memory buffer for K cache and V cache. - """ + """ # init kv cache tensors - kv_cache_raw_tensors: dict[str, Union[torch.Tensor, - Optional[torch.Tensor]]] = {} + kv_cache_raw_tensors: dict[ + str, Union[torch.Tensor, Optional[torch.Tensor]] + ] = {} # llmdatadist need the addr of cache tensor be aligned with 2M alignment = 2 * 1024 * 1024 for kv_cache_tensor in kv_cache_config.kv_cache_tensors: # TODO: REFACTOR ME to sharing hybrid cache for idx in range(len(kv_cache_tensor.shared_by)): layer_name = kv_cache_tensor.shared_by[idx] - if "linear_attn" in layer_name and layer_name not in kv_cache_raw_tensors.keys( + if ( + "linear_attn" in layer_name + and layer_name not in kv_cache_raw_tensors.keys() ): # for mamba linear attention if self.vllm_config.kv_transfer_config is None: - tensor = torch.zeros(kv_cache_tensor.size, - dtype=torch.int8, - device=self.device) + tensor = torch.zeros( + kv_cache_tensor.size, dtype=torch.int8, device=self.device + ) else: cache_size_aligned = kv_cache_tensor.size + alignment - tensor = torch.zeros(cache_size_aligned, - dtype=torch.int8, - device=self.device) - tensor = self._align_memory( - tensor, alignment)[:kv_cache_tensor.size] + tensor = torch.zeros( + cache_size_aligned, dtype=torch.int8, device=self.device + ) + tensor = self._align_memory(tensor, alignment)[ + : kv_cache_tensor.size + ] for layer_name_inner in kv_cache_tensor.shared_by: # shared the kvcache between the self_attn specs in the same group if "linear_attn" in layer_name_inner: kv_cache_raw_tensors[layer_name_inner] = tensor - elif "attn" in layer_name and layer_name not in kv_cache_raw_tensors.keys( + elif ( + "attn" in layer_name + and layer_name not in kv_cache_raw_tensors.keys() ): # 
NOTE: We need to init k cache tensor (nope cache tensor in mla) and # v cache tensor (rope cache tensor in mla) separately to support llmdatadist, @@ -3262,8 +3508,10 @@ def _allocate_kv_cache_tensors( # For deepseek mla, we need to spilt cache tensor accrodding to the nope head dim # and rope head dim. if self.model_config.is_deepseek_mla: - head_size = self.model_config.hf_text_config.qk_rope_head_dim + \ - self.model_config.hf_text_config.kv_lora_rank + head_size = ( + self.model_config.hf_text_config.qk_rope_head_dim + + self.model_config.hf_text_config.kv_lora_rank + ) dsa_k_cache_factor = None dsa_k_cache_size = None @@ -3276,61 +3524,87 @@ def _allocate_kv_cache_tensors( # FullAttentionSpec allocate 2 * mla page size bytes, # and we use half of that for k cache in DSA dsa_k_cache_factor = 2 - k_tensor_split_factor = 2 * head_size / self.model_config.hf_text_config.kv_lora_rank - v_tensor_split_factor = 2 * head_size / self.model_config.hf_text_config.qk_rope_head_dim - dsa_k_cache_size = int(kv_cache_tensor.size // - dsa_k_cache_factor) + k_tensor_split_factor = ( + 2 + * head_size + / self.model_config.hf_text_config.kv_lora_rank + ) + v_tensor_split_factor = ( + 2 + * head_size + / self.model_config.hf_text_config.qk_rope_head_dim + ) + dsa_k_cache_size = int( + kv_cache_tensor.size // dsa_k_cache_factor + ) else: # for other deepseek models, use MLAAttentionSpec - k_tensor_split_factor = head_size / self.model_config.hf_text_config.kv_lora_rank - v_tensor_split_factor = head_size / self.model_config.hf_text_config.qk_rope_head_dim + k_tensor_split_factor = ( + head_size / self.model_config.hf_text_config.kv_lora_rank + ) + v_tensor_split_factor = ( + head_size + / self.model_config.hf_text_config.qk_rope_head_dim + ) - k_tensor_size = int(kv_cache_tensor.size // - k_tensor_split_factor) - v_tensor_size = int(kv_cache_tensor.size // - v_tensor_split_factor) + k_tensor_size = int(kv_cache_tensor.size // k_tensor_split_factor) + v_tensor_size = 
int(kv_cache_tensor.size // v_tensor_split_factor) # for other attentions, e.g., self_attn, sliding window attn if self.vllm_config.kv_transfer_config is None: - k_tensor = torch.zeros(k_tensor_size, - dtype=torch.int8, - device=self.device) - v_tensor = torch.zeros(v_tensor_size, - dtype=torch.int8, - device=self.device) + k_tensor = torch.zeros( + k_tensor_size, dtype=torch.int8, device=self.device + ) + v_tensor = torch.zeros( + v_tensor_size, dtype=torch.int8, device=self.device + ) #### k cache: for deepseek sparse attention if dsa_k_cache_factor is not None: dsa_k_cache_tensor = torch.zeros( - dsa_k_cache_size, - dtype=torch.int8, - device=self.device) + dsa_k_cache_size, dtype=torch.int8, device=self.device + ) else: - k_tensor = torch.zeros(k_tensor_size + alignment, - dtype=torch.int8, - device=self.device) - v_tensor = torch.zeros(v_tensor_size + alignment, - dtype=torch.int8, - device=self.device) - k_tensor = self._align_memory( - k_tensor, alignment)[:k_tensor_size] - v_tensor = self._align_memory( - v_tensor, alignment)[:v_tensor_size] + k_tensor = torch.zeros( + k_tensor_size + alignment, + dtype=torch.int8, + device=self.device, + ) + v_tensor = torch.zeros( + v_tensor_size + alignment, + dtype=torch.int8, + device=self.device, + ) + k_tensor = self._align_memory(k_tensor, alignment)[ + :k_tensor_size + ] + v_tensor = self._align_memory(v_tensor, alignment)[ + :v_tensor_size + ] #### k cache: for deepseek sparse attention - if dsa_k_cache_factor is not None and dsa_k_cache_size is not None: + if ( + dsa_k_cache_factor is not None + and dsa_k_cache_size is not None + ): dsa_k_cache_tensor = torch.zeros( dsa_k_cache_size + alignment, dtype=torch.int8, - device=self.device) + device=self.device, + ) dsa_k_cache_tensor = self._align_memory( - dsa_k_cache_tensor, - alignment)[:dsa_k_cache_size] + dsa_k_cache_tensor, alignment + )[:dsa_k_cache_size] for layer_name_inner in kv_cache_tensor.shared_by: # shared the kvcache between the self_attn specs in the 
same group - if ("attn" in layer_name_inner - and "linear_attn" not in layer_name_inner): - kv_cache_raw_tensors[layer_name_inner] = (k_tensor, v_tensor) if \ - not self.use_sparse else (k_tensor, v_tensor, dsa_k_cache_tensor) + if ( + "attn" in layer_name_inner + and "linear_attn" not in layer_name_inner + ): + kv_cache_raw_tensors[layer_name_inner] = ( + (k_tensor, v_tensor) + if not self.use_sparse + else (k_tensor, v_tensor, dsa_k_cache_tensor) + ) layer_names = set() for group in kv_cache_config.kv_cache_groups: @@ -3338,8 +3612,9 @@ def _allocate_kv_cache_tensors( if layer_name in self.runner_only_attn_layers: continue layer_names.add(layer_name) - assert layer_names == set(kv_cache_raw_tensors.keys( - )), "Some layers are not correctly initialized" + assert layer_names == set(kv_cache_raw_tensors.keys()), ( + "Some layers are not correctly initialized" + ) return kv_cache_raw_tensors @@ -3372,16 +3647,24 @@ def _reshape_kv_cache_tensors( if isinstance(kv_cache_spec, FullAttentionSpec): raw_dsa_k_tensor = None if self.use_sparse: - raw_k_tensor, raw_v_tensor, raw_dsa_k_tensor = kv_cache_raw_tensors[ # type: ignore - layer_name] + raw_k_tensor, raw_v_tensor, raw_dsa_k_tensor = ( + kv_cache_raw_tensors[ # type: ignore + layer_name + ] + ) assert raw_dsa_k_tensor is not None - sum_page_size_bytes = raw_k_tensor.numel( - ) + raw_v_tensor.numel() + raw_dsa_k_tensor.numel() + sum_page_size_bytes = ( + raw_k_tensor.numel() + + raw_v_tensor.numel() + + raw_dsa_k_tensor.numel() + ) else: raw_k_tensor, raw_v_tensor = kv_cache_raw_tensors[ # type: ignore - layer_name] - sum_page_size_bytes = raw_k_tensor.numel( - ) + raw_v_tensor.numel() + layer_name + ] + sum_page_size_bytes = ( + raw_k_tensor.numel() + raw_v_tensor.numel() + ) assert raw_k_tensor is not None assert raw_v_tensor is not None assert sum_page_size_bytes % kv_cache_spec.page_size_bytes == 0 @@ -3396,26 +3679,36 @@ def _reshape_kv_cache_tensors( # the min of all `num_blocks`. Verify it here. 
assert num_blocks >= kv_cache_config.num_blocks - if self.vllm_config.additional_config.get( - "kv_cache_dtype", None) == 'int8': + if ( + self.vllm_config.additional_config.get("kv_cache_dtype", None) + == "int8" + ): kv_cache_shape = attn_backend.get_bsh_kv_cache_shape( - num_blocks, kv_cache_spec.block_size, + num_blocks, + kv_cache_spec.block_size, kv_cache_spec.num_kv_heads, - kv_cache_spec.head_size) - elif hasattr(attn_backend, "get_supported_block_size" - ) and self.use_hybrid_blocks: + kv_cache_spec.head_size, + ) + elif ( + hasattr(attn_backend, "get_supported_block_size") + and self.use_hybrid_blocks + ): block_size = attn_backend.get_supported_block_size()[0] block_size_chunk = kv_cache_spec.block_size // block_size kv_cache_shape = attn_backend.get_kv_cache_shape( - num_blocks * block_size_chunk, block_size, + num_blocks * block_size_chunk, + block_size, kv_cache_spec.num_kv_heads, - kv_cache_spec.head_size) + kv_cache_spec.head_size, + ) else: kv_cache_shape = self.attn_backend.get_kv_cache_shape( - num_blocks, kv_cache_spec.block_size, + num_blocks, + kv_cache_spec.block_size, kv_cache_spec.num_kv_heads, - kv_cache_spec.head_size) + kv_cache_spec.head_size, + ) dtype = kv_cache_spec.dtype if not self.model_config.is_deepseek_mla: k_shape = kv_cache_shape[1:] @@ -3424,35 +3717,47 @@ def _reshape_kv_cache_tensors( # k_cache: nope_cache v_cache: rope_cache mla_num_blocks, mla_block_size, num_kv_heads, _ = kv_cache_shape k_shape = [ - mla_num_blocks, mla_block_size, num_kv_heads, - self.model_config.hf_text_config.kv_lora_rank + mla_num_blocks, + mla_block_size, + num_kv_heads, + self.model_config.hf_text_config.kv_lora_rank, ] v_shape = [ - mla_num_blocks, mla_block_size, num_kv_heads, - self.model_config.hf_text_config.qk_rope_head_dim + mla_num_blocks, + mla_block_size, + num_kv_heads, + self.model_config.hf_text_config.qk_rope_head_dim, ] k_cache = raw_k_tensor.view(dtype).view(k_shape) k_cache = self._convert_torch_format(k_cache) v_cache = 
raw_v_tensor.view(dtype).view(v_shape) v_cache = self._convert_torch_format(v_cache) if self.use_sparse and raw_dsa_k_tensor is not None: - dsa_k_cache_shape = (num_blocks, - kv_cache_spec.block_size, 1, 128) + dsa_k_cache_shape = ( + num_blocks, + kv_cache_spec.block_size, + 1, + 128, + ) dsa_k_cache_size = ( - num_blocks - ) * kv_cache_spec.block_size * 128 * dtype.itemsize - dsa_k_cache = raw_dsa_k_tensor[:dsa_k_cache_size].view( - dtype).view(dsa_k_cache_shape) + (num_blocks) + * kv_cache_spec.block_size + * 128 + * dtype.itemsize + ) + dsa_k_cache = ( + raw_dsa_k_tensor[:dsa_k_cache_size] + .view(dtype) + .view(dsa_k_cache_shape) + ) kv_caches[layer_name] = (k_cache, v_cache, dsa_k_cache) else: kv_caches[layer_name] = (k_cache, v_cache) elif isinstance(kv_cache_spec, MambaSpec): raw_tensor = kv_cache_raw_tensors[layer_name] assert raw_tensor is not None - assert raw_tensor.numel( - ) % kv_cache_spec.page_size_bytes == 0 - num_blocks = raw_tensor.numel( - ) // kv_cache_spec.page_size_bytes + assert raw_tensor.numel() % kv_cache_spec.page_size_bytes == 0 + num_blocks = raw_tensor.numel() // kv_cache_spec.page_size_bytes # `num_blocks` is the number of blocks the model runner can use. 
# `kv_cache_config.num_blocks` is the number of blocks that @@ -3465,11 +3770,11 @@ def _reshape_kv_cache_tensors( state_tensors = [] storage_offset_bytes = 0 - for (shape, dtype) in zip(kv_cache_spec.shapes, - kv_cache_spec.dtypes): + for shape, dtype in zip(kv_cache_spec.shapes, kv_cache_spec.dtypes): dtype_size = get_dtype_size(dtype) num_element_per_page = ( - kv_cache_spec.page_size_bytes // dtype_size) + kv_cache_spec.page_size_bytes // dtype_size + ) target_shape = (num_blocks, *shape) stride = torch.empty(target_shape).stride() target_stride = (num_element_per_page, *stride[1:]) @@ -3488,8 +3793,7 @@ def _reshape_kv_cache_tensors( return kv_caches - def may_reinitialize_input_batch(self, - kv_cache_config: KVCacheConfig) -> None: + def may_reinitialize_input_batch(self, kv_cache_config: KVCacheConfig) -> None: """ Re-initialize the input batch if the block sizes are different from `[self.cache_config.block_size]`. This usually happens when there @@ -3501,8 +3805,7 @@ def may_reinitialize_input_batch(self, block_sizes = [ kv_cache_group.kv_cache_spec.block_size for kv_cache_group in kv_cache_config.kv_cache_groups - if not isinstance(kv_cache_group.kv_cache_spec, - EncoderOnlyAttentionSpec) + if not isinstance(kv_cache_group.kv_cache_spec, EncoderOnlyAttentionSpec) ] # Generate kernel_block_sizes that matches each block_size @@ -3511,10 +3814,9 @@ def may_reinitialize_input_batch(self, # For other backends (like Mamba), use [0] (no splitting) kernel_block_sizes = [] for kv_cache_group_id, kv_cache_group in enumerate( - kv_cache_config.kv_cache_groups): - - if isinstance(kv_cache_group.kv_cache_spec, - EncoderOnlyAttentionSpec): + kv_cache_config.kv_cache_groups + ): + if isinstance(kv_cache_group.kv_cache_spec, EncoderOnlyAttentionSpec): continue elif isinstance(kv_cache_group.kv_cache_spec, AttentionSpec): # This is an attention backend that supports virtual @@ -3530,9 +3832,11 @@ def may_reinitialize_input_batch(self, supported_sizes = 
backend.get_supported_block_size() # If no specific sizes supported, use cache config # block_size - kernel_block_size_list = (supported_sizes - if supported_sizes else - [self.cache_config.block_size]) + kernel_block_size_list = ( + supported_sizes + if supported_sizes + else [self.cache_config.block_size] + ) else: # Fallback to cache config block_size if no backend found kernel_block_size_list = [self.cache_config.block_size] @@ -3545,13 +3849,14 @@ def may_reinitialize_input_batch(self, # to kernel_block_sizes[0] kernel_block_sizes.append([0]) - if block_sizes != [ - self.cache_config.block_size - ] or kernel_block_sizes != [[self.cache_config.block_size]]: + if block_sizes != [self.cache_config.block_size] or kernel_block_sizes != [ + [self.cache_config.block_size] + ]: assert self.cache_config.cpu_offload_gb == 0, ( "Cannot re-initialize the input batch when CPU weight " "offloading is enabled. See https://github.com/vllm-project/vllm/pull/18298 " # noqa: E501 - "for more details.") + "for more details." + ) self.input_batch = InputBatch( max_num_reqs=self.max_num_reqs, max_model_len=self.model_config.max_model_len, @@ -3565,7 +3870,9 @@ def may_reinitialize_input_batch(self, is_pooling_model=self.is_pooling_model, num_speculative_tokens=( self.vllm_config.speculative_config.num_speculative_tokens - if self.vllm_config.speculative_config else 0), + if self.vllm_config.speculative_config + else 0 + ), kernel_block_sizes=kernel_block_sizes, ) @@ -3574,8 +3881,7 @@ def may_add_encoder_only_layers_to_kv_cache_config(self) -> None: Add encoder-only layers to the KV cache config. 
""" block_size = self.vllm_config.cache_config.block_size - encoder_only_attn_specs: dict[AttentionSpec, - list[str]] = defaultdict(list) + encoder_only_attn_specs: dict[AttentionSpec, list[str]] = defaultdict(list) attn_layers = get_layers_from_vllm_config(self.vllm_config, Attention) for layer_name, attn_module in attn_layers.items(): if attn_module.attn_type == AttentionType.ENCODER_ONLY: @@ -3583,23 +3889,24 @@ def may_add_encoder_only_layers_to_kv_cache_config(self) -> None: block_size=block_size, num_kv_heads=attn_module.num_kv_heads, head_size=attn_module.head_size, - dtype=self.kv_cache_dtype) + dtype=self.kv_cache_dtype, + ) encoder_only_attn_specs[attn_spec].append(layer_name) self.runner_only_attn_layers.add(layer_name) if len(encoder_only_attn_specs) > 0: - assert len( - encoder_only_attn_specs - ) == 1, "Only support one encoder-only attention spec now" + assert len(encoder_only_attn_specs) == 1, ( + "Only support one encoder-only attention spec now" + ) spec, layer_names = encoder_only_attn_specs.popitem() self.kv_cache_config.kv_cache_groups.append( - KVCacheGroupSpec(layer_names=layer_names, kv_cache_spec=spec)) + KVCacheGroupSpec(layer_names=layer_names, kv_cache_spec=spec) + ) def initialize_attn_backend(self, kv_cache_config: KVCacheConfig) -> None: """ Initialize the attention backends and attention metadata builders. 
""" - assert len(self.attn_groups) == 0, \ - "Attention backends are already initialized" + assert len(self.attn_groups) == 0, "Attention backends are already initialized" class AttentionGroupKey(NamedTuple): attn_backend: type[AttentionBackend] @@ -3609,8 +3916,8 @@ def get_attn_backends_for_group( kv_cache_group_spec: KVCacheGroupSpec, ) -> dict[AttentionGroupKey, list[str]]: layers = get_layers_from_vllm_config( - self.vllm_config, AttentionLayerBase, - kv_cache_group_spec.layer_names) + self.vllm_config, AttentionLayerBase, kv_cache_group_spec.layer_names + ) attn_backends = {} attn_backend_layers = defaultdict(list) # Dedupe based on full class name; this is a bit safer than @@ -3623,39 +3930,38 @@ def get_attn_backends_for_group( full_cls_name = attn_backend.full_cls_name() layer_kv_cache_spec = kv_cache_group_spec.kv_cache_spec if isinstance(layer_kv_cache_spec, UniformTypeKVCacheSpecs): - layer_kv_cache_spec = layer_kv_cache_spec.kv_cache_specs[ - layer_name] + layer_kv_cache_spec = layer_kv_cache_spec.kv_cache_specs[layer_name] key = (full_cls_name, layer_kv_cache_spec) - attn_backends[key] = AttentionGroupKey(attn_backend, - layer_kv_cache_spec) + attn_backends[key] = AttentionGroupKey( + attn_backend, layer_kv_cache_spec + ) attn_backend_layers[key].append(layer_name) - return { - attn_backends[k]: v - for k, v in attn_backend_layers.items() - } + return {attn_backends[k]: v for k, v in attn_backend_layers.items()} def create_attn_groups( attn_backends_map: dict[AttentionBackend, list[str]], ) -> list[AttentionGroup]: attn_groups: list[AttentionGroup] = [] - for (attn_backend, - kv_cache_spec), layer_names in attn_backends_map.items(): + for (attn_backend, kv_cache_spec), layer_names in attn_backends_map.items(): attn_metadata_builders = [] - attn_metadata_builders.append(attn_backend.get_builder_cls()( - kv_cache_spec, - layer_names, - self.vllm_config, - self.device, - )) - attn_group = AttentionGroup(attn_backend, - attn_metadata_builders, - 
layer_names, kv_cache_spec) + attn_metadata_builders.append( + attn_backend.get_builder_cls()( + kv_cache_spec, + layer_names, + self.vllm_config, + self.device, + ) + ) + attn_group = AttentionGroup( + attn_backend, attn_metadata_builders, layer_names, kv_cache_spec + ) attn_groups.append(attn_group) return attn_groups for kv_cache_group_spec in kv_cache_config.kv_cache_groups: attn_backends = get_attn_backends_for_group( # type: ignore - kv_cache_group_spec) + kv_cache_group_spec + ) self.attn_groups.append(create_attn_groups(attn_backends)) # Calculate reorder batch threshold (if needed) @@ -3681,16 +3987,17 @@ def calculate_reorder_batch_threshold(self) -> None: # check that if any backends reorder batches; that the reordering # is compatible (e.g., decode threshold is the same) reorder_batch_threshold_i = ( - attn_metadata_builder_i.reorder_batch_threshold) + attn_metadata_builder_i.reorder_batch_threshold + ) if reorder_batch_threshold_i is not None: if self.reorder_batch_threshold is not None: - if reorder_batch_threshold_i != \ - self.reorder_batch_threshold: + if reorder_batch_threshold_i != self.reorder_batch_threshold: raise ValueError( f"Attention backend reorders decodes with " f"threshold {reorder_batch_threshold_i} but other " f"backend uses threshold " - f"{self.reorder_batch_threshold}") + f"{self.reorder_batch_threshold}" + ) else: self.reorder_batch_threshold = reorder_batch_threshold_i @@ -3709,8 +4016,7 @@ def get_kv_cache_spec_v0110(self) -> dict[str, KVCacheSpec]: kv_cache_spec: dict[str, KVCacheSpec] = {} attn_layers = get_layers_from_vllm_config(self.vllm_config, Attention) for layer_name, attn_module in attn_layers.items(): - if (kv_tgt_layer := - attn_module.kv_sharing_target_layer_name) is not None: + if (kv_tgt_layer := attn_module.kv_sharing_target_layer_name) is not None: # The layer doesn't need its own KV cache and will use that of # the target layer. 
We skip creating a KVCacheSpec for it, so # that KV cache management logic will act as this layer does @@ -3733,7 +4039,8 @@ def get_kv_cache_spec_v0110(self) -> dict[str, KVCacheSpec]: num_kv_heads=attn_module.num_kv_heads, head_size=attn_module.head_size, dtype=self.kv_cache_dtype, - cache_dtype_str=self.cache_config.cache_dtype) + cache_dtype_str=self.cache_config.cache_dtype, + ) else: # TODO(cmq): This is a hack way to fix deepseek kvcache when # using DSA. Fix the spec in vLLM is a finnal way. @@ -3741,31 +4048,36 @@ def get_kv_cache_spec_v0110(self) -> dict[str, KVCacheSpec]: block_size=block_size, num_kv_heads=attn_module.num_kv_heads, head_size=attn_module.head_size, - dtype=self.kv_cache_dtype) - elif attn_module.attn_type in (AttentionType.ENCODER, - AttentionType.ENCODER_ONLY): + dtype=self.kv_cache_dtype, + ) + elif attn_module.attn_type in ( + AttentionType.ENCODER, + AttentionType.ENCODER_ONLY, + ): # encoder-only attention does not need KV cache. continue elif attn_module.attn_type == AttentionType.ENCODER_DECODER: raise NotImplementedError else: - raise ValueError( - f"Unknown attention type: {attn_module.attn_type}") + raise ValueError(f"Unknown attention type: {attn_module.attn_type}") mamba_layers = get_layers_from_vllm_config(self.vllm_config, MambaBase) if len(mamba_layers) > 0: - if (self.vllm_config.speculative_config is not None - and self.vllm_config.model_config.hf_config.model_type - not in ["qwen3_next"]): + if ( + self.vllm_config.speculative_config is not None + and self.vllm_config.model_config.hf_config.model_type + not in ["qwen3_next"] + ): raise NotImplementedError( - "Mamba with speculative decoding is not supported yet.") + "Mamba with speculative decoding is not supported yet." + ) if self.vllm_config.cache_config.enable_prefix_caching: raise NotImplementedError( - "Prefix caching is not supported for Mamba yet.") + "Prefix caching is not supported for Mamba yet." 
+ ) max_model_len = self.vllm_config.model_config.max_model_len - page_size_padded = ( - self.vllm_config.cache_config.mamba_page_size_padded) + page_size_padded = self.vllm_config.cache_config.mamba_page_size_padded # Set block_size to max_model_len, so that mamba model will always # have only one block in the KV cache. @@ -3778,7 +4090,9 @@ def get_kv_cache_spec_v0110(self) -> dict[str, KVCacheSpec]: mamba_type=mamba_module.mamba_type, num_speculative_blocks=( self.speculative_config.num_speculative_tokens - if self.speculative_config else 0), + if self.speculative_config + else 0 + ), ) return kv_cache_spec @@ -3797,12 +4111,12 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: block_size = self.vllm_config.cache_config.block_size use_mla = self.vllm_config.model_config.use_mla kv_cache_spec: dict[str, KVCacheSpec] = {} - attn_layers = get_layers_from_vllm_config(self.vllm_config, - AttentionLayerBase) + attn_layers = get_layers_from_vllm_config(self.vllm_config, AttentionLayerBase) for layer_name, attn_module in attn_layers.items(): if isinstance(attn_module, Attention): - if (kv_tgt_layer := - attn_module.kv_sharing_target_layer_name) is not None: + if ( + kv_tgt_layer := attn_module.kv_sharing_target_layer_name + ) is not None: # The layer doesn't need its own KV cache and will use that of # the target layer. We skip creating a KVCacheSpec for it, so # that KV cache management logic will act as this layer does @@ -3821,16 +4135,18 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: block_size=block_size, num_kv_heads=attn_module.num_kv_heads, head_size=attn_module.head_size, - dtype=self.kv_cache_dtype) - elif attn_module.attn_type in (AttentionType.ENCODER, - AttentionType.ENCODER_ONLY): + dtype=self.kv_cache_dtype, + ) + elif attn_module.attn_type in ( + AttentionType.ENCODER, + AttentionType.ENCODER_ONLY, + ): # encoder-only attention does not need KV cache. 
continue elif attn_module.attn_type == AttentionType.ENCODER_DECODER: raise NotImplementedError else: - raise ValueError( - f"Unknown attention type: {attn_module.attn_type}") + raise ValueError(f"Unknown attention type: {attn_module.attn_type}") elif isinstance(attn_module, MLAAttention): if use_mla and not self.use_sparse: @@ -3839,7 +4155,8 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: num_kv_heads=1, head_size=attn_module.head_size, dtype=self.kv_cache_dtype, - cache_dtype_str=self.cache_config.cache_dtype) + cache_dtype_str=self.cache_config.cache_dtype, + ) else: # TODO(cmq): This is a hack way to fix deepseek kvcache when # using DSA. Fix the spec in vLLM is a finnal way. @@ -3847,22 +4164,26 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: block_size=block_size, num_kv_heads=1, head_size=attn_module.head_size, - dtype=self.kv_cache_dtype) + dtype=self.kv_cache_dtype, + ) mamba_layers = get_layers_from_vllm_config(self.vllm_config, MambaBase) if len(mamba_layers) > 0: - if (self.vllm_config.speculative_config is not None - and self.vllm_config.model_config.hf_config.model_type - not in ["qwen3_next"]): + if ( + self.vllm_config.speculative_config is not None + and self.vllm_config.model_config.hf_config.model_type + not in ["qwen3_next"] + ): raise NotImplementedError( - "Mamba with speculative decoding is not supported yet.") + "Mamba with speculative decoding is not supported yet." + ) if self.vllm_config.cache_config.enable_prefix_caching: raise NotImplementedError( - "Prefix caching is not supported for Mamba yet.") + "Prefix caching is not supported for Mamba yet." + ) max_model_len = self.vllm_config.model_config.max_model_len - page_size_padded = ( - self.vllm_config.cache_config.mamba_page_size_padded) + page_size_padded = self.vllm_config.cache_config.mamba_page_size_padded # Set block_size to max_model_len, so that mamba model will always # have only one block in the KV cache. 
@@ -3875,7 +4196,9 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]: mamba_type=mamba_module.mamba_type, num_speculative_blocks=( self.speculative_config.num_speculative_tokens - if self.speculative_config else 0), + if self.speculative_config + else 0 + ), ) return kv_cache_spec @@ -3887,7 +4210,7 @@ def initialize_aclgraph_capture(self) -> None: for attn_group in self._attn_group_iterator(): builder = attn_group.get_metadata_builder() graph_support = None - if hasattr(builder, 'aclgraph_support'): + if hasattr(builder, "aclgraph_support"): graph_support = builder.aclgraph_support.value else: graph_support = builder.cudagraph_support.value @@ -3898,68 +4221,93 @@ def initialize_aclgraph_capture(self) -> None: # This is an imitation of compilation_config.splitting_ops_contain_attention() splitting_ops_contain_attention = ( self.compilation_config.splitting_ops is not None - and all(op in self.compilation_config.splitting_ops for op in [ - "vllm.mla_forward", - ])) + and all( + op in self.compilation_config.splitting_ops + for op in [ + "vllm.mla_forward", + ] + ) + ) # Flexible resolve the aclgraph mode aclgraph_mode = self.compilation_config.cudagraph_mode # check graph for mixed batch is supported - if aclgraph_mode.mixed_mode() == CUDAGraphMode.FULL \ - and min_ag_support != AttentionCGSupport.ALWAYS: - msg = (f"ACLGraphMode.{aclgraph_mode.name} is not supported " - f"with {min_ag_builder_name} backend (support: " - f"{min_ag_support})") + if ( + aclgraph_mode.mixed_mode() == CUDAGraphMode.FULL + and min_ag_support != AttentionCGSupport.ALWAYS + ): + msg = ( + f"ACLGraphMode.{aclgraph_mode.name} is not supported " + f"with {min_ag_builder_name} backend (support: " + f"{min_ag_support})" + ) if min_ag_support == AttentionCGSupport.NEVER: # if not supported any full graphs, just raise it. 
- msg += "; please try cudagraph_mode=PIECEWISE, and "\ + msg += ( + "; please try cudagraph_mode=PIECEWISE, and " "make sure compilation level is piecewise" + ) raise ValueError(msg) # attempt to resolve the full graph related mode if splitting_ops_contain_attention: msg += "; setting cudagraph_mode=FULL_AND_PIECEWISE" aclgraph_mode = self.compilation_config.cudagraph_mode = ( - CUDAGraphMode.FULL_AND_PIECEWISE) + CUDAGraphMode.FULL_AND_PIECEWISE + ) else: msg += "; setting cudagraph_mode=FULL_DECODE_ONLY" aclgraph_mode = self.compilation_config.cudagraph_mode = ( - CUDAGraphMode.FULL_DECODE_ONLY) + CUDAGraphMode.FULL_DECODE_ONLY + ) logger.warning(msg) # double check that we can support full graph if they are requested # even after automatic downgrades - if aclgraph_mode.has_full_cudagraphs() \ - and min_ag_support == AttentionCGSupport.NEVER: - raise ValueError(f"CUDAGraphMode.{aclgraph_mode.name} is not " - f"supported with {min_ag_builder_name} backend (" - f"support:{min_ag_support}) " - "; please try cudagraph_mode=PIECEWISE, " - "and make sure compilation level is piecewise") + if ( + aclgraph_mode.has_full_cudagraphs() + and min_ag_support == AttentionCGSupport.NEVER + ): + raise ValueError( + f"CUDAGraphMode.{aclgraph_mode.name} is not " + f"supported with {min_ag_builder_name} backend (" + f"support:{min_ag_support}) " + "; please try cudagraph_mode=PIECEWISE, " + "and make sure compilation level is piecewise" + ) self.aclgraph_dispatcher.initialize_cudagraph_keys( - self.compilation_config.cudagraph_mode, - self.uniform_decode_query_len) + self.compilation_config.cudagraph_mode, self.uniform_decode_query_len + ) - def _capture_aclgraphs(self, compilation_cases: list[int], - aclgraph_runtime_mode: CUDAGraphMode, - uniform_decode: bool): - assert aclgraph_runtime_mode != CUDAGraphMode.NONE and \ - aclgraph_runtime_mode in [CUDAGraphMode.FULL, - CUDAGraphMode.PIECEWISE] + def _capture_aclgraphs( + self, + compilation_cases: list[int], + 
aclgraph_runtime_mode: CUDAGraphMode, + uniform_decode: bool, + ): + assert ( + aclgraph_runtime_mode != CUDAGraphMode.NONE + and aclgraph_runtime_mode in [CUDAGraphMode.FULL, CUDAGraphMode.PIECEWISE] + ) # Only rank 0 should print progress bar during capture if is_global_first_rank(): logger.info( "Starting to capture ACL graphs for cases: %s, " - "mode: %s, uniform_decode: %s", compilation_cases, - aclgraph_runtime_mode.name, uniform_decode) + "mode: %s, uniform_decode: %s", + compilation_cases, + aclgraph_runtime_mode.name, + uniform_decode, + ) compilation_cases = tqdm( compilation_cases, disable=not self.load_config.use_tqdm_on_load, desc="Capturing ACL graphs ({}, {})".format( "decode" if uniform_decode else "mixed prefill-decode", - aclgraph_runtime_mode.name)) + aclgraph_runtime_mode.name, + ), + ) # We skip EPLB here since we don't want to record dummy metrics for num_tokens in compilation_cases: for _ in range(self.compilation_config.cudagraph_num_of_warmups): @@ -3968,21 +4316,26 @@ def _capture_aclgraphs(self, compilation_cases: list[int], # if we want to warm up attention or not. This is # different from the case where `FULL` implies capture # attention while `PIECEWISE` implies no attention. 
- force_attention = (aclgraph_runtime_mode == CUDAGraphMode.FULL) - self._dummy_run(num_tokens, - aclgraph_runtime_mode=CUDAGraphMode.NONE, - force_attention=force_attention, - uniform_decode=uniform_decode) - self._dummy_run(num_tokens, - aclgraph_runtime_mode=aclgraph_runtime_mode, - force_attention=force_attention, - uniform_decode=uniform_decode) + force_attention = aclgraph_runtime_mode == CUDAGraphMode.FULL + self._dummy_run( + num_tokens, + aclgraph_runtime_mode=CUDAGraphMode.NONE, + force_attention=force_attention, + uniform_decode=uniform_decode, + ) + self._dummy_run( + num_tokens, + aclgraph_runtime_mode=aclgraph_runtime_mode, + force_attention=force_attention, + uniform_decode=uniform_decode, + ) def _capture_model(self): if not self.use_aclgraph: logger.warning( "Skipping ACL graph capture. To turn on ACL graph capture, " - "ensure `aclraph_mode` was not manually set to `NONE`") + "ensure `aclraph_mode` was not manually set to `NONE`" + ) return else: self.initialize_aclgraph_capture() @@ -4002,11 +4355,12 @@ def _capture_model(self): self._capture_aclgraphs( compilation_cases, aclgraph_runtime_mode=aclgraph_runtime_mode, - uniform_decode=False) + uniform_decode=False, + ) except Exception as e: error_msg = str(e) - error_code = '0x7020023' - pattern = r'retCode=([^,\s\.]+)' + error_code = "0x7020023" + pattern = r"retCode=([^,\s\.]+)" match = re.search(pattern, error_msg) if match: retCode = match.group(1) @@ -4020,21 +4374,28 @@ def _capture_model(self): "1. Manually configure the compilation_config parameter " "with a reduced set of sizes: '{\"cudagraph_capture_sizes\":[size1, size2, size3, ...]}'.\n" "2. 
Utilize ACLgraph's full graph mode as an alternative to the piece-wise approach.\n\n" - f"{str(e)}") + f"{str(e)}" + ) raise - if aclgraph_mode.decode_mode() == CUDAGraphMode.FULL and \ - aclgraph_mode.separate_routine(): - max_num_tokens = self.scheduler_config.max_num_seqs * \ - self.uniform_decode_query_len + if ( + aclgraph_mode.decode_mode() == CUDAGraphMode.FULL + and aclgraph_mode.separate_routine() + ): + max_num_tokens = ( + self.scheduler_config.max_num_seqs * self.uniform_decode_query_len + ) decode_cudagraph_batch_sizes = [ - x for x in self.aclgraph_batch_sizes if x <= max_num_tokens - and x >= self.uniform_decode_query_len + x + for x in self.aclgraph_batch_sizes + if x <= max_num_tokens and x >= self.uniform_decode_query_len ] compilation_cases_decode = sorted(decode_cudagraph_batch_sizes) # TODO: refactor this when vLLM supports mtp>1 - if not all(x % self.uniform_decode_query_len == 0 - for x in decode_cudagraph_batch_sizes): + if not all( + x % self.uniform_decode_query_len == 0 + for x in decode_cudagraph_batch_sizes + ): raise ValueError( "In the MTP fullgraph scenario, each graph size must be an integer multiple of " f"(num_speculative_tokens + 1): {self.uniform_decode_query_len}. " @@ -4045,7 +4406,8 @@ def _capture_model(self): self._capture_aclgraphs( compilation_cases=compilation_cases_decode, aclgraph_runtime_mode=CUDAGraphMode.FULL, - uniform_decode=True) + uniform_decode=True, + ) # Disable aclgraph capturing globally, so any unexpected aclgraph # capturing will be detected and raise an error after here. @@ -4055,7 +4417,6 @@ def _capture_model(self): set_cudagraph_capturing_enabled(False) def capture_model(self) -> None: - compilation_counter.num_gpu_runner_capture_triggers += 1 start_time = time.perf_counter() @@ -4068,8 +4429,11 @@ def capture_model(self) -> None: elapsed_time = end_time - start_time npu_graph_size = start_free_npu_memory - end_free_npu_memory # This usually takes 5~20 seconds. 
- logger.info("Graph capturing finished in %.0f secs, took %.2f GiB", - elapsed_time, npu_graph_size / (1 << 30)) + logger.info( + "Graph capturing finished in %.0f secs, took %.2f GiB", + elapsed_time, + npu_graph_size / (1 << 30), + ) def _get_prompt_logprobs_dict( self, @@ -4087,7 +4451,6 @@ def _get_prompt_logprobs_dict( # maintainable loop over optimal performance. completed_prefill_reqs = [] for req_id, num_prompt_logprobs in num_prompt_logprobs_dict.items(): - num_tokens = scheduler_output.num_scheduled_tokens[req_id] # Get metadata for this request. @@ -4097,7 +4460,8 @@ def _get_prompt_logprobs_dict( continue num_prompt_tokens = len(request.prompt_token_ids) prompt_token_ids = torch.tensor(request.prompt_token_ids).to( - self.device, non_blocking=True) + self.device, non_blocking=True + ) # Set up target LogprobsTensors object. logprobs_tensors = in_progress_dict.get(req_id) @@ -4105,7 +4469,8 @@ def _get_prompt_logprobs_dict( # Create empty logprobs CPU tensors for the entire prompt. # If chunked, we'll copy in slice by slice. logprobs_tensors = LogprobsTensors.empty_cpu( - num_prompt_tokens - 1, num_prompt_logprobs + 1) + num_prompt_tokens - 1, num_prompt_logprobs + 1 + ) in_progress_dict[req_id] = logprobs_tensors # Determine number of logits to retrieve. @@ -4135,27 +4500,29 @@ def _get_prompt_logprobs_dict( # then there is prompt logprob generated for each index. req_idx = self.input_batch.req_id_to_index[req_id] offset = self.query_start_loc_np[req_idx].item() - prompt_hidden_states = hidden_states[offset:offset + num_logits] + prompt_hidden_states = hidden_states[offset : offset + num_logits] logits = self.model.compute_logits(prompt_hidden_states) # Get the "target" tokens for each index. For prompt at index i, # the token at prompt index i+1 is the "sampled" token we want # to gather the logprob for. 
- tgt_token_ids = prompt_token_ids[start_tok:start_tok + num_logits] + tgt_token_ids = prompt_token_ids[start_tok : start_tok + num_logits] # Compute prompt logprobs. logprobs = self.sampler.compute_logprobs(logits) token_ids, logprobs, ranks = self.sampler.gather_logprobs( - logprobs, num_prompt_logprobs, tgt_token_ids) + logprobs, num_prompt_logprobs, tgt_token_ids + ) # Transfer NPU->CPU async. chunk_slice = slice(start_idx, start_idx + num_logits) logprobs_tensors.logprob_token_ids[chunk_slice].copy_( - token_ids, non_blocking=True) - logprobs_tensors.logprobs[chunk_slice].copy_(logprobs, - non_blocking=True) + token_ids, non_blocking=True + ) + logprobs_tensors.logprobs[chunk_slice].copy_(logprobs, non_blocking=True) logprobs_tensors.selected_token_ranks[chunk_slice].copy_( - ranks, non_blocking=True) + ranks, non_blocking=True + ) # Remove requests that have completed prefill from the batch # num_prompt_logprobs_dict. @@ -4186,42 +4553,48 @@ def _update_tokens_for_pcp(self, tokens): return tokens, None, None tokens = np.array(tokens, dtype=np.int32) num_decode_reqs = sum( - self.input_batch.num_computed_tokens_cpu[:num_reqs] >= - self.input_batch.num_prompt_tokens[:num_reqs]) - num_padded_scheduled_tokens = np.ceil( - tokens / - (2 * self.pcp_size)).astype(np.int32) * (2 * self.pcp_size) + self.input_batch.num_computed_tokens_cpu[:num_reqs] + >= self.input_batch.num_prompt_tokens[:num_reqs] + ) + num_padded_scheduled_tokens = np.ceil(tokens / (2 * self.pcp_size)).astype( + np.int32 + ) * (2 * self.pcp_size) num_padded_scheduled_tokens[:num_decode_reqs] = self.pcp_size self.num_pcp_pads = num_padded_scheduled_tokens - tokens - cu_padded_tokens, pcp_padded_arange = \ - self._get_cumsum_and_arange(num_padded_scheduled_tokens) + cu_padded_tokens, pcp_padded_arange = self._get_cumsum_and_arange( + num_padded_scheduled_tokens + ) unpad_mask = torch.from_numpy( - pcp_padded_arange < np.repeat(tokens, num_padded_scheduled_tokens)) + pcp_padded_arange < 
np.repeat(tokens, num_padded_scheduled_tokens) + ) pcp_tokens = num_padded_scheduled_tokens // self.pcp_size pcp_chunk_sizes = (pcp_tokens // 2).clip(min=1) _, pcp_arange = self._get_cumsum_and_arange(pcp_tokens) _, pcp_chunk_arange = self._get_cumsum_and_arange(pcp_chunk_sizes) - pcp_head_chunk_mask = pcp_arange < np.repeat(pcp_chunk_sizes, - pcp_tokens) + pcp_head_chunk_mask = pcp_arange < np.repeat(pcp_chunk_sizes, pcp_tokens) def get_current_rank_positions(cu_tokens, rank): positions_start_loc = np.zeros_like(cu_tokens) positions_start_loc[1:] = cu_tokens[:-1] positions = np.zeros(len(pcp_head_chunk_mask), dtype=np.int32) head_start_loc = positions_start_loc + rank * pcp_chunk_sizes - tail_start_loc = positions_start_loc + \ - (2 * self.pcp_size - rank - 1) * pcp_chunk_sizes - positions[pcp_head_chunk_mask] = pcp_chunk_arange + \ - np.repeat(head_start_loc, pcp_chunk_sizes) + tail_start_loc = ( + positions_start_loc + (2 * self.pcp_size - rank - 1) * pcp_chunk_sizes + ) + positions[pcp_head_chunk_mask] = pcp_chunk_arange + np.repeat( + head_start_loc, pcp_chunk_sizes + ) # Decode reqs do not have tail chunks. - positions[~pcp_head_chunk_mask] = \ - pcp_chunk_arange[num_decode_reqs:] + \ - np.repeat(tail_start_loc, pcp_chunk_sizes)[num_decode_reqs:] + positions[~pcp_head_chunk_mask] = ( + pcp_chunk_arange[num_decode_reqs:] + + np.repeat(tail_start_loc, pcp_chunk_sizes)[num_decode_reqs:] + ) return positions positions = get_current_rank_positions( - np.zeros(num_reqs, dtype=np.int32), self.pcp_rank) + np.zeros(num_reqs, dtype=np.int32), self.pcp_rank + ) # Decode tokens are duplicate and their positions always be 0. 
positions[:num_decode_reqs] = 0 @@ -4230,8 +4603,9 @@ def get_current_rank_positions(cu_tokens, rank): for rank_i in range(self.pcp_size) ] all_positions_tensor = torch.from_numpy(np.concatenate(all_positions)) - self.pcp_allgather_restore_idx[:all_positions_tensor.shape[0]].copy_( - all_positions_tensor.float().argsort().long(), non_blocking=True) + self.pcp_allgather_restore_idx[: all_positions_tensor.shape[0]].copy_( + all_positions_tensor.float().argsort().long(), non_blocking=True + ) pcp_tokens[:num_decode_reqs] = 1 return pcp_tokens, positions, unpad_mask @@ -4248,11 +4622,17 @@ def _get_pcp_local_seq_lens( num_requests = seq_lens.size(0) total_world_size = pcp_world_size * dcp_world_size seq_lens_tiled = seq_lens.unsqueeze(-1).repeat(1, total_world_size) - rank_offsets = (torch.arange(total_world_size, - dtype=torch.int32).unsqueeze(0).repeat( - num_requests, 1)) - base = (seq_lens_tiled // cp_kv_cache_interleave_size // - total_world_size * cp_kv_cache_interleave_size) + rank_offsets = ( + torch.arange(total_world_size, dtype=torch.int32) + .unsqueeze(0) + .repeat(num_requests, 1) + ) + base = ( + seq_lens_tiled + // cp_kv_cache_interleave_size + // total_world_size + * cp_kv_cache_interleave_size + ) remainder = seq_lens_tiled - base * total_world_size remainder = torch.clip( remainder - rank_offsets * cp_kv_cache_interleave_size, @@ -4260,13 +4640,16 @@ def _get_pcp_local_seq_lens( cp_kv_cache_interleave_size, ) dcp_local_seq_lens = (base + remainder).reshape( - [-1, pcp_world_size, dcp_world_size]) + [-1, pcp_world_size, dcp_world_size] + ) return dcp_local_seq_lens def _generate_pcp_metadata(self, total_num_scheduled_tokens, seq_lens): num_reqs = self.input_batch.num_reqs - num_decodes = sum(self.input_batch.num_computed_tokens_cpu[:num_reqs] - >= self.input_batch.num_prompt_tokens[:num_reqs]) + num_decodes = sum( + self.input_batch.num_computed_tokens_cpu[:num_reqs] + >= self.input_batch.num_prompt_tokens[:num_reqs] + ) num_actual_tokens_pcp_padded = 
total_num_scheduled_tokens * self.pcp_size self.num_actual_tokens_pcp_padded = num_actual_tokens_pcp_padded long_seq_metadata = None @@ -4296,47 +4679,58 @@ def _generate_pcp_metadata(self, total_num_scheduled_tokens, seq_lens): chunk_len = seq_len // 2 chunk_seqlens.append(chunk_len) q_head_idx.extend( - list(range(q_req_offset, q_req_offset + chunk_len))) + list(range(q_req_offset, q_req_offset + chunk_len)) + ) kv_with_q_head_nomask_idx.extend( list( - range(kv_req_offset, kv_req_offset + - chunk_len * q_head_chunk_id))) + range( + kv_req_offset, + kv_req_offset + chunk_len * q_head_chunk_id, + ) + ) + ) kv_with_q_head_mask_idx.extend( list( range( kv_req_offset + chunk_len * q_head_chunk_id, - kv_req_offset + chunk_len * - (q_head_chunk_id + 1)))) - kv_with_q_head_nomask_seqlens.append(chunk_len * - q_head_chunk_id) + kv_req_offset + chunk_len * (q_head_chunk_id + 1), + ) + ) + ) + kv_with_q_head_nomask_seqlens.append(chunk_len * q_head_chunk_id) q_tail_idx.extend( list( - range(q_req_offset + chunk_len, - q_req_offset + chunk_len * 2))) + range( + q_req_offset + chunk_len, q_req_offset + chunk_len * 2 + ) + ) + ) kv_with_q_tail_nomask_idx.extend( list( - range(kv_req_offset, kv_req_offset + - chunk_len * q_tail_chunk_id))) + range( + kv_req_offset, + kv_req_offset + chunk_len * q_tail_chunk_id, + ) + ) + ) kv_with_q_tail_mask_idx.extend( list( range( kv_req_offset + chunk_len * q_tail_chunk_id, - kv_req_offset + chunk_len * - (q_tail_chunk_id + 1)))) - kv_with_q_tail_nomask_seqlens.append(chunk_len * - q_tail_chunk_id) + kv_req_offset + chunk_len * (q_tail_chunk_id + 1), + ) + ) + ) + kv_with_q_tail_nomask_seqlens.append(chunk_len * q_tail_chunk_id) q_req_offset += seq_len kv_req_offset += seq_len * self.pcp_size # Convert lists to tensors and move to device def _list_to_tensor(lst, device, dtype=torch.int32): - tensor_npu = torch.zeros(len(lst), - dtype=dtype, - device=device) - tensor_npu.copy_(torch.tensor(lst, dtype=dtype), - non_blocking=True) + 
tensor_npu = torch.zeros(len(lst), dtype=dtype, device=device) + tensor_npu.copy_(torch.tensor(lst, dtype=dtype), non_blocking=True) return tensor_npu q_head_idx_tensor = _list_to_tensor(q_head_idx, self.device) @@ -4345,70 +4739,76 @@ def _list_to_tensor(lst, device, dtype=torch.int32): self.q_tail_idx_tensor = q_tail_idx_tensor q_full_idx = torch.cat([q_head_idx_tensor, q_tail_idx_tensor]) - q_full_idx = q_full_idx.to(torch.float32).argsort().to( - torch.int32) + q_full_idx = q_full_idx.to(torch.float32).argsort().to(torch.int32) self.q_full_idx = q_full_idx self.kv_idx_names = { - 'kv_with_q_head_nomask_idx_tensor': - kv_with_q_head_nomask_idx, - 'kv_with_q_head_mask_idx_tensor': kv_with_q_head_mask_idx, - 'kv_with_q_tail_nomask_idx_tensor': - kv_with_q_tail_nomask_idx, - 'kv_with_q_tail_mask_idx_tensor': kv_with_q_tail_mask_idx + "kv_with_q_head_nomask_idx_tensor": kv_with_q_head_nomask_idx, + "kv_with_q_head_mask_idx_tensor": kv_with_q_head_mask_idx, + "kv_with_q_tail_nomask_idx_tensor": kv_with_q_tail_nomask_idx, + "kv_with_q_tail_mask_idx_tensor": kv_with_q_tail_mask_idx, } for key, value in self.kv_idx_names.items(): tensor_npu = _list_to_tensor(value, self.device) self.kv_idx_names[key] = tensor_npu attn_mask_seqlens = torch.tensor( - [chunk_seqlens, chunk_seqlens], dtype=torch.int32) + [chunk_seqlens, chunk_seqlens], dtype=torch.int32 + ) head_attn_nomask_seqlens = torch.tensor( - [chunk_seqlens, kv_with_q_head_nomask_seqlens], - dtype=torch.int32) + [chunk_seqlens, kv_with_q_head_nomask_seqlens], dtype=torch.int32 + ) tail_attn_nomask_seqlens = torch.tensor( - [chunk_seqlens, kv_with_q_tail_nomask_seqlens], - dtype=torch.int32) + [chunk_seqlens, kv_with_q_tail_nomask_seqlens], dtype=torch.int32 + ) if self.vllm_config.model_config.use_mla: pcp_prefill_mask = torch.triu( - torch.ones(512, - 512, - device=self.device, - dtype=self.dtype), 1) + torch.ones(512, 512, device=self.device, dtype=self.dtype), 1 + ) else: pcp_prefill_mask = torch.triu( - 
torch.full((2048, 2048), - True, - device=self.device, - dtype=torch.bool), 1) + torch.full( + (2048, 2048), True, device=self.device, dtype=torch.bool + ), + 1, + ) self.extra_long_seq_kwargs = { - 'attn_mask_seqlens': attn_mask_seqlens, - 'head_attn_nomask_seqlens': head_attn_nomask_seqlens, - 'tail_attn_nomask_seqlens': tail_attn_nomask_seqlens, - 'pcp_prefill_mask': pcp_prefill_mask + "attn_mask_seqlens": attn_mask_seqlens, + "head_attn_nomask_seqlens": head_attn_nomask_seqlens, + "tail_attn_nomask_seqlens": tail_attn_nomask_seqlens, + "pcp_prefill_mask": pcp_prefill_mask, } - long_seq_metadata.pcp_allgather_restore_idx = self.pcp_allgather_restore_idx[: - num_actual_tokens_pcp_padded] + long_seq_metadata.pcp_allgather_restore_idx = ( + self.pcp_allgather_restore_idx[:num_actual_tokens_pcp_padded] + ) long_seq_metadata.q_head_idx_tensor = self.q_head_idx_tensor long_seq_metadata.q_tail_idx_tensor = self.q_tail_idx_tensor long_seq_metadata.q_full_idx = self.q_full_idx long_seq_metadata.kv_with_q_head_nomask_idx_tensor = self.kv_idx_names[ - 'kv_with_q_head_nomask_idx_tensor'] + "kv_with_q_head_nomask_idx_tensor" + ] long_seq_metadata.kv_with_q_head_mask_idx_tensor = self.kv_idx_names[ - 'kv_with_q_head_mask_idx_tensor'] + "kv_with_q_head_mask_idx_tensor" + ] long_seq_metadata.kv_with_q_tail_nomask_idx_tensor = self.kv_idx_names[ - 'kv_with_q_tail_nomask_idx_tensor'] + "kv_with_q_tail_nomask_idx_tensor" + ] long_seq_metadata.kv_with_q_tail_mask_idx_tensor = self.kv_idx_names[ - 'kv_with_q_tail_mask_idx_tensor'] + "kv_with_q_tail_mask_idx_tensor" + ] long_seq_metadata.attn_mask_seqlens = self.extra_long_seq_kwargs[ - 'attn_mask_seqlens'] + "attn_mask_seqlens" + ] long_seq_metadata.head_attn_nomask_seqlens = self.extra_long_seq_kwargs[ - 'head_attn_nomask_seqlens'] + "head_attn_nomask_seqlens" + ] long_seq_metadata.tail_attn_nomask_seqlens = self.extra_long_seq_kwargs[ - 'tail_attn_nomask_seqlens'] + "tail_attn_nomask_seqlens" + ] long_seq_metadata.pcp_prefill_mask 
= self.extra_long_seq_kwargs[ - 'pcp_prefill_mask'] + "pcp_prefill_mask" + ] self.long_seq_metadata = long_seq_metadata return long_seq_metadata @@ -4427,28 +4827,36 @@ def _generate_pcp_mtp_input( num_scheduled_tokens_pcp_full = np.empty(num_reqs, dtype=np.int32) for i, req_id in enumerate(self.input_batch.req_ids): num_scheduled_tokens_pcp_full[i] = num_scheduled_tokens[req_id] - req_indices_pcp_full = np.repeat(self.arange_np[:num_reqs], - num_scheduled_tokens_pcp_full) + req_indices_pcp_full = np.repeat( + self.arange_np[:num_reqs], num_scheduled_tokens_pcp_full + ) cu_num_tokens_pcp_full = np.cumsum(num_scheduled_tokens_pcp_full) self.query_start_loc_pcp_full_np[0] = 0 - self.query_start_loc_pcp_full_np[1:num_reqs + - 1] = cu_num_tokens_pcp_full - self.query_start_loc_pcp_full_np[num_reqs + 1:].fill(-1) + self.query_start_loc_pcp_full_np[1 : num_reqs + 1] = cu_num_tokens_pcp_full + self.query_start_loc_pcp_full_np[num_reqs + 1 :].fill(-1) cumsums_offsets_pcp_full = np.repeat( cu_num_tokens_pcp_full - num_scheduled_tokens_pcp_full, - num_scheduled_tokens_pcp_full) - arange_pcp_full = self.arange_np[: - total_num_scheduled_tokens_pcp_full] - cumsums_offsets_pcp_full - positions_np_pcp_full = self.positions_np_pcp_full[: - total_num_scheduled_tokens_pcp_full] - np.add(self.input_batch.num_computed_tokens_cpu[req_indices_pcp_full], - arange_pcp_full, - out=positions_np_pcp_full) + num_scheduled_tokens_pcp_full, + ) + arange_pcp_full = ( + self.arange_np[:total_num_scheduled_tokens_pcp_full] + - cumsums_offsets_pcp_full + ) + positions_np_pcp_full = self.positions_np_pcp_full[ + :total_num_scheduled_tokens_pcp_full + ] + np.add( + self.input_batch.num_computed_tokens_cpu[req_indices_pcp_full], + arange_pcp_full, + out=positions_np_pcp_full, + ) token_indices_pcp_full = ( - positions_np_pcp_full + - req_indices_pcp_full * self.input_batch.token_ids_cpu.shape[1]) + positions_np_pcp_full + + req_indices_pcp_full * self.input_batch.token_ids_cpu.shape[1] + ) 
torch.index_select( self.input_batch.token_ids_cpu_tensor.flatten(), 0, torch.from_numpy(token_indices_pcp_full), - out=self.input_ids_pcp_full[:total_num_scheduled_tokens_pcp_full]) + out=self.input_ids_pcp_full[:total_num_scheduled_tokens_pcp_full], + ) From 9bad93932e2419855f56130b8361e8096512c6ae Mon Sep 17 00:00:00 2001 From: 01267596 Date: Mon, 10 Nov 2025 01:52:48 +0000 Subject: [PATCH 6/7] fix --- vllm_ascend/spec_decode/draft_proposer.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/vllm_ascend/spec_decode/draft_proposer.py b/vllm_ascend/spec_decode/draft_proposer.py index fbc3ce7eee..1b87a7305d 100644 --- a/vllm_ascend/spec_decode/draft_proposer.py +++ b/vllm_ascend/spec_decode/draft_proposer.py @@ -94,8 +94,8 @@ def generate_token_ids( target_token_ids = self.runner.input_ids[token_indices] target_positions = positions[token_indices] - (target_token_ids, target_positions, target_slot_mapping, cu_num_tokens) = ( - merge_next_token_ids_into_token_ids( + num_reqs = self.runner.input_batch.num_reqs + (target_token_ids, target_positions, target_slot_mapping, cu_num_tokens) = merge_next_token_ids_into_token_ids( input_token_ids=target_token_ids, input_positions=target_positions, cad=attn_metadata, @@ -104,8 +104,8 @@ def generate_token_ids( max_model_len=self.vllm_config.model_config.max_model_len, arange=self.arange, cu_num_tokens=cu_num_tokens, + num_reqs=num_reqs ) - ) draft_token_ids = self._propose( target_token_ids=target_token_ids, @@ -243,6 +243,7 @@ def merge_next_token_ids_into_token_ids( max_model_len: int, arange: torch.Tensor, cu_num_tokens, + num_reqs ): """ Merges the next token ids with the existing token ids into a flat sequence. 
@@ -261,7 +262,7 @@ def merge_next_token_ids_into_token_ids( ) # recompute slot mapping batch_size, n_blocks_per_req = cad.block_tables.shape - req_indices = torch.arange(batch_size, device=cad.query_start_loc.device) + req_indices = torch.arange(num_reqs, device=cad.query_start_loc.device) query_lens = cu_num_tokens[1:] - cu_num_tokens[:-1] req_indices = torch.repeat_interleave( From 68a7919fd686a740cb6e256f40303be6b68739c9 Mon Sep 17 00:00:00 2001 From: 01267596 Date: Mon, 10 Nov 2025 02:14:39 +0000 Subject: [PATCH 7/7] [feat] add draft_model spec_decode Signed-off-by: 01267596 --- ...l => _e2e_nightly_single_node_models.yaml} | 123 ++++++---- .github/workflows/_e2e_test.yaml | 2 +- .github/workflows/accuracy_test.yaml | 85 ------- .../vllm_ascend_test_nightly_a2.yaml | 42 ++++ .../vllm_ascend_test_nightly_a3.yaml | 7 + ...dels.yaml => vllm_ascend_test_report.yaml} | 38 ++- .../assets/disaggregated_prefill_pull.png | Bin 0 -> 202324 bytes .../assets/disaggregated_prefill_push.png | Bin 0 -> 217548 bytes docs/source/assets/eplb.png | Bin 0 -> 25735 bytes docs/source/community/versioning_policy.md | 7 +- .../contribution/multi_node_test.md | 2 +- .../feature_guide/KV_Cache_Pool_Guide.md | 83 +++++++ .../feature_guide/Multi_Token_Prediction.md | 112 +++++++++ .../feature_guide/disaggregated_prefill.md | 103 ++++++++ .../feature_guide/eplb_swift_balancer.md | 222 ++++++++++++++++++ .../developer_guide/feature_guide/index.md | 4 + .../performance/optimization_and_tuning.md | 4 +- docs/source/faqs.md | 8 +- docs/source/index.md | 2 +- docs/source/tutorials/DeepSeek-V3.2-Exp.md | 40 +--- .../multi_node_pd_disaggregation_mooncake.md | 17 +- docs/source/tutorials/multi_npu.md | 2 +- docs/source/tutorials/multi_npu_moge.md | 2 +- .../tutorials/multi_npu_quantization.md | 2 +- docs/source/tutorials/single_npu.md | 2 +- .../source/tutorials/single_npu_qwen2.5_vl.md | 2 +- .../tutorials/single_npu_qwen2_audio.md | 2 +- .../single_npu_qwen3_quantization.md | 2 +- 
.../configuration/additional_config.md | 8 +- .../user_guide/feature_guide/dynamic_batch.md | 4 +- .../user_guide/feature_guide/graph_mode.md | 8 +- docs/source/user_guide/feature_guide/index.md | 1 + .../feature_guide/kv_pool_mooncake.md | 186 ++++++++------- docs/source/user_guide/feature_guide/lora.md | 2 +- .../user_guide/feature_guide/quantization.md | 2 +- .../user_guide/feature_guide/sleep_mode.md | 2 +- docs/source/user_guide/release_notes.md | 10 +- .../support_matrix/supported_features.md | 2 +- .../support_matrix/supported_models.md | 134 +++++------ ..._balance_proxy_layerwise_server_example.py | 5 +- tests/e2e/multicard/test_external_launcher.py | 2 + tests/e2e/multicard/test_qwen3_next.py | 56 +++++ .../models/test_deepseek_r1_w8a8_eplb.py | 70 +++--- .../e2e/nightly/models/test_qwen2_5_vl_32b.py | 2 +- .../e2e/nightly/models/test_qwen2_5_vl_7b.py | 2 +- .../models/test_qwen3_235b_a22b_w8a8_eplb.py | 70 +++--- .../nightly/models/test_qwen3_235b_w8a8.py | 107 +++++++++ .../e2e/nightly/models/test_qwen3_30b_w8a8.py | 92 ++++++++ .../e2e/nightly/models/test_qwen3_32b_int8.py | 2 +- .../multi_node/scripts/build_mooncake.sh | 49 ++-- tests/e2e/nightly/multi_node/scripts/run.sh | 57 +---- tests/e2e/singlecard/test_camem.py | 3 + tests/ut/attention/test_attention_v1.py | 18 +- tests/ut/attention/test_mla_v1.py | 6 +- .../distributed/mooncake/test_config_data.py | 68 ++++++ .../kv_connector/test_mooncake_connector.py | 10 +- .../test_mooncake_layerwise_connector.py | 16 +- tests/ut/test_platform.py | 30 +++ tests/ut/torchair/test_torchair_mla.py | 6 +- tests/ut/worker/test_worker_v1.py | 3 + vllm_ascend/ascend_forward_context.py | 4 +- vllm_ascend/attention/attention_v1.py | 101 +++++--- vllm_ascend/attention/mla_v1.py | 205 +++++++--------- vllm_ascend/attention/utils.py | 4 +- vllm_ascend/compilation/acl_graph.py | 59 +++-- .../llmdatadist_c_mgr_connector.py | 3 +- .../distributed/mooncake/config_data.py | 92 +++++++- 
vllm_ascend/distributed/mooncake_connector.py | 4 + .../mooncake_layerwise_connector.py | 13 +- vllm_ascend/distributed/utils.py | 14 ++ vllm_ascend/envs.py | 2 +- vllm_ascend/models/__init__.py | 4 + vllm_ascend/models/qwen3_next.py | 18 ++ vllm_ascend/models/qwen3_next_mtp.py | 109 +++++++++ vllm_ascend/ops/casual_conv1d.py | 2 +- vllm_ascend/ops/fused_moe/prepare_finalize.py | 33 ++- vllm_ascend/ops/fused_moe/token_dispatcher.py | 2 +- .../patch/platform/patch_mamba_config.py | 2 +- vllm_ascend/spec_decode/__init__.py | 4 +- vllm_ascend/spec_decode/mtp_proposer.py | 43 +++- vllm_ascend/torchair/torchair_mla.py | 14 +- vllm_ascend/utils.py | 11 + vllm_ascend/worker/model_runner_v1.py | 48 +++- vllm_ascend/worker/worker_v1.py | 7 +- 84 files changed, 2006 insertions(+), 740 deletions(-) rename .github/workflows/{_accuracy_test.yaml => _e2e_nightly_single_node_models.yaml} (62%) delete mode 100644 .github/workflows/accuracy_test.yaml rename .github/workflows/{vllm_ascend_test_models.yaml => vllm_ascend_test_report.yaml} (88%) create mode 100644 docs/source/assets/disaggregated_prefill_pull.png create mode 100644 docs/source/assets/disaggregated_prefill_push.png create mode 100644 docs/source/assets/eplb.png create mode 100644 docs/source/developer_guide/feature_guide/KV_Cache_Pool_Guide.md create mode 100644 docs/source/developer_guide/feature_guide/Multi_Token_Prediction.md create mode 100644 docs/source/developer_guide/feature_guide/disaggregated_prefill.md create mode 100644 docs/source/developer_guide/feature_guide/eplb_swift_balancer.md rename examples/disaggregated_prefill_v1/mooncake_connector_store_deployment_guide.md => docs/source/user_guide/feature_guide/kv_pool_mooncake.md (63%) create mode 100644 tests/e2e/nightly/models/test_qwen3_235b_w8a8.py create mode 100644 tests/e2e/nightly/models/test_qwen3_30b_w8a8.py create mode 100644 tests/ut/distributed/mooncake/test_config_data.py create mode 100644 vllm_ascend/models/qwen3_next_mtp.py diff --git 
a/.github/workflows/_accuracy_test.yaml b/.github/workflows/_e2e_nightly_single_node_models.yaml similarity index 62% rename from .github/workflows/_accuracy_test.yaml rename to .github/workflows/_e2e_nightly_single_node_models.yaml index b9d155f231..b7d55945b0 100644 --- a/.github/workflows/_accuracy_test.yaml +++ b/.github/workflows/_e2e_nightly_single_node_models.yaml @@ -1,4 +1,21 @@ -name: 'accuracy test' +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# + +name: 'e2e nightly models test' on: workflow_call: @@ -16,7 +33,7 @@ on: image: required: true type: string - model_name: + model_list: required: true type: string upload: @@ -24,38 +41,44 @@ on: type: boolean default: false -jobs: - accuracy_tests: +# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly +# declared as "shell: bash -el {0}" on steps that need to be properly activated. +# It's used to activate ascend-toolkit environment variables. 
+defaults: + run: + shell: bash -el {0} + +# only cancel in-progress runs of the same workflow +# and ignore the lint / 1 card / 2 cards / 4 cards test type +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.runner }}-${{inputs.model_list}} + cancel-in-progress: true +jobs: + e2e-nightly: + name: ${{inputs.model_list}} accuracy test runs-on: ${{ inputs.runner }} - name: ${{ inputs.model_name }} accuracy container: image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.3.rc1-910b-ubuntu22.04-py3.11 env: VLLM_USE_MODELSCOPE: True - # 1. If version specified (work_dispatch), do specified branch accuracy test - # 2. If no version (labeled PR), do accuracy test by default ref: - # The branch, tag or SHA to checkout. When checking out the repository that - # triggered a workflow, this defaults to the reference or SHA for that event. - # Otherwise, uses the default branch. GHA_VLLM_ASCEND_VERSION: ${{ inputs.vllm-ascend }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set model name as output - id: set_output + - name: Check npu and CANN info run: | - echo "model_name=${{ inputs.model_name }}" >> $GITHUB_OUTPUT + npu-smi info + cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info - name: Config mirrors run: | - sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list - pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple - pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local + sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list + pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple apt-get update -y apt install git -y + git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/ + + - name: Checkout 
vllm-project/vllm-ascend repo + uses: actions/checkout@v4 - name: Install system dependencies run: | @@ -73,9 +96,16 @@ jobs: working-directory: ./vllm-empty run: | VLLM_TARGET_DEVICE=empty pip install -e . - + + - name: Install vllm-project/vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi + run: | + pip install -r requirements-dev.txt + pip install -v -e . + - name: Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct) - if: ${{ inputs.model_name == 'Qwen3-Next-80B-A3B-Instruct' }} + if: ${{ inputs.runner == 'linux-aarch64-a2-4' && contains(inputs.model_list, 'Qwen3-Next-80B-A3B-Instruct') }} shell: bash -l {0} run: | wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run -O /tmp/Ascend-BiSheng-toolkit_aarch64.run @@ -108,14 +138,6 @@ jobs: path: ./vllm-ascend ref: ${{ env.GHA_VLLM_ASCEND_VERSION }} - - name: Install vllm-project/vllm-ascend - working-directory: ./vllm-ascend - env: - PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi - run: | - pip install -r requirements-dev.txt - pip install -v -e . 
- - name: Get vLLM commit hash and URL working-directory: ./vllm-empty run: | @@ -149,11 +171,12 @@ jobs: pip show vllm | grep "Version:" | awk '{print "GHA_VLLM_VERSION="$2}' | sed 's/+.*//' } >> "$GITHUB_ENV" - - name: Run accuracy test + - name: Run vllm-project/vllm-ascend accuracy test id: report env: VLLM_WORKER_MULTIPROC_METHOD: spawn VLLM_USE_MODELSCOPE: True + VLLM_CI_RUNNER: ${{ inputs.runner }} VLLM_VERSION: ${{ env.GHA_VLLM_VERSION }} VLLM_COMMIT: ${{ env.VLLM_COMMIT }} VLLM_ASCEND_VERSION: ${{ env.GHA_VLLM_ASCEND_VERSION || github.ref }} @@ -162,24 +185,44 @@ jobs: TORCH_VERSION: ${{ env.GHA_TORCH_VERSION }} TORCH_NPU_VERSION: ${{ env.GHA_TORCH_NPU_VERSION }} run: | - model_base_name=$(basename ${{ inputs.model_name }}) - markdown_name="${model_base_name}" - echo "markdown_name=$markdown_name" >> $GITHUB_OUTPUT mkdir -p ./benchmarks/accuracy - pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \ - --config ./tests/e2e/models/configs/${{ inputs.model_name }}.yaml + echo "Received model_list: ${{ inputs.model_list }}" + models=$(echo '${{ inputs.model_list }}' | jq -r '.[]') + any_failure=0 + for model in $models; do + echo "Running test for model: $model" + pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \ + --config "./tests/e2e/models/configs/${model}.yaml" || { + echo "Test failed for model: $model" + any_failure=1 + } + done + + if [ $any_failure -ne 0 ]; then + exit 1 + fi - name: Generate step summary if: ${{ always() }} run: | - cat ./benchmarks/accuracy/${{ steps.report.outputs.markdown_name }}.md >> $GITHUB_STEP_SUMMARY + models=$(echo '${{ inputs.model_list }}' | jq -r '.[]') + for model in $models; do + echo "Processing model: $model" + model_base_name=$(basename "$model") + cat ./benchmarks/accuracy/${model_base_name}.md >> $GITHUB_STEP_SUMMARY + done + + - name: Set artifact timestamp + id: ts + run: | + echo "artifact_ts=$(date -u +%Y%m%dT%H%M%SZ)" >> $GITHUB_OUTPUT - name: Upload Report if: ${{ inputs.upload == true }} 
uses: actions/upload-artifact@v5 with: - name: "report-${{ env.GHA_VLLM_ASCEND_VERSION }}-${{ steps.report.outputs.markdown_name }}" - path: ./benchmarks/accuracy/${{ steps.report.outputs.markdown_name }}.md + name: report-${{ env.GHA_VLLM_ASCEND_VERSION }}-${{ steps.ts.outputs.artifact_ts }} + path: ./benchmarks/accuracy/ if-no-files-found: warn retention-days: 90 - overwrite: true + overwrite: true \ No newline at end of file diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml index 476948ba3b..6bbd4ba64f 100644 --- a/.github/workflows/_e2e_test.yaml +++ b/.github/workflows/_e2e_test.yaml @@ -182,7 +182,7 @@ jobs: pytest -sv tests/e2e/multicard/test_torchair_graph_mode.py pytest -sv tests/e2e/multicard/test_data_parallel.py pytest -sv tests/e2e/multicard/test_expert_parallel.py - # pytest -sv tests/e2e/multicard/test_external_launcher.py + pytest -sv tests/e2e/multicard/test_external_launcher.py pytest -sv tests/e2e/multicard/test_single_request_aclgraph.py pytest -sv tests/e2e/multicard/test_fused_moe_allgather_ep.py pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py diff --git a/.github/workflows/accuracy_test.yaml b/.github/workflows/accuracy_test.yaml deleted file mode 100644 index 7a1b5c398e..0000000000 --- a/.github/workflows/accuracy_test.yaml +++ /dev/null @@ -1,85 +0,0 @@ -# -# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# This file is a part of the vllm-ascend project. -# - -# This test will be triggered: -# - PR labeled with: 'accuracy-test' & 'ready-for-test' -name: ascend test / accuracy - -on: - pull_request: - branches: - - 'main' - - '*-dev' - types: [ labeled, synchronize ] - -# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly -# declared as "shell: bash -el {0}" on steps that need to be properly activated. -# It's used to activate ascend-toolkit environment variables. -defaults: - run: - shell: bash -el {0} - -# only cancel in-progress runs of the same workflow -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - run: - name: "" - strategy: - matrix: - # Only top series models should be listed in here - include: - - runner: a2-1 - model_name: Qwen3-8B - - runner: a2-1 - model_name: Qwen2.5-VL-7B-Instruct - # To do: This model has a bug that needs to be fixed and readded - # - runner: a2-1 - # model_name: Qwen2-Audio-7B-Instruct - - runner: a2-2 - model_name: Qwen3-30B-A3B - - runner: a2-2 - model_name: Qwen3-VL-30B-A3B-Instruct - - runner: a2-2 - model_name: DeepSeek-V2-Lite - - runner: a2-4 - model_name: Qwen3-Next-80B-A3B-Instruct - - runner: a2-1 - model_name: Qwen3-8B-W8A8 - - runner: a2-1 - model_name: Qwen3-VL-8B-Instruct - - runner: a2-1 - model_name: Qwen2.5-Omni-7B - - runner: a2-1 - model_name: Meta-Llama-3.1-8B-Instruct - - runner: a2-4 - model_name: Qwen3-30B-A3B-W8A8 - fail-fast: false - # test will be triggered when tag 'accuracy-test' & 'ready-for-test' - if: >- - ${{ - contains(github.event.pull_request.labels.*.name, 'accuracy-test') && - contains(github.event.pull_request.labels.*.name, 'ready-for-test') - }} - uses: ./.github/workflows/_accuracy_test.yaml - with: - vllm: v0.11.0 - runner: linux-aarch64-${{ matrix.runner }} - image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.3.rc1-910b-ubuntu22.04-py3.11 - model_name: ${{ matrix.model_name }} diff 
--git a/.github/workflows/vllm_ascend_test_nightly_a2.yaml b/.github/workflows/vllm_ascend_test_nightly_a2.yaml index 19fc3b5dc9..72b97d419e 100644 --- a/.github/workflows/vllm_ascend_test_nightly_a2.yaml +++ b/.github/workflows/vllm_ascend_test_nightly_a2.yaml @@ -27,6 +27,7 @@ on: pull_request: branches: - 'main' + types: [ labeled, synchronize ] # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly # declared as "shell: bash -el {0}" on steps that need to be properly activated. @@ -88,3 +89,44 @@ jobs: config_file_path: ${{ matrix.test_config.config_file_path }} secrets: KUBECONFIG_B64: ${{ secrets.KUBECONFIG_A2_B64 }} + + single-node-accuracy-tests: + if: >- + ${{ + github.event_name == 'schedule' || + github.event_name == 'workflow_dispatch' || + ( + contains(github.event.pull_request.labels.*.name, 'accuracy-test') && + contains(github.event.pull_request.labels.*.name, 'ready-for-test') + ) + }} + strategy: + fail-fast: false + matrix: + test_config: + - os: linux-aarch64-a2-1 + model_list: + - Qwen3-8B + - Qwen2.5-VL-7B-Instruct + # TODO: This model has a bug that needs to be fixed and readded + # - Qwen2-Audio-7B-Instruct + - Qwen3-8B-W8A8 + - Qwen3-VL-8B-Instruct + - Qwen2.5-Omni-7B + - Meta-Llama-3.1-8B-Instruct + - os: linux-aarch64-a2-2 + model_list: + - Qwen3-30B-A3B + - Qwen3-VL-30B-A3B-Instruct + - DeepSeek-V2-Lite + - Qwen3-30B-A3B-W8A8 + - os: linux-aarch64-a2-4 + model_list: + - Qwen3-Next-80B-A3B-Instruct + uses: ./.github/workflows/_e2e_nightly_single_node_models.yaml + with: + vllm: v0.11.0 + runner: ${{ matrix.test_config.os }} + model_list: ${{ toJson(matrix.test_config.model_list) }} + image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11 + upload: false diff --git a/.github/workflows/vllm_ascend_test_nightly_a3.yaml b/.github/workflows/vllm_ascend_test_nightly_a3.yaml index d880a8bffb..00e0565956 100644 --- a/.github/workflows/vllm_ascend_test_nightly_a3.yaml 
+++ b/.github/workflows/vllm_ascend_test_nightly_a3.yaml @@ -78,6 +78,12 @@ jobs: - name: qwq-32b-a3 os: linux-aarch64-a3-4 tests: tests/e2e/nightly/models/test_qwq_32b.py + - name: qwen3-30b-w8a8 + os: linux-aarch64-a3-2 + tests: tests/e2e/nightly/models/test_qwen3_30b_w8a8.py + - name: qwen3-235b-w8a8 + os: linux-aarch64-a3-16 + tests: tests/e2e/nightly/models/test_qwen3_235b_w8a8.py uses: ./.github/workflows/_e2e_nightly_single_node.yaml with: vllm: v0.11.0 @@ -119,3 +125,4 @@ jobs: config_file_path: ${{ matrix.test_config.config_file_path }} secrets: KUBECONFIG_B64: ${{ secrets.KUBECONFIG_B64 }} + \ No newline at end of file diff --git a/.github/workflows/vllm_ascend_test_models.yaml b/.github/workflows/vllm_ascend_test_report.yaml similarity index 88% rename from .github/workflows/vllm_ascend_test_models.yaml rename to .github/workflows/vllm_ascend_test_report.yaml index beba0e4464..0f7a06586e 100644 --- a/.github/workflows/vllm_ascend_test_models.yaml +++ b/.github/workflows/vllm_ascend_test_report.yaml @@ -20,18 +20,15 @@ # 2. pull_request change the related files # 3. 
workflow_dispatch with models input -name: ascend test / models +name: ascend test / accuracy report on: - schedule: - # Runs every 6 hours - - cron: '0 */6 * * *' pull_request: branches: - 'main' - '*-dev' paths: - - '.github/workflows/vllm_ascend_test_models.yaml' + - '.github/workflows/vllm_ascend_test_report.yaml' - 'tests/e2e/models/test_lm_eval_correctness.py' workflow_dispatch: inputs: @@ -60,27 +57,26 @@ concurrency: jobs: run: strategy: + fail-fast: false matrix: include: - - model_name: Qwen3-8B - runner: a2-1 - - model_name: Qwen2.5-VL-7B-Instruct - runner: a2-1 - - model_name: Qwen2-Audio-7B-Instruct - runner: a2-1 - - model_name: Qwen3-30B-A3B - runner: a2-2 - - model_name: Qwen3-VL-30B-A3B-Instruct - runner: a2-2 - - model_name: DeepSeek-V2-Lite - runner: a2-2 - fail-fast: false - uses: ./.github/workflows/_accuracy_test.yaml + - runner: linux-aarch64-a2-1 + model_list: + - Qwen3-8B + - Qwen2.5-VL-7B-Instruct + # TODO: This model has a bug that needs to be fixed and readded + # - Qwen2-Audio-7B-Instruct + - runner: linux-aarch64-a2-2 + model_list: + - Qwen3-30B-A3B + - Qwen3-VL-30B-A3B-Instruct + - DeepSeek-V2-Lite + uses: ./.github/workflows/_e2e_nightly_single_node_models.yaml with: vllm: v0.11.0 - runner: linux-aarch64-${{ matrix.runner }} + runner: ${{ matrix.runner }} image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.3.rc1-910b-ubuntu22.04-py3.11 - model_name: ${{ matrix.model_name }} + model_list: ${{ toJson(matrix.model_list) }} upload: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.vllm-ascend-version == 'latest' }} create_pr: diff --git a/docs/source/assets/disaggregated_prefill_pull.png b/docs/source/assets/disaggregated_prefill_pull.png new file mode 100644 index 0000000000000000000000000000000000000000..4f01dafa5e46ac3222e221c29508362211a0e21e GIT binary patch literal 202324 zcmeFZhd z6eqIB_whP+uAYuMEfpKpo;`bLH8su~ z?Ab$(+Oy|?69qZ^m(YCU`|z*5UIywadtP>M%)%cB9F(<{_v|T8q~5e4gTE;~G%kDX 
z*>fxk`QP5eDdM#~dv@|Q&np}GTF!loN@eWZF5MaBx-qZt?11JzfzU}B+l~`yYDP3^ ziL!*~%lgf?)AaNhahdxC1X#rd$>aD0l9t^SHiCUd(-joN8pO18l}f9YHIy6cLN`9b~hLS$8+k=v0cR73WCs#!n9%lSz20r9b~pIqb5L zI1TEgra|A8S64ODltO}ClX}fxn3kUi-1;+MlM}Q|DQs3rp&Ci^0d*g~Ef9$uO|kRN z_J+If&#D2eP1G@FowGkH7bX&wrZY4Qa97^kxVHMcO|)q`!<}<`WoLU7AF@@t!}RsF zdyYFV9I-J?6D{1fFL-Bb#iNN96~40=vaLJl{QL#>=j%~USm5hG3C&{1kew|bR&m?& z@&nz-3X_axNeeaJ_%mSDlx$*oG&V^1N;S=p+q>&&XRckSdvwyc(kgiE)Xnd`A3Rjy z$=m;~HSRNJ`K!xCvieIe%pz3j6eLHrk_7r+U2DVk78zb7$QI;}hn&4RbI)DiAFE>) zHe+{?AI&++C>6XYf_*1I>c3fW@%RlgGqyV!mtw}+gT%}9FK zvHPhSt74!p%{kI~>~_Bji)AR#&2htU7k#n0P`ok{AeudtmQw8fqhAV3YnCB>9owU; zl(ka~KkA)43eJ^RTz&OJl9<=@CwPXv2RZH`&k(gZq{d>w*m<4Zy~cCUrL@*LZcJk* zc%kuN)Kpt4p`P09%|v!Y^Qi?@=?X2_!qGZyCA_O`%jSqTn5E-#abO}GfBlH80O zj!b{_OX&*HD+1D{w)1);#rfN7mV>2x^vTIv!+Emr0BIUi+v2 zO8!2pwiMwe<%7o_x}VTwx<(wwf$iRkCP)=z`Qsy4u!T9nW;|Vkj=AyU&kk+BGk$&v z-@@+xi3<&jc7C<%BmsmhpYf+};i~ET3sTp?%vEEVg>8MipPQ6o>bgvV$=I#TvXPrgu+POi+!hlr>t}0BX z@cP@?0z5dz-JR{lPNe|~l+3)s&gP{`eOSQ}UM z@QpSDjJW^OOdx}1F-2V0>hh5L1Ru@5xL@Diwc>EZR<4j<3RLx=ZR`ReCnR#DHXwC7 zOW&0%Xi0y&?4MUV$Q5`PE@K1sHZFduZ}P)~iM6<@@RoRXll7x`@?41+Jb7a)Eb%fG zDy`-FQjcyT_g%#H9`4$ny69B!)_O8FUiFad$Tq>=hu@ALDkl#p5FvzCbqupaIB-R;f^{H@7)=4v!pOH7Mn z?0o%>qK+nR&9$OkI`)U_NiF5RC#q3w+~nKftYzRFsc84Zw}*pPzD1~~z^c+YNrCFm zbY~7%HG~<{UYEY&WaMI%u3ug)bg9zeypHzh{w#0hlnU0tbmw18HN%tFqrAWOvQfv* z*KPbFj-CS3E%-<>-P%sY4WT)3Z8$=KuZU~5BpkaUmulhj%^UfBdEccg$ynO$vuj%` zqZgHONynMxGg3s{K=#;>l<$g1SgZo+A)J*OD0_TL69+Tuo!kODd=j*=Of>I*Y4J|X z=hrZsl2whb=ggN1hc6ys5jfE|lSu}cMmCPZtXpRBqlihSAK87ly3OBhWw2H@H;rP` zwp3BN3j1~eGM254l|&a0MtZk}`6(=I^%fk}gsj+B&6U?~o|RF^kumNb*$Vh~R2ZEi z2F%)j<_WT%JUIHhr?Xi*?U48k9%EUjNrQ@{IcmUD!)I&bSuk}m7iDT1xV|(iRgEN- zbgI8=*;*d*Sj9)NBWtPOTE0RfD3e%qeMq|I_ea4sUVLf&Hr3Ge*B1Q4Ghl=9ITGs= zA5Q1QQO8uYgQs#dB?KObCM^vq6Y`2*TQ`P^xea3sFrI@pQB#nd9;opV$74lPRAM<5 z{F|jDZ-lc$>N3p2lcz-%Th#~Wce5Wmdy7VCd({qty=GEn?gW8ibRg;*@BEnLUw@2=t*1cc>P>7c)*L-BXXlBb(C=$ z@$J$>LT;^$#ysda3NZTjsAO20G~e!?F^ib9b|`MRB3F#+!Xx%VZz^=9T4+@0#PA^G 
zP?Mu>x9=ZkvGlcZaxO+v)?rgvk@JP!fVMGPn1dx_G1sF)fi-48WYj*Qd|oaDju1Y{ zb~jKV1%|#z_PCMN&Be|M?Br~X?|kjbbOwYYP4iM8bJXzBN`OxkP5EOVw@$sr#gub_3hi`j1kZ%~c|J z_;}7qDQ_)JDKKZ>U(QH=llNT^7-%^@%!+a|uf@`G62R`F;q_rzcLy^hZ^<>&-?Y}(t2>u-6^6u4t za;+`piOaxCJOsr~Qne$^`gfC&VxacVC3P@NJ|Pap>t8FoiVO=e!7UK+li~M<+FAbJ zi~bx!vDOkHTt+p~DamVP%gd4~tV-p-R)hr4v-33-;3|HT_Yqe)DQs0IU$edb3b_@;(OkII{HW4DZx!KT_n*pd z+fK9C_iu1K^N)RCRPlwv(5}%vH(r_w-1B{ikS0^2#{|@ev4po!LWb|%Pd=G}l;KQG z$Wif!FZ5q=PxhCXhaLTU_nM{87xO*r#v1PY^PB|2*(cYa1d;L^~rh(do`3eMA*y_fxgM@!Pg0TWsaD_vsNCf+k zYlXHc#={1wizPd}tpbS=|2PMuKfgWfpb)ZY3?+jgXw)q;5CoeAPx}5I_dlQ1I5Bh- zf&l~vzSZt`my^=j(O|b>$4!t^H`)ygQ+V>&x!_G=D{>`b@YWxbe;jDhccfoIyl8hD zZANPRNZ*43DsdraXY+I8lhdvnuDQ_lT7k%OlrD zmY)kL!82Vz)7fnsLSNTQi?gJ={^tu6D9eDKRR);uLfxE&&lg3JZNu>lI}TM}r$MEK z>i%o}==qV_Ch(XH_X6ISPIm}r_?(dK4={2Z`ysonFy*dbgVO@6W;Y z)e|}K-RfNJC_{!Nh-pzsjmhl1vHX*rS;+LB`-!5*tZuI5IA$=!{{f9yP@uk6Ix!?Z(#etoUqqI_+i)Sve{tot z2Nbi&uLUC6v(EY)>M77}L+K|YNu&uW44*wmnzfzsZFTWs$I-eVQwRceAIK{_@Ysd- zJc;4_h6UlGb*m?Ko7AxXYz$H*;Z78#*V$sap?dRxE|rk+?1I?WV&pheEpAc5k8HQ? 
zw&<|&{V%ShV&hL;OqtAZ6?3m4qFH5cT!qr%-QEgV?WNt0q83`1*4a2HkCxC6c(p&m zW*O0-Ox4H28FZqR`y@K$88|~Wi3S1VcTXd|$)c5s#0@ayC@8feMJ(QizniZ+`}Z-R zYF~xYdr@1OHDT!3twCpDWTlrfx8}4`zkA8mX z1zc#ajiEjb3h9^rcHwZhQ~z}wh5FKWzupE`@ZQ00L-t|F4qE z&Ar2uNLF`;JnZz9motq2&7~EefkFu7INW3KrM1;SOJ97%0_5M;23~c?2WaQX>kyb@ zVjJhd@$d^xbaa4vIGI~#4s5ieMknd=?F(4lIDE(M;1kaNqJRKvmpQ^$-WF3nk#IJWIMLdwFLU6*ti)tJr4Ng!W#-OXFs0(SUGog55jvL{ zGnpml9TP$P<23s@9}kemA*2Lm6|+8hmb;WC`FjB|LXyxRNWRIA{lIXtlrRuD(< z5P0J|lZ$*~B7!ZO9`YMrsBaw&So!8-SMCFlf}r-$`Qr^UQA<9QX8k88kBhDSD6Mx^ zFi}a(30~)a>7_rwcO*D<-zgq(=(bo2_uDm-xn#$2D#(#bL*Eljqm(onydkV(qN-nQ z*P37h*@jemR*9YJ<1frS61+B*$`+!Y9MPF!oe)ICM{yyDM8bx~_M2D7lyM3x!wl48 z2RS^>4T}!4Aa1`>xU^$vm-A+^x89Vz~wmH={-lC7|)eJ$RvXi+V zMa~$+KQ58=si-iUZ9iqqZALFjVG6R(Gx5;yDn!zRQzI33e0^%cb%B?<%3aD`Hy|KJ zKri#MWpqkb!Z}ZdEu8=9D*IAep>?JT{WkBDHYO8u`bb)K<^ZcyYAsWtv~Hfw_DzMy z`nSKi*KdtSCop;(j$Rl%zyXz7aG=FzP<0{2_nYUFV}ru!ji8)VMe39BUb{JV#&GdkV{~7y^m!;wEiEY)0)L8mQQ0(R20`>WXT2HZLAG^&7i$ z(AMsH>Wzq{@n{}2hlh}4K7aOitn8_XC*kaINyTm814lc>Yg#HXC_*Fv5^|9j;^J(# zGH&GZvHJaiQ)yX=L}Na_FF(<7I@9HmZNj+}FDs(NWKD~sPzTS!V>k6=2$^2vGwkS$ z7qhk#o5BUZ1S9iJQwR7Uz|;N>c=E%K_57E=I1n20dk_BTdMK#FV0rkqhrF0sjII1e zS9ZX!qha;XHAd0%pqnFp#L7qmhPEv;9&MLW^l&i2?r!S`>KWPK4m&;5!e<@xWn8HN z3ADm=sOk2MROo`lKCDHEc|s{8zJ#xr{Y9B<+7sE$EWdL?u@lZrmgB~IZqF+tIS}2H z$Qd?;Y?j#OgSOYYq3J!+sd|7$%S4^83NSjhkki}^Q?(~u1y)8DEhUV(3|GX{OB}VW zc;>kb_cAL9ZpT|E-1iKBfg_#RpVnREKz>*w_eB3ew3L{0n2LPF1|+_gaY4S1`i}A| z!(Mi*%H28RX6v$i#p1<03Ip+|5gGCWjP7mxZk?k%4UY?NdS(ZOlE;KGPThi>Y|zlS zxm=|8e&X;M*Xl3!cz`xy?@&if7@fTKkSyX8?8;f@xE{yDY(rw{dC{YVuLIXD3K%bx z-WlonRHR$F$>^cy_-Wv_m7psNMLNe>XS@< zJOvBo(Y2riL#oNSusFH3ncPI@&8(-4ahW$iYkEpDl_u}ofwJaUQ_Fh7wskmVt_j~; z7WEW8$J23drc$nqJKo(4;?uc^BPJhYPz$GaZ52_Xs*0ylLGf|S6Y{FoZ5j`6ity<12#rfw-uQ5;3SAGCWD))^tVK+` zzqg20_l|QA(+B=={lUrkUa9yaWD+sejN^_=zB*(VR4AzKY%1ap;$EwB`kf4kJ`xkX zq@t^4M}<3ZcxnMP)29{PXxHPSz~p3zY4oyJ9;H)UJx;@=ShJL^FLbS$ELVL-yOV-m z<-|5`qS`!RoPrTuy17!n6JOuP#_rUssWPUL*65hBG|Yc9^#L}XvFzEayl#fTNXCap 
zX&Z^}>oQoTy`X?s8+Q0&lBOsSzmLzyJQ*reG-X+-xLMe^qzz?{vAFZ?iP#_-j*z7e z!r<7O-zyXwG6i39C`|@)C@*|`vY;!TWgTu8zLh>MTbi>o<+tFwCTUuuh&>;(6dqC| z$~f?-Iv%Z`9da5oHu~sk!t5Vkoi)Bt`Qb6~A4ya@aToBb)2oEl>CI*{z8WPh+2sB{ zygR+?;lo5lPH4|P&teZIXm?PC%i_esy{LI)C{T}X{F;2AD-uELO_r_}e)Is(tlu=@ z$ij%{A%WNKC9^JCLkxpQ&=R&`*10mmY%Y80L!lLO+9I`Z#)&H2hD~``Mlz+Cj34`Z zip>LwxcsMzUabVaxey4Co<;|Ux&-4;eTJe%n~*N?^cZSE|Yuga3uSMtepN<=U)2F*Iui74wj`^VHdxhu zLY>*ne>Y~s8N2MXETq7j&9_6VzPS^yiT=$2yRt^{`&2udimhxoubBBb zvoXH~R$RB)@i`mXBOMnHP#oA>qg!(+ffilok>LBis1PgiZq7iU(#w^|77I_+DYbJ& zEl!X7-k5pAhAKsqMy%0{==kt?-wR_Of1CwqV%#$|;V0usVzAQsOzv7~>LkALd!GK$ z%|p2g6g4rc8(LAx=m%5ymh>8Q9-(z~(#t;lD35|~d)#lEX1XVmJVtDG>Ee)xoJm|? zu_dRQ-m|CHRf=6a86LOmRXd$y?53zTaEEhcUOicz|8v>yM8pwSUH8SlCkr3m(?Bk6 zla$p+ROL=4s1g%Yj+2>HU%Ls|_Kv|YPs17%vv%_g^cPB8oHN(uLzpS2 z{gSCsX*c4?9`HZ>%u_QQ@Pam0Up{W+c+H8Ec1lkhI@86LVoG&2?$~1VM=f{w?`gCu zDy&?Vf6K8iqUq3CA@YOa$LA)r#MLn^OL_+c;_9rmGXo`=B5qXcHli(82_cWjMYN-u z_YL64DtZ?49(Sk=lqji~KFUUwrZ(IrwAy*dFUb3eUT(`wCt@+vTAxV(^W{=;V{LwfHr9+ezwp z79|uXiVoF3DRn|Y3H@b(V@+#q_~@+iYBHM6GrLp09W1=aW;gDnMzJ$~;Qv|*sN;fa z(Vn#aVP9yW?Hu*bQ(!qZT}@0d`U91h)Z*?vHjy7Pq3d zb6L;iojBfeO+Qa-2W@xqAosppO{7;bm6;mlpZPsVdFQ-m?IF1AL_Eq_%Oaxkw%kx3 zlbaY@gsnt`XTc2rxKuZF#;8N>edG8W-H}b$6+la`&iQp}qJkt4V|G1UF;Yvx0~2_z#Kz(^9{&IZxNaV;|TW5=aexq8!GIJ1bkrb11nFg(Uuw_6_-wega{(Xm_)<0+l^Z3dQ%< zki(qIK%;@;V9dG@MTexwQf~o9SNQ>!kL+xJrABN_MGM()k1)$G&sFH}tKIsNt-=wZ z9CuFTP9Ce*&i8@UY0Q;hMpf@HPhy(Wxll=5lI5l^j? 
zf&6SXld6GyJ{&bfr&-0UbuGkOe{rySUzx1snEy08fkAsgl` zX*v-qbuNY{eAS(5q=uPlyC(7k)i_50hvBTLt7&u9dCyJp z&q8#!Zak+#`MOgcF*P|EVFdp_O*-y)G$A?rBBxvLid5*<@fVaymL{B+^6wu%pdV)s z6vF5AT|Ns!%3+x<2LB+92;XgT>S2S1+%jE!{62Oy!|4&-MshW+=`XrGhtjZ7Cj>hm z`=VgTmBh+`GP&~7i~v;gD-JRH7x2VH2Tp5A9Ol^ zW{XJb9L+%|EUfeS^YRZr#(WGo_YAvpdcj8cP)u`N_H1{qx)7hCP>JAs{)9-)A8CgN z^~SO!oeTnl!v$H)J(RbfJ3poP#@gL%q#I7@>s5;i z71t%EXhx>jcC|+fa>v)oF)}~2^u$h|Jf8NcP*IS%`B8e2j4{3P{=Fl>y)!0~p%06_ zU{_m&_H;mCtk8?`3Aqhj&x<}|$I}{Pf2LhbCh_Dp1{N_PC_1v3m>=VWtRw^zjlp*qZ3h&Fv*dE`VH|%CDH2)rV$xwjT2_ zkH3G|snI=~C*pie;B!^+2SwJbedp%gf>B$-Z1}xo=lru1IThZzDkuDrFzq~%^D^dy zq~YQMTY=W2q#d)sx)Qo`%1NPCNkEi|D1Tg_zzCUfAYpToleo0^J+ok4e{`1!9 z64)2vC;S9%dkHR6+Lg-i3priWL6q;U)Ubs#GICS&iT?dY9? zg^o^En{$uLfju1Xr^W4)Z{3n5WHPAMEDe8YJ?i%xL=Y+4BCW@GWPj@)_ro++YI*Z- zTO>5TWqz*{>%S;ZVP#TfkNciSR!7MbV`xurFNS04jOdizq78jmiILl&AgFiPgkO$3 zH&N5nYc%V#*!czy0e4@q=C!aOJ{!iU@X(!z4JUlH-)o-5=SIBsjk)tAj zv$Q&{x}=?4TAcj*p5eyQT$9w(CmzAK5gO$oqow zsB9L)FaWHb${Q}5lN> zAxbId`Wyb(d;VJtz2!M)Z11$%)!SON4H$vyC!EGx_+@*qew>Q65!nyTGNpkkQ%U?@ zrCjw8Di_najC*&+m2xHc3Rs%Z2?5JqXl~CqgJFl!TIgXX=2@s0oSdKAj$!SH+l&L$ zn$&?lbA#fFI>a@AmKP5z>;H8I2=MpCOX8cNQ*PwCG!V4YaQ=)_D)%H`Ff^Vt{7yXj z+w#os82&fEtMpf0WlWZ5dkUAff3Ge2G4Ou#|M7x2pI6lQyDJZlM2qgdUq-9V_ZNvu z+cKjke?5~fOo#Cs~%0x=S{w?}qJ4pF0eijF}gEX(K+5L=N)n6#})X}>>GVa^9KWH@yva)J%99W7wGA9`uJnrpA9@) z*EB@aqxEl|Zm_l>qZ*78(1@Rn|J>`y`07yyzfYU+>*mfJdbV(;16NCe?lD)?0h8*Y z6>qdTv_=udLFR{5K%ZSOJ-`b%la)|{#P8UyaDpa%cEeS0@ffw>1P8QCd5+<#ZtU$I zUk>!Kg?CN|_B4|5wDKiLkTKWip!YMfOcp%rG&0q)^XXJr>6LR|K=HqlSieiS4>2c~ z?t9X<$k3VM=Q6d%35V}4@|Gr!B@;TLg+|!f?(Dn6(0R8rVkIhvkoGIKEVBN?MBa5u zQNNJ`a(>Sm-aL>BrQoNCWBt?O(9ml-yEt8O&1!5>ux?t)FFp=yV?@C-&FW+{@GOqe zo}QyrKnRILZ~u!qIx>Es;9xVez@w{*1cyzp#2vQzE<}~6<)!vSN_4r{=>njH`x_5z zkl_#Pv{$MW>F4Px<&Ign5#9Gu^r!KWQ#?2OlwPmhoz-gBd(g$cH{$vus+fH;Jc7QQ zCC?k)gr18GmFGG18QUlC5HoXt-r8R_=4V`zt@dH&O?z$Dm{}Y>3uBj`5-UGej%iad zcdu`DoE~2tdG)ocM(gad1)V1bt+{`NdxUv=0rEUrL>-cPpOJBglK1=iS&fL05$cis zBR*KlZKZfyrN;d^v^+wr#>y1;CO9Y24y4Wlo 
zoe%N6{uuUW%=^Z+S>IX&L1h)a=T-4f2oIHRf62DDH=X8!ECAxpy_4NdD;&Z8$3iss z4Zz5_j$J6bSc@&bnT}tiZDKZ~YH(&C>STutPG77v3OeQQE5r$FZnBe>1KsyK|ofNt1I>xbXQm!OjzGk6Cbt? zVnBd`BY%Mc3=ywkR+yy1F!Hf0L$XA``+aa5gVa z<2=Int4v2#;I_r*cL9HTH?h+p;=`vWrvLcy86mhZZ!x7KkLx^C5*=P{CC;rQpCi zP2=N}Hv0A>KZL6XPly~AcG}_@wrR@-OTbC=mu`^z;q*z}iN28|bRh^4RfnE-;m zjz~8UwHgQaE5-h0$M0zgKc-Tx61=uwJNF52+#0ljEA)o}-cD>`kIGGu6WHZsDgN_# z^2BBaxT10xc?9$|VxyJHn;_ruq_+a`&W#$#!)v_8cW`|xAdzAHEoOlJybf&;c`UL; zf5}98H+f(N;Tk~BhQIM5OlL?(CkvWHBDRS@oE<(pz}ie^7?F+m6y$ydE*+5@KH4$; z?0*i^T^F?21CbdX;P;LYBV$ls zT2|o`D!CygqZ5CEtNm-MO3g9drB?L|pno7_Trf}rSiB(eA0k2jizz}yOm0x{oYnzp zN;@SU9d+o~1$+zi@JYMG{efNH5(w{rxpFoNtKgnvyi_~(;=CQa9O^1ixrIQzGEK6HkKI_;?Bycjg5yWFl7 z=v3eiL5K!kapG^z1X>9Q80E%!piW3%y+ng@w0L{-uO#(<7W<<5s{kUq7&S@~FynNq zbRSjxKh~p_qgV?!s&X0d01l6|(>%k2pa_G-fIP4bg)B7vF9S@00`c7?`20qja1#h} z(n&jMjv)dMMIh;4w6k|>F7Os$l;nqR_GptfkOC#={oP1z0PZ7#m?v<|5UU>Ss(?6a zQ@B7X#fC6|mLUEj{o-Q;J2i{Qr~h+*PEZQMNfJ7;npTMui`u7(o&uObVs8;Ct(4z? zyh1BWt_qD&CeDKttaEoam3=chysr;c(;~PYNzU{iiKmIkuyy0NSkXEe(ni6d0dMoK zC6W{;oWP91&07 zIV|8zUBcN#YnKfE^-$^ClGaBR*?~x1QR2xM7S0E*H`PE5;1h5$_v$4szUVWE4>ZH^) z(CigVk$jFMg>>!*8iNHnkaYW)KT)Ky3ZA+h#_Hy#rpS7`r-mG_ox0by$cN;$mJ&YCVFo{;&4nHaxm zpa!c%s0=8D6{V^L*Ma@Q0u^ZXHM^)NZQsSD^jA0VE%1djlB)c#D_piyCc++}622ZE zW0mo+M)Wx@uBzqpFW>zt2DMCOH`=B-nqI2<;-#17M`wNo` zp6pTHCi1S=H!}}&Pv(XDXdNDsWC!X&9&gm51;`A*0}j+7F(rSu;=L1r10(1({ghj# zo^~JFS4TwH7~i_5$Gjc5Y=nu;r{@emg{UeaGE2$&Vah-7W`x7uo(1tWs1tsZ$`)hO zN=N-PF&mYl!rylea&Pc$e{4dn9jw zM&x;U{7EU}aQ0dGG;I6>9XbAguCDKRnhZeC7kNbZWEd zt%kh^|9vej;XfdTiULJUE7o%5*SCp>Mz2TwU0!(DLCm0p%oA7N`jw+oX(w=Y#k67q zs!;C>5b*NNG2+g>&;A(%jHBpT?Jn?4sBLY9vj}On?x17-_>UlB4`!3rYynVHYr@ znSoL(csvlB;Xx^J2vLh8(vcakyUi7lEJc8S35A$~;X~?0hDiSD=*7(%G}Dg?F)O6% zrDb(=XCnk2bI=W$8u&f=AwPr*^c-mt4mu#G@12C4Oiss{`OiQk0An+JAgmXb!o~sN zKDP43AtwURfN(G;x$P?;+`*=a+l^x1I2KIOjKK?I@`uuyUtqJr-s^INEXizZb z!3av@2V#(ZF}G!QT9%>MR^RqWGNNa_TMY##=>0)@fB&Cra4t7pMVyOgn2($zoE^fs zjxWfoo6zjn8DnoF@;yY2wHO6LQ$08(SnR(}=>fTh%T{*+ 
zmgkPzx2Gp7#ho;cyMaf8CT(I(55iFKNm4~qj0n#0usA3y@@^v7$`O*v@S0}YWXOLMo%ng@@dMjFpA9=U3<%r*d}(jfk_?UH2V zNr75l{&|J%5xPv=cfp#45#T@r+-_ok>g@aTQjz#x4LtgLcOdk95uhfWtiR;G`3o$} zj<^8QDasH~RGQbUKuk=`MF(%M5y}u59)zscksc5pYOlgIut@S(goDBGzK`MxW*vdX z2$?ll`1Ni+5>&b25KshbplicJ44{T5ynyK3u%L(ILL_Gxjr9k*ibu0S*0q3yqDK-A zk*%BoLh=yM@%Pm^DUqfjO)2OTB}H7-m+vZh5UGGy2b1EpaT z3{Eipf)d38_97f~B??;UlKjIEx$#hA(7qnNaS=&mn6>GQCMXkFUqT%5fRVM9m4|AB zrEk9%53(NfLPp`B*^@WF_2n8q>7ocFj8}MKW}e1kBUmh1MKEKb9Ptt}4c&qCO10zs95!&z|Fw(6bjm3r@L$YVvW zXQ6R*sl2gcIjpE)Nt`br;1+dC?+i4#>24u{TQF~8rF z7^|*9^+A(C_rWNngTUyPyO$@nr-}3Y@|g>R0S_u&m_hKJe&jG7ybkjak&shl5zQzb zgr0PWT6nk+Z0jc`q#~!HzM{V!nd?csgUAFq=@to{iLK+{RpPM~-^o&A{HO$t!?_#wGt2CQ-h zNW)E3DBX0iJA^?Y##hqnKl)#&BI9Me8Lmg9M|>w%H=eu19dZ(h#|>Oh9{Sx`AzQiW z`&@ToFyE7-u*FjwCW(xTJZrp$dt?rJh>T^Po7imd3tc3;j?5+zj&|^5>LO!VgKuyB zWUKz_{9I!iUd?P^NHE0cO1(S_gq z(s6d4;}#fA8MshFDfDtX-Gz+N z*-~6Q-sZDslq~S{VFP*{daW&Jh{WSgY+At33{4hN1iVGsdZQ;4hi4w?a>wlJ2gg1} zeTk76@aH>ItSw_LGHYePor5*q+1|1f&$(`Dlw!*kAr<4wILue4AD+va8nc4KDv;H# zGtMvWn|NDUvuEhu3mGP3Pr2gF(d*I~FZe4rIjXs@1iy5PP6F)0Wp;KySuKa|~Q(JiE zeQZoomx<8&RP85|zIQd|jfvg-MKh`Xj|#J=8Fw6UmeZ9+6elGGmt_j`pIU$S>u@{@ zDy@A^jFgmP2D&%do|ro`(#6Jiqa^*G7ot7yqvyTZxUHO*>_w%~ze`^J1hG>2!9eG@ zJ;l$vFDkkpi2vi{w6SE0tN-Bjm>BrkOShE2wqxmSPe8*?!OH1~j9G=Lt*M{_Vs_7G zhjSAW_=+b)S)yO}>f9=g)2u}PU zk)!3D@I_C_KM(UU>HZ%Ko@w`#>Mi86eu`!)xFkDvd;j=NK`m|XW_tDJqDwQfZI#9) z*Bi{pODq~5UMkUTczd8ktwHX9)@!Wfr5aAE9C@Yf41#~JzB;ao@3Yyy5|c(F!4mF< z_ecAyt<8iB%xz+X3oLB5_7r@x{dwDG#P9U-;I(gi3ao8vZzDr{I(<0#6)har!Q7nd zcGcdrl>FhhduaR8E_||o-kdVu7T=wAD{OGcZ-Kgr2) zbZT|B8eQa=(CThoy2!DUdC*if%1br9AhAmQ^IKW1iRQJ4MVcQeWs#JG;{}1XSq$iX zWrwJKJS`icbtx!|r20`*R;_K;6L~`JM@5-EwF}0Bb29L~9}%)>VvoM(%gIbXsOnl5 zPZPS4jy~J)(dQU19>Qf685LAVEx#$x`HQ*x4NcK9=;LUL6ctgdh{{V_j9Qs7O@+Zd zW16znk@xB}(>BK9c^{i13kE+gPIxbHGTcC=+!bG| z1${q<%}@G%Ud_F7czQ!adK&uoi?vB1A%%~_4o6Alt{#G6h0p0oeN;sG6r-Y_Xa2Y` znw*L#)TLb>siJ4uV3u*Iea5qW98*&t$7ZuG>Tgl;^6tqZxyjL3m!aQfksW((az3uu 
z>vE$@oSRNIO)lZM8f33O2)t*&ph6$cuaWkZhfX@JB`o`imXiwcJZ;phecGW&rRh7qDXTMcrKVHM<^(7nuq$f-5wXMA9{T{Z=H7!pw4h70TbqWJ2q2R}dA#FLnI%xa zWLBkRqptnj)?*F-aXM_?JJh@1M?l9I7y47QWjP0{-81F(L zwA2kP`WhnX3_;k`Ky%-s4LJB64+c5Q=_ZHzTD(}=n9bf|#VyP5UpHpELGk{%D zfnE?PKZ(Umq`SPN1Q;@YV2_r*A8>@E#p5hWT3o85h;}r-5v>ZFD60H?0a}50d&x#k~THkgdO{Ubj8Lq02Unr zWaLT>8krULOCb-#_T{NsEEW#-l7Q(ZL1~9_qBW5Yrjh`QhnszeVGx=ZW>ql*R6r3>B7^v( zn;9cFE4m*9wt&oWz`$4-IV}~iBqWgAcn)w2x}f!?y8~3D9V!GU7;;3hGszNP!N#0L zkUtpdnj0+F(S!*lXM`!oBE4Ld3e;OoFpEUmaHBrtl$@`^OA7#PMaCYHNnsK`U>qkU zjg;q~|6pOzt@l4W}e=9rzf*m(c@M<`S7O%n3Ua{GPNTv)vUT zQwMX<+LY@dlinU+?xeA}2&h(sktr%-x(!%24Wf zSVTrUu?)KM^pSL2oTW#hdWWGuY9;obulEMKc!Sg4r*Ou97*#w&o(WQV}x z18VFMbHKnn%B~@#AhsPphxAZk2k8~0&R6jpV#M|&5H<#@m> z_*6y-3YalUuHaQ04&gH}^eK~U-QPhffIU@7rP;kqVk(b}?BOFH zK0y%yQRX0|D*b-XGqK|?lD_Ejpm%|S0qMd&Q^dd!srYD~rEexOUxq|i&u^jySzoU{ z&~}bsJ8(>hMko3V08gZbwVMYXQ5$*xO3`J1Fji7{KoA0%(~=7L86TG@m-c+*Glo}9W5a+k|()!-c_qT}T&2yLrmS=zq>E2=>lUc+IK zldkbTUjokwN3afr%0>+BCWNy~kGp|cECU2G96=!yM6PYo_OFkb^Fzucy$T~eA9(}D zvd7h5Gr%{ukexx^;NSs9*y3>~I~sZU&bCYZIG`F7kSwCf$fjS{0TT5aj^+~09zKkC zeSqpXky2^)&F1LNHfo3H-=|j^|CA?D%Aq530U~`QL@;<;k2C4j7askW9g;(%;k_=> zCFXC=jFzOpytx#@VdyCI!5k{9HRJyH$^>%iFl6J!9k;+FCMu0#1Cz0k#e0R0Apn@$ zx0SZeD%HCb*)KnKu}C2hJv*jgd@)DoH*HVRo5M`= z0#(TZ5IZ=hwX~Pcs@A<95YUciaQYAZEr~5=mw<(@X-8u3pAN6el%Q-3f%Gy+VP`s+ zxeP(P>zdkA5v7x2E4!he-?Itk|6dqWtRGK=_KUM_}mbf)ty3SoN+Gg@G1#zohjVLN1rdiW*i#=a*qj# za>%&y0`REKMU9w_JOzODi0{OzAdAXWgKRGKx>jlWSHNNzhk$5dW&csl#NH$P8Nv40 z;`On(7LDoNnX9T3{*qGI#a>gry4BH2&?yR5Be5r0_^0FG+#;N$Ahe63fDJ|P=!JoD>*)~ucvKway z$enfwrekSigJY~~&~@2Oq~TfD;poSS4-s&;8Q$Y4Z}eM*;v<&F z^ynWob|9xWefp=y0t1m|{0H|wqkUrt;&$tRO&&X;18GPtTj$`E%ZUg>mlL#nm&XuP zI`cAH(=a~Iv<$|LfK$!-6>!`x^7EbwXr8hBpKynA`cGZHFDoH=zU-|AxNZ>YCCgmm zTSIohxZa2F^N{UFa$Y}3Y1ratbqyzmGuY+>oC8;w zu0T~_GL$Cm>U|xkM$3to=xk#tIkFd;L-6J?=c>RAsq1TAW)^K$6X}(k3NM~d<>+oh zBc0>tj0<`?7_&huJX&fX_$#1@X{Fs(@K=3h;7$S4N{AV8kdITMax+bU(;Xl_tycaQ zzMG#vduEx*jX2G6=()~rp1Vkh>y=6Dk|`9s5Wh$b=pHK5M`p)^lMS@du9>MaN}k!{ 
zfw(vKTKwV`j}oL_K9XtemKpUNnb>UgyXSQxevyZu*^!!wk`1{blJ9AGZTdEYjtn=c zPFYVvt8@NRg&w%q_uDN5guc~s)S>y$LqOyblwcMV3oY^$=bM+fKNVO2KZbuADsny{OmJpwUpDmsGDW@o>gn+> zdekmAp0q|thCKHobd(>==p8=n+|_oEozqQ5cI3C6@GrXt z$Bl`BG`8@<4x7rmPZNwgs&L5b zl?*1#WZVyQ6?s~ixtez-yTGt_@gMi+E^Bs#7pw9HZ8?I%wak`+^R%529sa{TzkOD% z_FdQ~zfa<&H)R0pRI?+eQxKw#GiKUH6=}d=syCCXac-W||Eec>q#V+FhUyNW67h}+}YJMT5BfDTn$F3T-EVRI0b*vN0DJgz_o|Fo_6;wpW`&@o)AXpxr3B`qbTs5>tb zdEZ^J^C!wS^QupMZF**r17|*^bMpI%s`}@O%{{``Fdm-utd)-SFj&1dBax`-&eB9H z#d09zKorf;X-uveRP!T+{ye66daapVadKlJZA>eVCLY~41ECuVE#wFI_t7nYK49QtNG_<)E{~udt9#7@^ z{rx6ojxuE`Lm4t=+GGgXW-=8*hDwHH8_KL?%1km3nYObfQPd_whK74I5d++=+8EO~p+k_cRF0O0ZFTaeD zlZ4fzSMYLIYF-s4&0%|g;|ZEkCJBq2l*My*-d3I@5gzv@pC`?gzR|y;LCi38&}h%Z zSLm3SGsg7wGM#t&QX*}JI?K}2rWwY<`aBQtkvb-7MM7Dn zi|1bS_Fv3ekzZd?Ix{)ytGIKbTPFDNH2ah6!PXVK>+FO)R6VV#n77A;?A9>Hdb_K4 zcgo1UB>8^Zyx|sA&2ZVq=((d#_D?Z45J!rX3z{j++%C55qGOi3K3wZQsXo+bpXkE- z_+F#r)3V)XS7R9+Ng<0|qwi^b<^Hlc=vdp7XI zupv!6D=Sy=P%8ZG%X=D+`a&k1?3OCid@L0eMOkuK_7|_{MrsD5VSLuOMS{mo~ykM$%XTE-0 z&DGgSKeWGBLCWcIdVg<*l+&m5+M+SB8>Md(ir@7@OXZbxvK*3r?y(D~AFst#T%% zL{D^Ayv23Mh_}f_w|I7VZhMMp(Y#8%xKT&k@K^X<=xMB=S)?)6=L_nL-fHnnw945k zeeyEd4t?@3h-U&U`ISYPa#^AcXd`2WUeO%-gcM&{aw2+d}(xF zi{9?e>hL&)-Wqc2gx<;(XY__boV9-+*XlpotXHM|KDpysNf_2ft8-;;j2_R&XF^72 zmxu`|!beDQZuE2dt(`O!Qq)|-=N`v0?PB2fP_I*1wVA;_s^?=QGEQ$4yg0)5g1s-J z{<+zB>jSlY@g(^xW<0%+CQZ}k1YV@lKE=^QuR#vqS= z|Fpn0N#+;r05hLSZ|AQP79Gb7=@}ol(%IuO(%3X(e!W4i)tVx<^YkV+Jh&ffX;j?X zBOj*fw|dAq<)o}6?;Uy$tGDMC3x_Vy8kz=sc?iiWAN&Z(yl$)~u7XjbxOL6JIqRjd zL$Z4Xu1hyIPM(0X5t02t!CoM|(kd@mH5&Z84(m{PuA<_YyR+BKYmQMKQtP@-1&+Rp zW6OJFBkuJ@?x)kL7rBYkkIl0lKVojK_E7zl@!Y}FY?o(RhM@p}T-xMZvt-dph??u8dE-r<=~hX-x( zqypE%9A`ooRp$|Sat{J;+lKE-k%*2vOIpZU9VI+FmdDZ{@R$bmg}&U0whp0p`#E=~ zg^6-CK*Xk0(lw3y+^S_BtFEbv9KXF@NRuE<^Y<#Undf{Nb~PVt?m81tOx@a5jgxXByVEUN~@^htc?pym@r zx6eeOCy&N#atF^}V#1`Cgq4T16%HO%S`xH8w_5x{qH*5>V;##J*Xv}leH`0?)c62{ zIb7O6*jPl`N^fI zLCGv#wxz~O!_}u(rSE76z|q6lmVme4IQ#vr5RZBFrMhO*JHw}S=bPLMni6*K{ptMa 
z9ACnqbfR#J^pQZW{fw~vqO_beDX?V#^(-~WDvcR}e~*)5F< zM>Tej)PwiLNhHEMDe(5`z&+eyrt$EzMAXSQ_I1_!l-*vEsIH&QiMN21aT8I?dvTMidrP~@KNlFakW>7=IvSwC^lkp zw_}GV-@K+w}qH|0`(S}@<(E(CN>|?V9u|7 z9AF~p*r?c1tVl}5EA^=|yfWT!9Jvy5Z^HkqA)M_ywj3}yeoVC7fbZ26;ltR`a?x4A z=Zco4@I9-j3@@U*U$f38p(UhZB`G*wV*Dja$-1v>h&PG1euLy0& z8tG=Q6MEe{a|h$`Vq)kVGw)x&A}*lgF2v!}?YTisPhkC3Xs_N^DaRqd(46Lr&qliv?v;LG zn{m^hrL6Vn!TNUuK#&T)tZKtY5*>v{{jNz3)OPtu-{-_1(oe})H zQm0wQ%u7AdPXF|odcDV(16MN+JR9oeB#?Y1qeVLjZ-Va6ywO{^0$+0f!7tIyg7leQ z2sDQ5Tf(uwWILZ(xPJZDo9k#tWBSZ^VbSL0zN{=#wY*7zkD5bSSHGYQD~FPLA7>5$ zr><3Cn=D7|N{~F@=B#(uw5&+pbMX0e`~#Jsk*IFMP*nHn&VMhu&aL4Gb1atvwO6)S zF75=ejL!G>&L&tcN$ITY9em$AoAsjdXkp5ac#p@#qZPl6^a?Q-tnTy4%sjhVAv<$j zv`auo{0eoRs{=7l2m7jPQ{{0*V|5*y>+L-R@zbYMw1Zu(I2JyS!QXMTC7@0|(TriwI{ar~}sN(wcW8FPc# z6J5dE9OIX(Z-;QmL@g;AuvavNB!upot6IuplS%j;Wc}p2J~Pesw56W`OlgTyDwv;1 zn%~vc9TT7hZJ*ClGs9OIeW)`6NvDTNqmR0X$`>c=|yTVz?Fl&Y9A7-gy1}WOu^7z&> zI%v9=+I)$_{bro%BF=AdoxgVd`^4?1zIIZSvF7zAZ?%<)by^p9_s%)*jDeJK?BSN( z-P_&6R-Qa>=_KT02#z*E@F*Ya(yW%2ROnYs$6t9SGnn#Yb7JX?WJW7aV=_FhHPxlU z$W%MlZd*FalVGGMEK98h3g445ERTzfTDmw$x&4Bn#jRSmJz`g0)l_a*EVp$-Cx-dr zl5gMg-~*c*e%}bpqpa);W);&5SC#?|L<9E|m{pEi?rbt&{23E18n{C!`jv0>wwvJl zcKu~PCW1Bk1`{)t#>SBa8rA!z_XMN*o~2GYKr1TFMS4o`)M2+2nq4N78+P2jST1~9 z(NNBTT`{NlD-BFa+;j5T3>S7P|KX*nH2$X`t|0;^ zBD6GACX{U-t(WPTJfoIWQ%I)v%)9PpfkGz&rfnHhE2bPT{j}g89_77@+f)^O`E7`k zYsc`f#6$=^uj0WG0j!&ZTKK!-V}j#rOaA*vzIhM2BQ;J}hhODrI-KKKpKVITrrjl( z(q)+S4AaW4L#;r`X~B@Xs{o_vx1`u9`C5i9yMnC>?GX<>>7xmKiNp=A<~4zA%fc!< z;=53HskW@f;m=8y9#St(><-NfgUVH=6MHA#ckZy*XQb>2z6$RNsgp`$r7}p}dS0`~ zB>Gn_qhAc$<+}8KLFL0b_{mV;aia^n&KPAfhRqF)xBg@GuxT}-G|>|^aGF$Hrpov4 zV`Drg!gR>vRBwojxWWSo>LmGZNA9Uf#wYI8%X=VE#Fw`xIQP2XQ`X$QgPt*GCo4AP zHP-TLZ2rx#|Dv07GB|>fDJIVL<8d|GobD19*6t$0(_a53mogW>+u<9uhp;;mYWHv7 z{S}!nS|FA7J-++6sI&#=m5_;*;~VWuoSSt_=6&k!*I8er%2rM&SWByQ2;>GpUs_ed zFODJ6;XA;b?zM^Y)kFlJT12@@~e}30F#Xi4~?F&E&qVcjb(@GPMWOF3i%oNZXMMepIJS z(=(05h}A<^e(v%c$L%$IV&He?sX2pO$?C(&@Z6E0)$jNNx4sl*``Hw8zkf8(c>S3S 
zDFu74Fpl4{aEkO?3ewz}9geRRQ#l`rd?DdirUt7+5A8hf0nICA3UJ%Y$QwI=(7lpyce*syih zdhV|~YZuwR8GS!-3y>E;)v?$)p)$Lbk#QF=ws}2f|PWngOq<1@w*hi(3)Q*rO zqmqVQmp$jJW%{-Ks2+SqG)GUG6gt!Z~bw^2xN~d|yjle({W1 z(sJ!N8(Q{0g`-Z_<16u+mJNK5KO_!JYq_`=%E&=sdj47mMR!H>@FJ&X$Fr{ORq`t_ z-B^KLu*ZKN8=4GQ0)0U1WMZbR-?&GB@wNPQ5fgUzVPl_JHv9{5mu>gN#tQvOOfjy* z^LyTyro+_k*}I@yZt>i!w4KIx-;;tuu8Sk<-_MWbuF^a#41>fAHS;dj*Y z`}qEhhp7|Lj6Fm%fDc>fQ(Jek^T#iq6tl@?DHRtp&1EST7t_iWKYJ;cG`o4a_Gp0Y zR~4aQtyvfL-??Y*{YvQ0=?)#VE*oq2*Sp=aV6^yb5wFv-M3TS9koqdc^F~GFrPN9` znOW%&v$(w>d%9j|!#h$@USC6cwRt*gk}|9sKk;%GsR@%O{%>-#!^)@g+ZUmYoVs#j zdcbdtv!z;Izv}EHKGs2*FkkgpsEf33Aj^Nl?IJ7b9a~l#G5nFPdcb~-tb)z&yu*eI zN4EBk_D$n^)U1L2Lh|qQrcs%^;qfT}{|wapZRVE6(O7{LGp+(U(a!r_GIWC}R{pH< z7u4vx9KCWZQav;H2{rDcUEZEw#`Gh&zG#pP?idFGOxAWd&6rlu=2)+8ChsW?lc*#s zEstllH4_(Q#+1ge>CvuH$rDu>URluu*t@`{nHqH36Y-pWKZkg%%6A;Cke9AQZw zqRbCW3VL9H=9&~-@TpFreorv66?(q9KpNH7_-1eSa-+6)MU17!q+l#NYJ{+u9W_eW zl@m2aV8~%#kP>BXHNh!Xd1x$~3L0v7u;*SEPA;~W&keMuSTU7$9Muk6sl8?+lExZ6 zO*mIJbEiA|SK5SV9t)oZ<^ro`k`bn${metv1d_hWmaQdIwb(xGnB;NE&yV%4@tX10 zOJVj8r3wulZuKw_jaWG`>!Nm{X|j^dTX=x6=Y%%}JX3|bu=$LY>=ns2-bQIsFm^Y? 
zp#q}K-fS((fVbl>1(})pk}U!n7@}5g3>#$Aof2_dwM%Mqe2Xh^hznm&w1$HvAC(`PHlES((+?v6pEsbPA^z za~NtQo8B#&KZyI4(WN%GF*f`mZ5-PqBR2Rd>%bhX`Z4RQr%?ue`&u^@fEE3qd7XOD z-APo3J)80u_q&W!!$Y?P^Gz5|(l-~k(I~yFPJiU0unV+ls*JI(ViLt;+Vj$0dD)V} z5;kw_#M*Nfc(Dnom!B`&b%E9IRNs7Nn}d!r_N&%|$X97yY*ud>wvAjJO*!~d*?6!C znok0-nUYblyo!A@%rqZ3O@{lKijV4q`D0}zlf-zgojAi|e)*#^_3XY+a~piF$96d; zv+v%&d#{OZh4q&@LtLDVJwh|Y?C`Yut4dHBmCCJus_#E<+lfQW~o(eApoNg~R+IWPLs9x6h<` z3N0MT_V>y4Sr`)72-x@?aNg^-;C^i{W!p-=?mLT-5AVKlVMNC((i|p*huj$A2AJJ-ladR*`$8u*xqdP&By--yNgrOO zW2@gMPb6hFnGWYiw(4^$wI0`wjWANu>(n6X<=vlfv7ExlL_K8sB-kUFO-~`^$MuBH zFRFM`gAU4bwkoW6Fn(9z?M~&+pKAMVm##I}D0k#$4yZibAaQV*&&<%3C>4BYK~MLn0fi@?ya$d?PfuzKewh(M}c8veTIb9d-CI4xBBLwz>X3RkE5^1N?I}{KCySeBrwvrbx-y5h_2cNIR zm)sw3-T%tVbd^%C?i;b<%0|P1C;X3tSMm4*5+<|fB1JEx6)?OOq#K}s!SDkUre<^> zwo;A!Is`I+&C6V*)8?fS%2aBiZhOIXo5QnGDvYD;P?+lg!R~kQv1~)qYX?SQ_n1Rx z%J-Y@cs6NcsU@lo=eo_R|?>Z;fUE=SQFK^u)_KxABM{aXgZi|OWxphGbbD&^C(8I`e zYa#eIM{$c?%UQ`Dt=Y?d?0qY#n_q?W%Hrpk1&1Ub?+d;^XDUdq^FB23u%1dw`HJ+k zNnFk*Q{`NhRoO;%A%z>2!TBm{Yr(J2cJ|5{Npqh(c=3(U&PYaF#y<-#7nygI=Y6F6 z6|UFD>q)kHzS(>4y~X|R+lj(4k#?Pju&+wz{9g!%3a$R&dw6x!vTu+_Mq^*I6SXXR zn}PK#bP9y&OiU#6GlU2mIS$u_2L4HgYbi*S{F=)h)FIxX`c^I4>5jIxakG8o$pY!m zJYD%!p{%5^7^kfGBN~laOL|6YaAN=7oOlz!s`~At4ItYA9oW_Qkd3A=8qqTsjws*4 z5+#aE)%SCR4joilJ!hpJmTA9p(V70UYvZ+EuTQ67?9bAr2j)Ir&va#TH2c@jvVC}2 zvo>1D+Z>Di0rRF0=*`{g*^Iw>nI`owVj7;Nw4zy@*_-0~*`-;kukhH;csoY1;nST; zSC&AS#X5U;)^`!Q9O_8}bhGZ*nTohJ)S%NkM3A=?mK&hZ9TX?a6*f<38+%Fy?`&h< z3y*s0U3&A`hEPu{$9}{UQ=E6c^yRqKtE9>x31hxXx&$*MiO~k&-H>bzaS&Z@&LO1qS(VCc648*`oMIrUE+>K+@ zvtKWL8e1X`7uQlrbAa2&7EOmM+6owCu@w_QP+dy~WCI)l7rMXhj5%jkm8jtDKp2W5 z^ZlPlv*R0J?B}tLk_m|9kYun#U_i5GJe*^q@-EEEdgok~Ir+64%`Jhm14y?fK4Rcg zSxY~()}%UJ-8qimz{;ZEb_q+FOV{EZ4qAA2vFt; z{8*j`c`I#I(G34VIeO)dG2$YakabdIGh70d!iG}YPbUy2-KxK5Dgd|83yr;T*Kn4` zj4kGyCY6}rRB=1E07DM4-9kUx%hD^SJe9eW1Bfn+|9p5oIE>j|Y;Fb-4w}d{*6;^l zFRfhVd3flIayCeceqbJ{)X^y)fJTg&Z41dV?)BZjw_sQasd~S_0jh+|dr++A+~75n z#(x+F^%lsbacx|z^)M)hhFpa$0kZ=NjvWW`wO<=1;&6}Y=+B3DJln|QC;XJiy#@nJ 
zlklmhK^Kj_NJ&C-1s*#KHinI{3_b?AX@Lrl`5TG#5-n1{fB_1C8JpmpLW8&^*TOk0 zFT!p841NePG+Z9#Bk=upFoKLOTlkf<>E8Xck)sFp9-xm2lh^M3h#08Npr}8B^!MAE z#-xIw=HynPVIqhW>*AC7WlsP7z-ypEw1XqexC8Srigz?c!~zMaOZJf59RPYf4!T&f z7MmJ*CUB;M2MAJgl!D0y`O#k`=2cxGzKaC4TDdWC37Iv(cpp+5m+W$JrDc~E3_3~+ zor#VNnUe^=BGi$6T85Z*-?^y{cy$;U2rRgdht*7y_DW1&FefqRlv zat7`$=zM%&4B?OT7uF#=T9rPX<;X7yrXJD1XSxKB_BlM-cStXI(3pOpILA*IOd|+7 z5`N-jlM=GpS9l%WC&;RE}qC-==A74K#g~Tr0b^BBk21Ej~qm2;4jnJ}59J7%^j1~x3TuASZ zd`upT!s`Wo7ch*?3`C4{@-kSN>7+r@$!%rw^fY)8pgWaB7C<9;;*%qwr{VhD052@u z+yBo)1oeUO10;|m+e0=SRJi$-4NdBzpIhh-ER$4)_IF5a`~UoYFbaX`09vxe6-1Cb zaKGhX-d28T>vjl<4Elh_P%`}A3&o*6E?RPHc4oe&;4lf%mOyG8t$2=lk_Iy7YJ>H589FQOu4UGTa*9z_?P!K>SUMPGa%Prsf<;}Pe zaByPmyYK^S{g_YpB`6G24v24&fdo zKtGLQpQ#3tY{$Vd9D9K*(!?Xz4FL3j7?{oN+I8rMDdaLreg)u$!@m`eB;`|Rjv72YE)de*m6$zp2kLT!p~^35@&`(RkyJeJHQ?jC zM9U$QiqO7)qWXLRKjFjwEFyN zZw$%cn}UCDjiSK}cAf*jgJG}$DTzCR0heGFi7eQVa2P2;(YVKBMpra>V4#6q5w@Wh z=nOHO9LM23CeD|=L<2hBmLRZ4H~=dxX=X*^cGdVUgniQ$uj`kQuha>>(lJ>NPK@QC z*2Xv|&&MO0gP8em(6@zyUitVoVn{&5rzFpta!;dKGbD(n{iqBx9g8r;PvUMtlU|6~ zG%1+xyD}37IJj_F8sZSFPFJiift;2Z#!0va6MT``?V~64>}UpH5w12IHg9Evx*r`< z-Jxj9i8Ho;Qw2GPq9WW>J7A@a!*|oely>~q4w60NfzybHTJErp+ioH$zxUg;k^4sF zUM$E&Wm0iXnmPo0FD|BGaP~ydVWat`Lnrjo7|@RC4!bo>{S$etpesENY&$x{f^bEH zSco)31gvK>GB*(eCqDNNS5hCaw|zhgJER{U607-~?+&xs;WT2zW6xn9RLKIFeNn6Z zD-ehpxatXc!URAVm^erwqmCjZBaHwpzUXt1cFR0BF_14hPo^2jHg$V^E4*(Y{4XMZ zm_46(PHNK)ezK9(yP^*>!}EDXgDE?wWo%Sp*F)Ar*TdKEtw*oNxB9={db9O*tHZy` zKUevHGOO}YWdUVz*uTR;5c zcE?e~-9uOkgoEN1=RD1+&Z(Q#ApacGiW$Lt$L!R@7zOf6YMo4)ysv>5^70(^*!B=H z;hgnXdvJG+MgGFmODkMmfp(Jp4=aAKY{i=x=_HltQ_Jx}9^y zyAQ(P9y>(>lh+R3mBQ58&Ag?f!^i+Xe=;gH!NvM(ZrLr#nntP{o~`Qjo? zjN$-ph)4+IXri>vGyUdebl}Q554b>0R24KMTEL*i2ZU!@9B*zew`*=n;8C|9cX@St zcOICbA8 zyy!pNab8Ju^nh^1ber?Y2K|`NEYZ>H=@Dq#6hAsOXos0kErxbL93C&glSwBWZ}Jivs?4gz!ZY&jZ~$r;2nECn{3-pkseY@{SC0A znlhfledmwi4V|df89Uo^w!Nh~)l(wx(Cn42?tNNW&*X0%X2U5Lw>Qe?j3qNXu>A~l z5}`2FeX0SoOP?BVQI3n|8JbY-_pwn7u7~))QoWe|^v2BSY=CmW0)xBEgIRHHV~11! 
zv+Ix8GkL;@I~O(A>c=96vyxFE=WW=u!xZZs1s|U4x0hUoqE#-2Q~nf7iJ-X=@gwAR z_KYsbfq35E6=pSY|DqHlgf;BR&abTqH1%W8)~HiaZctHbZnf~2Iw4Y01)Lz$%Qs+a z7a>+Tzb`R$o!y)!RpX5>cheMbJe|&cf{ui}-gcj!e>pun9PYfiuEqUxF|cLbm^x@< zWMV6Np~8McJyI{tiuU$~&D2)nf_A+WGBeuXs)0STZNXHHM(tR=3sneZ)J6{fHJwqX zxwY8^mDq+xEnYQtbvkWh8JG|VXwz9kMF1#sSu0StL`HS|u9Md1>I{6QC(1`P@HSXZZoftnTjHD#(f7fB2kdb>UxNH@p10_;bC6 zy)Jv+`D?tlOqBmLJ71Xb+hFY5qXMzQZ!YdA&ih^WeLMPOyH1}kbvZ@3pnqc+{?gaq z`;Wkn13!-bcu^_-6u2sjo|YdkY`l=x5sppP5&h=il&dTHE&6LJp(yY{Lniio(K|Ev zqtdtE)7T5|i_=`5nYm2*j1OIAnnK|tj>H56OS=u1O;+&Om#96a6Uo8=1_k6sPty3_$ z%f<3tT5z_E(lwD+Z`qHMT8F=iRO(L#Ud$B@RF>t~5{cIGjNt;-WyP)xR*h;KP9;CD z$D@x8->%oSy3M6cZ0Lw`d2)g#G0rgVedUl+>||O>oJ$X=sF*cFc0?(|l9aha|= zzLUdRlZxr=v-<3_J^h&CUUPrJDzEzK;Y_)%*MtfK##m2pkX$-twK3#yX*dT2Pkk#&wGo!Vp6W5b!6#FH>l)1YS`jLqABtT#{t;%N ztS1|sO6!t8BEAvF=oLbRpkge=#Ac2|<3^?OVV-^SQWay01>gEU>IFD1e|mjOaI`3( zZTZt1cD_=XrVPHAlBtRz$D!HiHoP57@_NJDRchGoU1WOn?QFKb_t zi#UO0n~4XGkYr@dIqrI6>7m-ih#CY*wUr0p?It9L-1{@-skPmbgEL0{AIo(wrSp_AXsT2G<07SC0fT7q*uiKkrfy*E|dR7<}U2x&x!wk1mc4q5JqZ`YbGT7g}iObNEv#&(v)~GeH z{}|b~(|tZBVGy_DLwKy_r`VA&%djsVd4AFh2OC>&wFRraX?Yt@;8bFRt@PJ`Vviywl%e9%*&TQN{X7SvbDzz$ zA(xAv>kF6Wy^97B_OBf3{ZLd|bmS>cjs&0-AlON2UF7 z5&7vZLJ=Pdlc5n%`}b2yyph4mquybb?@T%($h|#U_S&*Oz_(ViP=J$98pa=u=c|em z?rOYFw~3Z0OtL9}2O@k)a`@q7C$xlM_W9}G-kEl9_zJ24H&? z2H3+PPA;-9rA4F3JMPSP6DFZ(qXl4u9}$q0O)x%uVICB#!;p<_c!p#lG}@oX)d(iU z1qnqzC7{a_$^p0n)GgFD=Cc5-fK|dGv`r{cIo2F;P@H+CueP2VDxLLu|45sifGShP80qCJvxQifbPvogsUv2 zJR7OWUkZX48nx{AyI~CNH|v+>@bZPirX{Zk_3+Xyn7uCyKTAw>#)opCIvJYps6R`7 zWR?w-$3M>j+|DozxD#S=XWFBGkj~w$I$`d<1+@kxLM-pNce1OJ5Gn*(_pEM^o=X65 zt+&dV+k;_8LSqnFf8+omj*YxS(4iK)hVcBPHhF&_$J+tGJ8bX;z}*>#N{<#g6Yh`q z1dgDdBk(mj(X1l!&z?Sb!7R-qN@F0sW**`=9r<_#GUCVUF$rW-WSaX&p{5xJ76UE7 zAzmLffc0<52kRede&|!9_0NmO6bi#ht2(&>%i5^sMwC()=BeoGP-@ddyDYX(1PFuP zBhpvk_H8ahgDV17Yn#gj7PjtDO*Q(ac+`wQWBrKeybX!dW-~lE?KNX7PX~C|bmTr= z;v3WzB*Qo6$Y5p$@|tU4P8ZcZxC? 
zb!8DTNER53Pk9b3d;{Qm6SNh$xnaZ?vj+u2%fqBZ4p?*X+1CrnOEZGo?;o@(} z3+}TO-e3BK;!a=?7!f)*$tqa`dw34eRH;C-3<1Co!h$VZtM5Z`vn(++VQd^V>q^qS=js3Yo^37a(0YxQ7rWYLr(qqPMTZXOLJ46+teS#eCfdca< z-V=5+cu4ropd%JU)22;a?#^fPD93C0R-ha&^zwv8 z6}0LqF;C84j zF`qY;DJ-4hyo{GJ25P;gG>e(ryB(8<@E8 z$fPuhjh22^N9iGrH1HKBZ(xusi}*N^FOP(ZB4ywl5Wf#WyVU$qyR|D*<4cJ5C?%3` z>g8w6E3*ydL~zyZuL${%(RTsBj!WRoJ_`}rZmoDUQy2hf>_3k!{Y^97P6IjXM_3l} z({e5!T1Dxh|H386VDf^0?^H#7B&|ty5Zpk#^YnUs3Y#{tF%2#`UN@BUCWqB)1=uKJmOvWPY7E=fL;A^O1 zo)smMfA=nhjj_9E&oq8^pM}bC{4*31?}g8?a?ohq03y2G#^N}fsJlffZJ2$6AV1fv zkA5@<0&2N5@JtOMSP5!6l>sdl^B!zBk}@I?1_}}VZPsz3pRc*SPlYX#cUa3r0*%}j z>ZGzz2^0b;oy(cIF#556rfeR-HSGX>__^JGbg-7PNbr;yz!=Wua;STCLh> zeV_~T!|3Qxh@i@D&3k&0H};OZv1lkKz`beFK`xt0PyiyYzy0nYqE!#UAp*j&G-`Bj zHbd@AP=kyYirMV{RR9?m{e&*}M87IJjYJ?Kb#S6^Va|$v45l5p`eCUq4BwBI)Pa6< zCWC_I2sBnutGe|a`(_Zfy|JDMePmL3>~+X3Of4wo*ycLdtQ&%UH9^(^XN}}a>rZ9K zj$TRf`QV9Zc&}pS?G*twgG}4w`}Ryz%-H9pdou#DioKZDW64owpBg=0(t)}J!MDCb zP%U!#GEuMjuyyqs{NSK8CI6e``v+coaE!^- z2c0-HwI}kqP#hr*z&L0RY)TjnhCvJsi%9!X14~C#01bBQ-D|B8MJJ==P?mu7A~@kk za6~Ck`{Sd_pWArvb#pB6e&B4H91rb@lDLVp=yCN=kq;zb^Q1+U;}9$?XZX5SIxR@s zClFV*I2?{dPX=QMjf@`UPb$)Wp>qS+pbrn;e61b_V*i%F4LEM_a+Zdbega-7kt)Gi3`opm~EzDwPJu^VSTb)&|vS`Zl9SZghjFbYRJ)#0QV?b z7KGh?pDQgic4k9v25E0ZxD!dDDus5fbCoc&8p-I2Ju}ID{cw#JWa0m<@JI z8Q!K?=}y7E5d%0R|GPXYKHJq))WF8t3_BS4+ankHS<$auR*EjpfJ+BVQwoo9lk%Y?Ns`@t|hk}Up zzj7$pKFE$qvAm*(f)Jy+>cH}`f+05gF5V!dS;(5diITKtZRWeb*3pp`P~Dt~h|ED* zs5o`3odf8<7bZq$RubFDlVS+8x&y+TTPQjh1k5VCI2>D z01bL+zV3bi?VVOX4p5a)!iko%m6wIH0i`^Pm=&8p1sZNgq_I+O+Db5L;@*ouM?TOH z$W{X$86lCWITkq@pbpHRo0y1LQs}s^C{qA`tNa3-ojYH1N^~FKwAH^8$8~x@cmj`o zvW612G8OmN$5Y8xOa}#IPKxmEz_EHLA^@VSg$$PTwyJdfa0>CW$X4st8NfiFCP)1J zP_cZ!NGXT*`@b==DK;5pz?PSKdB;WSzS!O zQ5MeQMkm|xa0nZg$fp?&fXjXb1K_E1AX$=+)i2n>{=C_{8Qdh2vRotve!Yat1pON- zIH}af;RJr_^?kI>^eX5^hd#>z{@6D&SR}G6=dRLU#J{=x&pI44CaPqanz32yOKiY< z*~T4$surue1y=2i-RNu()r8x2uska1qs9?=-l5o?T>4S1*dX|OQ6~SPcDS8nyAG!+ 
z!FIexlZA>cY++R)G4>8SAAyG%<`5@;0?BS*DSpA}y<7SWU4l3hql)i{uyrsuy#3hC5n6_`K%1fwKfoK6tVj- z)O-g|Ma6Q2qWT9l25X_?1Zz`x5|Us#*sCni0d*4*H>s8?X6+otS?9MFHTc7-&r=U7 z2&Zp=^mh@%ch|xD_1{gD(}p{GV-6o_w?Gcd%EYU-iHnXoz9$fy4#$t>zFheH9hM^u z&O}eqa>$AxkpGXfsg*NqzQ3TY7Y;E@0olVZVNV~F2RKNp4yOtga;_-+N9?h6s{tC4R8~RIDRkUKRqZ z0WwPfhxq}~&H@D#6kF}*^CGy}L4v@d8Li{&144m==(6^M?I8KnGu2h!+xg$3Icyq)cMmC*2E_0cY4;u=XuUs~Y?r9B} zaa4&78lys)OM_gP@pn7`TR}ME-i@L&6Dw7*+y>-th(N^g95Ms+m6BHe$dRLoMw}Me zQGkC1`)f!Z?2pNVvcw(QBw-L_tIxML>MImHK^i%vLD=Sj*CIKLV|xJrI3UJCX@eFL zAFz+N!nT<$s|E@ZY}L>#WQ6qEWbww zf3CeaiEqj_BMd44rMT4%$URmfARv}F1F7(4?I_T)))mo2i*yp8P}?;2_vzEq++~Lh z&oq!+J7{ennG80b8*Qr3Wv_n>MZRd>F98C32%#Of$N6O#LpcEvP7T;2I%xViD!Nh0 z_AU)Wf20Sp=5kn&VIbzh7^ng*vhX*Dn-?^VZKU1_g%Xs=O(uR=oQ|>XcPUiT!@=Rt zECP0*Lhbua;O>TYAXzMK_1nL42=;}HZJ~s4ZZcPih|@irNnAsga^zC#M4vS!?w;2mcYy-=lvp)iqK8Hxvl}|?1lboI5?Y`*yb^K0zr+34Z`89uDvm6VH$phNI4^^Z?%>*qcxr+sx zd0Gk{yoqq!JJ6vK+|Qd-Lc`OBTJXpmgN$IDNi*Qlga$n7L9!Zv*~=o-0<#UkS?(`I zY5EfPJ(oNHXGS3Y^50m7OvJ#H54w6C=vt|v{BWEi0r%MkEfIY7+-O5EBKbFgy5qh| za-nd`uU#fXbx>N7NnoG)ixcijcp=}gS-Oyf)@6tTWc>>ZySd!iqyZ92>bt#@Oj8wy ziQty-w?9Hfiib1|c)R8Hq$s2Z>u;Hk4n4@FP>B`_?IV=%k%d_Z+^mwtPa-#2V+y3r z{fnW?0IL$cF{pY6X{2n~zZJ@LZ~{S=C4k|$91`7cEI_N>DeL!7lo~Dmb0}bjDU}%i z2^ywN@Ys&VLER0~vV$#M{>Fg}Bj1sc!`Q1k<8UCcw9P{hX^I7bLYqr2TK&3W)%<^N zSC0vD|0>E%hMw;*+^`b1KIR~FssWWB+|;6zb8FAmAgC=I%3E)-$Nmag%NJB&9J*L|J`i3< zCDftGLw+x4FM*^sb}O#X5`js3iVbtYhZ0mhe=ChcZpMg`RmQ%2^=y){OazJkvVMQ4#7_@6$r^`v7*mH&r%^d5Wn8e>uI5vKjitUXU>KLpVfq3B2+w{E zloI-YyHoa4lb=UJIagyGjd1k>h{MO`R+xbJvAB$vd~q1wvbASrZ3fh`pCz?-C#lH> zd&0?-n{uf35H+B%eSz3pb$_(S=B(h+1U0sG3eSQ*|LV-x!(?1-kpJ*`nT91nemuwyS{}HT zA><=-6TIr#lnXr%)_s;=M|Jp8FtoHgsxrhrgXdgS*gTUf>Vq28X>R0vw@5pmy62WB zrJUeHYrhps?)ijPSEm^&jVR2ZWJ_yJ;0PW# z{gBlwLm?_iZM|UQdpzjIyZMZs7fFm=4EvX| zJKEWPsRX1V2qP|7;cI!bv*@1ApvHQwx9f}qF{}AgZ%X~wzvD)~Kox{q=k@7@!mTo0 z(4E#9ltP-azmDdNwoDuqs z)5vomYrFB8ZeK!*(vc0^+!+`iN{$KdbQX-+sfL!yM0M0Vm{sd+mePLj0w;$ReQdLl zP*8rw#KHagf$bT_$_<(RZNa9sPrT^dpRZ)8j4AbCw+!cjrVx;jy}aleVYxrDit 
z)#B4*$2C~}en_`$p$MHf{9g2s!Q?_;!5RcCDmW^=zT+-DiIR1fj>T#m-6~x1fMC|C zX?o8u_&uUwec?X?ie_RKcta@hO=SF~C$lPrQsF`9e6yXE=(Lg7>u!+{nat#Z|?~rZamQF{m(SHkg z_!ACfkjt3q2e>cQSvj$58cSL7>YLVhD%AR=lN~Sy2`bdqgI}&<;4!H#JiJRU%)INZ zj34dV{GfRFQ4zn+h6g7*9^sjs$*fg=J>1fySzW}cvSAx_gXYFm@^#~15>=)(-L|WT z?KG%qO!#bzuf1Z5M*R`RR?6YIqVCOvHMjG}!#FzaD%HUT`YdtZrNUG^W} zUv3Pcz!h>WyoG*<)vO@5lXdwB3s>+^HyR!37ENj~ZE$k7{l5A2@8#(ECbAr##vkj=&H(fNye{k zKADe9oQV_qhw^SzUMt#?-bpZ9&V##O@ZC}UX#OXiOzy^2w%>oE=Kqhf*$v@1<;kH* ze}TsiCNS~Wr(@LN$hZXRwYK0VhCHXkI9wa>6bEv|XUC8h;I~o1nAI}k120_0-do}> z^gOgcwQzxZKcoBMeP*X@vsK7!XM=SWpXy7=P-Cw!M%EUgX1zrT#cHXQWXQo!d;7VS2ed)xwN+ z2AoHt>#u~

QezpAOt{Ic;BPDIsH!aVUb|##Sc@;A1wXqTnYdhhIT&hOM|~JZ-$j zNk8PQjLMp5q|_ed8g_4%e5e%+;NdAN;hGA;<6*>_#TSsdAJ)8i{o0!Er+`s(Y#)Bd z*CxTSM_j-Q* ztJVkWRV*$!de8PCo%Me47_7yo=W8TO-q161zuw$EI-(nLV?;pBeSp`F`pJZJaxFcU zh^8dV`t}W&@&6yP-aH(t_Ye3l``%zMmh9QezGv)9)~toHWGSSEAsPENWZy#(5lP9~ zV&4rFrl?S|%!rDNv1ALs`{?_5e$RD1e|2>^oOAB;-tYVUdM9fBdh+OKYq3-N7Xu3q zJAMgrvEWPlBDvNb*VQZWg@FOAB`1APYJ#|{f`9Y77IAkM?B97j3;^ei`=ft=pzZXY z<65xec5$Y3TK3%8qHZxvix&N)3t+d>+Z^$VMce#v7p^|wTCqLbP zG@$^xXAol6NYnP+9hnNNfq1_Ibt?(C>*c#=*hUuL;w33Y8PAqvXqn96?x!Psq@biu zEg-S@-9}~`*hh5NZJN?DIiB=$Okx$z+f7OR6gs8Psfu9EvE((BPua?*5R6xT+&t&! zgzo3qWKh*{%f;u%(jRwgUVO5U7Ll`#0&P9S=2u{MpnlfgJ|@Q_0)&`Ji`PDXvbHE7 zgMf5^7zsjIa*^S=(|VN2!#nC(1YtuHiia00PT1Nv=~{1tlWYu74gZFFOv#Pu8MJsZ14j;yy_b;;-L&vv1W=NhRv zlRD%pdTViK6thpYqFy6OH%<42eqoYy@q)o(i2sN*s&{W_Cx8ELL~b!@Co%>uM&2)= zb@dULD{?Z}ZUjdNa3vLi%5GN85q=lw7JeM-Ip~}5YCkpdefin4Vn}^)RpBIQ!c}5p zeec$#**(p8d8B|5Xz_B3km#u8J5biAKWx^K7MMZx9wWT zY5HEZVn?UKt>p2c4ue_!uB^*r5mlwDTF?Z%4%P6Qo!lHH7s^t@h}94_ER06m_(|Y!O3RLD>4v9cGqM21D-TWb)PVI!9L`7Zw z)7Vo{Dg0=QQY*)j@dwF-jRm~ltXjs{>D%B?Z;I;U4aDQYu08AU6A}~^{Z7u<&|$KA z8VDtCf$x8#z+eu$bsL;3OVYgdlhxch!zB(JN*cNjYA5~}u!|HACd7K)-IDY^c@nF^c){yz>?poFx#AwGAiAJ&F&%x0!Y4 z1O~bYS{MdD=5g;lzjv9ZTSq>9KW|;BKhjF=njLLt@9q9|%BTz=S?7~c&v?jL<FLP zf0N-$kt{~5Ibe=v(I>ep-1YJ5J^pXd;N|e==EQ;ZM?c=+=@2gA^#)n{BXWicy+bsW8ERSALyvAAT5(u%{AXd;`Vwy#H%Lo_>icb zmATdw7}hsYX;flhlL`>3Zv5=X621-g&CG1m$umW@DH3%gP_U@L$KngDGVsywO9F3c z2~+=V-{7vm&CG?Y-QSr)=<0g!gA!=&{>tpj2G4@9A{tzVTjSuw7d}HXadv(aCkSqn3fB-pyZ|fzO*E?#6A!DBpZb&F7QG z7fAx=TGI9?qH-cUK^{w1Zj9ofeDLyHet>US4Iu)W_9rRl(Rt+-^GWfET5sgQZo19F zE_NL6VLjw2aM$h|U6^_a+GUn+z}xhHPA~@=0E9FKb?KYlSPuB9=B#oZ6V_PXuPw>d zQ$sfXCMiCF`aNl4Tmn^AZ`ISM8~Rsc%f#E;3f|r+bVq%ZK$2hEZ*G0gR}T3$<+M;r zlU8^k5^PMaq$~zZr?kYfg<=BY&Rbv6x2)~;(#@FowAm(G=51IS8((bya)fFI4G)xn zl}ncLP&{%@?tkr0#FgOJK!lbt#!q&9ZyOKNl`>Fej;b8kD!~On}3Ts0u$sIdy`hJFF z7dVT=m-%iOH1+WG9#UP)#_dK>nxy2#t?8I`>VEggJ3F`DwSUICT!y12^6-sQfp+VC z9TxSe!Xjw|?g7ae*^R%~w9g01aWU>!{TR_Q%DA2UVp-ojsON`Ia*7f|OVPT+UOF-3 
z7<@Nh&+JP(IHSQB1lzo5d{}+8AUa+YA*j@eb!2xIO4(P5)p5Zb9 zXGM^@l{6z>a6ucjovU2x)XKoL3GHF?$&GK(lDBkwtRRLnF3P*5=CY6#8)=kQZ{RP^&i^L{& z_jEDWA*Tq}Waq@Wl<`x4RpU$C$!%(;37rGMe8*X9BQB6mjZ1`=l<#*;U{>HS)Lk&r z;R0(sO)RE+3Z3}9F8sv!r0h0ocK@M6|3MGYN=cT#73^9sT4WW@RC-Vfg`B;;!|(8I zYv_w{_vPJiIz_b_dYGDvx=O*JruIR1WM4>hAPc@3@i?H!pZ|)0+YFvE9gZruO5Ms| zqNwpwQ<>C?Ksn?C3qEqe=LJ2!aNPV(deDxOnrh%O8RL}0dIKCn33v6EA+~jC@{Uve zE8BvIu3wLAq?^sdt}4IL@>t?tGB;d|PRgM+x)>cYC%+l{{c#TO8PwV49TO2~7m_n9 z&^p(=^PPosKMyK*Ju|vvmUkXqwpS7V(opWZ&!(_Tr`=x4rHk&x@sHSWjheyzn7bkH4QXc`^hamdq5i zFQeMGK{u~B|F~&t(cpxWk;&>#AUmEptl53<%WROf=REE~r|y&7$FxMTlEsm>jg1Z0 zPr`HI)8Q*h3=|=yB~>>h4C+pNQf9W{N7ZSwrIOo5wG6!jKq!Foaed1nEBRtg?S`3d z+*WEtax9%;Ux0|-58dH-&Gw-X7kIKnZaH0+BV#umHC_%!3@t?I<8Ae-8Ww!xf7a>g+#5i04SPx*^xBt;u)S2guv zI4!)EKIzIx#WEM)x&BSR0vidAr;=-t0rQ*to!xGcl=8W`loFbp_tMX;UH$zFxY(Xu4 zmwQ%yy@?MWt_|<3RktLcd_x}by~6Vtd})&ZikYEv%?XLy>-$9|C$?{3}RVM{1Kmb4|2_SSPoaw}wYQt<)J_<6|ujTX)6qDw8Z z${(AhEDi_o6Y0K)KZl@@oUmTC@zH#6`RtZ1V{Oc;075{7|D5?jmk=n_Qjt`_w;B( zQ^%H+ICPqm%El3zO!vOt=hZ|lG^rDCMX|;%u*HNs{6RmBG77aqrELlp{T~0`+4@MC z-(1^VKa}F=yWZ0LrqbWCzEICz>Wo!a6-gr14p#e2JStr$k^eQE3&T9MO%87Msc_JV ziH@?2516(`nS*&!TPOZ%sn$D*u0?Y7O6Z+DNXv{a-2SUcNxizky{67_Zo%pnp?(Kt z*3NNnSGXkI+u2%B@R*qh@AlL-A2+o@V%<~2o{)C)lk~6@Qh%WTs>pxb#HjsD}-eptmq~)~Itk^07`VK4sAWeTTmRu7^34B>^85!Zv9acZkoo~jzk70P$fG7*{OF5f_A4i6MtQy1qGHLm1QJ9U}O9x~HdSt%M8r3<6xWXX* z>XDUklSN&DU*Sl1cqjT`Hl+S$cBqT(c7?do9_H-A=!Qd)_%?5mh^=+OIL?Ff`R^qe zDb=eVf0d7S5!b$%XO0YVbSx$d(c;rsxv+&H_{fp^Hreo~n(JqIj93CrTG?;+;JJNp zGmo~^boCDIDU}nndS@ye+RpZG6&Q)fgeNQcr|GyA;frxQB@drt4!-}z2eW46aAg5q zpb#BdM6!r*htxLHJfx(Vp-wHTI2Qr9*iOT@uKq3B$|0eR#QldamSo29?=07be+DBh zw-~9TdX9AvLspbJPN*HOha+MNY7{Y=%jZ(51UCeo=7LuL?y$V;yfYeUOjKS!Tqn(L z#+(=NA>VrJE_%ioBY+t;C*$@C1P=MuofaFk*^hlVUcT)-7$iPJLaKD)H%+Z~!*|q6 zPLN!iNuvo9`8>az`kNE+Mf=4!M}yeM3zjoBcTlnf&GW~7BaDhnt{*gI5L`;uDHfeq zr2iGR;P4;Ehjc+ZijO`xKjv%7s5IJ|Qh4#P+?8Zr)J2;U=Al_!m1hVG3A~6%Bl!os z$_$0yjhOs%-U%o6^Am^akzkM(rt#eInD?=TeM+0Q&C`ohKdhn`ap@Gc^=kK%o6YGj zoauRwS#DF^{nD{g24Af9PIQ`mW)$ 
zApWM+;t#wTye=sB<~fUgZ!Jqyb;u3={Ic-o!s`p47AO?kNk8{XCx(Xo?{gLE3HN6r z;l9RLCwP{P@m;j)Q=FpkDXHuL~yao0G*^;Z+3>_~RB$=!)M&&9=1o$ z1f-XFceH#+`s>Wl_LoJ7zFm>Mn0Lk(sa2b?Ravv{qwnb4)V}-h~mRQ?K9xGL(BJu9gjN7(}Q5>t8Tw7-*nfwax_A0O(xG)ps&ExKR z{YfB)noP(^4!(Sg)CJ&AZ;zH;y*6)F_6 z={O-lP1Yb#zB`Rq6$v+!+%6~R)gLn>(!8vJMHm06+SV1yp)k=(gVWJ9g)HJS7jr+a z8{1gt7mB_j(tyeAN}8PQTg}{NhE_a*REuMK)sV4~NpzaweZ9ehabozn6P9Tf6&K40 z7;s{u9C_YtC$Q|P2D8`H+gAcI@UN<1Bg9p1PFF5Np5zmMBb)t2g>IROXm4x4hnv(F zI>*xFW_-R+Hzss>naJA3@*9npKdf-_==6wk-#ul-70sW9w2vI|j&9-X@LKUgHH>wb z)jw2wslR*1miTgv@m?8I=jEC%YOVakl6FP(>*ni+hpua_#YPpLrWJ{v$wMZ>Wuk#_ z9&0;K^JE6*1uGurQ=+z>#tFiTK7#j5iM!6Vo8hGW>c)}j0j&Y`uak>yuH!FjdZ$bF zxj%bdT8f;BONv0`$;DnVAf7cq>LI$jqK(;jrMq64{ia7&I%XKU!@4~OaFJLFj-`cz zI+DB%j2f#dz2)8Z0A?1C3#q7=PCbPPG-fyY#jw`ovC%~Y`f1}vFlzDn2Gbss){!x# zL9Qnn6N>`1tvpq^uVI+e+?=8~(_*@RgMEv{s60x=C#MvrajQ)8EmuRz+RvE2{oLIc z?R(ejM6A>vE--)TLD*ND4H#>JKMyZ?- ziI3XVI+@jry?bj(aH2yYZ?;3HuR)lwtg%JoBM|ahl>O=NFXuTXt?e?U(u`ed+}LH@ z)fNqkvE%du-K~XqV=j(!XXLNVouwZ*J>|t$8KX#vr|jr$7Un#c+S8QBu;h5v|E42@ z;^?P(nMhYyR_`AGAy+e^vu<&7Nq_h*Fh>43NlR!{mtd68wJr`?^xFjbmyjIQW7v+v=k#Fu7|nb>k%FwftcH zE!yyeto--kf0FR439ltG9Yodhi7e`cP2HlhoZ)g3U$N}9dv-bYy{~lXWN0`vv@X*o z(1{K5i4UY?dhEeNUk47A%ar2fu;R}QWAm%`RqicD(cv`3@EaMwQ#LhR(#S20luWY- zp2VkgDcHl#!>vQM!&eEqSYHYvkCDY`9;Q;cJ7_HxJ?!d9-1%MA^jGz_99~(}*b94P z8@6E`K4$LIki3z$WI~@>LRM0T`Q39fE0i^-G)M?fxj!PXhLR$#Utjw=y<)qRulAcd zo}dv=l@Cec%VS`c&rD!t@+jw4czy!L*DRCBR-fxrC5IOI_5wAxFNS_}OULebj>Cxh z_*vl<0N)GH^lQG&D&*07Ig*O;GAEJ$@H{mu(?<_EHx^=<^WrNOo-H*RtJJ`}>j?=G zI;N7vS#2{-Tk5amq{o{1U$b)Uv6RpnoL_633(*s;=nhL+6zeouaArXg6a9&kZ`V(U zv)s4NjaujPsL`JpzzfH9JhXJ6jkC6%s9Cr}iHyK5kL`6PFlerj9aU}jH8-pF@Hj^2p3O?mZn z>lA#h2tTsZ>=(CGgtn!g8cmVtL}WvYs9xeIEncA5?N3{FlhCLkkW-zhy29L;@;+uE z1JFB*D&956Wpl4~kHX@O2IY9--b_qVrO4eq&u7RQcjq4WxrqUXsoBXSYXs$U4piS0 zQ#-m&k2NgU$*7&E8%Rwcoo~#h>VD@Gsw)+XyeP2drre+09e&j<${D9Q5a%{b#iFH3 zxdm!lOU%-@UGymB)b9>IEIGqQEhHkdfl@x19dmq0_{NP8gFH2c$8tJ?Jq6KRw;P1# zN}FQ31@y&mXCpoOhvg|wrJOxmn8ioT{PaQa`^AFDq11K6wz?`JclAc!#Jzm)tM2q4 
z6fqGVat)F5Z2>Z@Vyhded)Z4;=Epsv-T!mxNy&EJD-iRj0beoE|8{K}MoR7|p~^J3ZJ z-a0!IPhE}QY)Y$pa+buqz#n})>w8G#>SbXzCU6FCW0xhF%}@o}~-xlUa{qGRt~$yF29YkD>ksQJ6e zOHQ??B&xkQNGKeW!TtVNL}$McHmpZjy-CbLg zK`CPv3!AC}RZb+kDqHy%Nzw+32QmRXpkF!yW@W9T*12jwaHl;uaj2Y5fh!!-*=6PaJ{opHBP@K7)+ zYAX{FoT6cl;~Kabr0QKC^L3N~OQ4#Lc=fFbp;&e1hpHGE(NfKC_>{zud5|C=Ic9Dv{k9{3&H}Z1tj{z^bn{RCJ@ebPMEhFx7 znUn3&t2yH>hHnIw{y4VtDXIkstTKI9?|b=X|4x>yxvM7)a#|eOxE4F(m`3|~V7GZI z#E6u+1D}JeAYO+G!4N-_neR=F2W3S-S31;GA=#-H+MtnCe zEOC*sWDe0)FRECUuqoq^u!SHEdpht4yy|X@QBond#^6G|F*)qiSNsLm`TLCdWc&!$ zPZtAq=O$w&9!N#xKG5Ig7NYSw))x>qJwRbJF;I2AL?=7~E$x`gzwb`{BqW0I{DI}oKBdqDV=?;O$h*k{aW{qZ&D`%3IwG_L4&2uhLQK@7J&3n=A!6w$H zxS;FuFiY`z>0BQ1n)a}*wo1VOmOvv+Lt+qne{J^K;l25U94q$loJ>-mutMq85=XAi?sb9V-Jged`lE5X_xcvckKF|4f#^Km zdqmtGA>vfz-@6@ibCutYkFV<0`%}6oQKeCBM89|^l8%{t(KR;LmGeO?nBG@5kzFQoqFyL+;Yz`^$@kF(43Pu-u^%#?v|AaJyR8yKx+A(G^3OaSel?(x z=m|^>`=yx$y6AA^1(~@zN)@QFs8A334QJQ8KvS28{D|i zKk>mFMvlcFmMOoQUexI6J6`-@;U<1PH&}|qQ~cenoNv$gtlsoJG;=&`H}|0N2i?+% z8Vj2Q@<`+6+VsM<`V6OT4=JQ$4P|JRTr!uy-O*f(an)(c;Nw~>2^tgFNKt0`^+#qg ztnXBuFSec+f3vvXfciVNhpK_-lG$$25`7HS9cp1_#DE*j>%j*7o(D|?&d#7E;5Yij z{ARlGZRu5Rg625I(tKOJ#qNENtF8#`I#&Y^$1kdiyJJxwU+HYuJ)zX_a#}L*#4a+a z`5`G`YBwDJlmt6rN=9LoO_e|VYUoF-5kCC|aa#cjBb%3EdqOY9?+q!&6T5X|(T&db z$ajfT4fSX6<96JqCuDJ6oI)Jhg;-d{i3L{`{yFtn{bJZpL9~ee><6ECG@<~gk)G%M z_x>aEST9zbzF3nv27z=~abcQ4nCQuxDxP<*W)>02)_p)_cGKghpPZWQ`&{?Fa;q!Z zZ$u}OMFy%OZiFGk3tlKR4-kvqi0s|57J}=;*oHKeH{NTH6IJwNL&ff0yv0h)uaFcr zH28a&i7M|>qE@LN!9%$xLRp9^!#IWQiYxI6Yn%Bn#%(davs}ANC$2QX<7(_1(b05v zV##-tg~MurS{Ll;R3pSWSc!w`zTu2~k`dEv!E^kL{i3gJskTCHS^j<5o<{P%PPFE^ zl1*<~6_-Vj+VVAI_5FC=*f+PG*lA9fDqg%&XRzuP<%2+Bw7;Si)Wx6MZ|}*y)a8$g1O{UCjTDm3)5YH_tw{o?1Ubp;xpd(87(XE4oWmz=L%FAK6dcn`YJ44ZF5Q zh0|K0Wn!k|e)G#$P{G~beSgaS=f`+Si5(7AV|xA0fSVPKZG({nz73~Z5iu=uCci^i^-`d(w6(d(&Q11!>ju}XD3<4Z7|NO z>Wil)181&qPoE=`CS+|4??oGEzlRhh5I*V4$E;uI+C7LOO0dr6WQn+)7iH5gJd<;6^(y6 zgkB#RXpZw3SjC5=?nOt;7qFU4dWGEvZtou<$jSCSR*s`qd)}$G0%!*L 
z@-7v6u+M%s=iud1a@4@0`eacIT8zN;j!@v9bq)mb%mkXB>zIVq=_@EHgR{m{gRrS` zF_fJ5wPWJIp@=FmC`$Gej$C(`C@-cL!b>3qJ!)?#5`Su~hbF~x-0oqqWQqkF*Ei6{vx-ou4)Sn8X3U>6^Rblj=a z8a*pF=W!XqE4Lu3Sa6R^%VihA;gB#Bh{XJmpGz0-LWml(_Z*W`dU2&7X$&JzPR+=P zG`Mo5m5+-(Xe6aVTEQ)j{`qgk&hA8Gcw$9ovTuEzZ2fEPh{W#b^>Z`v;|5a>vONeI z`P48P_ONx83R%`Cdg$0&O7*(QT0P!~Q1*m0-~0Omb?$nXj4>;$3_}wJ+JeIV7MG&x zJLJAZuO|*DEgtV>j7Uk09*|4WWRfbs-L;+Q-_w9_nxKiLqF|!wZE}c6Oj0!Vw}tBd5Zw!l=4`klG6&9>$2*8Q?yMa|xe{WL=H-Yl-3;+-NJnmg{Xp?&f1ZMnfUr-m)Br?^)LKWthe zjD3W-VM8;}70B5uMWQS2Pi4nwKW2-a@GCZX(R3oad#ySpZ@JwSKGFJMy}MrCrT(C< zgK(d?^HzgH_s)t&?5>+rlANN<15 z!biGKaa*aSJ-#vIa{6F{`PBnrlljz@8O#>%?VFDSBOMbuP7gN5eGhRt#fujshC7+b z-x%lf15@BhC-o3<3e&U5v6<96+^y_8+$CPK9Y!DJ#U)bn)mG?)r~;1_iYUBxD;EDJ zsTdFUx;owbqpUND(Ooe_++GXE$n?xyDG}LqELupkI5l%L=9X=>_(>0urGc(=d5#aq ziBi_Q<{0A)>~Spp`L>gkM0NJc_rxN4!GvF)Ij3edgjq1SFH}2D85*M1lvmPBPP#-R zp4eF2ATN06-Ng9`%VnR`Fp~Hq5EtU@P0~i2gc$23{e|fd^C1y0ZD{Yt3!)HDEAQ^crXb-CG)9V)F96QL>4n z5&M&o6zIL&qgG%<)p`@wbE;MJi66%p3-QbMl^A6?Z!DLf-<>O_-nkwwQ-7pL`5Bp^lFr_ zkbD_&%FX(m7)o%BokM7Jz#!dooU^2Y?H;$E!ZjnNxsd1|I7GGRw_v@**xts1WlCfr ztLoqv!GAKe(EP^Y?J;LU0!(m(7^Qx?`nVZcqFjn?D8%!B2(|w zJ^z>P{R~mg;GG`?jW7|u#!Gs@`p^rjcPo#E}SkokR6 zVnjTm|7GTlB3N(0U$In){wE_r#&9WCjWTaHrmpSDA}&~!Ki`LaNvxb6I&Oy{>yBP| zTwfYfjOr;D)2VXIc`?Q`PQApkzSO_iMaAyZ((pg%!Ys}c<88=&4>={G?sZn$i*Z5^ zBnr$ujL5eP3K2|nOo1Jgg;&`Zj+!XcGAh z%!Ho^FXt4?!qqXB>n?&)Bv6HyfO}C1tx>RMTyk;>F4yBQWIWGTPH{?8nnkaF&QTmI z;Q})}H!Q}k3i_-%Fswq^!XQAtG2J#9ri0GO<~em?`z1^d&QX3}@RKEAo|vWTYMxB~ zioUbSg?JHP)zHAoWMzQK94qSiS)b{aM(oF=-H{2>G42ctiE$!j^!VZ) zfTL`U>w-hS5z@({I%rLljh)aXuG_=B>0G*Z23r9*3quUpt`UrlNB%A66YWX86{iQM z{un-2ETYU2TPSg}FC3*osQhdAzr}6~;#Ea0``I8bo?kHPiidH~5REE-v=+kd}Xade#bhCz?Q~#crsk z9$c)=7dDo}Crrpy8r7o{8W8|(t`B^{VLW%fu5ic1TbWH+LMk^VW>Xf>ITxSWnLzf2 zXmzf;?Gb&y>KC>M#5arf+-Fup{A!&k(NhE7Ef>c)7Fx>~0s!n5lKV|3EDO@;-pQ_))^ z{C^7-u}b%;Wf=<#VPT6I4pU9w$S=VVYn%*pvm5)3W(iCwxD7EOxLEZPh3}MofiL|A zy)d7RnBZ9Or{k%2#>w};HK8BL%A}M^aw=9k2YtHGA-E0N9hV`@QmrYSkWJ4z0FAr; 
z!5Njc!`e+LD?kh*rzD_pd+6L{XvRbh00yW}OP8xaY>5b-QZCo<4;r*|=o$6T0&j%uE9J2g>VUYl!umX^1L#M7e=r@T!r;A6k>SAHSoesAUc)sfjg4KC6 zrb}pX%B;9cc5UaNc_gQQ3*lH6Y{Jj;v;s&}-!qu-&8BhKM`Px5(gfu8vb}k|8pb|1g%I?W*i6O@505qYKg->OJC8Iv|O?VE=NHlr3cN27| zp#bTQ{1iNd>y;k^vkp!+bk~x>RaFnQ(*?9wRQ#rwr$H~p_+ty+l2`AhwGBM7f4v6$ z?iFzAUs5H0!thVG=0KB`Bx^j3@VCKtT&5)1l#nne3l%cmjX;myUyD$HEQGSzBCf%k z+P9Y-#-G{CmdMn;Fp$grOdOGQ%UZqo{L*(e#p$$0T!m0*h3Sy z{w7B}0*eFfEPumj-9+H(Z}VkI_mzAIG|W8n7i(Qk)`Cxig*ob+L1mVi{*!45!N_;g zua9mUyowc_Z+yYMZMK9iNoz80YJZlkJZkf)+HnjzVUIfay8|r%5U-fvPe5<}wqE~S z?KoYS^FudGoApz-MzV1a=r#Q~9&Q6qh3U~{X=bW-HPFnqMeE1?OW8At?SKhzAa*ML z4v6`{S7ATR*5NZIrV2Rw$P{eO=(F3uzo5Wr)3sy{Jv#KX&iun;d#W?xL@=Re33Ri% zxZ0iuZPj~MLan^<_g=8!P$&IL8?!9Ia2u43SZv*`q`owImsvt zNugS&APN>EiPoRz0&5!E{W6L7(=+@0Cg?`*yC3TFNG4-Xa4Yoj`T;9{slxPbxWODC zJ1hYk41;VHcJMmrx0ih#Omf)*ElJxvw$KUf^sU)=E|(v^La$KYz5sTq846G)$YA#s zNTU$O3!Ou|2nH;FeT}cA7c7Xi5@IHXNn*x&umi*yD2Ziz; z3AMf80=--@nED^m3<=OLeu3^>`C!n?wurH}Y0G_r11P>`fP@4GML+jHgTAH#D{!jA zXToIJ9VP$8#t>0bZ|aF-Cc9`>Zgf1-mA^5fdbdj^E z8gxKbUhD6wzH8k-DUH?3@(+m3;8h)|%uDWP@`YQT;t)0}1dY1=&sf0&n`$QV-C;Qy z0H_kdU`$4N$)k9Zjg9-PC}{bk+ybdpD?lBz0u$GFbh&|0bQ2kLSc^_~o0^?E+O~wc zOuqpH!@Qr30ZY6(Fa^f6sj_hTnYNI|u!VbGPLO0KaVsG$B=atHMno^UU@)W;9F(t?W~uo?jjh%Vnbv0q8f3@{zdG8lMEh3a0|XN&s#3 zW{JGzm0j{Uk|?|j$n{X=KKMymm~payhGqt@o%Ofy2F1KXj@ zqV$RduDS&@=X?f6R6KtW{j(xJl>7+rt5yJM0(~x>FUej6G6odUO&=;P1(JA$XP~kU zgy7Pf61*@70HEBm!r%(gWUNTCt!Pn<^BM2=kX2mA7RG!B9sDUrN|+cPI|%Vrn}A}q zf^cC3{0l|}ZkMIV1Snts=nqgl{*eF35`f-NLJfREUFZ`6@NfPgF_|m#NN9vEiiQKi zZloX=uT3>;cg(mqh-#)l^ETlV^hRYco%!vbl_gUk7?=a&zJL1qfp$6rmQll3zT+e9 zz%)T^Mda&8DoEogjRA*(MNx_2B2WeQf-iw)=MoruBLKZm89W=6wQ71-YG;c!NOgk+ z5)lPTVOr8JpfN2+y}97?jcf2^#lf=>?w~XC>K5P*;{FK-zaR?GC+z=IIY60;_@SUG zSvss>PA_Eevp*RNn&QV@ss-EaZb-;`yaA*4h=v&ejPwSYEAMg_(jbK{(2q*=y#b5K z(AUcmvj}K^4ROW}r%|7t-sRP}UH!G;s#zNkcx^q=M#q)ykVGEJqvHBdZra;L(oiM+s7)%7uF`9ZgY2r@z_tJL#HZx#;H9RT=yb9JNc zVyParee3O;TcC@w35fOozJ)w>77_G#27ylVyM@S*21^r;&P7&r0Iib>UV8p|H&~Ok 
zsQd{WdbOymCLsnWh=qC<4bBGzR1)Wa^p|lGd47Y5X?)jyG=cCl;%N_{HnNrA>Hi#fmpqYLJ0LlIZEj9zdSWbZ<9oZPy zHGYnxoFfrn(M*BiPh?}_F91yoak6X&AaW2Z+a%lu{HG=`DQB1O8wR70$PS*>;LWkD zxLdh!cJY9mUB{2JiNILMv<{uS34)eSmCr@L*g|UbTuwF;%y;@v#=+K_OB=MeSLUgc z<*(P8*4Ie8jo@SU_mE?)PpXI+Dd5yB%y&o&bO+> z+=9CR3(rpg$I~br)B|7 zatTa4oCizGapdkL(1Iv1;)VS`KO+|lVm-aYLK!@O)0rsUe;QQO}JfIvC3fgO%L7LPG7^baYF4n+5zr>UX zA;*jpc@zKst9}SUE836&bnGi=gp>FIfT~Wz_mu;=1-!4m%{9R|W}6aze0X#uY2a>9 z1co`62-;;QHh+D3ccI!rejGuW^$4s4q;n|1(p&+vW|EKWyr<@!69Tuv5U1S#e?>8W zkHnI|$bT13?^^F39|n*h5y@2(6FMRf48VFj0SbHD)^}j8*pHS6CTw{W4te4Ep>U{4 zD`L0l?cLLhJb@5xJ`y=v?Wm}F>xUJfjkO$kG=tBvP4Xaw`_H>z)(D@Vyi6!r_M+(2 zsDTscZ);9ve8IpW)dXgs>=iwPlFOq9rpxk!sUz-$gD#>Pz)o2Kvwwz_X>{i$TszuI zYyl-EhvyEY39N#sRhJy_QYrt$#NxgZP< z=|$+~uV$PiA{q=V3a|&!_7(^=ngFq@2~bG>DO$4fe~Je1X!l{Nau1JWa9!U)G#pUR zRsg)=?Ny0~ccm|X=4Jg)t%G+re*OzLV1_Khv@g5zfCXUIR`&foq>So@**!Mr4hFch zR)EoI1^Hq#pm+W6Zl{Jml!X40JUlOQmJB=xP%mLg5KUqsx2)Eg`84K93gp>=tW8xG zBr{XK0BoeUpt|#Ju|;eV)O&y-K_8<{12%H{xkG&qGl-@$>#C>C^X$An;~mTodNV+i{T|8pg>_~NBSM`WY&DTQ#wtM z34vsz`^sov8VF~h)yy@0i+QDu@P7Kr5cE3O?`S=Fqr5U3Z>+#fOz>DV6TooL|;h! 
z)+B&QN`vHs7sPWE=(yt*U{MWx<^j?p2%Q$^o+;X6c)LFd>%AvAe3I;$F%@sh-@(17UHZ!lvkn*2WXD~Dr(u|GJ#S`1VP-U74VZlkT05A z?I!{NXlI|=Rb~59SU$5Vjpaf*Tm#qrPf<;ofGMxA(NBF*2*ohUDhfna7V#lVd5P!F zttv;MRTe97h_O&r5;(-Tci^%CkOCPb#op%v)V1_hKyqTs@wU;;qWR@iFFXel`vAb` zdt0C8-;-`Hyi<8Dji=%xBX|L<#09Y6K)X?ZX|!_J-i4gSL&m(pnS)p>7;;3wklq3d z(`+dTrG1_ZNxWqbp>>9Qa77aErOnWAJBV{8kFL4+{63)bY!M9CsblF4JH7D5zkP4_ z4oFGp1&^7kVQoDb<8j^CHrPB4Xte!^D16dPw zcz^<}?kj+JN&W>cPt?2f#TsC&EfRUb*Gu`I`t|{0-AdZcsdB^=q-3U27`pQXazX>1 z+~{Dy*dUkrcz^>mnd?kx;=~sXSKqZjl}eDd?yH5r)|=O@7=Qo<+b+ISiQ4)`iI0z_LR{;ymJIWub>wSf6V*(==V zGppj{0fr6pp|OW*Dj5s>$`>dv8J{n_{po^z^QD*9n}K>a1AF@~3&X;WmlvH4St|#z za7y9Xpr>2u?I2hPNUMjbgK;4?+9N$7STdi7Cb)fB9esZqqioyzZ-5W=U&@UEG|{6s zR|4LvQ5MdBKKacI5>4pCd_a~OZ$s%AV7{BF0_Wr!jV!(~CUxQIye7k&cP8ED(CgD> zX(jSio_`PkqN6~_fJthzDiY+m((E4n-&u`^^3en6OkqI8#73YY@%}rJP1n!N(0pWR zL=)@k{uX?eSknaMC(MW3Orz^yJnjkr0i#c7Z9lZU1GB6qOBpPCgR*59ur*U4av&Zh zJ$i}Z(SzvS4WBKL)o*_+hQ?ovq6^acpFA>pF#wpdKSAWhTYDae>!50VQ_QT)zkIyD zi409Mf+YW9Y+Xzu>-li*)Xa|Pq_s)GJBq^nz|{PPplwb$-k0tvT%CdBqd~E z65TxjX0`=b1OP_%3aA*e%dY4a%?8ikh5)0^`@5Tg`^Eny5~}{QIp&(W|Ifa#UB|EpXd#7Ane=-9Dw}#}ed_*r z*dz;O288V#@5@`90=|+@c10WqFZL`mV>msE=<93;<2s#yFAM)T03x{dV^E$1Dr4Fp zTga`FH#$@k7i^e>1&l9K!z+J-?cTE%6! zx*-xTAe;z21Xd#QU#STI^7}LaOqZUvqe&b5DZ|!TAJ}> zYq`UC8Wi+; zo&*B6sh&$TIo*Q;LGrJB_1nt@Tjl!i`wL}U^8Sxe0$iI3X@E?ghDyyq&)8tV*lG=kq7xMw&_E;3Hxinih|0a60b7HFhd3M&0`w!sGN>759o3&fKv@IKGAN^y zoyZH)4r=P^NqN z<@LJ&+f@*JBWd8yoLW7YPE-S&BMRu}PEBh<7nCaP)Qc_Y&pvVO?BFs7zwF87ri(UM z1sMbooufyIg*htz{RRl@YnM3nJgLhWA@o|!8;MPY)mM))Mf-(`B-z$w1=$}EAMjx3 zj?O>jGNlF2aIpR8a^tl$OJCFBG)$W}vvn-m1Dx~>zdi90*a9;udlx2bmdzEvc4AOQ{LuobLQwv zHRJ=_>cC0O(2pQ#p03om2|i5?xm=mg+gLdWaIc%7?Y9i}^;q($%{k*3(+#;L!S5Ut zcZAh|%xDOH$=EGI_SWlfEKe%B7$lZMF%ST7-v(upN1Z10Ip6Bs^uZ$P!5QgDQQ)-! 
zxDV67ZHatlS4Dt${;u08m-GfQ1h&}i1EdAi{JX-YtiAc>C_cD@AoIvcLNzO3z1NFF z;8C6wW}&CW%HbzGU!=fQCJ|HspJ$5Z+K|J#mz4hP3R=h!7%_C8jWSxK@ZvQElK_BytM?8qouLS>Z9 zW3-4ABI8(z%HHF5-Mzn`-+!M!-k%5E?)$o4ujjM}hvq76RKbFBzc1-ur04&A3uFLA zo+I#qeSdwr5&S!fcED?O=cg8tW3@uwn^`f>3DkV3BKAs3y4|vT;5=3UItv1f-_FZ0 z2&4v=$yUFySZM`2`YL6X19Z~DEZ5Kvki^ysY#3M&uYhrfqXRd-Q)KM;D-de8@C83> z_17IW>%rp86P)5a;R$VkaSkgv3k(ssRhobM+)-2NJ*Xq~f3GVVnS*^ZUQyu$z8c(O zBm>jpCTLd{=-5w|IZo^3CFf!7BNP%QvQkr~L_xCC7!B^Fq3QdV#DX%5-y zMlz#Jtj#mKuT0Gv$Ao#bzOj{_`a7p?jEd#Nf}%dwDGn`xirb7oyg6XBijb14vEeBK#kf1jW9}|E^AZ zl&jonuyj2U;s$NhmUQCH^K3*T7MF} zv%ziSg3|>ar^)|%$7QfMtAA3EVQ_z!IXPYHwn)Vk+rI&v#rAJ-0S5tCn=k-XjuCJkn(Zx2K@p}e#>7ZR{6ez9OE$Ix9q7e%|NVa={`7I zn%j@T(6^qtpBHbt3&MPM2vWSxe-sDhrxAwbC}j=>F$)Ma1`NUG#5GWy5f3=2;c<|> zJPzHDOhRCPXy?8_>OBS_){PKO4$_>u7q|qKyt2*L6#)sU>^&kYlw>Y=rb8(KfV`S~ zD2mT37JMrH_x;S~i-XAw+S8z&s^?CD#CHfFy#`^wOOvgwZ@?XW1=Lvm0C#AyE1A&1 zmu1AOn(k@FY+`Z?OvHPX^sN{wYDA39g^H50MwMM6#26;0Wa-@pDcs@e%oZbJslUfs zMviJ*6}RqnPq(;oc!1+=a2X+3-{ zX<_o4ZLg%Lzlg1m=@G&KY~>NvDjuL6^UK5C_1<0J=nDUBA20!|SWNcZ_CkTrMqYK^ zB5jgxb!~Rts&{Iqnrz&H2R3pwNqYis$P>lt%hit6uDrvFpZ-w3VS9?#o_g1-wv%j& z+oPLMufTzquzVlvz;82zYYm~ptBavqOywS!43xvYA-db%b;$qedzI`e=I*?d%AHea z|K;zmKBUL!N5x1uci#ecYZAALjQpfBeg3#+hKHp7Np!T6AD=<3XC?+;x zJg7K1Mc0aM6f{=vGcZ4chNpj19fiJH$!bJ%qlBQbm#`H-Mnay5`Hk4dAZVbI`6=ck#2-+te;I;C+0O-y?e|Rq0r3t)izxmX{FNgu&9YIEZ}96- zQXS?!x(*!)-_#poSZtG}`}b3k^aQdPV@z)f;OavXh{0wYX+C*DP#Uk+T8yGN?&%gF zd25-e=mTy%sA2~{lv=Xt-9%deFFt0W{{Jinn$g>*lqHL?6)=R}0oyNu466iLoW|fP zAzl3BR%ZE*ViqXBY@9))Z@&q7(^LEhcPp<_-BO@5SKZ)=$-SX61EyiX7zKgV>J8NU zH@$ccT)IC@p4nsrH{@hZZP@i?^SZ!=dqHBn`#z~NHSyamN@)YUJ!XoOVR6JU-b3fq zqgum=OTzMzhT8F|Ee{6zzY(Nid<#+T1N4f0mg7=ZJ-n7o_!<6%uPXIr*H!~Nxc<(J zrU{R>++g|ASUl6M-c|5}{u|-@RMCy+p9!$(qx)M7L@X{#Q3hC69>h$T#pRF3{(_RRcn&uRXpIPkG}ir z|70B=KS(S`V0b4-STGl;(V;rCcQBGaY_D*Xc|WB~`Wg*eDvEt|?&(D0tC~Ly3vuq7=GQJ{MZ0fWJQYj&YEav) zT3^7m<<}Qh=YC7J??Km6N`ug!!mpz>c6=V{Y5v;Jx=lwK>z~b=+W(DPn*8+w!MaKBOIQR=nAxC;VCBXCHa-fyDW7vF8#^TXMsglAyXKVk~;mpn?rE1lCe 
z_#Fiy#*)?#gZ1}8ONb4J4nc30l*r-mw5%o!P_ALw^pAL#1xYiA>DVGjlLeh<{{Z#S zO5RweZpeW>q~_|auXtU;A8wgUod`a>b$ZG7&kWg>q@cFK|1Um?GxI`lD6gb(mA!lc z=2l0Mq_Xj85t_dz853sk=PV@|c=eE)Faewa-uwoeod~^>Dnvv9%!>0Eu#>THa&t^? zlx2!B8OpP_T$1-bT|F%$*ghQuS8hXGbkGel-I0_Ls>2d#8N6NG#0;05u%! z1Cj4!P&DyV8o-47Sxg$0E7*%I5tu%Lh6T_w4zl0f z15DmtZOSJ(sc2!dP97%yA%d{aWAGn*447)8bTm)&SOat9qVO8f3_uc;qP5eF*srX+ z%W;6iTfI+5A@kJVN*Cj$@i2SFk+9CDq*>MDe9OPlu}S+F%n-qr|9+YabwDb($J>Wf z`Q%(;)87M9P(?r*%7+d-W%qK03?^*CN4)~wUs~VCIXrCt0|oA9fF94V_fGk$V*z!a3 zE;vvDTG$>u2gZxFOB~&40!spqTy%xGR3XM=xH%-??=1Kou>p`;1yXPwU9txw==N-u z`TurFG5~zn7XuqjB%R{%7YT#CkiKWhbBgC`76J6Cd@)z+%{>X!rRYB4_m_F;y>0+B zUcW7WuQDJ;sRr|4HQ2HK;h|uRhU5#oMNalC$d=&FqU^et363_*1bj35E zG02R-H)O&7-%(&?)v- z!V<8olc3PYT_z_(H>zm|)H3}7Fz!M`XCuI1Aq855#Yz^Z#lDRQ}exE;a zv3mJlDV!UwNFK7J^?54pyDE=&owxy}_S=+Qyn5iKypWlAQmp1PN*kY|h~wfN6}ZN) zo!SB+&3+p!sd@{Fd&xs~8v?fmej1foT-!Lg?!1@RU6dSNi4g4%adP;m>IrmW+S<4; z0oB)v8bDV;Pe~_uM!V~yO#vPezHe?WlKTU|dUc@scc45$`xuZZYr|B-G$CtEhK&x5 zfsOHvIgQngEscG7S!{wDBaRWETp%UAjR!zNty)dz0N*nAd=lCvDiNWe(g9l0?m!^; zofk_**R_1N1U}_NtafpSxn6$CM#(~Cpkx?;wF{UpBPl43H%xH4Hn0H}IU~=DaVo7V zzBBoqFSscSgL3{m+gCc=bUM!723^c2n>UZqEQn$e`#pb$dQxLLn_KfGWb>{@i^WPDO>r8}V&W;R zY~&r@o0EdxzPgE{P*N&LfpTH z+#aA;%lsfdl%+QLy1MuDEul$j=a~kvoLXtCdl-3)V7^l=0e|L{4ddTs5f9?WBxh4g}PjOmj8(0;w0@opycevk_tvnOQqFK ze2X3G6~fTl_GP?*mwypgY5Qu{<2h@ax7f4hnbB$)QHTVB~*QAy3rNxo?hEyxQ! 
zgT5Fu+k#VlZyyYk1*h-eTfQI|M`ga5s-66mckRy!+<;y-)^ok_I9PhEnfpJ-MaL5q z4L)m4z7lTh8BZ0!tRiNOy%@JcS}}r-i(uj?kAeG zCD0Ragfcn*an(v96yQ-uJNa2NH;p3@SIv(xB9@JXHaO(g5h#!!hwx>l7~y90M~m@7 z#V;Qf^4C|#Kv!!A_j>n0%3BXmm`-UJ`H$vOVIvsRKX zuFO|gsJic~ytgD2EmMCy>4}uAoEC9UJ$%`HiCU|9HWuQVK6ws@uq`n zNi6u1+@=lfS5;}=p59eExmzbIiq9c#?G&E>967o9}_3b`g^X zimI6X+-+39ng*+=4I`MF7Fgr$l9Xne`~>HMn_YvudQ)n1vjtL~^q-v_& zFYp6`=i#-uNi+ps8}Kz8DJLpyYKEwT-^n!r+W-dF-d1zwM4GH!%*NY~5wguE1k&IJ zY-x9Kp>RumI);>|)0!r(%w~4V*eDTsM$1O{C8O7>f0T+!#m^QX|2MlhWOF&3ulF9c zI^_!&)BvpHBzJ63S>HUZbJwh8mxON&0}Xq*3W2Z4agFl({mQC_Tpy?|%`!Hxs1jua zXh)`)a~uI7N13PptzNqcH?@X*Yii~DsE6p$w^$$L14~&#wnxPs*EI7271QTtalDxk3`f`8SM(=;_PNIS205Blw*72+@< z6PDwMsEg1vzZ=6#HMIP~?t&s?l(oLj%vHdE68MJ}?>lM6&3$T0Kt4mh7)IF=I;f4V zK5+%G&-s#7ga1Qj8na~X7G{6$9+dK3X?qQ(ro%`lP^q^%+@y1G-`^HUTAGbGVvfNC zJI37;Vgpd!Qm<-(kyAoSq|F11_J?7uLWhSV11 zR`}>gjkw*6PeQ;Mv$&w*ymtj4@s#`20~abhl%lAqmqB~$d2SjVsO4G+2lMXE6Mi&}8NhjmY4gEiaGVmQRICrW;$JTP zc++3MaALXNv$j_10qlA7Z|r zp99{;^p0Q>AM$7CSXmXU+-M{nw8E`Cjo0d9Ve?DYc_*@1^t^-0vY#$u$PQyr2*>W1 zx@SHZKnvBbMMDzd#ag#_>=!G~2?Vv%a*L2yy5}C7isM#2!2Y8;_>SmI2DCQddI@*x zLe||OiExDDv@v)${{D|UB#~GgqM5HHvor4lf!$W|8YJETZ8kyBrnks~v-DegW5s_R zjPw&bpByz1#-h0IM4;Q;?InP}p=fwQlwfUiuDWynL2)3UcN&57NU^6bc*`o8TyVX5 zbxGJ!)P<4mD4?ChvXyS1XDH!okZ>gzyy5y3#!oCcoWiSadYY+_5E4hr2YC*fodsBMY4{S|3%%C zp}UlQea`vx$_fg~;+Q3SKYyI5HyI zY~1*~U(6)OVTV31UVNE8<=D*sr|Fi`rC4Sx-D33$GiHPHTlJNt{j|+m){fCmmnF=u zrehrC!0C-FP7d)T}b-y^(&j3sm`EEtgr{PH9_(w<9-Wg*Uch6e|ogYviS~P9(z$CwMCCb8EygKb2A5_ea1< z2kC_;MgWx@v<*Z#Z-72=H-0sVUWv_{(aTvvoc#fCJN|(jOw)PDg0AvhA4}YidVR%S zkVAOjr`OrSvyIRquw%+1>3IgYraMzMGX+nkB>~H~ZIG1jGPF)>#9@4Er0Y{LGDU>X z#!WbFM?l;h9l!e(TJsn8XyotW0?<6XhG<-!d`dGfYAQ;$BcwuT zv=Y=9i*z`99u$Trf63sSy~9aYK{a!y&d=T)F!>OPi!}wNfb=K8H_+OeKLUX(;6wor znGg()j<*~IsgroLaOT^_;66soU`&W2tCqV^8_{x8)KmskyyQ2~YJv*U76VOxnB-ILzqShCCb9VF;?p+7&O7SReoRdCX2)v?Y zjUD!|1o0`ZWgox7@+vx5~JbfjXK)24x@g;}*n;zq1@Fh~PMph$uQl zt2u!_?V~rJ+s+EJNAJPjIRv7}j<**@{hs!Wqz@FSu@2qW(cuBKc?7yvNJC`ivsrk$ 
z-p4@WN3p!gJu+CjSK7v-aZW3sv?D|qq+;xM`Wb&hK74k<3m@YOl#F(IvREq`%jgV! z-VQ{Vhaew8XnNORC?5!Yj#)9$-+6;?lEOY5BlK*!i1Zlx0aDEjE*Sfoju@-u9tIPI zM4HEG1!BRmJPWs5`e<<&zGchMa+P3e7>1Fp*ko(Dyx787OB>qZGP{8al|5w76X&KN zc5x^kKFYAg`KB`edX~#RWXAvvoj~L_ITkA0J@fGdBRz+*Kk5?~eg>2PZUr;}jQG8W zo$BON_Vs*iY3p)MBrmN@b_JwPptX)nAjDaj1KGlu1z0UIyjcphu3dOr|>GBu#`f~L6H=qOje#NW@JHetK^`6I;G4` zjmVqzr~RCsrk7^}nZ_>gdWNx_OYSb~JCU!DqyX$j(cBkm=`$YNAz8(IGr}NI(duv) zd(lmkdudlZfq4)5E^AA!XNAAaPW^B09|Y_qmQz`ley7N<;ZW`!*ST^I|HsL^TvmBl2XC^P%^G+LnQLP=(owTC zkPaM4*K6VSxImBu7Gpb0r|e}b@up^^HKJA7t8jo9LD9e4*r?dSXb|HfhrUAHe99w^h@YrAy3* zwdju56g;b_x#(7I3^{2@Smj1b?o)O)KjSXSU9hGp+7$N92!|o4skN!7Fb)#y!3XPM z_Jx=$)EcG9s0}$gVfk1Dx-xyV?V6sNQGA)On5VT2=wVoH<7@VwBzxAV}_5 z#i*8)I=QZh&z)~PH7E&e31JQ>bgQG9H-OWVk*Gxgf@kg&t*A1F^&pt4nvt{fc~u6M z8MoPCbVVokh~cHhbXfYu+MoI-<@@yW)_Jyo``U*+uD`QvqFyj5+7;Cd=7GZ|OzhoF zOXlKSGl_1{THgJaUZ%L&DkPr}z0eD=t3!`CdvYhso@I!+$m1I;!2>YzPW9Xye(QhDDnvUjafQ;>Q<1E3j8>%hQ891T z983rA0Im2L)AG10(adaH5!Y2Dto&>$=x?3F2VZr-#ix3MX?oeH1n4UdZgsa8UsbwX zJN*uSt^#s#L?b4>#GHbqmp24T8+B>uyFIZAvZOK7q~9E>ZTcycfji_E1zfek@EAnI zyG+ro>A4~Uw74)0;2AoaK3D1ka#=1LC%R3s$U1k>v}y368R)>z2}rD)ee+hp7gBY=ut}pC-r6yQK>Dt;!Z?@saZW4Wy~9$UDxv%H^6Tl5Ey$le*Za1II>%Lf zavw~)?6l3niJ(CHGpIei?r*LW%*X!DPhjFDNO-{>YKI1nnfK zuCsBoy4EAky<)!i>(?pdm)nEu6>T+CWIqalbcUxNsMypUzFR*u!v{@-R7OZ-UPx>O z?wD;EX@{q%wX8&ngA#7*gAj*`J(d#Zp#=l}Kk8=`Tqt_7r8uo$*Jw5)R9GGQVDurI z#J7>w?R!#+iqGd5vjQ|7X(@>xHEc)DIXkXHyng?p&+Cf^KPe}FOZ99ek;2jvym)q5`rNcyOT$54=Jog&V} zfkAu`TjW$FuE5YCd+E&JtBXz?9rGRd_}1x3Z=v!Chx&UrWmm+<-R@E%O=vfzBxd-ddX)6?0_ThsR7 zKJ*6mWxaoA(i-IEwc5x?kYnZ6xM(&$^`M(tmLqSVB=-Y^tT_HU5Uq>mwnWuS^l9g7 zIU>K+xm>m12UZ~oRkWK$za+oP%rjAa$WKA!V@?Af#^zsB=1?vl>Db=zJlC0~{=-y^ z(3qLCE8R_w(?H!0wtKz>mhT=M5^$Q{Yq~T%OPG67OC}9FuXx&lesd|Fg>Sx#J(dxx z*UPFqOP7CUWrLpiH;k`h*4?qza&-)}n|?3~U%&iq6%e>Rg=_vAIEFmg4z-=sL=n`= zI}7$PY1xkf(mR>~?7V%20IDwIHmnI-;b|?Og#3oWzkX%~;Nw{gqSLkZBrGRIqy~#0 zizT%$RP2bJS1r?%;Kz?|U=~j1!}TMC!!wFgu6hz|VeBxc4WTkh+FAXD@BXb#h9lZ# 
z7Ak(aMZmTotPN@EYJ7FOodsXvy&goWO%z7$+W4nXnw?%h9yt^y|-fJgx zPhP)f-yMnkgu|vtT|s@#_^_0I6*jBO+5PL*73xT3Ie5F({OsIFt-{K_1+MOJ zcBhIVdYyl)?r=Uhznqm6DZ}DYh*)}5?28!Q|*LT zz6zja=7&CTuZDx5F5Td&`{eJ`ke9-jb9b^9oLIPiwJ}=f|@o6ZHUN`rBh}Lils=I zUQFWm;84yh#K_0ShtJ;>^R2mJrSU+m#lk`-^8S{TX9;9Z`J%rn5by@#^VKO5WLs+p z>23o^XAwG<8I2MiF<*b_5y8bLdIOZg+Kc3~8knzHUn`v^3BM#@kPv^Vjq4&1_>@E^ zzZMzFm}*A5-&`J%t38$0Zp9%#Df&#KjBeAKkjZfGP=)Pd!?SN^q@;+u_kmmGNWH$W zgjtS@=z6GM{uivxw;bkgdglh1YqQs4aj(HWCaXbOjx)+OaK2r)PNnDL{sT;`o28L@ zMdrPyJWF2$&zX)(T!Pb`?UkWc;bWGQaIN{&ma5AT;%v=wC)IY`w&hL;`HjVRugxh8 zQ4r^8OH{oM%m{tNo`10@$Fwu$UmZy`d7kOE7~DYgf@XljAAHfdd?s#Mm0I@NNDuYy zkY&@4Zw5J4LAgv`F6KU3KDPcu$~vL$Al}9#cto7K5J_f=rdrqK=l40@ITf+1o#tO5 zz{>K|d;d{o@mlMQZ7&Sov8@8TyeGq}0XKniKNVegP$fsA@9Iz!? zW+B%>;IB6?PX|J8`=3OkMd0#12Dw_hP^6MB%W3mKY$T1<_mE3lAp)* zN}yVQ8+g;wL4{%S|Mp%&i}#6O$XquYqCnU7)LML zA%#{@NnI=&*+?be_R^c32iJvpv$EWG5}g!SGrvH|Ve;>p=WvbON;oYIp#7TIFWwE5 zxRLQ%BEJnpHV_y}bxX>ID?ZPt5bBM$3Y&3}{;J-NA6N5)Gn3dlwIjeA{aXM((@Q}^ zx%YYNWRd<2ehr0p%#kY5!5s&(=gx9g_G9f>o$rHuo5X%{6?{--z;CEf1(@qy3U{M? 
zY8=woD8(+_KAF}|;*%H1Yjlny@BRiJgQ6MbjREkl3mb~(+?hwgt)-OCqrz5!&iJ+^ zc62LCchZt6c2d5?=w|KpYe19lU~3Y{!b!WV&=(L`oSk!K7Sigu15l(IxHOL3=cZ#H zU;g~L!L25I$Vu?2#6aAsVi-|qWgMKkOIM(08eqDo&Zih3O(P5I;PU_ubSM%806K|X zsh>?jIZtoui6l0{W#H1x{XW{)0BIz6F=0t>!NXjhY}E!}HL)x9(Sqzx?ym7%0yBRi znKuY`eF_#oFpd?4Q-PqVf)6U6>MD@#+9o`l4|`ohNEgG?pS`CcSiIDsF`jESwDdOZ zI=3f2Xj5s58r4ygjO8m5Rdralzu?WMueEPgZphLqP_*Q&ju)ZIx0Y!0**}+TK$GOn z;hZ^{eGW6kP#P#g#ld3-H<5b9eQ2(2wbZ$Hk7_aJ8O4HZxM?=SVr8Bwew?7mA%4yb zSKxmDpphrqkU?yBH;DgJ#{y& zlfCD*87Iu(-vA(;qY_ln!;ZZITn{Kgt*X}-%>JaQ|5=H<`Q`kDemXXtltc}Jg+u3t zv|bULgkje!X?(d`6gT~173z*WT!T+z2K8Cai8+AqJ?M<--7xrR{$xI2m^#NB4|z$m z1N|v#w#DL{LB^wHU7NA*;%zw`w<05&6ZHWo2mT5x8S}-qtLFRb&c<#hk{x4mnlbk2#(@PtjA3f7+kGmy|i(TjB5OWz2)~?Y; z&74J|d1EDlswruNtyEu>jNsrU9mq{wt^ZRlxhM$(AGp~-LWeWovT_IGFhw+D2hkw$^8!rJeheuh^B75jxw}O; z74kDtmBJUX_!$kIDAA6-M$eFp6&=Z^+(L%nT5l#u?1>0xsZ2sZ@sZuaoCJ8rZ2@$h z_4kH>9jZHdHgYcL&-mhNAoJ4Xw=F#U|C<^if(U75Y z6Hp(2Qoo_H5U2CfG2zP-Z`Et>H2N@45>7rsQ&EKGGO$%Rx`aCssZpOt`$4AG?(Hyu zV;wsy8j0u-WIJCg9hpyp)~0fPgHP|++XJCAmxxwi%eV+UO0611VHgZ`ta`f4i=dX+ z&ooB5l>VCEqi}ehmP{d8+`%inDmOK2HklB{1mi6FvJ!N_S4tx{ZgmagU`hBP|H}5> zl`;pxJ1e>}Iu+krep7>@c)gk>v{ofSk8pCsq*1yoCh7gd7+6mL=u>;6S670CwHZHsv({r6%&w0>kI z8jEi z8q*D_{?X_R#vB3F8*Ux8{aWl8JtW<}xd`0==LK}(uFyGeu6L22b+chEAi%t}N0)UP zd$X+L{_%)wdPtNjkU}19qmDn7N^8Ws84T(h_uyvfXuJDqpF~BcriV4;{6aC)ph$AR z&JXq|E=vskB*ei7y)C8g4P8LqR{j}8!-x%npRB^OpWb57r9pJlV2?gEIhDMtgX@)G zG-}OpR-YvtioK7Ss0SU17gDcByNbUBvOefVt_1>Kq1)XtMTNQC>|O!kH`p18EZ{nA z6FWaGN4$c*F!icEr0dOsH`+ngxGR9*AvjDGBgbdo?P9W{KDf0S!ao312hBY$cxpe1 zL_SE#;j7o_0S+jpA1jg|^2B0?hgyDHCE(tO#1)iNTlY`Xn^fKI%^L{OJjc~sRgMx2 zKakrZ_3>w%F0)BS%D$etmrhxHjj2q)>bG*Yua}}l&|mLY+)`bf-Cp1N!)wzO$Bed0 zeg}ae$@3XgEMvWa-Ln@ypfALQao^lj^f3v2!)ew$=Y3Pv{gU(%m~oY2?5GIzBOKVv8q0*vo%Yp!R;B~-y9Q1t;>R0p@L zp0));(tJ(yFP2NOh7vg>vf<;K=g9}1Z%%M2ul&RwKmWI$NB2BQgi@72z#NVtrYY@f zAt~e*h|5{U#>Mj#B}Sv|Vt&%naAHM_%}MKGca5D#)PR3?PHTVp1$MQ!$N$`041egT z(Xu1%6}-zbyZZ+5iiJ82rf)vGVq1^@-`QSd6r90P-J?I@>(bHRdzG-FYnz8-CH@Aj 
zPQtCCbk~X6E8kVdoU+12HHQGrcTbpxTjsXLGJ;_xp3r#~bUskrS)czjVI4b{8Pc0! zm*V2cY`iY)i{huBu0wT$DP@Z18i@KRqwgVdFBksS#i_$raeW<_K8zX`xFM5%9d<^DWBCvJ8sD5oXM|l zGlObEW)vxPoq5^|nfv1AsXzse7NcsNB#~zhZ@=^XmUwy+SKm(2pJXA|meoiU6uWR| zXF9HA9vTuW^28|tXJ_$63 zEgBWk0YiWP&JsJz;!~al`3uKDrZ)4uE5r#z`#20u35rL@P`%S5yod?FdJZ)P8DBK4 zr}w7$hj+M8(1MT~5S0pGm>@v?F32J+aSWZaU}Pm1M;wLtMV4Rt4C1bSfN-AX1aF5J z9_A1N78g0y{)qyvFG#Hup1z8_n;nIS_ZTF3kJW$TdnQY?KiSE1mPgey}=XO{=ebaTl z_q4D(5L9*;I%sW0bsyC@E(`6OREh4t1wrZBAbvxo8rtMtXKm%&Z;bOF1D*IC$Eo&n zt)I1n>lGp&PQo9qI2i@I_?xuKH&&#p$^@roU1!%5v@Kjt*Ao6WIaj^18Sc<)# zP2~wi*lY;JwaLC-Zzj+tS)kj#Oap{G5E5pnW^)&+FrYtN4EK>0E z%+)hHJO;iO*Iu#9b%gb-N39o?z-Mect6lKIJTN}*v7~fj!`(HIrv#g@~ z$k$^v(vCsJ1fOmlTCR8{OAm>h|D|$1Grk}+VS$&mKo6R8yJ(oFVD!NTGq+Nrq!m#E z60d>d7}%MIK|YJ$OFmF9*H0wiGfxLDkKksGq3|JL!xVq;_HB0SC%w(&xR)(|AgGUg zEJ3ZO@p{$11tdt01EcAZW(oNC{z4IJaoo5Z@Tgyp`M&wYXa4SPZ_8Bs*=tH68}HV8 zodnoc0jRV?Kz@g^OEv2}gKt}-3EfVLzIXcM6>}CEzS(jwJ*Z`|1LE>Ku5Y|c;+2v6 zo#C|+$ltCJaz%B=vv(>=KyK~tb*l-G#Cv4By4M|?vY;LYA}KtAK#bfQV8z}$DlLI1 zAwR`GV-3$C(hec=4Q6G3?xkd8`LMc5V=m8`ciQ3MRfg&z4&z~n6cWh zU=V1YkC7=-ApG0@yPdSe)>~_5E(}^ZejIX;dH~jYh6Rc%ZWq}!9 zPwWoAVZOF}KO9aas+Zv0K}F>XTbgSDOIVsGpyv|93DcpmEXBzOIs5lQ(-NiomBKsG zi5^c7!>c;{57Je_x5oAoLtzN(ig-nb+Q#-!1J6g`+b4{JLvltmk#B)@U54M!_RBW`5G8jBKeA9S9Ezv4IW<9XLNiOnrmWI{?wm8oF!mDI6&je^Yqk&8E43MQ@E7|C}!LIhN_7D1_=v+5T1?T=#*!R z`rKy2`+b!8kXGrJ>oUvbgRo}%gOCxjPy9Z|Uzt-{4|npC13EJ_eZDJ}9DQudFl({A z9M65x^Fu2@*0^)0v68VzG zRf3Fp%tTsJbOOXt=k*_FS59L4C{DP)&^@+?H2ChV{*8QC-K=Cm7JTPWaTyO`Kk5PE3Q^&&8JyDPWJAFI2LBU(^5qD z4}Gb>#j*UDI1G%Z;zV68C*6u$R}HWf+-54*pbPzzg^H#1`hXUzWnTE8RXGv2sYDjP zrJji}4Eb!!{b*RFUvaQH-@oS;ytBSayVDQx3vRDRm~fUgXELDX;M`$Rx{-2_#OGat z^td1%Gf8ym+bX#}idEau;0hjL`_|*Ps6V$Mg&O}0zT5yqCU7N>rDqslLMGV5`I;eaad z#QFvJz5YvmNo6mz>Yd{TB*(qG21ZM6_A#_Fb)^C!*#0JA_H$)ye&K+Ct|U(pbs3AE z92&{LQ}kN?(bRTMTHrk>Oq#-l(Y8oYcsBV?BER0ytEuIzb?hpLwQ<&R*G3?AJ}e| zdBiON9t-;F$r+u0kEPQq^}qKI5G%$){Bt$Rw%zQ1c;=68#Bf~Q&5+t$m&`>7J- z6Ls+r`DqwCK7P_MXJH!D?z&~BhsPqR&HQyV(C@`LOJ#LeeZ0J;C=E$;sRY5k^A3g) 
z-=gxIBufV|NIAGoW>|4`qw@#rEE;^;44Uy>T2ybgjlqGHkNGSc>GR6bL&PG-!-r6s zD=7l=Ym}6_x4C3Gu3|(GYdDhfA=4p-1OI&~&~M7N%pj-TE6hN4D`e@`z_nJxLI}U8OUD;5uwmyS?!f5pfTRqy3O-*0;D4z`@tyGE>{=j7@*mlhY7Ax5)0B zuA41^BA~!%Y$yAFqp@uF7$c@-VgDT<&1->G96Y#&>;8I%9vFSk{3SWHR z8R7>1xNidOf-4wf?u3T3QGY{zi02f zP2{UJhR5Jbvd>DH7E}Q}l+1&+sM3NcHS!q=Y)u;Q9Pj%;Sx&0qS&ZT9AiQGlw|pLh zb^|aV)`XR6_nLoN*ysc(5_`$aV*3K&r2WN5G_2p^(4?c8v$SHqE1`OOo#c6Jn6Vu9 z!+Yh^cX^F?|5L);ELW*qX%Tte2%YsuinWB=pCG_=#^8)diff{MeJ%GoZCOp%EmQjuFqeovP6cg!T(QtCc*Ig&8-qn>c$UMs=ggy430GST2`G+6UKnZtP z)s!)_amWh@9z_L|u$pNqy?SWEqS{>@(!LX0b)(3xO3C=~_)RaE`~>D}^;c;UMNgi( zz|N_$c1&ypH=Rm47^bD=w#tN{96FhtwyjAoX)=5R%)s5rAu6W_AISC9UmXcb;}b81 zl3OM#Lk}LX%(s9bnzaQ3BA?!D>;l_5-Wz_wgIb*`Uu4!>qK^~2fds58ST9sbEXa_M zTa{v5%8|Nlz{Lxt`z^3+3}xM~rhKL9A}rr31Z;%c4zBslPL~F+ zzFcI^u;9iNT2v3AnP|_lf-P>r$Nx;B3cjo(5hN85M3{LM zC5VppWWzltH;<==t|?JjjJ954-I$bBQ2l~D9_j>+;G@vLz4{p6!ADwqKg(MT>1V7i zJIarUDUBOHGkre{MZW>ne-q>iGE%N&H`)t34>5{2m!fAx>#GNLctPZLE1dPbL*Ash#t38TH5R(o?$2`CIhi%^U z_HJ$i#QZg&-uJ)9580DcQ}>-AkNiJ|i%gN3GD{I??TTWIm&QT?&fC-0Sws*RCs;zVdo! z^MyeDyZTZ3t%RkbFc4J@l479Lo`Zlrh*{ZYP?9opkg1`iHGdllfSxoeVxlpPy;#QE=Pl%(wY`aRclzu)iA z@Av0@KQc3~>vdh{I_G(w=lM*jPwIfo6^ADTnc;qgsHEqE@aE3PC%y_hXS35uJNR(< z)Ng6mCtQ|~a~^mzpnL;Utt0z&G&uFQs#zhj@-evO*S{?2t0Wo2+lC8ka!RF_8563- z@GWG$*^wE-eriW<%=q`NE03*5I3@kEOKa$|-dy<$&o~Ro{nK}y2GEdsApxu(dLKE6 z71L_eZ}h?U?&xp34qe!J<_5PmMvMTA!q1PH$IVpV*cy8siD7PLm9fM$xXIs>G`(T- zMAp|;h(EIle?h@8rc29}-JRVlRYGY4_P}Z1_hN4uw`STfn^D;7`Pp*?RXL*eHhLtF zqT%E5_YeD+T~Bw1H!hIEj(yPlagt>HjFWJ7F^V5iuw^lvxgfqm4EU&{2_Puo)1d1x!1dAe?&m=bByUD`hfeJZWpp!;STz)@E zoytDs3_#ENJk`1jjj=cUQ^=O_wlu~#uOV!&Nd5F57_^)_z(0f7!HUi6D4k2jl5W2^zmed z+yqCa?i|Zm422Wg$Fw*Y^=2yry+DVn)8V4Qj`Y%qqV$ME_na(h#1mPB86+!?V0N4! 
zzEMw%rUG>Q7200E#q7Jn`582r&w^Ie3>PEJ;mV)9=(VWU$kgkEOq~>8c~Xoe&e~)Ldt|^)7#mNyRG#I#*JC^_y~ed=s-K zi7jyxFS+Bo>qvc2T=w;K=ihZzI0tt2C_G19Xk&UjecB0ynBs$9nl??=<1gZTZ_N+r zrdaOdHu6~87A0wDxsig2NL7pp6lHWa``)EcTI{GM>ulrp`n@t%UeBzjTB_x&T3B+# zpFx(^$r}#`0{8gqSHW{zYCsAy))07Lj&6tn$C{x{#35XP<16(r{$;S1Ul{(RD|?fC zOIfu?i}FQXAyXPM<|a=j@bZ|M-MNuo z?U>xlc+J*>rGfVg-vV2wV)u)*rtGN0_C2^jsGgk9S=_2l^nVW9& z&`rHA)&?HpGf;l2rLv2?jU2x`eq~g0QM*$Xr$UuoGdMGN!>Lm-RT+`39Jh8mIjU8i z^Oyb1`FGfY+ z81f8~Mg45gOHsB7o;9nez8IISsBxCRYIN0Yh_p8`bK?-jXUlS(Ci++l%lx zLp4fcU_En!`@lZpO?u159K38KA<;qfa(xb8kjhcUC<6s2*^@qrOI#`w&h-awt_!3} z***LI`{ko6jVrq!M4x`f`J2-)oGTr>rOT0hL203%ac6SEFj#0$AEhM3$xG_o=+b-E z%)6qP=%6szP{_h{Uybudg;p1DcM_8{SKg`cd-bWz?tt*FFYV#Ma=?F=#Rmow~PCT$5Neh zcDy^t@&Wuc=7V?wTbP2fRxd&9tqg(mxL@{|6$%l3k0lcHJ zfv$JN*KN`(ptxYB(s z7J*4K>$$TT*AbUg`Ss)br#_87-tLr?Pdd@eQ_u1ui!qGe*LY>@_BxCD^z1C)kJRfK zI`Jraoc*Q1ME}?J(>L0*4?Q~(>K4iy&nM|*`)s47&e7}{{rM0hHvRKHn~;moThX=5pKL6mo@qEaxZXLz?P zSxg;2JAJM3r_sJNH7`0b&3Pu-d9Fr%R*~4T+jhverrED>>v8JdA@C%sUwf)K^E1|B zlYKgJa+<%Xl=`u@$Xfiyq}o|Ve*cMBQKFshsfCD&^@8}T2|bstv73jjq)0JysXpRm zvsC9ExObS@Wvd=nk~bImcLSf}TtA_&NN`>WvFpS_jJF!bpHw7ZHA(cRNIJW^J}i&y zaFvcKEi{&>e{Mc~nzT138=A}>2DuD}Oi8=_L~Mn&5xHxd86UpZWwgepR!=ub-;tH6 zVc&57%_(M6Oc1hhR!Im`(3P??Ewrf4mK&DBCtNI=v&y(7|Jm}WyIhOb6 z?AFe$V{Q4Zwa@iAICtd>^IDIth_zK0X&?#j;MxEO7P z>xhOP8u7z9-|$pBm4C_~+CUD)Tv;Ld_A_gatIdmVeE0_nVJ>O)7Q%W;tcC`=rcN`v zu$tX+WqIe+^W(xwifdDlSAoltpqPxI1^cBUeO6rQwe1gq4&(!Af_f!ia9uEo{my1- z%rYSFC)EWUj#*#nG!Q==EOHeJPimRg&1Q~AG(Pv_ z5K~Fom-GcPx=SA8olh1yaBVjY2!q_u-9PXihZMwMk=$O z2KAIHJTvjssR{h27hq6)rJLKbjp?;73K6K63Ass^H z#`(s;runAl9-Vo|Z=DS-X3rvXh&hQ;X4vD$F)XPpe-m2PIfVlGx}CywRui)^Cd^k9 zBnhN)5D=~5!uZPW<&}4OQPBqRjj|*XWHns$Za>$PqwHox)x#Y(&+ov`vr{U}M_rZuiRpre-02r6FJU=;GknMM^D=GYn_ z19B@qiMN7m_n!R)VwJUZ{R{hfy_Yx)ZLB<}HEY>snZnGo&vU)4JxWA|cB++HV8PP& z@RsXN7iyrZcv5uI(jtZwY>-9maT7HxDO9UWM1QNfxRtJm){K3Tu;s+(0}UcRiz&Rd9$85g(Ew>F1MKaZlNf=H?bTTGn|!~RMcppx_jws# 
z+S@m?d4)5xL;4NlHWJGZ$y_@^;GRt*_Cb5QG>&fZ`%KKyQR4$5as;*`+deLUx}7x9U&VhfKtFS5Wjq4u!MpO=WT3H(`59@zb_* zqME-eC?!d6?HUS~Z>t_=_f^n<=v64ctV)hDQ4)?rTe+FTAoR~|lef_Lctcp<-cbGc z5Jj1how(0b&M=5+>(Mhkulvciv0YuiFPwD{Im}EzEn=GJ zx}@0{O2PaJs9)97KP4JZkz$`2U~t*u!WOvuQKM1mCPtE*%}_QN^cMp@uLc1&w@$8S zx^Bm}8ad!R_5iDhJ%Y9R1|}!@pH(j`oI*YuUu>dX%-?9w^3X&IF9 zhZv82t@l7CIci-(^C%4Ob6Jw+=-cK6<*L9Y@{U~0hvImRW)aMRIiYpac*Q(E@P2*| z&^E@>?UI8ux9T-nyUWedyMPW~dsAR-6-m<3CXfAEkf65F_Nf-}_+gk1o+c9MQZJ^N zBfeyowT04tZk(R{dolKUrrNk%Q6^h_gs#n&n9!VE*n`F*l&%sO^iLa?lfuXW1~3~w z9D0E19f-Ym@G*{d;pEjSZTxyjIh56weaHk(Zb0-hR-u+Gc6Q3w>MY?VHn6)YJRH=u zlLg1cj`O;hI4>HR#L6<+d-n{_t%JcPsJa6lN>5Am8$mZ9O~0r58DNYsx=Qb<48@N3 z%+mDme;!2OE1b{AVuo&(bM-hpDF8dBbEW~(${Fs1#X_X3pwh6yeGK0nr!v@I7t5*r z(1SR`8Oc5GQrcwl*lmUpzEBK&mlP z`+o*o(G(AZ2(y7!rA;u4vdyu?;eQ`PVQPR1^j8@082(bij__OB^$b&n?%y&$icaxq z$OoeHta+}2qWezNQwC#|#YMG(W1QdtMziU|{7qA&wip*_F$aM>Mj)6P02~d|XGjB0 zuH}v9p}t?pwh>v)!s8#R$Ie2h9{?y0Tg(JfQ&i%ywuV*K(fvO+(ml;{nQnt3UOf#9K^0)gv4lv z9t{LWNEot}S)+Rb%KsDp9gqWYcoUM(qhl{y)?xPcKQH-yK^gd*uI~qZ%f1P)7NF7| z2b$1xfsztVtOSxfl9`uM1o-VSWziH8W$rTI=G+C^OYx9IYadib860Anjk|EqMc;7QOwIj zx{p>mH*W$?Y=b@|4d6&gD@ARjV+Ujt&=~&T0>T8LKhm3Lr~pyuqYN4ZQ*71ROu|E} zJX5_MTR`4cfqXF306QNum@y8baL8{o{N%#eC)&sxd;14F(;qvtLadTQp}9r)8_N;0 z3Ml+4z3^B;7?=T}6~9&ty7fm2tnw(O(_lAVPJ7Wb3}0Q&BmKxrFJN}gV3^(m$D|zg z56$XSYRlixgEJ|i@#>921@b9_tG6)$v|cX3&n^X@G4c8&;Pn1&C0QY<29nkW=yMcw zJ%!~1AX40_6I9%KrUjyDAf3`*1|A=@{Q3sXiri-e=awOml6)|>$S&V{H7WPAg9 z8pyDv7+dj)+C0=J$ruh|F!AqcuSVYE;~k2%4luKjizqU&YqVQI^&yMp7SUKgfDgA@+5zO={u(i3M`|EUhH~udy9GwU*AVgRF04Mw{YnV? 
zuYkjg>^dm_t|NDR{IOn++A|0;2l<7pAO#?IP9xv9neXp*T3Mkz9*mZWsu8sC^&}U# z-+TaC2>OWPbj0^SFl7HQh+V#btFiz8#>1A~FMEdeln!js{)l8{Dr_mu{>T^-X|GaB zvsY1!Xubt~tmfkY;cmSC3RbNz!8eI^76ZCZzl2pq!j%6x{{Q~s3~+`o1>Vp~gaeRs zdJ(;+h)f_YMBMj;P{nmcm0dZ3Pwp%@9-OSmM*(po1$>%!<_Lo}rTa^OP-&n42?XLP z`u!j;koH&y>^dp5Yj+mByvpE=OE6y;5!4EvajQrN(+s?JScu-E+~;5fybk;&n(qh& zQ+?$XxUz2a27@h2Iq+fjzza%kQ1a?%?!0?9H6CP!NMIlckB9*}^_e5~9Vr5J^Nv7uV%O39+T!d(vt#XZn0Thjq*hY%7DNR`VtjJ)R{G`#}79#sdR zy@BHrNf9_dO;y@*V6e19J6cK?+i+@8)%82fC!x(rks_k=rPzo>^nu$lt{=E#DR8o{ z_d?_@kpgT(7HVk$BV3~WUFywW@DBp2))$WbU1%cJAMDa9!($bB4<8C6m~P9>E^d}B z=9W@C0H26_x=Dt?pa2=7U?3TYH^~ZH@kEsw51uav{|uZf&UGF?N3+wyJ%=~O331uk z;57Qtp4F?xz%FOm8G%Gg*C9M3Js?@i>M}|j7tg}Z?}1Jac)-AM#-Zc9gNgGwJp;42 zRkxFD=t!zj4}_k7qV%aQ7$$SD2Ki;}k^YqC!2*&(ahG8^Nm`}w4!2K!BIw7IiWw!N z3#a%}P1IHt0owb!A=L@ox=j!TbBrYkT{LbP$R+MVs3Wuk<36q--qUMnBF#8)<>045 zil+=Ue89d#oRS18Rj)u{$;KrY-f^lq$UqE>r3YuJfN|fS-dCz<<3vLTu#s>3XkVth zbL4)|u0@6#R3f^7hAN&ym=vk8gg{B+NQS76x;3v*sDn{gv5hz^Q9nbCT)W$$qAT!s zR~s@6W^o?AcFczi8!HuqDtzvXU{O+za=I#OoO&h9gwx{hG};_+1|-|}Jh6Lb*!BT_ zo-TU-Edt!cd(2x6;t@AWyKQ(90rq`C!yJncBUXn%G48#N))c5IwYS*NFCpLqDAG&w)k$db*EhhgV6blw}+JkOQ*n99(Il;rlrw70)WF+Li zV6Z;1hxgLMbHqNCI^lA&j0Ho$O5GbS*9RIL>a4MQO)gB; zYz>Wn0HgZ5HVV*5pQ(Z8$;y9)I%SI>fku0YR0zEizf2Hs?7>UeOgj+iBF9#TJcNvl zJNqyO+csS6L8T&C`|a>_v>emI#Q*~T6b(%gTld3s?1pMG}R|D=DJJI zlzWMxjd@q?q{p=HuzPnA?_}(AKgX}<_Dd!gi3iM#m>?q-ntLW#D0d@jH?nzGCvkWU zl^}a>Fg@qf*gY1C=-zITC@jNG`>o2`q}8-Ox3L7$rQB;`rym;ZuL85$|NE7>2wVLlk^gpbAgB? 
z%V4fYDVQ$`7(E150sYHWUzRc^?OOP&V4n1r`g|h@auNHcITpYd| zpqEqzO3dXu_PUGnUztTd4UJ`Y4&G2~fO%T)T>RbFFpXb{hR7>%SND3h=hi!Z0}Y2z zR~M%bT6e$tlU;?Bj=fawzNg%M-EY(5_ukc3g*LUr$v0eIhTU=eDKaj%lB`vtl@~sC za;2w*el%*fRy~JYsdg^I2-tzwXWs|!{L5hR~xhfWFd%N9`7i)(z=?o=n<4P82km=^vG_m>bL=TMT2Wz6NHfCxAz5ZpX@`; zU&pXz2@t(bWztxm`&0{B|(OtXF<{$|fb zYRyOTOnD|*jKc?5Y(Z7-2pHA$W21l8p_ppaNHZFLJ?n`t^_<(;x5{_?OzM+xJvNU1 zyvO7ZZ9h3z8deGr|Bvww=h%Ujt6KNeq}C7i2MDW)Wbn0cj*g+=Kbkz-tp0t?zcB9R z0RNpn_W!;z5YB!a@_~)N6W__7G``2jKIQB||4Bd1qJs-@p)XccoQL-=Tn_z!>)mOoJNQllr-4Foz07T`#5k#MrSkYpX zh#wRVTXL4eI*B&sXPEuA*cGH--+U%vQFr%V4h04UW@o0FPJA(Bi%A!2)3mSD@` zuEf8-PPVVMkV)|+6f;cxlaoRA8xn4eqyF?xx*Lrp&eCoeo?L(8`bQ8}AGfx~oz`_@ z1c?0outb(@Bz?+?{CRpZV~V=QR2`Kmv63p8R|9iP90fNGhm2Rzf@>_o2kE_+XAj`LE1jz=MZ;5kjgCgjC|gWvE+1S$BEq z0B51`HCU8X9z-&OJb3Udm7{8$pY!m2FR_JE`<*JrZ3!w6PdN#~fD8!8J6%McF>52m z;Crccf{x*z1CG&`T*e;AZ;$nSFvR^&&_Jr5l$klty?eD?W=Cb(v8nr{%nld`yR*0F z$`S)Dky&S6F6$?&w)g2m_wwTngt^Yk9_NWPmtEmL>-zc)e>eLlwHwoY3wu@inhI{i zx`gW7)&42KzfKBoJC{3`BVsabkkww5dokz9SinkViBkq!yDr&5O*>;Jf2~~xe|&)V zSeDx8HRh!8h`AHjSRX?gS!R_z52KZuU=NPGi?xnMj{k|y!vQ8N(?u zfy(M9-B2?2IW{3hrm8|u(ow(1NO?N$n>d+cdto>u`!AmX+zMP>7oy_hqmU%xD4*9g zJ7+S#E1@yJV?;FYDtHO$jqn6)6-+BVcjKYu=ppIj(byqiqykCYs=EHN%iG*WU|6T| zStSc=qLO^4ueg@qz{SP${UGNiXK~2VpEdQ$x7gZYZo&Kcd!z42v!9Nxf84y6VYnfI zZ*|Kj?wj|Plnk<@&60F)4t?ehzxPymD2Zp{MP%>c+*`F@oppT@dnKow=$fz_P|%#$ zS8XIGj^N+nLOk#vaiQ|}nvzV;b`4E_@_e-4$Gzm_D%Hxp^rXnwBYXP-*h{)D`Zm-( zHZJ91x}Wk$w3LV8_w=t}7sA-=_JY!|o)^<~95eHr?!jQ)qzUuL+rC^!5B z-`?*?m-I<2lpJLMiL4?KM=>CJxH^~(tPJjRY1dDq)H9*Od!{akz6q0~a&SQBcXd$= zIhH+7k6SyPZ{Vk!7?B6W=V46z(nWUHcY8qjXU%1s(j7kuizi z`8w$bz?;|*e!e;n7~G;>p5S%#*EIL;7W#^*xS{I0ZLOh}GhLOTuTp+F4dp07W(euC zEHH6YMIM`awUnyxNh_HgDGOQlugZYG)p?+nGi2g?AXM;`k;wrw(#v=%8`xDKqM2R> zL$1(yG!POR0|}^@oK7;jcZYIe!*Bh1ZGW^PQo86f_W$WG|`}GUyv0#4icwf zmkwmQc_~saGd$P8s;E`1++c{#_I?-+59jHH$@;6k@JB~ z@(+Zy!|V(Epn}XVF#c1A{K1d;zE-w$j5170LSBksGUpIThIf)q+n=nMhDK?f(^1E* zST9u?r?DRb;s5fpe{N#5M<<~Q3z1QTrvoitH<)ceriT-V2U2Tj8(a6yfbaMS^3D~D 
zd3A1b!un_^n!EwYJ*{sxV`NIRHnJZ)=X4V!d{QnWx!B_%BDHrxrq9IgkOIb`vVxEgb)z9 zYeed3#si$P{SyaG$BTkdvIR<8!NFL-(;DvBKTKCp-H-U2U`hT4WhKa&(g!kp1S`)7 z{~9X+h&A`Kdu!(HyG2Ph$a$|adUIPh5R5`y9fxBG-vHtM&Fr?6FOr8cehRD$ya3Z?iMISs&@`fuhC^)YY<-f*_mo?@p$flr zHeUVSmIW&%P?HR{grDQOhSm~gh2&@79@PYOWm!77nqmbg;3y&1= z8`=>MdL}x{*yIm1-KUBoXGvf!j)&a*044P@2B*NkwZO|zufS7Xn8lrcd+V; z@`ADtK4;cT(6tx|wT^hq89j)x+;4YRq|UIvF^QkU8DUh(jp{H2A#+OYP(zaObSXLW z%5z9mM?H=l5BzK=ApMc`Zo~L<_f@zd!;XbK8WY^R*no^ZqwAnpTUwQ|qogTQ^eK8d zcPm#X_KQ4+sFAZoJNK*@rV&u}dtFbLZt@1?S%w=(yfg5p<`K46=VLK zQZP@*=eB|6eUAV_EYk=8L5qWnDpH0$U10rGk>0E*#ci&p8N;yu+90UJCTGHG}_)c2Ilj@QS%7;^1hk+ zD}K%jBb!SuE-}02{>DoDIJtqRPyWz8LgE0j=v7S@tHr*y%814Gp<>&+|^#M&LEnJ8H%KO?J|=vELSAStXwHVFjqhved5W3?01scMwyBd0^Ia2 z#?eXAX{=4vO@L_P?C3clyG|fZ<@_!jI$rv?w<|k5Ow1T!hPR6~n^Fv4Z;WT0-4$53C24L)Doza}JD#v>u#?@EgY7 zG7oJF_wd`pwvEb%zI7qn7suKCXMg+cKQ}Q~kc(yVB3!0#%CA3T zU!bWTyht|%gwW(%w74FjTXE27O2cJ9wpc)N-c0zk)!Tk>2=ISPNt#8xD2!!0(tc(% z5@xT$UxY3#Ewh&W4YqXmCQfh6|8DWlOispdSGN;OZdQ89 zEq;3d1Rh_Q?4N9vlfF?2Bw~@G_ht77G&|{Y=yLemIP66d<4igayv7&aTEj45d`R31 zjfSUE@1ja%jffIJ)@DEudSU!&yP>bfqjW}?bv3br&h2X>q0zLD!TwW3AJH@ET@mL5 zxXz;kf+26{UaHC8fcSEOVSiG&MB*R$iqX%ul0IuHJ}>&*lK!-JZM{hh`NV+xf%u`r z-t7knl_&=Ri6Poa)VPXi+Sb5Pn_;jJ$#)FKs<}qfxLI?J2l{h4MZKcJukh1N)~yBI zLnW|YnxXs#mykT5sUi&*o2sxnXWk%ts;17|7Gf>x!!&dL-4LQa7L*qD!I1`u@Q2L~ z#s@7x8$Mj?&9K=vPk({g@Qup?H^RAUbUU^YmT8wXQ8_Us7Kl1cZ#$@>(P>znx3a&Y zD5??nS!Xmv221yaQxjXsI}59%(xzI5bfPCWN{r6R9-Dz4J@t^v6N&@0?) 
zU9utZUuSf${O*f`H>bG@v`r?#|FuB3Ex3?_sAg=9q-3 zL%>jaA)7c3(skRT8?w&-lGb+Sqpi=IY{}!$P+0GZE6M6m<=O%!QJK_V06J#_escl= zjF{A&Xz5{Y*jS-4cNGl>TzP1ERi+HIDSLKS>v?44#r3K^#-5fD&jz<7#T@y zRzL?Nu678r#V1+mv<*t_0WY$il*gws{s9G*b5jE~K?;_vUqRRPNlk4a8A-y!ai*c; z*kIa;BNfEdPQeH;`NZ3_X|9LJHaHZr_X+4jwGe!iJU&(%1?kvk-35JOn(p`b-%Lnd z9ZIy6Gs@(<@TiqvBC&wn1R_ADYEL16B`-bNa{fQh@p(YOu*mP0AKWwF0IvBE4)#_L zD5bj2(}3o#w1V2eAQC1)y7^0!#?N6tXp_8oY0Czv{H+Vnk>PTE46TE{PFXI*;zt+q zMtQzPcBJ_xniv1OnF*%KW-mbaw6UIY0qQH%;9TfO!|%QR?{;ql#0y$k2tsd2MAfYs zEAqYOoa{3I26>ujR^(Jr2-mJKs!V!r%!CBV(8>`szoLZWNdl7WehtR2v}PtL6JZ({ zOE2AAJXHLzpaNdceE@&r1L9f}PpUR%`T>KMB=}g61nKKA2}#zrA&?tu{fR_-3@A&9 z52-P*bg(@q;6W4lxbiNcl5i!^o)2B77a(t?bpWUuwEQWjV+P{>vXaf_a%=cn?gOO3 z4oh(;LQd$zdQ0qlZ%vkJQ1%@3q2$SBdIljN4NKjxhtLO`WWsAff7|OBTJfn?x-b>pftxHpkIIZ z-*2Qs`?UOIR_GUH-@YC+G70}m3Vm_u9>~o$D5cO;V_$>|p$Xck6s+{$43siN<<^I} zl)J4w4{NDkndTLwoQyFVSEPTPJk?XG(4^SWB~Km(!=ri62QH1Exuc1-y9YqG1J?52 z@7>hx0b5K=@~i4CG&XkkHgL z5&A-aL!Uz-L(-a?H{~^^?nGhbiq}i%(Fyj~K^8iUv>{QGQ7#QB-A;_Wx$?I^2cLTp z8m?$CT(WT(QO;HYVHrkUYib(WiPSB6cxCB0JoU=8pAdb4LePMhG=~<{alMS3Fhh+S z#LHk))P4j?N|~m`i=%_kRP99$3D?E=-5}%s@7K@~3!pm9#Hu<3f9Kh}fubrX#GniZ zD7TOWhlz%Pz1b_3)8Fb7s4lDpIFOtlHsEOKM@b_5Uv9Y{l`Fq?b;31NWdTB`jBbl=QI={(jX5(xYjjFzdT3*WTA}XLWz@w3K$rumw=A zCXRo8c*Mm_3mowl!OpDy|85CJ@?D5TEM(MChWC@M2)WY{h??OO@T*6m{7F}8cL7)h zSt5j$Au}|j>OW0nG4?(&o{I^#&j8zV^fc4j`0!y-x1mN5FIy9Y1|bEBPNSO@A9N-D zZ9RvTP25l`?9K5^&0}&1Nbeh~oe z*i{&BGj;_~eXm?b50t+Xu!1yl3tF=^k?c|h@DHNhP_+9F1T@h;TLa2hAxMhC#(?@1 zE(0-!gq{rsUcS@K5TLA81Vf87IO>b-Dmdy|Fs4?QuE zjwbItWR&=Pc@g|H{(oOd{!0pgR&Nt*@Mb7=#|${)qc>p{r#scV0Og_2FA)(RZ0N!2P1nGSPcD zVyp0!r!%@GXwgl|3ZY(58-7Pk)X5JHm(>7ZzW_NyFLI7WKnNhkJLHJPg6*buXSV%; z8!qj7Htp1F5hOmtkG_Go{s;U@3JNH(#38kK^6>%L1<0dmq?o?}TgocEq~qa{R=_#_ znE)VYkuWB~l-L8?ij0&mkjlsAODQ`+2>q5vq8DJRl%WRlUlUfn5nX(lE|kMJ&2cjI zuqW(qD4tjgY|A^q_Y%(f-@Jsj-wfWur1NpW+}E4}0b%U(K0rM+2m{;`@EXj`>ieOj zJO}@u))|0UxV1wa3`l)k%mPs&1yyB3LYx)mkPFPjmR0Qm&|16=PCu!VMg_A1{Thoi 
z?KUi_yVu|)s@Eq$Jj%DAy+4wc^BRfqQBpx+)y8>9iOOlY#s^=h@YVy{vh}yV1kTU@ zey5Z(+)-@@Gt}D5dE=vEgQa|)I{tN(%jued)p-TtloFCd9Vs=l7PKv&%Q<@)jB|E2 z+Q3mlz(71oG0>%n%Jz-&QUGzd+r%KvbcC3jLs2-@CGXgu1CR#o{8zQ8WtUF>diVr9 zIqe!C>FgCc*OQt~HP6!O9Zme3m5u3tD294LP2pZSz+F86O96%?Jpw{&0f3>+b{arj z=mj*1Mx&u!Q#lw>^Ph0idf+s$J-(=7csNHv>h0|#UiSfaUCPKCDS}knclb#_4JnF# z1P1VW0J!X-?S214xuypI>(L6AuV*3KF$92~yD}Ah>Mk^O<+|(WM-6 zZo7;u&X`(@ET>U7(#)(L`7Y9KQn(eepuhj_v3;2$EZUEo-l&8hf<<$KvaK*=d1j;B zqpzau7|b~SEoipGCBm(UW<=#KH4U&wID=AsG!ZX)%Kl87LD*}6X{UvEzhR{3QnlT4 z+vOBA77hlz#R>xkP?1(^qAS`{0l7ukIuavFCFZ^ZfY`^y{}WvBdR!3{DKnr*BKOuS zrH64lt{y#z7LXXe2AM^l$7$CGGXFd%g(ONFEg)if$Q*w3^9h^a1Q-no(ek=n5){zU z>;-p#bn#<0W@A))b2^e<`honfEVd^VGw~0t?M3>@LT#X*t=(e$aNN_x)snyCvk^f8=pRqznyWa{5%_mNq(9t%io_jXko)W0W9&;1!AVz4jZ_b9|Kp1yxwFPKmLU z+W=F*r-Q1?M};^wrY`a+V0Ezv^r$yQZ9q!SuKZ!73+Suyi<bM8WkvQf#BH-(Ls3Vi5U^NvQWFY|18@I)-wGsyAw&kP+AcviqYpH+ zJ!EYt;tDO=-~RlGXW8Y*&NrEiT!|4<`*QufbRwX|=q9#*{UFS>?bZA0PvwB$| zMA_(Mu4|By8)9aK{K`PBrTf>4Zwhb=$tC_|jQSai2-D@l7!Fd@(?5`)tm=9w>Vg$d zh2IP)9sU%EFOHRdE_E;YjxKrTz@9R3=Tnj#M`SGNof(QS&e36c;eQq%r3^m}u)tDP zf`4Ymu7#xms}{-AKxIT{e9OJFJKyr|_IWSbF@dfuvmMhTL!kTh{S^E2pR1jOn+DAE z6-&E{hw+7W?Ln#V=ZTXLK~IX+YL8&s3~fp!WOG912T>HWPPKp!2Oc{Vjd_P zBHOrJ#OKi{sY~ID`5@;}mIh_MaUpZ`?W#c$(`vK>vrjJ=Nli{+Nxz^1g zkWx>8kJNlaD_FQ0bs>8nL>A`breX=bJhhCgqz{}q1SCEh9gocsql%kI`iQy=`)52>RmG^W2y%IVtpH_&cbI5-!I+CxO`XZ^Yf%lV{6t&Ef3|t1~L?Og=f$ z7gQOyMnA0yGY7Inf|($jvR;=kPKuOOzj{l=g^Yj+^d!~627V<}8J-k`?U|}3wwiDt zeCO54%wWllqnK3A^k>_PYHV-iU-%Im%`vNKr84~9=ry0^O!mE2xZT(73jqM>i)k-B zzCWvBFhQ=8%E$K?ikBjq^@u84;g4fMy_f|*pzy`|StMqQ)gkBl(F?3zS^ENIrn}F? 
z2V@=W8Q9GI%wlk5N)@8pTj$^RFDkmQ)iO2o4H>go9Y9o|&1*4BwgvG`%RIr=Avqb;w8t`)8^^hmis~ zDlp;l)>LhKql{6o@cNbUtAuvVt~!T6&U0pR?IUETX@`tJxNvXaFPlly$=PAx3D+lN z?8TSc$tC*&r9QrjSAzYw402h1H59*VO(XfFa=7RF?uRx7#0VH4Kpo+DDhxYbU|4~4 z<);`6yOq3M*Y2b_YtpIoqRg8-5S$kZuDdu>zGFV;9cJuvc|+U=?~q!oBbpr)oUX;{ z3Xb=I>%PYGZdJ~~6h5mNNHoN*TVDT;S3(tb+;H~OWe7@K??xniE)B(=7d*9pW1EXJ zgV-!o%B?pIligSM;BA~YQvwR!a1q@jY8BEHWT5t!J$5_w&Q$RvGb$OF@^ zGp(2`#-=w&zIDo+G?=rsE68m^Px5q1F|HQHF#&CA9WgB9e-l#}F zI7n#%@Guk00W0-L?sA-R_R|PDz1u0fkG)u$pXo&^5!PXrqHCx({(LXhcrg99Euw@c zJP>1j4H<_Iz>iwnk%3d?3u8sHlr~`HUY}(MKG+|!ro!AMFNq5XZhtE!hT>J)HmDXb zKOnGGSK&XCuRnD=Gvu}z&+apLy z@uGo}5ZeSb-xrUiZ89Vhf(fQhzOD@Ej*p?pmGQwu-YMFNb zK%4^8om%$`nRBWk_wTKZW(y5-u2sjGV*UtehgI)I2@va~zBG*$L0Zw@6dC5Num z;7NTgbD%M`OH_zQST%~7!ykU`oF^SM()raKTmN3a=dNYL$@@c!=1Rla*pgFEgO-An zgsHH79p!02CQcC7IK!|w2C2%exiB6Hu0mYJHYi#Cv}z{BrBM`MKj9NkA{JXyxZ2MQ zUp+Dd!!XcFXnU$Q=uNvI=s3SHPHe$!AyMUx9swz4E%0zn$aR}C{3My1dowDZBlE)? z$E+$*4XdGNY#DRqXPKKHv}@(yYPTd|rJW2O))9L<9uMqAtk3HuXBngG-|Xk-`{__dT&eg;8cfcNE8d*!VSfWm>SEm>xD7$1A{D zbOJB<0=n&;k)k&l7&KN%`M$Cbu+kCuPgUSgx(fMt<~Wr}Qs$$rx&2EQC|n%6s>NcI zEZHkCH=a|I)voedN}Ab#xx1@q?bB`D*OVno=5{t2Z7LMhi=79IsM7Iq))FEM-!{Ht z5MBj(-y0cDL2k=%mjvS|Y5ExM+^!YK>b9?5+6GWtiE)qASJ6UTXnpVx2hrEZ!Uh}$ zUgpYkN!)^>WBI4*1$V;2kmbn&OmNK1Bb*-f6ytsV#`B3)GrQ*QM-~xN@oIGP_YN?n zwy1s0-QQQMKn)$ak?>N5`AMxfud3{Bo-FH<^p2+ugxXxST0zF96t$dVX%6Z4?apqQ zApb~-<``mb2|E=Pd|HJoUVv0iQ(DXIsNIX-X_6s-^&eeoI~tmZ$2dHKF{_-cM-4b$ z40Dj}2#>UmQVnoYhsJFv&k4qC(M|l(Y}V_Ub7mvg*&8s4Yur_aPE_v@IxhBh=Tv_g z-h)_SwQcq6Ru-vG($cKRy{w7rBW4ocT%j}Xa7w4zX{zA`GHA8$Hq0e7{A2q_aMGQ) zZ|X&L!q)7_z>gwS&Fo95tSfQ-gem#bY;E~{my20-8<;icIB=rS2sxj;B0g3mwBNXEreeu6>-aGUWNfCM*~;sLNi-I=y8}8%$D- zis@oa@v>qb@`H~6NaYj5dr`k3dd)_HsXxt94y5}LxTB~O__g~^RAF&KL)xRD@laqh7^BLlII zDyU+F%MrZ7vIU$~1U*e#na7<3-_PL7mrlzJ=86uKl7uwBm~*IQF01uja=+U8{->=i z(ZYm2N^#6>Or&BIl3oEhqA8)H>*?w(!3LWp9swY41o?Zhu|NMo^>FW%d|^?-OSpycCf*A^vc1UYtyaME9v8G0u zStY_%{J9kkd5Ud*FCVtH6ja2RU7DH|AZyB3@r3+M^dI$3-qdSqm5M&tuh1f!-u$}C 
z>3iV8zRk@|qp+;+>GV7d%#4kUGLey6w#dYr%Ckl4Z)a!Z!Eq6tbeI(ABoh<2Fh}NJ zWHn{3xk!&!XwA2-n$NeJob7$Bu6kzib+2kgruUz~SFHus&Ygu1j!$;(r(nO}uIzTu zX76Lo+*uNK!pe}9>%_b_-u6q3Z|L~3E7udhAKK-E-CDQl94UL#pnob8+Wc3k!JPUx zFjS}?&MWCGSbBN0-6a&QHEC~5+lma|o(vhq7rm1njS=v{Z`=R-se;$#Aj?LLR?$tL zqzgAU_lDJk-cb0skHj$Re>mWD_JEq(#K~8u#pBjD)+fWY4LJ|Q4W&clcR@Mq)y=U6 zj&0(EeCye{``V%YyAK3Zd`u1wh!dFDQMKAT**Jbp%2dZfpfNN>XY9C+Bwf~rm`99> zdC=Ktjf^9Fk?wttaV8o%-YQP&o9ZJ*n(a1;+1MT@7<*LGip{?zYEtbq?jVd=JSqCI zGqyS@X?>>hyYMdlhGqZoomTH^MZZ1KOy^-S_;LJ_gy`Oga1WOeu7tDc_k zfA5UgoK6W}OW)0~(6D!)!`fVk<3~NdV2QU|`u$l0?iOV&zAdeXU7O1aj@qrf-~YtJ z;KuIUK!J)(%<=Nl3qDIj_7>CA2{R5S46l@ zEN6GVpmX2lBfC(4w#c&~iE@53yp+@Cmtvru9>zIuD6HvY<0V+0^@7dwx202~$T7G> z$5m5zu-kB+{}?rK;{EK78h!D&o8-i6+UsNHlM>=xj$7DWudF*rnf5rb51RDWT^zDK zI_$#9km`YFRwrVj@&orxK#x0MPU<$>_(}Vj3w`es>kp`N@RFl&Fm#+_jVX)(a6ZgG zG%N6n-LP`WrT3CPrK<8e4|m$6dQupfk9JDeR8NMe9+Oe-cCvpdc(>rfC%u8p{6i-$ za6Y!ey3f)6&t$9-W=H$af(DZk@}r3p^$cUTE?CBVB%L{Y&P0-Nei2i; z_jPAl_G?E&?E}8~d)myTpLtbbOcw4Z8Mduu>C8fVD`ZAs{HkPI@Z)`PVFw=FkTiWy zSwHK0ZZj}^CWE|Z{kisuX1Td%n9$V{C~8yIN%r!|YxmfH^ogBFwq^f4>l-$EYx=A6 zenlp=*gm5lyUwJkiR&5uv2o4dswLHK+*d8^u zXSnK9?)BuwW1>~vjQEftygzw12QxFCkR!=D@mYJkz&p)v+4USrorNZqCIu{B z%1noYD<4Z%aCfMmT_aGQ@;losW{8P%XK3U5U|u<;EovS(=nliGdv$X!5pz_vJlG}M zHxXIk?qEFbbZ9giGaob2@UUlbhq2W|pL=P0Bz)v$TaI*mkxMkZYL#%mT#DJDiZ$z& zx2i1h`#QDix1o|Y3Z*j?lNCwtsS z9E|UQpRCcL1BqV~+kTJ}Gk>fmera$0M?9u~d4lIjkU`ZimWhbMs&6spE=EV{+9}aR zJRu!U(SCd@bfA>1a3K`ZFt|sSmmNbSL2D}^J86HR6KUon_ zbpzLZ+2K-AD57zMG(P>k;eKnOZsF+Nvj%TE!aOHZM3$S43<+If^Ehe0WYYS4E9;`= z>OiH`?(9YUc$Q?4+F?(oKh_3KrALB&K5rP$e%^Q?WW7>%&Pw*n^qBQ&Xb}q)mj8O7 z{Ng3H#3kPKPfG7z=fa#{5|VaC>RcWhe}XRPZf(p7^WXD&XTm9I!dqX#XG*EQL3IL> zRd$_49oDG|ed?=JJTYOkSY5BGUwPw!R?e*0uf9N%)Wag4*w5b&)FT$GGp`xz!!GH` z_s>7b^~DQ#3;c1aeNhx0IT^P2YM3$g|55eb@l?P4|95F9o9q=zg|hc5tAQjlL?|=k z*z3@+XFkdvnPrnLoH!iWdt}DJu{Q??=XbqN-S_YF{i{chN4?KAUgP{kAZP&9y(C zsKi@**fK)zwF`SB*;O)P-4ka@n#rbqU1W86g5T3-&}3{F`)ELFbr~ZYz@I|eAw0z!Pu=f%_CE6Jb 
z_jrQ+&pk#Nb|g*fEOSrS7+VT88FcSV!n=>$z!^v`)s>a^a|vG(4=gi- zlsGdo=<8gq3u5^67$YCm`aE!_)~_niHur$xgf)LqRkq?b4#S$K6lT3aYNwtd_DIG2 z9DNI>+VVSQ++;5)OYkM1Lsj}GD;+A){ag)Z-3#8WHZ|D#4S0+dL(?NhwBd~$H?<{_ zcXe`mq#TiYX$GwdU!J{po|lo58+Ub}y~lAmkG9ByI+}MjBlh+^)Zt#8D~myzao!s5 zQngGm3*OdvCvnTup!K;m?}#PevnWsHp=W|B!Q9tFzj_8s#Zo_eolh&rGuB+wuUBa| ziOSiciIt}khqQ`kl&LELsF%Sold-T0(>pMv|D^v0NIh&fI_Ih8ltW9*{*muOe*F@j zO>!%6Q%K1A)oO0Js@;y$oHUJAg{6`+u{eJezXbV|=vwpW%Q7Vz{_{;SacB2Cd_z@C z%bglL@e(Q*icV!D+(`$F>nx;OcUruX@59j>Y8+#7RZ-!A0dQDD`-^ofi5)|Hpxy4t zH~mF=^ISONkY#s*`M!JZ<@5Uk`q*!s=HsJ6Mdc&)>#irW9DL6}1D^%q%asDu9)UIV zLM6e!UpS5ffq%rq-Xk^Dsr?rA==N5Vmmw^9=yvS!6Mle^?T+&5euPlnK=P`9DJ zC6;q9>*{ApVFnq7;^1?nzEJ;&YDArVarJDds4vDCDeRh9ZEE7F#|~?zvds#nX3Rh- zr9JKt-UJ8fX-XkpQ*2&~>6u&KS_K6YJ;em6Lyljt%z1k44e3R#?DA353b9jJE1IWO zPP&b6Nid2T_Dq#{3F^5$AAvXR{Rpo4P#(A_bfjB;;r(L1$;tB8shIkQ7ZS7~zpF@V z21>$R2S<%p4Z?LKyoWOb`}gUD{fJR)$!gVl+dfRr^)AbMT;XBt^f(>53Ri|BrTRRD z5y3QMtarYel%CXr+ebckwptOrvsyg-g@iBqp|ebPePVJLhef$QVll~yB|~w#bgppRlo9hu&YstKY?DEh**Heb zdYxQSKGCno#ka2af!*h*4)2crJw^)A*3}KQsdiiGSUT#A@RI_W9k_WfscFrRH{GA{ z6Bs%7h#o_jqRTtW_mQM!O@w9DZFj~pPr0u<-wvI$ubOnzY9T{lIF@n49PeO#^Ax}X!e@4}{c9%sm52Nrqm@y_BDT)*Q%}I64}7r5t;-X24z&*K-%Fj>7@xXoP$4!k-Pseq(jsPGH5=G8;@rnB&(+|0T5i5D z4|+IZw#i@0{R)*^msB!cfpxA=-?sZ0jI%5Zdpj;0P4lq^(UTQ!Q_6blP5yqGU z5Bs|F1b$Bd9F9;p>-WJcSKxOqSJ2^pvt}q=S5VfB7P}#fgld*2$ z?UGE*<(@x^%w9IZJ;?}lJuK)hUC|VY>@nPW7jrjk+S%o1bjO}UeKOgRtrVX;r#*DC zQug_7N?Y|qN;DC&drUxm;>EGpJ9z7Uv19M_ zt8+3No;}QD+T0DGya@kN+V~tsacGGW)$E6DrjE6vu!V=}Gimix|sLUDZn=LGiS9J-Ng5%0THdF{5i_p5P0b>%q4o-lg133W92t^bs&&Ke5C@8$&akTE*b-HU@m(-au@w21D>;1wS{TWgui6BOPk3= z>4u1M@LlvhMzcqm*U&QaR!~QPkdP zs)>MH8-?-ts7Yr@kzk(EVqaB#v+|Ej;b&}fXX;j%G0}}qIXkSn^iL59H-^jljbfv{ z(yi-PMMo-ET;xQzI?r@aJ-l+?)VFEVu(M8o%7>!Q3T(A+E3=_CD22)M)ZSEFuGt`0m7C)VU_v9KbEUJ#M< z$b^!9YFdZA){>fG3v?YX2Da`pwFfd(v0S0$dz`kiiyF=QiQ&)us^}^dvn* z8xgK|a3}8?9A8&Bh_cDxlgl3U43dNl-Ed9WU8?pm*~q=VsHVzXNok&os)27#%^&en zAx~*K47ZwvhjymKuzHd>MBojIq^CCHOn8HKYzd2<~n~Km}PGmGies* 
zx%POwr&dnw9iOT!Jzpdy3nuCZ!d$r3@;ZLn%3Fqh*4OhW^;*(?8AaU7*5LzQ6Wq=| zs-kB^S!l8*nir$*AU*dqY^yDLXH%N1g(JpvJ>UMC9xf?3gr^y2WvJ#+39-2ITkLu~ ze7VWPkzvO@9oe{l>B_laYip$1(Mn6#%ZXCeo+jH)uRYP8iC8x^PtDDyr~LFoUlN@w z8Ae;FwUSi>A7Q>8H40))=WjRIwq)%eOHDVdw88)hAKM2?<<_+EqLGGcr^)Gd+zrID zv{I!4D-%Z#FRJ@W&E-pWEUL=5(GA^$uU0WkG)H-%X2Tv7rZt)XmcRDCT|P_no(TPf zamCYK$ceILrPgkMDzYp()Ccvpl>FOK|`?5|VR?6vbFety)G6u*pC?9LDjVw_zkwh+sHgzlIt(AVsv(}d2ltA zSNMoq1NZq997;he@af3Iq3ezar;|<#fr~_r%FF^khASTVM_0CbgF8?$R4&C%Og8WaFn!r<*A?qiUKK z3)=h7wAGZII;bHPGK=Kjk7`;`RQ;&(SmpN$_;z6u`p z2r2Jl3TFyGuIHfV_K&SdzBH~T&^>%)>81JXrdqJ=JIN$*o3MwO(;rKxuj${}wnjaV zQ9B#(N_E$oY^490j-T2}88}@{bA2d2iyyvDof*E-%HrI~gxG1)nOXE*$@Xf%{33~y zso(P+H(R~oHMli=xP=!p_(9^R{#E_Qct(XkEuuocU%33^Pi^y`CVjGD*DcC_`k9!$ zwWFb^laQ1@m#}=1pLO7jj&X4nK2_pG)KU2L@oi~^ z3w1tiLq>3W`KuKQ%=NQIdrb)|-%%-cA)Sd!*X8`@@6xu!p3S)Ju{QN2nvsdlpr+1#HEK|-}HolpWTM-YBe)Y!M`4bj*_5uSI zz0Ex|5hbe?3YsyGNR$qgQcHMynJQihf7kBJ=$!xccHhqvwXsef&Tff0&!$E6=n-*Q z_SsB>HDNB@v~}Ks$Kk&)G8oC26}#t8eYyj1Z!wj7-^HS2_ea}UhU)J(C#+cJSeE0? 
zn%B;EQFWRGXZWI}Dj!d+Gjm?2YG1T9nXtuetj6H9n5|`#u09pmPZHLkxtRC(y`4?} zseYPgY$BO5ZSsDKBxPT%`0Kb{Gj^P}BZYY_oPQ>L-7{4f-rgvd_aoV*2&Rp*k?*2S zk%xc2TT@ewh)vK9J8uVDO9)>IduwSXIju9OV50Y)0bQTDUe!$2dS7if0rPNWPHRuF zKo9SQFFYL{x&E=@R<}+4fPQf4V9tqCa`JrL#i4^d^v$hKL2w@?7mZoC4h1eBV9aqYC!gX87hN9$B#y<$&u&rVY(AvExy^E?Gljg`e_nxHT2Sctb>k!NT}%O z`zUIST3jR$eb~z`kK9yi?R=t(vO*nAEgHq>oN#E@e`zK|IE=LLE?$&BXl(vG>kvlI z7nt!#e!;zNhauxjHq&w+zYcNCOiF3mA_r%S7vp%cLzl-{bZp&{tIYn$;h@@>vdiEp zmY;0RvzOr~9d3eNCeAi({M~}1F{{HRsd~*GSVfbH{OR7q5Lx5?Bl>>)t4hLRyjX7Ld22i-zVNx<6EKU3B{^u8v;(k;heu zYQO3)VNA~Qzkff8<|qqc9-<#;PShRk4XVq}ZZ`d}lOh?2lQ$idBIXI#ip-5{NuGm< zUWEO+of3umGQa1DH`nabCgGygxVmCb{h$|-DB-%)?9`6%M3PHfIS+(Z_|XmC7vE-f z4Qv=U*4!IZOcU!D)w}Ry=j;UV!Jz$>)l!Dph+2f)9U=Ps~-w~1Hgz6)I$H_2Ve zNl$wWs^QuMh3u2YVV85ScqP^pdK;#Qr=E%y3;H?3M_lC!*%a3hcI6va_D!0Zx4x#` z5H$9t6ZkRWS#s*OKBPjceXZ+tb=CG@RUNkMuoGwaXu=VJts8wJ5tr&&X?uK>&mXS1 zqucXSxLd-UrAL%*Il<`G@G)w~e;`iohM4~Skn2e@Z>ifu~DHpY4yn*-YfiZEmN3 zDv)#Yrp-UiqkY~MJ<2PuOuCeuirU1~R@=SPN;3nf{J+HRJqv7YHxrJlo(ahEqoU$8 zLI}2fT(@SbTWT*+|IbpyUS!;T?LyU?ZW@dTb(g|?((zBmd3Ag6X*3hJ%%%X3V)-Sx zjqw^K@)I+&b#c;Z*~ox1dFs+A+dR>Dk`T*Xx=G|`9r%p$X9Qx@m~k*7BEgu4j);nu zfJS2Z%LA|qSD=wvK4PwHfTqvD#H*vSCrM<6Zn6k>Ch9eo&sEgsIj6i&_s=CJOVVpzwHIk*7)U)6$BMUo zTup9iRy31g-^zWW<03QHoXqD*^W%Rl@OfKF4iH@xMK-DcJ)ZOwjz{vIY&Gr@ky5dF zxqX<2Cgjx)H1@j@XrqF$C#IgJ_LC|PyinBCbkrjz(m}bqc?-8$o@V9bt?A_@ohEcU zE+^g=R5E`d#(&l3&NbTTVQ)0Oor_ZTFBgQ=_D`->Z~w!GCM4;iSH7opozvJ~roif~ z~}%ly?3m6-r7niZ;BvLRMj4WWy6~Zm*+ag&Q-& zr*N%M&WJG&D;woIKYWuWh7lJpcsDovOl4zVrrh;5-9Ogm=@ApKZr-M*L!pK>)etrv z;mmHLzVrx#ehpEJ_OBZ!U2b+?{I8v?9hc7ruKn6qE7;Jt(Q4DN$*0b0Pfzz_hi#N} z>3Wp*6giLY8dVfoW7?|=-1RytHLsak)4)K<^Dt=m&E^#DVmw>F82BkD=^dZ(Y6V)M`!wfl3Vp++SegB z=n!_=>a+_Bwhf==l+B3{NAR21o+h^aMD&i{*|fNxPRI$wKx$70- z7q(;Q*Kz&t4<_nu$7Cxnn|%))KRa0@iBca?V*k|lG@7Jx##re;I!}^raC1H8_qtmi zj4QB>t+Fun>-cFrH5LnVcp)&;`QoJL`K-r}u9f3z|0M(QOiS4` zNofJp{XWm`a0q#S)c zHrM+drV2MI^13Qb`e+r@_p+r(O>A@mHP_ZaoBtvy_Nw!3?1deSZ)C5El?3n4`IMI7(GjJv<^{Zv&4^g%Glmu%F2z!T`QB57H#LdADVG8rFd42wx$puzDeeu^ 
zqwK`f4??-?*{73FXG5JO(?n{!bKw>0D=vO}Mq6^9VMQD&VS_NaRjrUQ9q}gLc=v8G zdwM6n@>?jS=c7gL(YhWx#)4Y)lzkK@+o(E>6vw1*V+Ds7a;KEjG}(ll?D|z0TT;j# ztEwm)%lAd4l17iB1dZL1^DMlAtqqcXR@j=N=RGg_j@L$=QsKrYdbw7}{u>b5Ea2i6 z=Dx~(mhI!ELX!la?U*!{ftT@fn`Z^@88RF2-@1@CoS)K|jOh^JdeZ1!7>kLXe-&8u zgwY^ib3b({OxbofMVF>gE}?{{Ty{--;2f1c>gAZwK=YYa!>OI?`&M@8x+`Db4J6X1 zPnA4Z2!khcGnFrDMYCL&Id~qzGNAC0H}6MBtC97kG5937O2BQ}OFEqaE9ViAkJ!fw zGY-%bjqknFfp^}NFDz2YCbDWFOW)jkX1L279&Rib4&%oRO)L59-n1pj?a}V_O-DV2 z7wNpzmToB5mEl#qPg|BohVI`wf?u+_?zGEa&%r19=(D5z>#%#mZv>{kJ9KMzj#ac! z`In;hM!ckiJ-(wR%t3X`n5eJbpwRl6h37@$?*jeC*#p~7J=;rEu9g_t&Y3wz^8|1N zoXIM(U0GVu^@t3cda?Q8(fgTJ%TM-5U!Ri|leB`QnHHma9}{x(Wq^8{`L1a4xaTq5?&{PEurHI~7%W~=KhZ<|Dy3|@*@_^gfSFkm=~C#4xW z^J~OEmKq56d^Kw$&0D9r?q*Zu`p$;py_<5#M*7AVP1ML z6D)q}f$@oM#jUZPNktsoYAeNCOwk*cfHUS5k-T&AR2_eyHjoB)@h%*Z!{VfA_8X0g zXseQALPfrMSJ`>77o3*e-tpK_RvJ%;bWd+p0Ay3~b)4P1fj5|FPweYGg|YeU7JM664diLE!u8N{;uWDKX)#OY0pdALDuR0mE z>Dd`;zy?L!hfYlN@2e$x+!}r#9rs+{Q{b|;#O;lCYmS1lMv^bwPr`AeNp&IZs%iIc zA@$zap_fl29B@fezQ*eVC8U{= zuhcg80(^TJ4#%a)WMoxGP3vwc#=3466vE?mqtf@Mv0qZ$#+asm(IMR(7kpKiQtil0jY*WLprE_IAv_jx-!vmp~w5bl$Ui?DSzkK<< zK5>jzXu}m{k{%9THof)x=!J`mEEDoJ>BXW<--0Ru6@g9$jGY~EAhF6twhZBu)M$k( zio7b-?we06Cg^|lA}NtCGK*Bw7Etcz=ujlNB|tiJ!@BY^Xfv1v-{HK+AO=TyD1E*5 z0IR@HMD;dH^eVOD5a#zSo9>@8Z{2g(R-$x&#$P&_^KJS*xb>Z0*iA?2+0cJBS&10W zz!Nn^{Ik#X>@f{6MHdz7lzpgn^G4f6dG{IxdalGvmzkJA50y64UeaXL0uIU0)0YQsJOraLdn9w(&to;XN137&C$CaU=MmLoV4l z@BAnkVIH6l_2nzaY)kfdc3%Ui<{yR7I97BWJWsA@=Xce0uLI*wir^rtd{r;@3< z4|o(U4U>%xiW2|>)E>_r)Rp7C*ItXGe~4S%G4_D1zwX&*)cBIZYyM^Xjji34hs3H> zBSb_kBIe^qFW|zLm`k3Y4X{e#Lni_oiYSCTYSqdo7GzN*n| z&0*iu9M?NHS99*GYkH+Mb0vmt+TP*$#upMefD)kd%w$q{4T3O_MZ(tK0a<4mJxZ9d95*Y@*Juy!g zJqlCUuKCDYzp3&hUYFnhfqksV;-ygy+~Kb;8|qp?;kjJ3bv{Z&r4h)&XBdBDJf|%Z$sF9pX496yqlbhfMdk%)ag35 z@9@9X{*%1Qq@+PDc9YH9yR9`}QUg-k?9@{!n-aCJ1@FGkXjIf+Bd}Uma8o#kIzfZu zSIO~{7lolWJ7y4f(h;;aGR)yc&WsC%=G>XE#{WED^0Tn@XW~>I9}{^7_sXR+JI|5h z)GNd!yx3j7A!m6v)nR5ws{8N`;t3_G(LiKGW6crLMDxZ`v%CLWYZa%0H912KyE57c 
z8}DX{*?7725j#Iz#?ozq>%{ApH6JD}9cv)BZE?(tMCR5Fh;e=M+?EF2NxG5n)52Yg z(={U+eDaDlMiLy_=(h!D_{@-)NF=z&)6YL+XzF|rQvo_WM@8zf*ed*YVoFxGdNLA& zg#)=?d(1b!savZYkHo~^`^JB*eo){-gmT~cQ=!B-OzoC|`rO%#-AWz*^XB_%RM%b< zTyI`Xs%eg$S7A+BB!Um_(-Q3&ogJ)wGF~1bC0`*s+p5MnV8pkp4=|xJFLG=~rq*KD zftI7_lzX)ET0FRaM9j-5YBd;@>&_FsHDIG1KzT}GU8p3VU-hzqt;yyu{B0)7eFo|% zylsnbW$RZsub8rN`qRzj73)K!*uUQVJ7Kepk_o z>)M)jeCn)f*efi3Qts?Cjhk{m_9M=xnhhMIYL4jf-7h;CfvWe3ZOJ&$*ZDo$=2AlM zAAw@2DSkQyt7=eU90X}-=DR3R8rSQ`+LXunC$u$_vGs{kO@lI@AO!wou zSs$FaGMnr2p~%e^GdPY7lP(~Ue{((q$?EHUFR+8lv6p67>t$r;=OCkK!A zp3KRR;MDCtl`sje53M+|%7E@78wheRA&dD`?{!A#_^z)*KjOS|0p)r%I?@zWeSqI& ze9dL=FCe`z?{|P?4+MQlWUVKf^8UL;5$>Q?&o-FZ540k{wKNG~KywoI)U9w!weB*7 zc04$zJ!(N8APsV;jBKeLR8ah&unE?em(;l~3K(PX0_nN{tQPW_(TP=U)B-u-`xDINb zaGKI)xcvZi`|=64h||4dcKe$7Sl?2p42b8?&&|~PfxCyKpfY-75!^q=HdL8hEBFso zh-BKz>YiNZ2wze)c{FoSVl%_IpNx^QtfOPwiErICfqGL4D@=Gm^bN*U&Q>~9EwxW?uA?)Y)k8)GN)uyJNkzFVcTY>rJVD2(_R3-k0~jh%(P-V z!okPfV`?WgE@Kw_=i$q8VO%!88?4 z4_f8Pi+$#KZ#b_JkxRU&1~+(cEs%rd{;gD)0S%3r-9SsWHj)ubxt#fGSR; zhTqYEZ}k5x8GP+va>G}A6?BLMU(?c%bp>vH+WX(Hy{+B?o9&ydAxrSe)BP%F)7rra zzYHkUz%sU?z<*?hbhW0s5CVJ%d9pnCLceALG^t0jd9Mmmi9>?+4CqL-4*V8cyt2&| z6TFuPh!FDKUn)nSt=A_xZ4 zfuQ4o-@#^(yo{08QufJ+>G`XV%)u4zX;BKIY$BN;!l+UU; zJF_nyH%%CQdJvlwmALWtKXIhs9+xZP0k8^zPk$n2@F-->_j`n6wg8E|Rl%TpHNz1T4n<@S&iB80sZQulu-oZ0${F(dHMh~ z^OnNrx5S7qgaD4gb^?Ss59UzGPqwsqz?>WlIn{P*^7HG@!uW(MIu^jhGj7Rb}d`MvtW zxG{(E=8=q05+N8e+{?Dgu?2Q~7&x21vBLJD?^4;}m z956Yw=fbmnU_Z4c*9hC}Uqa5}J^@2QvAerss5T5TLeqx-LbZFHH(_Y1D350S7cvDh z;Mte7nd#HDY19tijm=`;y?ny<5XN(LZAB5_U0Bgc9Y)rn8uawSDMQ=wC!_(?w*~yf zoeZXU?Y_R}D&$IF<)^n~*gfZ>k^Fv#9smDRj$jWcAjfR2*@s-f2sGU|;e>ug`wza@ z29@OI5bPeN6hGaO$CsJ#75v8-obpqE+;oE4@o5G}{7{KF4yyTcGgA8+Fsec@2tv2aOcIz5i#M zWqSP8s%BA;xBHfsoRt8*vVtzkZN3FR0eTVz0f1Nd-=BDQH~^rIG$HJ;;YM4@!9XL@D#X7RZ`g`JLT&^8Kd2aQ zaRl#<{FwkqETCr?wgHIR1+AjWMqV;?8>u{U!GYiVg}lCf}tKdLpTdt(*x(8MS(VOQ_y$V!IA|*S5VNS9t*;0jQ`3%9G#7V ztt`SatELppCe@&y1fp+8;BP{h4Xn`)VP*%m11-MtNmF6vm<E~1fUo!*#A}@ 
zdS@rBOlT|ae>f1z9OW~|b^pB^@j>$qIY9_YphrhR1$-cA&5%Vf z*Z4<-l{6)v0?{iffWO|ce(pLyVNU@pkA?0FfY1gmyjws9OwjUAPp0v37UmuglrW*9 zDPH^!IFp3jAGLeQxEe|YGobat#^3E0-h|Q*5*YXQ!0JXr`Ex!;18HojoBlV#&CS7u zdiFaF2ebe3X%2r$3Gh;BYn&s$5fqkHdXR)A7Vwh@`dGd7^}ejl!QdA;5QlYd0fB#i zGwX)zpZ%^Nq@LGzJql0O9Q>H=zpqu!0H^TAyXt3vXt}{?(p2vVHTnKCWXj=xphy!_ zdl|CZAjo|Hr2gat`Ztq9IkB~L3*2Fm{g){*5uh$~5WYE7?CW|kT-6|Y4AAd7{WGEt zZ%rIZyg{(xGu0gnBz71Mf+H#P{UGRy3;6J*?FMYU1HcitQS?Qi4b7ps0YoW*b@_pi ziU{BN4PE1rz$7~_BwGdzy~k#Jk(s}RHo{qJc#my@s1C3@*!e$uW>pP38w6I>&EF=Z z6q&6M4VLqffa`;{p0YW7W55XmL6giplb1~pGbp9p194595R@R$H3Rq#sB}~P5ThaI z=>b@dY=h$(Xo`e{VC>%vAwWXg=mS`S(kQX*CIsV0!6yKz0zBQ8u=PlY^ax%VYd*RM z!jvP3QF~zpjnLJVhaM-8x6Oe2M{D#6zpqC|*Ex~#K7kt3VZ#Eh|Gf1jSx9C&$aB^jcF*K(^*3|*{4wltz%=*g=fJDy`R{HOJHs7}3YcMic)oE5H<+6~%BnYFTzdJQ$;d z{$M(VAbCLJ1{i5XjBRcXNG=q+Vd_`yKiZ`Cm-e0J?PRJ+Q@M37+RcvIa#_QD_KjkdJ$yw6c9b! zl*jMLpO^t`B;-XSeuZm;iflVN%I76@#TLjfBAUz{;Zg8a^S_>>pntC6u*(6uN{0l9 ztz9dXfYSbgYgTYitH*Ir^S*jBaX^+s^-Q2|DV4%cT|+wn3Z`~*#P`C&JAwyG!ll@@ z+j53c+p~c7Br8pUq;=4mDg zYx}J)yI~QOcL)K>XdR^RO-F!Fi?-^6DJ0E#yN1+q^PBn?3d*_@gZ2IM!1GUkv6-pP z7Vt*TgH=m;X%>&)Z;?>MCd3WIPcPFGNYo92=;HwZloF@%1!}>JJw6M<05n1T^qMXS)qu(1uj=U#U~zd~R-5eBPk8;p@_tL2V^q0}iddOVr}f#46#~%K$bj|2J*x>rk=>GtHA`-T7r#H95>*`8e43 zbln{x9g?+h^_r`ueu5Er1`W+3O^BBj1VX9SNb>2SsD@z7D(EEa0%eG!78RH|P>opQ zJbT}>i9k1pVm3hXL6AYr`Nr9{g(L^L!1bW{R!K~<@I!f2J|E$Gl)BPjw(LO=Ts3$3 zCfRS1WG4oqp93SSPtF!#yw(!nhGpwl3+K6C2OTD--%v6JUcE~8eEg?aKfF5&$!T{B z=+Yyla$kBR1B0RdRuF@Xcdtx9LCunf?m=lV{>-f}{y&ml?l5W1JyyC;0z>C&z?YRK zHoxf-0uL0Mwxk1^1J=HLIM|+Z20M67{n=kAf)9Jzu#}S&2=Jw|&@^q|F<_@9CN3f> z(;n!E%(7s~vco|F=NE%0CR-#Z`wOW~Cx7o!Z0h;j2fuh99v5h9Y=OS35H5v|uT@;AuyQQNZVOJEEdCF%m5bdc2ZwvU{X`BP3&>6s(rel!Q zQ^G?)Ulyl>9{h37vrO?Ka7QQvIchW;xK{^UgdOCrpzvFLb<3gGR%eR$*j^Ij&3A38Y&{ur**f}iK8GD9kr2@X z+jai2>~g!#S_CBbA;CKVwJ1>2K<13GJ_a|8*&(VN;cV4mU)n-v9@<;tH9oF7 zoh`^9I2M$I!&`4c;!S=W!Bk0TZ4i6IhEX(DQ}?%9EBS3@!Qz<&f}mx$t*{SJw-cWW zeJuae&|AcWoCsdIoeDNS{c%xFAOZhKS*i2p6`xZ82Fe1f1-06BogH!u`LIw_^w*vGtt( 
z^RXt-CiE#`+*w(9XT3hT>%+E7d^9(mfD1+ZciW;npjS+?bW@}l9XvY>xub_vs4a)YhX_dP?wFBt0Iyk~zsw7kjCD zOBkr#TSn9cgSV3D0?;S9%e2(u5_IZlkb;?0cakyT9S^~i3+%_AA#~Kl!v%R-Cs7~6 zr;EC!fKF_MAPKreX{ib!QhRb}J=p^O{s^ESi=S%*sSTKBZH-hOl_fMs zKr9tKTAEzT2Vq$P2QCmI_@vBL1Bgu+XO;lH=yc1sv~PU%2N6Yb@YTy#PXSk3q9^Gf zwLF}|3O+Pp*+Vvy>$ zl^oi<)FVyI25}J!TJdm-*%PwwR z75=cQ_qgO(^FL*E#O?rz{&hXA13*xuy_S=N1r=1)uV;V;HPu!v5e-F6zoBcMFX(~~1!y;fs}3L3 zE*rW_3b>mWK^oqfgD`^rm16D%xarmum%2;qefgt4(P66C8MI+74v{)L^tHs@~Y$yGQCNvp%AF#BvK zEuV&Y5DS=%0Z{7Uv4>bhaI)}1P zjo0Ihgs8+lLY4mrN!8*DEntXI+&lA7azFwxV5$uh`UqDS)ea?gp=9{}FB+l)E^r}x z27oyO^GVag{>wBd6r>N$K!j@!2Vpj(Do{>=%W;kX8SV(?bM3kgm}D%p0VQ-$LXY`i zEh-ApP*U*;Mhk=zHFn7-$j)*qYA!%)Nu&TBG1YaTD_ zA(*&a+wT?7i~9l@iX2eX@q>D7AxQVHBQ}5>T3}2tY=f%>M`2(Zj*z~QY~)b+%ffs& zu&vsF)cAKh*AnQ|yWz(cd#nG}$Lg6Dp1*0(<$umz8YukXqn6BG2m>MiPoJ#2$IMF- zod5{SpjA68QyURU-ob7S9t%2JG>uUgW9%UW1VN!%fjfjNJ>OdaKSLg3(h#T(4axbB z57Uo8qfH+*bLau2tI+QioabJU3G;|aFyj5F&u<33=!FOo|L4gOP_wJ?p87dP;Ego2 zYP>Tfhq{Lk{?UH0Dy+2-ZA8obns}H}3(5Q^W;DSZoKiMyw{L4M^zlYPaRoU5E-7>xw-$29^buvIIJpoD#QwLB(2ag)YKOT+Vgdww%4$?2& z*Js@1=7X7o#Ka&={^t;*eumTqcn+zC&n!)AO^lQ)bQc{l`mqj876_DmWA3AZHXQpRDBZ8%rtZFd zr0~+f-iyxw^B(~M=jh#J@AaC@i#IlUAU=fmYODc*au5V!`Cc3qI>2pp=HE>JZ-gAF zRJUz>Wwi2cEg3@t2_fEw1d@PeJ%Qy>S&U>)LUro#!!Qa^9Xx+f)3iH z5o{I(334E|kU_MS><7f$X26pnX&yfI!GqUtfM%H20#MM&M_-4BXo=vFSN0YVGhEeh z*CyLRHj6r4iD(a$LXgHnX|EL_s9!AS3xE?j^m3a7ilNN&cNO|>I|8&*^bjJ4R4FB@ z!m~=%@=Fgjn0jiCnEXlm7$9$I?YZ(94;@-giTmgVEJ467s>0}BbBOmRiLhU>+mw(7 zQty#6QYZ*xdg)mxD~}JO!szBPU<2%sM5!$PlP=uR!D}Fin=L2*;RLYVdg?=HF&|$oyFfAT#0FYf!dT@5d}Q}>HCS&< z8c)o<+vB9yrMqmn?YKw14}ddVO&ECKX89gJ-N04DuT@$BYI;e32Ksas^|ZjcG`-s) zgc&T*$tG~ce7sb_S$iN7m_qcq^{s*HF>5f0{)FuCIK0Fdn2@h!-0?ybU_> z;6UpG8z8rypYX(_AfxZINk$Sy-tGUG;@EV_yd9C3{X+{MlXmTDPtQ`v(Z?81NE|I@>P27{|BlK=Blw6b@&ufE#EYDdgD5Rl{<#ZbplBGlI&4*! 
zI($(MoYbBU<3$Q6PXX78+M`*PYme{pcwQlPjDObSHLPzWQ*dXnmvYaZgQamWLhqr;mjv!FzP-QZd zQKpa4i?+hjV;MP7(5btm?%5!1{qR^j_EoTS=FbC5gkaAnfY?BTtk}on@F#fgKd~Qb zOLEfF#B;a(TRnJ!iRB{Uz;@wTes`euD>*Elps17+3V0$2M%%PWoqNlpNKi!Eyi=Pd zD{=Rc9s2o*xJsX^^&ZZbxxxzE>L73!3(2<{NFw&gDy=-V4cGD8k7Ezo_4}% z`#bf%QoqL06dvQuUKZ}VXNVY>w&jwOykA-I0rScZ=he`x`HI9`fB{j|o5c9q_1@^6Vwg)Od3DT=y)1Lp>D6)fhRIf_Uigaku_OKj%Ra&@;suU07>$qmFXDjkt8MuO@ev_v+kFB+iCOZS|RmGaL@~|0!9+d8i1fkPzNQ#B+no!VjKpkvR0%o zUiK2YK&Ur5O{Hphwrim-HSB}45^E3O;}T*s6Lm8Tzb$O)gyIxeu-_db%M#J+hII2vqd&J(!Ue3d^ zS-A~t< z1X#F38%uZ{3M>nOTBmrnJM~DH(IPX3{&2p)^!dY?Z~7JMFRoEp|6YDy|_b?cg?ht6l`I}R|ETIyc- zV^qYJH4CFd(M5$ZXr|tbSqw{s*hJf+1dl0I4P?CS`0yS`#f~CRA0v+#_@S&b11WuS zK*5Yu!2y8kb=(D74a>37^lU(95i4tx8xo9=b4+ajIE!iHxgi~4A0*`RR7>gJ{%WXy_jjb`h71+uk#vb;hO7j zquOF>A*JgF5wG?#Ve*9(mYM$iw=z4u9~~TloFjjEsO*wH3Cy>&61O=+;pdSY_fT{$ zl}iCB9R2gr3lqa+=8+p0>6${;Xjmhd6^#8TF00av{Ik_Wl`U%ItefC)#Z*6`JfJ0l z>{f?Oc(q^c{s{L!f{>I2NiwDn(JezA63DX-^8B5o>ZBFib;j|AnU4(&#*Ueg$@C8c zZ(pbu^4NI*N$e>Ts161dHL6G5WzL-+IvH+a4%IGnvZ)+rMoQUSQ4Xh~Qso@?m=K;| zlHUM|(fYinaWwh*2(9PU6ZItXG{La+a`8t;h${w_$t z>*sWKe5ekuITbHctL0ajf+y|D(jxkvlJ2CcM?OzEGeV|m@3pe$z1tw1ASqf=BRSLq6a{xomy+i;$jmnK!8ov>v5aQjF&fqPes@$B0@g#<+Xj%wFemO)ZeBwF{P)c@j6FD`xGJ&tu^(S^K!DZ2hP|w-gK1YB!Y# zuWA=$zGqe{OOi zA0K!BWT`GbM$pjK%i^qt1(~flju*!XZB~zdIRovrSsUa^P6;(<7ZJ%cFo)b#5XMK^ z3LF|9NY-2*vjazUH*FX^LDs&SG%pHTgNFd`fSs*cq42Nr+}U7 zcNf^Vp|u*nw?J7M#cBR<#59PFVp*n7v@ifTRMXRGXs8`) z2>GC9P`pA$h_mbpg9!9=mg5Y`IpS)&EWnE}u7^)dp||W)`SK(c26pwA3tM=VzyB*6 zlftK)z2zql|46Xj^AFzr6a)~C*678rT1sOD7!nCz6GmY@^8bApj!w{y=3bZZsZM3A z6w@Tzjor<@l+L^tM3Q$7p7Do2ZOv^;b-FdRxj{Xot*F=T&S@>-$Q9P?7+R+Z*3b6R zu*TNSJtX7^obBb*`k`M}X}3w9r1ZHye8jf4GX~2Q{3)Yt)R??pY(R7e<1$}26pA-# zF7L&=-DG`UVP6v)XVLFgl}I+^b7*u5KPBEsn~m|goT(BHNMpZ!dN|FdU~0Q}+^j@& zO52X!eiGa*rF3_k_6=2T)g*fA4;{^g<75+d;%Elz;@E>^I5iu3ImE6>gVLvY={U5m zop3*+zY#eHi17a&mut>+>Bo@E~-CE-yEE+pR zwQ^CpCq+?_HjeM{MH3=WQT<9<<$^!{0QuU1fO3)iKI4&}TLZr-UCde_lTyrjKwqQH 
zR!tfgX*qc2{S6+*_4#=Nd6!7pyJCvks5a#(()Ie6Cq>sTd7*M3h~ntA89-d{QjZFa~~i5fg+LIa7cuEPZoHw`o1FsHuvGsFx-kFXHF&6US2 zqlu+ie&e#qV1#%Xa1Tw2OttmFsLYy^T88RLcYGfC#MdOCKFyX}f#onJ(9up}N`{Od zy~$~(<*Q=WqE^NW+rpP?UsuF~PN+P3L^^hI9I~!yLf+k@`_Sd1KnnS@ZGoJXRDh|S z(yP>&E4*XhtAXoePrw^aUR|MwVPLl8IuVV4ZAb-B-)=FK3}llegESNFatAsG<>!pf zhdQhi6AVvoxouZ;n6WI0Qf+ZLd?MVa=YAdeO&4J>N`ew#`f;$8*`%-yTxZd(#L*}E zGj6vwyPQhT5qZ&DG(PFyJdNEbXf>hyE zgSl0QsRn#ppZ59-7pps9UPaV z2p5CGkc0xQFCs{-!hcu9F|zTe_p@CE}Nvnk5z%q4+T_J)#>D z{1#-?R^(#1*~m=~Mk{0MV=Bvs_;TIU%eOULa7O1Ink*^EOHS2KD-lwpP!g@25|&jA z&k4MNqY=U@a6g4-ki3_% z&b)A(7r%_^mR-TYD6FuE0hq}a`VTB`uF2SL6`1z}Z`D_YpMXp=EXS&Kt^#w2+U34yYFOGZM?U+;|5U$7SZohc5OZ zrS6W(46u$rp!o6;8o=GDAl^mR;LnPW`u936U%2lq2Ygflc3p+QMgFMMSr!G>YD_VE4cAl9mkd7&sJhwAz>w9KH)6)iQdl# zD7b=yg73`E=-yZjKT4mGX%R#%r!-&>>TZsG`x{1H?4iB>Y|v-!_{axUhkBiW6p;7V zPd$brCt4GHnJ~TqaSzabh?sk)vct3^_Jo@uhli6Gn>z^tgcqBlNx?zDTbnCswrO0q z6h0Q5OBNY7QE>Igt6;XaMe6-#nK^FKWi&OySR6W-=vrd>GPVyb*-lVH!o4E~VH$~Z zJ7v(6wyV^BL^Z5qW@Os>$@61n;x!Fk^IPiRhe!S{GU+v39MdB7E*1V&L>=n6HR@IA zxb-q%aI8h3@w5>oDz9Uwek<)@Tb%6)Z6DZj`CIKeAT&t`RyunzFy@e1#z?2in9?Fv z{DFXq%_7LwA4XAgwX$?w^a$)r)WplwLG(tT4-jdLwf#Ew{HUcw+%~iaDDZohhbY#T zO$YBX?*7|-xofK<_hKcp*0IulSlf=!c5}~`UQ6(8&RW(V;mOasSCyE6jCq|0?WL>M zPaP$t&*9nInb_L)T>72%WfzIf(th@ahF%&NQYc$|i-vgmQe;Nn5@wZ5aej#KvC8pOrtFgrgqbp_PrWw=>zj%N0e&XUO*r z`$iyv&ykS-{nI^5UN3H4u4S-j2 z6z&6Vrmy4hatHJNh2A>QqLu=C)OlaUbQ*Q>zU=m*BF_y9Bq_vngbZ*SQMImOFDu$= zlYh?^&5|I4OK-mVKrMiZw~;($TppknWQct!8{57Mhdnm96R8?Xv-~GbR|JXc28`{B zdz-fP1M6;*eGhm|?#F{y1R`Q*I&y`%F6^W6KGNm4sq=M0DZqcbH_6-!Af6K?7{YSGq40S4vM&V}*Lznp671@A2Kmmog~>NZt*-3^?kV_Nt$!`%l6q`O!{ zZY;mIiQv~g;bowfMk4C*APh)7WYUTJt9N71@Z(b#qv6P$e*v|WFuUxWP+ z9HHJH3mrGq+0FY$*>qkRUqSAcUfz?)}FR zJnt&7E#_(Z)e)iks*$^kZ%@zm{d9n}AY0tNDVwQ5D%SGMjbUc&`F}esyK9K|i@Im` z-va6{8_IxX(^Ua1%BV4xy+lym5x#@|O6LD~g2Y9RD3FZ*^9ZC4@=-b} zwkmgHpguC0Z%O+oJ!$lmhiVBt#@5yjb5*ijP2NM5Z4F5AN$3hbWa?S3cl4)OuDpHz z+bmOJ6I~X-%v@2wuE}LZ`UW=^m7F1fs=bV+;9X8Iwm8Dw;PC68`Rng+Fl)n-@l 
zfIIccJ7<+?xn~Mi!y3zqlsE<(P&#`)RGnI32XTv`i~SybynRMqCR+@lJ;6+wIYW{u zeD5NeoF(DE34DaYRo{Mg`*EIeGYOu+H~H1lc;!#uo8Jl)`=6|MSxI^GRjQckB9xxu zXq5QPu3Ibr?sDlj$`pA#^qSY<@WURFYg}+}7W_*r1wS#3nNi5`^tY!U*R3d#PMHYn z;>UygV|V0Ahp5ASUW@C7!*Q0C`{t^UmP(}xO_G4g8?>UusPnoG1fu?Ev%yzuV zQTiS=7grA{JTz4hsx#x14dleBg=li|o(HH!0`(5GM64jRdh6&OIE%dAI?sU zt166l=|R@W{Bo0O;-w_gSV8ONV>&7tP|iSLy# z#L%-r^5tv_A_+{H&R95=KMvgVmw6+rFYlu!y`xGh%NGfER%J}ux#M@Hg*$RJ6vsQ+ zd-tFG%!|Hz{vfYv_T26!wH~l<VoFm{CPiZ3a!LQ)XG+NYyQXS{Mh| z1pxyms_h?O;vn_$ZRX$WFPn5f5af=6DC-YjbXbzMRx&ha$eC(If|oM&3(fLpr>{-e3mz?TSXrs}HP+xZN(L}%Sk*4Mm((De=n?98<2o|^70bvY5Y_p_Zw`w*oW7#zv>Q*IEwIfTmCP;!s*L$ z-CN=I>WpO@TA+xn4sNXHW-{0??hKRFVIj2JvEK2;Gs-wGRS$c96D1>c8vpsL z^dwYajHMf>PPi_kbDz8YHRX2vv>rp@qs3C9v1nMj<{y(X3e)+TB@qD|@FCV_p(SGw zI=zvbqwp>jqfY+&v2R6&U-tKBYO!>Ch>X8?PHmTtRAZ?nDj=WNN5_7rRiOLqcRVrJ zn)Z{K%cu8y%gl~*0zDZ%2&|9$07Dw)sj#clF4;^AO-{=6=_k_LZPH7 z0IdR?8wsHwOmV64?X>@29wLs`6#)x8-u)&eivdcHWR0#T=Iy-pAH(hCUh4pbOT- zmdDe0pqF`?xA_a7XKZ*+B=gU1H3!aHA%Fv-Nqj4E(vH1+K+4(X{*t&$IWjecMUeNC z>!JDg?O2Q?itRw|PgMHgfa;U(e?7oU0)K$)MTJPQbPp7ZcXtt2$NLy}du+m6f@pwZ{T4-qN#6B%-DU;Xow++CQ-ylz<0Jo0@4 ztrFwWvyTJSEwct2y=|nEGA0qB;ib(r)9g$M>u0K(@^CeY(b;VId#dC@)JAynxun#H zB1(4>rgFx$WabIjJZT}`gU9ZAZrm9NaJIKk5%}XjyZ-Tm*<$t`N;K`5*=s*|sgC>( zt-g4tXL79IW2f@x+ZQjyw^4O`N)B9V?Sjq42RoKu0{+bcC zl;9Udi!5W_l{kXU0w+eowM^#w1zzMgXJY8PNi4jXMZu*Fr0T0>-dSg|S<5@Jb8ZxH zFCH>IFeH`tc|L*LJ)^^8X*$u;*O~@~lT;`< z8plj+2ukgBX5=$!0N|tO$^aLB`v4^V5s#{iOO*c^OqLkR(DnUh17K3F>-YQ4;^Awp z$!?7tDGq#D+&-!es}(h9UJM0yiy9TVJY7MzmVe^iN!C7rfPT_zSMSzRf(5> z^%bCvQL=qt2julL*b-ISewb#hbG9@7bf)4w0J>l`fUhgiHKo)u=2IX|{SsU2nj4mw z#R-1s|NW{7w3as{fgGRp6LUIQebbg5M`t<)AXXPQS^wf9jB~1DY8&|fh=482yn7$S zX-WL1x+Wq=-tFlJJ8%@K19%i43BT)O4}ahhKVhSm0cM>o9i&g-(`EVfGWa>*S4UKL zf4w;Di?|+8vy(v%!phfi)%(HV2~d6=#Hm5<>c^`+ATy&jcTTE{3vkRu5o{8Rc*}Jl z5<>*+5e*pmwOD!-kSLkiUxfzwfJA*bIF7u0X!Je~+j~sE7f6-tCs+Afs){8mRRd3> zkJ6R_R0~uwITU}bxw*rthqu{un0)+UgB1#n5=^n(kww4aDa zUmi9I4<{-sv|WU50PW>7kO|xdIx^&Pgm4Q-Qp*FNappuJKL_9*Bvn2G_NccdBdW>4 
zihy>T^b}GjO!2Z67$2^{2d0+v2h+%7Bq$1orB1yT4&bO$580K}Y??Pq%P>`);)h_GL^V?rhX$3F-c6Jpv-1AurW!!aR0F@=T$ziV zyzRAT9&|^C_bxWip(`)Ts^fV3YkfMU0T8@`$>{=& z!vQ@vMk2WD1m#Y0{nD%n3(GHNDG|2k{Kxc{V z;~+FDl<{FFBe2X9NG{Enag3Sn08H!0T7cHFKD7gbacpXw)%B3l=@SqOiDmt$JcA6W zhtSM;t|sfLXRO1{_u8-k_t&C#4)zAC6jb*zBLR1s{b)XXhPq-tK7HDuceBcwSQV_#fZcH-;CJyEye zTd_6evD4(dCp5B(1@jZ5NE?9loSYhioL608s(_B&Sr>J1VHEp00Eh}kw@!nW^8=~k zulK;wV2ms{LL0~V%ZTlf_l0gA2zs*fne`luPF4X^=;{vupOlsWBCZTo>)>bC2#lG( zhL@i_ynefZDQ4RMI13T!R9FV`&j+H{pg-{J81|L9YGg6&C<$J|ERd4`*>*7FX-|Q( zN9Vc>W@W1ZFAm9d)EU%Ic7Zr<;pVUW!VHbF)ycx&>hr|ne?R#NooffpcrFCrs~kR7 zoT1m4AG=?oKyNowW{NKLtJ4GEHv3u-(>t}h<|=nAp?<$xGjKEQ@IUL#0Ei!(Bhb@H zpI(V|qDv@suB>VkJ9SL#EO+cKuRqYuDr;T13vJZ%0~85CRje3mLvp^cOFzeHAk$Z) zY1qyvQ1?5o&N(2c4*UDmr|!}pz5Ydd9_#(fu4PQ$uGb*8i#AipbjH;f(Si!_GZ{l` zO=_7ScIwtM%IyIqs^c_v?VTJk4KWym0I7``u=!ih5+`Z_j76H`#b2X^n(Tm4ew$(- zb2u+i55^%8PXP)@(N&LS&KIW@UE%xyXz(-;fiH2ya=dh4Ih%VY9vc#34{kc_oqc~U z0cnD@HxN;OdY#W!7vIRQ5gz`*mMsBXTo|dr#T|?>KFA}(`#zF=o$HeVf^mdJ*?}19 z=e@nwYn!KLVR=*EN?6j0C%2=P8RX6s`RG6@)?% z(Zf|h=o{e}3AzZnK!R)$9C7qAwlw`1m{(`5+S?h)7PbHcIfe1v9_K>_l^?Q%LYg9t z?r_WM;_1OSmnit6adno+7o!$nHq5-qjWBIyX2?5S#b^u<8`QQlYbz&!p=lx#h#0Km zEoRGQZ~X(`W^c3qvxq=ME~UNQ57!PXq{&TfxT)?lphTD7X>!heCb-vSjz>jk0}S#q zEh!i#ps@x2u@}Bovu3Ki-3mtcCm#3$ACaiUUl|hG);D!(!oOcl0r-13sLMelqBL5W zZ}GECvoIV)E&@rym3=U|>oD>F5?9v?1XK{j-kPuN#M3ezfnYLX^B1_&b%6$##pfEn zBl}5362Lr%8OI1PXQH}V><9!IyTH%_aG3I>mQ2V-ms`h{D1bZ2fz%bKu9*TpDwNM0 zc$w5J=7Cpse zS@}wRKjmj|U!Ld^seG*l`u#-6ROetE$~>-c-Ss2GwmkW2?ld zJe)qT4T!m23a*d%cm%4rxXi`gT$T|KMv^c0H1dUG{1AW0x7w!WN!*|p?Yb5TZNnO^ zqWM8v8BdssD_fh^KB%nMw-!#@msEbd^es7ixfC4TM%3GhuS2sr;FSp;C{EbhJZg9l5O$d!gQ?F9XNlunltZTi*U1E$-WBmrWC(ww5jZ%~ zB^d3_R02bWI8MPM8S(X|MVyyU<)8S})b0>krOSC`R|nFjv3ouzV_ryFzHiVcz=GjW z;7moF``~tF+hg(X!E?EdmHt~W+*{A0J~|>o5x)?B*_a!hzt8aWVw#rd`&xPKHOscW z{v5+uBONL)|7O*J4fTDIC-#jvlTK1gbc70J5veOBR=2tl-&-$um$QvK!k==qlKFS; zcxhuD{qgJJ=9AUJ)I3vI>IXEHsrO=9A#9oq+M|`ZzmR+g!;ss)$d^kXG4rLc)yh{% zfSPSmaA>a#>#f5KDK~WT4H8aZKaFHl*Fkl-cfLTiIN%V 
zBt;m>d`~N;Wp8DF^hb3ZlFmx_T<0ZPyp%_Ae7v}cD)i9w;d=(_3?*%aU49Dz^VH!c zEjvxc{G@FHf1GRKF+3o; zx-?z00%O=d>650bm=`^+QC2j3Ke1}*K!>*tzeg`NmJ{E3R_EZ96Ek<;*Ey!SK}e+Z zogbx3)zXfRUl)3%iR|mTZ`smzrQ&3PeQk7&9@7bDrzu**_*~~rE3|VrYjI9fNUZc> zv!BZNdhuG;sPGX2Yd9qJHUb@1opXRZ`EYy+91N@hV)|ac{VajW%v$ZP4A>dCwzkOk zs<4oVBsQnpsg>h+N1x6$6OvlKvL;+Pm zE6Nt<5sm?l07K2r*`)XQ;U{ZBM76~k{LxUKmwsGQV}V0~ zzMOU`dmdSevT+mO?3y4=Q+`F}qb>UqDy&06)=>gu8&(P}BFi?sxd|H0>6l8X@=BB0 zbvl!DhgI&Bl9h12f6moW3M(V4tOa|52rfuj?#EI{o${2O5v60L>|l2#S<5u5vYlbs ztziM7x4&x@U}ems=snwPbh~|w)@j9jL%og1%qI1D=eQwS1)bVve_o6EifJ7Taa+0& z)$o|xln^f(tZ>`*e|joJj3ij@5-o8>5Yniik`64(Y1~j*t~L_1nHe>9vw$PH=D@*$ zZW-0=x<~86-N69P2vI+}TtwACle$luz{O!6L=|2VeJT*wl8t&wPeWV!eMrUF9-v2Sw4C4tB&; zQ%O@ZtleHtqq=Ad@13qD-O|@{NUf1WEa;a9q^ZnSn; z(9%t0T8UIT3d@N~h#p3IJ=rs2`1UxmUd=s{G>DC) z+SAm=j!%PrFMm>$Q00rha|(;FjAC`Gk=jaQ4c{-Xm3l>@(t6{@)$wzfjP?fg8vof- zC)3<|E{`yj3-{x0|;XyOczSX)<-);RXm7p!2 zE{l$;@mRPvL9`yGoqlvgJn29e^7Csq5xG_FjSz7;fBR!fB4%jAhn*4C{#V;(^B$za z%7h%abohjmm~y|uCSV%#dhOfHoqGH8z^9pwS{9+J%G5wf*n0ZXBBOCpbmb?j)|i)F zwo>9FZbcG!m+%e?;!amH^QS;IS=&Ol`uD(E?DL(9*)r*fr2==jQ#@Aa-^_T0W@6r8 za!_-7W5f46o+VZeFf#YVL)6l{(kI2Wm{-xDBCWx-T=UDT!mrs$ub3q#$Ag*sk?z`S z)y&&^TYl5B2!^tWxGke-ox0HfeYkP&W|h0T3H*fAGiH2g@oh8qIkydPT*G~?SiN}% zQ-3kBdBuA~IoQbG#5v*Y=e(64N-i5^=@$%Vnvgjz#+FCog-O^4jKd0#Bb}-TmGxLZ z7yUUgZl`3@w78SkbWo7Y`|8i=oj9;9N2G8>hKQsX3`@2D_s#E^4xX2yLzw)HIo^pf zL@(6C@Ml>v@@>^)C(`H@0W+oJqN5G}sK3`4!8X>1lt=X{8c{ePt)DLQR7A&>CV*ebN5({+xjvtv%;OUS0bv z&Q-nt-S!UnJ6Q(0<07V2O9FNI9VLlSX1vj-C7vx)QICp;G$heufjY}g%IxqLa{;G+ zdOdVp)j4vgy`tFK8p^5GgiS_d%kwBjD4MKo&wzpLMasWjQ?l&MQm0;eFo@b$l(!Xa+;? 
zr_;O2>tGf;KU&M)QqDr+8utY6*RK6K_!6-Thk*3L7nuU;%LILYTvw`ZY8J2d!%e%N zzpCqm*a5-%Y>9TP8Gh-~N}>is_PS8jqrlLxylx^0Z0!^dCk-|mydHb||84}{jj1(( zLJCg2Eh!x)H#aBt1AwbJA!xot?ZJ4H_BA=;D9rbWRx5(rL@rPF^{X6#TeowTe;er* z5WZHvC+|{>Ht}8zOWd0w^@32M#fC7KXI5{Ov=i>v7nv$)&@ss&H(uHX01}}Vy;?$& zXFea+EkNn(Mff*n$z)na!vac{iE@SIP>D`Dm05NWx{l@88x-Fv*Q8jvX?VT@z5U*o z7pzokJ-fpkfjd+JM!1*hE$f)^^@dk7tkHFhaiU9+Kbk{XUyr&;KOubV1QRvX&o3zb zY6FxYW%VVlccYJB7I*1@N8|E8cOxZ1)uAw2)A`*Mn2>G(!gKWYYPAqTWRE4x`K1ip zP;)!I8q2>XFgSTgP(zI7HO%=$MNe-u4!5z7h&(KwuTVR1MBm@^GPUy?>ng@gw`-rb{=AQC7HJw5ZM5-nw>}Txu^yVKCGp z0^JN2AnpCV07Gh=>AyA>!=(Lc;KW-oylxjTCLXDr%%GCg3vgR)e5J)yiNB(mwalJR zKK_$2W{{zD)3!o4uR6Hkky9<0bwkA7S0G(ZzGnigSk)Bo$lSrYS$`gktz4nMZ0y4* z35+42gG0vsi`Z);=e&&Lf|2IIu%-v}45B5qLJ||1uQ?qQ`=W|Pi|ZRH6(j~kzzisySufg-+;w-OXIp! z{W4sG{3-f@_d*p;zC|g9bQ<)I1SZh@104mBP)zR9$gXzKPnjncIp%3qR7@z}nFa<1 z*NVjerU@|lq#hT$z9`rg=(AJ@VGRAC+++D?MEu9o3a1`bB2!YkY-vC^hLjE6gT*L8 zR^4(@VAc8gN7CW@fK{dMRS>2)0i+8PDa>hyE&5=w^`9RJ1ml<$#_{XF8@pmAB+v{A2%v)ZIa`gEqe_|ut2mMntYO1Zt3ar4K?ecQJ zR>+1tqdo*{Of{|!)`Ik8ALeOBL z9>F+yh{=O-lr)&n*omI0ZkE-DE5+MTLA^p#Z8aZq5g}e4N4G87ex>LEmJeKb37lc1 z5W%mQZpCZ55g95}C6JxOngp5ppo;&3!>EiW>bMB(ZlQvP9yz7me*J<;0rXNOel;1; zqwx2%-nRga;}GrSeA}^&X+|W6X<)wSU->ADfXT(P$Xi>p)M?y|CrOBwXfMQ@==euRn*jWPl zipGxN=d~y>8F3)6<;qh&`TEXBQ~hD*l&Z06OxTws1mCG}_B<#O1;xd&^A)cEHCfid zpPa#lOCut64PXW2yDrgbdw3ny+LqUZtYUB^&h*wAlyAz5pO3oLq517;!^4X-e z<)atx6NwisGHT4azk?(;kBT-QiD!GIW2Q62#1|f_&$QG@{n=@T>4qylGjF7pu*Pa= zu7U}p%y2Xv(Ae4A@1$yTVSe|QWF7UPo=Se<3+PJPCg}{Xt<&nA_+00-Q632`ySw-8 zL%tnhOx8wr4_Gt^zg~yJ^v1z{QTh}f_xd8kS~%ZF-j&DU>*Ee#4L+K-qq4{AeX`{U zIZgSgbJ^{qVCqoAx07Z|hDdMpMJ$1*7*dbWVuNmyL>(EceJeMSLW9N{k=^m)dr*M3rpnL|z zt8OcVG>cOJ>T5LZ7K^MlzGlMGZhAoI&HmeB6G~!_#SLkHXPonNivxvV@6{GEdR!8~ zojGW;Ur!`^dohP_Zg(ecs5M>blX{r!8h1D8@Qsm<`ctPpT}-zpuH1wlpib0iJC$G> zn<@f(4A<@Lf5d<@LyZCl@0&i9_6 zY3$6VM(!|ZQ@GYe6I}Blxt*htMgD5r-lhY(RS(OfsBYV4TSfch(%s9AP8C@Xf42x7 z&O=t0Md^M!)*29nbjmKyu|1#Oj717n#crE zOX_>WN zsOIG|SkmOWcgq+r&2*vQ8y-Bz++mUEC+}xNBs1YiT&AA0i5{UMp&sUK%^s*DAGP@t 
zO{eu9RHkM7>j9G$`_~faQWzz=%Fgxx;FuIKO;$)Hf}3`2teuJueO9JxSRnO&qFCsA zb2~&Z|2CzXCWH{^F|nZn4^_>*okus-xS}P=>&@b59L1}{^cA5L$8SRreqa=QZfItj zshi|h>U_U8ZvZVwtn2&F?!`%8p7}{~(fk&}jeJ)UuxamE`xY5V)VGPKlZxaKz@xvz zFw7Cx;}+IM0_kxp%`)2z5LJhSmT?f}Ug~tWVvJW5c6)&(QuMlw&)E|9^Xc(#i1~1Tc?_GOJWA>^vADbLD~Dum)wGf=onsJ6vo#aez||_ zoR`T6jCnwY73qm1&+RU?9|gDPiuyh|&78lmimiR&=-vg|t%dF+=%zhn?RO9Gtv{Rd z#Nz_vf-C<%|JpL2)$_^=3XD1vvL;)6_Td>hZWdkbD6fM? zI%0|aZ$Ho#YYOM|z@U$6m;XL0{OuoIhU8BZ_oR>#q1FBe%tj0$Ef=6V511ZMtqu}G zADgi5O(LrQ;&U~nbORIik9mFz-Af+5TIzf8t`Bc z7L@q$(P%xOrke<7Oc7}AKDV#WI$jEwN^Y_)$WsVe=GnWny<2Jhw)kOdyi%|(^*4|S zhzib+jF$bF0{)E4m)t3hIlFzJ&)zA7&#e*z zg^Pm@a8whSM?8x|^bP$Dm$#%sYiWPBsP`sOe*q+*H!c7eAz>7Iw?3wxRBXBRl;lc} zT=w{mT>FrO9!xt@-8|oUFBZ6;GS~T1?viWSq0!J6%+csb_u^%?!nBvk`^$dJ(T?i& z;3R`y^rTcw?XQNmFC;o2KJYH4%f6Ta&W;MMO0Oj0cXFRFpoWMUeAWllPhoz=#s#db6g=Tqq)y?X)!}z>MNZ~zGK$ifg@(v zbg2=EFmm+Vj$VU!%Rw&mbU1 zZD8?W%|VK7%n9hd=S>}hrl-%@Fx-s>*mUh6PJKZo^DKERA~&L#)FR?9q%H@D<`~=| z?^XFW={5468`Wyd0jJlQ)*VO#S%Uvr$$&zVS1?oU8bQo+JM%;3mM`6#Cs2Feb;%|Leb!^aBmc zRvMSif9zhL#RZ#gJ{`)geKlNE3wU?~wm$lNJscGPT`! 
z*2E(S)Synh-8|tpYbZs$8&$Zvr+zcEH_;zNAh;Q#IhzOHiK?m(?Vmw1FoNn{-hbU<=4?Z~B>y;) zHSPXCQeooIhmh9X*C6p2p8&laIFL{JK8?@QM^loH=Td1OM?0eQzb&I=+a(2~3utI{ z3;geEljgQnJF5$PItHA(r@C{Pkqjle1Hhx{3c`jFA;jX>LU&BIYL723Cgu*;&X`la{(Bx|k&oScl8w{@y*LDa3Z&Jfo!Uhy5+!hbxX8u5t%U%J%$3=T`0Z68*Y&bb}&I z7ycOc{yp2t416zZX}EY4`~0IyPr!rhTRW|Mb3&hp-}@I`!);d%2_>weuswH=< zGhSW_%8UmxezF4#DKDDxQA^xNNcOk87)>w+dW&Z`A61v1nL z#I1|2l@z4N7W|+#(9=6A!#7ge`>=v&+0%LdZkI@&kG(`)7U(obWRRNH<=*l|1d=S* zpn3mtIhmk-@qc?-81{3^*(kc;rwS;PbyLk=78|Ep=Amw*RI=oqhzJYp#+`@;srr@Sd}4MU)s%OG~94{Om22#hvQyHE6R2e+%;71r%ah!g{rUi!tlPX1d)oLSAGRO|D= z;KjKEj`v$x6LCi20paJDO(QFDRANU!?a@nBqeovYG;_#DK%o*;on!-xNp{?F=hw6$ zXLS3)-;Ci+FUQ(J(Enpyo5<%07!HxQK47a!Mk@^vZJW)m0GOlQ{Ytq~p^~?$dxzzqx}YP!^XU-a6-B$d=(Fx(*TG^ssatMJ_&G!qJ!o?v>^A zgMb!r`1=;d>c*>IgmlCT9pE$+gfIR2Geeetm3@B36!$tTg!lZzwPZtcvkzs-Dc49rIFiK2F!V~oS|5E_C0Z)+N0Bk$n0@5?pP}zR zROU{w$t0>b4$jlS)4La*#iCocLjx022HXLWK%~-umr@6*d|m z2>9`<+gw9p2H<6TBk!EsOUHI>Qgu=#DELBTQ;P3|bM|h|+xtm8enfkEt$qUKO?mqT zo;~T-Sz<--Ox#c25;7e^d5~CGH0ieTY+BizLX_Sv`JG0PzRmOY01IrL&u5D65cH)D zwSwNm)32y)hsiV5q%*djR9CB+&;k$^6o|Iwg2?h}HnpDo8A{~j30;l%OJ9%Lnhy9a zDfPdP&l`y6^*&&OIwBu{Uo5fxzZKQzk#uxf;MAR^ouSBn7ar`8e`~MIyN56Mw)u@g z#ZVW>=hZOC6*MxvI>zbcJX5=PVVqQxKtI{#dh1)|6O?Nw4yM*+1$bsF}gyn}PBC};tWjj9Z zW&q3+L7Kui!Z#s-3K*mZt;_%aS5Sc&cythWw}vT6T^ZxsOvX{1vGq&oyu zSVBU&n?)J{QPKsaV*!yA38g_PW$6$W7DNGQ1f(UUd#QIWe4g)f|BiP(4*y`|x@P8_ zxn|Du6Y|99vbkZm0qkgn$>bB6@;$XkjuNfx>xzgTUCY4DcLH0XFHPu_mE2Et5%0;~ z(Y*QgqpXZ~ydcx5S93gYEnqjy5!Y24r-$8ILC8qyO!&!&bcO&KG2bI#XZ0y7s&jua z!2Fd-w3hALVb0n2*-0>RQev9!A%BPCsHK5?!X4AT`)wq1db?q(c|G2W^?G!l-YkVS z9%+%umTW4%AQ!W(*DsuS_VG;lZc~JCTQvDiB%ICE|5v5heXM76RKHWZdiEPG^5b`# zWe(KisXS~sZM=QVo{DcNsf2V}U;Bs6Rk^liR4D&*b5KQpgix1h=0>`ETK<-VD29KL2YU0YZtXFH6Zp}Wa!7)S%ZAIXt^d)r9o}Q~fTPieJ)Z-h zilxteW?(FpQ&2Ce+W4|E^nJn&pT_4q-xrgBiRP5>pvcEwy`5~LhBmV0FdAOk$Fu@@ z>D)pu`RNkHTgH19>v=#WD7Jh^(Ei79xx9UNPk#6DrwTI&t?`` zRb|XMEk9ouqpG#ag5dlfS-bwG-V4Cd@EhGRH#R3gt%hQ6_QbU8%)gz-Ldb%|bLc~A 
zk(+(6JBz$^+UC{x`$Gq?Zx-qww{)Am0k2`qnR5iWGq4}+`7@dV8=LS|REPpw(bhno zV3*zV^?8mVL)?ybtC9fk{j{<==DpTI)p_V=rAN8H2eAhZ718~=;QmDbjJv9Zi`l%hZ#4QJQd$->@(Uda#d72V>l5*UUE(wPIJozf^dcA@c=Z$@}Rt@}fm53^h%p>eP5VuSl^k&KZ zZc9`;mkCI}uM%17#iCN;k09AS8@y-gNeqr~DI8cbJz8X!sXNJipuBrS}rqoDb=D~+{Pl|#rI zY?V84WqXV1{WynvqD~zm6&l@D-&6^JtGu71)RsC9=?YW`aFFk@Zf&QWswP$=ssMT&hhH^20;Pmmvle!U_)y7ElZ0ozO&lXe4 z6Qe$YZCxARN%_P`6r>fFT-fmL!$gB0<iuC7F@yjR50t^z?Hi5rIY@N$F%PAv*wW-laBcL^(AyDp+=Mu{ADtz!|R z?9wvLe(_Y5!2BR=IUBja=Coj1Sl54lPn$r5cG{j?BHfX#YvggX|3h;D;5hmLGsk8H zNqZjD#hQ3%c}|gviH`dkt*4S0nshi}HlLTB0wQI|;W-ct@p@wJKMaYyu3S*L?)}Z2 zHP*DCg=dwF96+i(p3_B=CYS9oFQ3^JsD$e9A}K$=|E`c+^b4-IW8ojdZEDKTdZZSS z@wjY+_*B`q$A0^dJ=Gw>>Wo$5swo+<5$3D|vP!i7kSi(G54x>lrhl~aoZu?Hm>05! zQRzRYD@vLFaLyH1>aIBLUJgzqYh(I`n#H(;{YhRS$-b_z4GHxNDW*0J9uTz?bv9~O z=VdcPoG@Qzx3#R!x5MLs`KOr=CJOy1aIT*!{Q1Et#&lvTx}ct|c-pkl9QuyY8F`Q$ zh>TPX&O?!rQN0AY$f&IS8sG0c3m+%jA%Y+ivqxv@(1s`N;0L#EN34)`=tSgA4)I9& z@~Orjc#1ztdVM)tlWJJ*`kyxa@*Z)j56qUw$HP$=C;h3zrpuEnIZ~>e6@7VSd~;2YsqR_xyxgy@cN%e2&F7=mY{xXFk$takp4P%OP3^t2Yl{}mOr37#VVZ) zTYY`;^;>4eXErIHc|)N;x4XViOLr&uiRZrZXv#Qvv%*u^UgKMFklh6lN+N~Q`m$2s zRMK{dcF0F8r3|OYK9OsDpZok8Mbf10GEXx#VJXGcR)K#~=Cr@p6rN^$*jM)LQ9IQw zsvS;_3Mg;h>8`h2)MiF0mOD552m3j5x#=1wu~?bAw(K1B1L^|hq75drJVTV5 zcV#&qFC9Zv8VZF&Xb-BLyJemyx>Jq0Z)y0*RhuF#^+#5LI4XtGjpLz@(vajWwmOTU z2)2bJ75I0x9ZtVog3liwSTmp&kyiKI%O8h!TyvmW-i7iiX5Na0l|@DPSBE%}E-C831!c*N1G@^BjmT*Al)qA$E;&=WYfS0C@H8x2mm&gQJ$k=19CkpS)j~_D+(vdZ4 zJ-auURy0+gykJ_6kOFs4h056|*z?A>KANuWIx5ICg~59}!B|9J@Q}0nqv@YK6ln`B ze+6t}a){Y)WqmU<7$EX#yU=ZwFY#rKSYfh?RiOQcQ|;v{ znut7B8@W(Js);y2+?Z`~5?C)p6i8l8`Hk8}?E1 zLYd|gEB(9KtYh$`ph(HZ=aF8Y?qa-6Yb>V7J5*~t8_6Bh!vnq>N=vLR963*|sVOqk zR$s8Feniyu+BoZjvJLZYg4`CZM>mbIH;?tFxboX8b>$l{ zf&-=wZejG+aeBGY-1upJ8&wAjViFWR<(Qlmoye?qjfXlbG~3dS%<#UEiP;wCYev#z zo&j>^TQBL}0*=5VqsWfVf zQR%6XU~2OkkhFX*Ea4M0)j)3dS0g*i-}KK5{_&gCXT%SCEYA~v&rL4GUm{)!@t|qs`lB5@1{g@X^qf8GR!*lg)wD##2)nT!7#aq)p z&+QnvFn}2n!l#XgYJ~iLSE*?GbhR%dH#ppXGO^cq+|k%A*3)YH5cO`8ezCTVPx}f+ 
zIfK3D1{?dIyE#pJ#|_L=LyNKc(t_G_r6dRm4Hh)yV%PWTSFHc>$cPZDtqhR!ryUtk zQe1KyarEzYz*O1HuU{2=obfp-=(Y!5|KnN=wnBhP&(yh%#)ByH?YcnQTZ`$U!eTm} zHf1qCRrlB19~iG--UO{oMsAFdrA5*#FHC=CWN7}AOUDBe)6HuR;YREYBXjppARrjw{|SC3bXXv0px z2rRa48CWh?pN%3g5v_;X7Co+TqZy8NE41!0uxZ?LC`{w&{x-Icg2U%$yDbi7*>B~9 zHamNkrmzzgK1#R#@#q}^$Y-cvOpjJR)KqWv-s8{CH$ilyc5xYFtdxF0XLt&Bj$jAh zq-u*_d!~EWR;sJ`N>sCAi$m}0T3ajq-u`%yGBbtx`sKTOcktVp1eQtqU0VMk7-{4W zdgP*lCA?J{_QaTT4&{$_5fj4qf-z(+xV~I)R4K`G@Xc#x%RbC#dWvLY@2YXl!dFHF zAZ7_S4lHcnTtob6AbmULpAw*pM$7GvvzGgai)2>^Ma=0cykuz~T}ksQE7_Vh6Y@DO zZ7akg>Vaw-t2hM`hFn}nJquuntF#kQ?DPpBb&e2vG$Ei}3d0?Vk|Vt|a|rX26nONU zGg0#nHNxWR{A#Vp+j|Dh@z_-{z~uJnCZk?st|(*P3E_jk?(A0@rul#)y4WJg9uP6h zg55*B>xJo)Ij_(?J6Rj`{*KjW-a{$5XzIN>eU)Eg8OZZakGV$qyFubF8rE&n_T5XD z6ytw2RmGj^?KbSYvgW)7BN>^iw!f7sQIF3r-3mrR>@r?8o_c!x2v#h77SWi;nf?N}a|8HoW$DtU6&uInT-jz_%~6#c0lH<*xZZdg1ku zrZAFH2eu{H48VEAf(^nt!b;dlH`BR0@oka|Z@LwlE(APu4r+!^{BN;~(O6JDUJngF z`VmF%Iv*OKi_<7!rWyRjn%O>T%)vS}nOfy3PW-(<;pgLci}&Y=T9Rh`(zM3{$cTm) zjP`mfcOMX)G)&Vbjqljehp2Q6MeE{R$MwYck9a8(l3w=N;?e3U5lqSDVGts2XL!!V{ZRl&MStD5(@bkE%CSU_O)pCDI>25>?;*Y z@#~KcpO$?1&~hc~(yzuBz)ky?dXST%wa<~AnWx4f-&|I&?_)Q0_IWHA&h@+Wvwlzc zCwhJIMErIFZU{}woB+ytx_+lI5MUJ^u0N+&TMzAjOAGg;9ZI=EblwwAX&S3(PFIu| zaR^I0qB&K2R}Wma?HZ~Kgr7~ZEOl4+CX=r}gio^dn`D;dpVj?u+gJC+Uqp= z)t7ey+wV@z>nEAW=pRj2PZ8zH*wXHI)o}e*CCPaOFB-dqBw7>F9On-vP^fFgJc$_z z*!iAG0))Q6Mp4aT2X$TSa_;00nsY;8oiLTi`O)9whPo^vc_E1s`uC0jE^4XyI_pQB zh&Wd|;cl}?R-KU7(wqx|Y{S$F4Z@a1zM8TMhJKayqva8!r0?yXX8;?UohUh?RT4bh zo`0UX))yQG{_o@&@Ac9i0T?>FA}`9G!tUwCJGX`fieS{@+epi-ayKFvO1`ip=|qwS zC=+elm^Z_}b5iQv*ZP*mdm!*@FPlI6-c7|u8IcCV0DIJJKRANtv5Ikj6dUrCqbuUe zawu7c85w<*b@RAGIQivVk>x<1vFxh3YV3R21#%@j9?S@#v}Xk=!hErYAeG|LN4Qvv zvoV(Ol%+BHJiI*7Ju6{jjOLlEzzBXKX>02?1n^CN8#yL)ury$v{KJ}wrAt9HS?>v< zA|U$&=;d&Qgl1Do!0bxZeifJRlS;sx22mHTpwt&gLdDR7yKpa!b$rGK2SaHB-(j^+ zom72S`G{PiqKb+8bi^1|yhVnhH-$@S3VugWLI!qpQzKQ1l!=93{^{N1SJdFKssu#A zMA5zelGv;8bI4QoY>F^Y0GV1j;>O1*+u_X->9BdozddWluJ@hQJZ zAA$6aH7jZp9oa3b!D(U~4Ngu?sT(2XTHtOfP_JgqQfb-9g^Ehoa`Om4 
zP62Z3v(^@>T)%LRVs4DMXI1o!{5iHT_DJ|` zO-m~Gr`$)ZSZoNz!_<%HioJ0oVnO_(z-$tl5Xw4Y>!KMsFQDP+P03O))O(Hfj2PCYpbc4{kRF^6fiyKp zfZ5iOWRuL5Yo0n0XX8Mc#CGB+{apdRmk~{aK+%vPLVcnW1uWR2A!M`^6;#m2&29gar!jNo* z(OkXPaa*KUW9Sd3sp>s}6>k7H+r1DEl4Nyp?055B&cfi`J#6`UW>O$(u-aPL8I_%_ z3ZtbY{57)5{+-tzN{7w>IQ`iuo<%Zn7tF&~h^u^pDatMe^~Whl=);4`6rW}e|rnFylJanLU{L^;A9wxzS(Z82U5NVjuIv?A??GC`H)^R4!|bXD`F zBf6ib$wu8p_N7f0s;QiJ9y-b@ZbUfP5oM9Y>L-Raz24UUmAXF~Qx91{jRH$tOlQs$ zm14|Kl{BU6%0SEGo04|VglX6GPprG^aoU$z=?llx2N@*IsHqF@^x9s;V`N(9My!)6M8%?mF}a84$KU34^h^t)Q;d@NpkN@`LMRP*>6vVS4MAAcas=9K4V;ZF0z zW7ybU1Nr+_iw+}YlF3)U!S3m+dhK1~qCCiNsOv?qv3|^4_gkb*W`6%rAj$Y65xiHq z%cl>kkFg@{e~f5ohNKnGI6uq(DHb)}SEl0;c#gP>a;4+of}A0Z6w0@<4NqtCij5+g z9=FFp*{6hFLyCT%!PFmvZ&{aojlE0gNhBtH=G;LBo&=&GjN-GU^&;G)=DvVZM{^2Y z+qbG4wj&YwlaVT9VYj-E`f^nUAU5+1Jww>3-I6J#dI7|T(`Q7n1!;2wWAI@-lfB5ZzTKq!sbXd^3@+kB)}HrX z69TED!K9U=vorH2inwt0+J|6wlv3uw z{Y$j)3o^ERouP2zW~?1$JhnybtKcqV3nt1{6F_STnVBJr>|ms07yw$mL&y3nVKozE z!ac1Y&ScKXEu8V@#hI87^cy_rHB#(UM0)I}7WLd`uAO1ZB(M|seG&LHl*EP^0 zMH1`&UJp)fnI`DA?OQzux7L8G&>W5Sjo#-bwma(Mp{{;s{sqE!M+_lH%1y}M0RHre z>&V2rm3MpHZZNggddMMuPQ<^eskLq&)nxl45VFI`RP&66u>TWyeGhdWEeAK*O{=!Z zz`E*I{FfIY*4ST+?cb2xfgl0o;2QObI_KG@-j9 z?O{wE6>?z=DnJUMbg1xe7`GHoMBf10Q+p+a265d0Ljlnw@#2z~vm9UTokA;@3r$oh ztv1VS(uh8wnjqJnxo!}6K7HGu(A9qwQ&e^ zU)tw0^w60|704Nxn=TEz!ApmkgNK>j#<5LB!w2t&_dNoyS4!Vqllow=__@b_eC4FR zv{)8!>@!7`;+%9c9EmK3as!I?0V2=4pT?&W*h<4)kg-D(mSPwd3}tUNr|j|r_#hoR z>NwKbvk)JbnAlgVa&xprKX`(<(G={o+DWV-$|ZW#Pja(>uw9N| zH+^zMXM)*SGz6^@72P6#wqc{S#bEV{Jg;?gA1s*xqMyi{^TvR6T9a@-#RUngRlCR2^07_G#^{+O;$y6J$#!9*146fXK8EKY6)ogHtF^c9Q>d z0fQths#Z*!NN*~~e4UqB{rX)h+TkilizYqcaPsua-cWwYlM;Q=j|)uFx{0f*Xoeq% zEy(_hj{CLff#boC0RH8sFUBw8%@ft=lUQ77;B_Y4>ly2c18hVS0;iLHGz%pIh(VEW zX1!;mboeWL3vt7%OqLJDLx>lrq^blmqu299hDzNxc4dSNT)qjBR-X)et&NL%w8@b- z%501!mOY!;|8+XCHOkpby(cD9CQ|@&G8*=@Z|dt?9UrOsE#p@@=H08Q&AvY+M1QbA zlPSPMc5rROYsenb+pzY4{2Yy*(l~aBbi-0dHk`f?-p-+gmt8{~3+r^>%<5~TV)cm; z!Xc-@{%$l&W$}-{WqKQdH77A2}Zg$plBHu*NY% 
zrT#Zqmpt_7c)q-vnrU&jWC`Y7zR;ip2Z36SQOphTt!i7 z!gym+sxi|!wHprEJ)+BwqP=R}r^;|xe?${q4yl5XST8AtCr0@rh9>WUMz&Cs0=zFwTdUG9@^qh5R0T zlYh!qD01d2w;!aNS7ME`=&luS3~B`pX&Nq0Ig1e*R8sl?%EI;jL1p5i6OX7+tEW|A zAs2U=Nj%^{ZkX(bOKI~wrTIoxV%CkoG=AB8^rs23rN7z+5RZWk7S2*5!8_IcLY$&y zPntb@pOv|n)G7UPoACa{9(g~sVx{g0GxwEd|19UJjS0MoDd*9J-xEF9beH%&+G z_6?yHCx;1m_rnu(q7hz?5dvdvdo~u!OcR;T2dNf`FP#VEe8orI4ww!uAK)EGb<770 zD|6Dr7nav6u(W#Hmw@8cr~7+^^!hed0<~VXrJg zScHA9RjB*2`%7*1x1|cF*jqM^eh=I5Wfwv2W@(DdS4-YH?vEnkC?1T~Vn)pTt{^D! zXSMlfHHJO*+m^BSlwq3Oo8_95svF#Y2;rB}`cob4*j=%lIJh0JZKX+xAVV_nw{1jL z_JpUH;{Foe#AeQfYmXiY-D)VOry%-H_8>XHS@9+L6;&Ju;`5T0W{KUD3h-Yw=4^7w zGVI&}XQ|ok?&^sCc;VXcp-YFunsbbP2@aC3It}jiN6c&Q0cIo2aR8^-BHbypD8Ovr zM(>VXL8R8H(VzAL10SdM0t=tCThgc?-N=U}(}T*+sGtEe8(n5aRH?i!J(U=9d{In* zlj1G;8fhy;n=bQO+o5IrL#NAni3v|7FOuWJD6)q-(}JQDZ?H?Z!j>H4HL^kIJg}vJ z+bT*3HK-maxUk9Zl08HuQjm3RbT9uAVXyNv-pO>r6niYBODnq$7R&Kn*;8$A=#%nt zt+k+C*%RC9uDE0Qn=-eIkjw%5eQ(_xlf9!xw0$Qq@+v0TxmWg!&&}JF%O$E4f)8V^ zA=WKsOr9ZrmU`MDD=}D*p04rERHO7D9^~W%zv5wDb55asmf4Rz`l=?89k# z`WMIa(b%c9GQYTDxK~MSbN9{ScQ>9c*np^+JY`5Z{&<|Tnw(l^;=WPn&DKlY>hpi+ z@IJ=@9p5)@Koh%`Y`vtrW84X@Fui$sMA^#6csEes1xUrT+EEY>Qwc;=wXn32qODpT zgc8d8KJz2iQX^$|eqmJG2t8@tPu|8Clmu&|1dCB(LSuC_`XWK$!e}xD@;Pz^ik$cx zj|B#?Z!4ngMcJu?spqH_Qc~=31>rBPk(}beZz{f+zO{~_L62fKF$3djQgi8gibyY{ zFOr7n1x7fSl|3v4fw_(Q0GCK$o#?HF39SNc5se0|q`@aG5kBnum={aztXPML;X@f+ z=Nt~|PkZCsFKtYm51Mh1RYM-tr+o)QI*-?+{=~Uf?Tj2)PIV9tVb^vS?Z>2?dUfxJ zk5`%Xj_9}yV6^U7V1@dl)d#XO@)e~~t@Z-8JvW6>mvtl6ohg0}DoN}G>C*oe7ucI2?mDCz;*%T5AV3y#@8<17795`*|ElURAIzR=*}V4x-oT1% zYJ!KlM?j1eo6(&D9)-Kwy&{uRPu3%CaRgf5Z78~QFy0YH}~$#M8tlQkF~|0F+qtfU)G>5`=-xXlLdYrU(&xabT$;}wC`62#Y(N4 zf&I?_ERtlXZ~*L28w_SsDMxl($)| z5tr`Wrjq)Ur@!@tZ|J2S&%R>RRc@nP`)R(fR;_65F5Iw?s+wAHLl4hoOBm@^`NyCI zS0T?J4JrZ71;3WxFP&VrwfHK+T7tDh!nxvR$z6OxRNV%m#F4l%$+Qyq5;_ZlO`9(` zCP}ICYFVZ?^An|mv|&$^6UiD1)jF+H6D3k*pr>0pzfM!YxBHz--UphjhQ2(iw73bvf8?`x$<#)z z$rOpBquP6+#ImO{pJkXM^e0K|ggXx^F;7}MT$D|5+512<2!n^(kWW+KZxyd5- 
zg4Lwfu*&I^AH!ilW&!JZjcUK0AqXC_p=hVq9c%DzGs8Y_@v)+aarjZfsUW9=BF+wB zpFzOl&&JZ7F@PL!Ec>1?w#E-OrEJM?iuNBPzm1kt@eJ;gJ9Yp1mz>ChC%(K}I5ta! z0NM#t(Q_gWqV8ikVb&=_$;YG|JKDr_W*L-g>pe|#K)=#zjlq1@dwXk|lpN(Ke?3Pd zQfj;XAgD)FyC8(|=rr<9;};Q)`J&Er@!R1ArJW2xae4wsCbp-PwaMq{o9v22;AAY+ z;B)OBo7W&e%jE~=^7ya)!nPC%h{Q;8^I7q?S|3@l5o{Gw^`V_#OWtgZ5tY#CpsB}^ zl8@(N4wJCSpp?o+;45Xv=l0I{s!Ecd>dunQQC^CC9~IHYqNHM@{+d5T=)sC~NeG9w zd>aQYD^op(@PpI)4mK?SHjy#Js>ME36XRWjCM9ZIrXpSTn*eT#)Q z?p_-VH5--N?`^XYpDFT^+cD(9dcMsR6+Uf-rz9n?5^+AGyt9>o0Mv4)o$BdO!MT&} z3;nm;b?_>OSc`ja9;h#0eHFtIvOAttggTxiRCHZEhMU+bGQS)+@Td3a%inj(5y;Ow zTp9SW7f&;w6QH3cGNR;AS;83SI^ab965#@f7H&#(exWDv)b#<%oB66XupnE1}v#=jY!Auw;o3@Yyo+(Qip`lCFc%} znk`@E_)jK+62)@vnR(-o4DerChcuC{*n|D;ZxKM`P)d+mWU>ZB%TM!N0Tenn9Tl?- z4!MCy;tLb9LaHEDBB!hG4JeBZX0@)tN>thkb5SHz+lMOA zP#dXM)g7rO?^tk38h)H_&^lHpPG&?87H7+;;e(FK;e%zDlDnF&tG%w{0P}IRZ*c%C zR4rb@jHb_)5|`)gOHfgfVYAq^F}@~xra{V9Rr(aao#+$?hpch^v#SR3jRWyIEVN0> zXNGp#U!MQc{X}>quIp|nW@)O|@HPj9^=J*>*xGoY#bF~2YtZZI5hcXnM#DKN!9;al z$&){A-m@}&&9K+W23oAh5hyBHWYgM1xT+zXnFMR2kQ6Q+CM=t^$$f>nz216D z?0s!RIf@Lnt23(M=!8Pzj_+JOH9hzEU&n1Vxn>hYkRVrKIjVg(vGRk=FX=%>#tmi@ zVL@RHBV}1i_K{EL(e%qsP-&@}#8cIpcGN@`PDPP_l%_=P!>TP3i`d3`iZ)u_t5Gs~ zsKKoBKBSR^_{d-L_6=Kjl-Bw(ZT%>Nc)%6us*xB z_+kiG8TWF5ocndf!2i)9GSdExsAcG}ioLuJldB2n$ z9dw9{ANm>K+Vt`3P$yMn+&EWN&J7#Xq^f9L(iCNpR&gE_&srAH_F$tv88<__J>NKY z(vHm5a_h2({S)@w@gGGF)NH5<6ZgAZ+k`MJJqwci_Yt?_@m<%5ipU~`b&4X_Q-PcU zI0n74h$m(F6gM-4%_XeNbOiEN6!@^SL9c6s`)@qV0*xi^zu)-a1j}x!3Oy?leF>v$ z4KC#8zL6KW`kw=T*s44w+6gVjPp8H#dERgQzm_uk_Wj-}O}SHRn74?}`jKcXjb6q; z*Ar;=w5o;%$Lh&Luqz`8Wt^ZoHlmisckgkpus2e|*zjm%c>Law)w4+yhoPNbv{(*C z75>wzM7utYao}GuWlq6o{he8-!p8c1ioe(ABj%a{;Pez7U<-*H6B|GfC@n3q3lNB9 zG*m}O7P^kxA7?93#GN=Ma=i;fj%g*yuK;#5Z73i7e#}iW5)jz<|GM;L>WmClW$a36- z;ls6Mk!#6V^}B_=K;!Darv>#fjaX+L7n@%T6~wH(ZVgX&LY^YNvCAeH+CV|iU|qQI zCJ^$)&RkhQAxHIT>fD;(g$uuX%Rj7Zx~bo&)!vzdQZ{IyqkSaSWchVT^BE$y2>S(3 zPB7}O>z%zp-404i9R%uB$@UP+AOw_QuDbaI<~g%nCwpc0HkedW(n?UK7YE%?M}zax zODapPY>a}xqgI2C?sV= 
z^gLGH3y-Sko^FXL8R`~i%YQT`3#b|iS$PF~P9D6UuG@cI>SG5Dm>C(LI}mt?#aMCY z+1)0x&`>S9LL%dSI9MPjrsP51QB87jsCeHqcd9Eh9sE7p`4L$ z%iR0@3mTeeNcnfG)SpiWG&)xOrVs~=0YE_>@QlG&tC2suBGWO5%Q9%V7Hsk3m6FN| zi(+ylc11DqT}A2SX!aeLf1=gvAT-!nnc z9ODMfs`4pN?4onNxmjgk_FRaxo!6voUoor@4f0cGD$Nam`lY@uu%byW1MM|g-@l0;QLtqKO%BuhhPRvA*;XCI> zBU?s71qQNm;=C)XNatq=}LZ9Pv`)y!MRU`FFW_4Wyn${>MEu;<5;XZgXR{+FiF zq9wV70D4x9*%Clp-H;eaQ6ZXr@U5@SN>h8ZqbvJ;8345gzETS!i1m9Gl=z3^JdoA& z2E_*lIN8L^}Smg^$@?jV|EsbmcLEtfRV)wOUY?4TI`tbDl|aU%pxFf0_YwS6zV5vwq_~q;E(JIBTo(vIoY!e#7p| zc`5ti~dnSwVVX3~ne!nFB9sO=2kO*OWI z0aq4(HKH=M|ITpwA#h{Z-UQ2PDL|E!6A5w}gvVZawQAfY)!r7k6>Yg24Ly?owv6Bj zQ5K}d4wX!N--sOfg9b@6czzdp^G{eg&LLPZq<|*7C9UU+yFkr6Z60T2ax+YVsCv@R zxnCP<=jvAo@H%4+#F1>}{(4ma-rO`sU%G+HpJ;w2fHS~vr<1mZDf-nw^7}a+?P@=vDI%8raj^Rc zpt;4Pn88TO8{LI!jY9IDGEq-<0~bWH9@yIAe?xxbuXh;*)AN@6ZwNhU=D&veYbx>z zT4G|lfKNpr2^g-O&3uVQM|EFafoI5~i`W`t0HQ*EQ8D4=mkt4uQ+BxNCse1M&DYUH z_0d@j4~|YJ?Z*Y4cBsPzy&I2ePH#DfW?*W+eb4vfm4O^JLZ;Mc2-Fo^u9TtUA7e@j6|fX`|3&aOQ@VB{ZakG*dGdORpQ+2qkwF z0eM8)2?+JEhe6AOyUneC~jh9=SazdG%4%dDBoVGTaxi*G(3~&B3CwKfuWc@P&1eB#*!-{?xU7Xrr zE94KsbO1=S;`#RE0cXJ3!}m`?b@S=k1@5!rZs_|!uz)fgr#oyeYX1dMcWJLN#cH7T4Uc z{GPD6b70vacml|E#+#>592wkM@L!PM`}5n@N-lmkj+GbjJ*KgMo5@0(e)7KA2>8Tb zdkbq)Nhh(4Tho8ei^#AKEpJGQ@I1Rkhx1&@n_eF(Pg5{LK$=Ujt4I7V~v zimTeGO_4FmT4M16X=i<~rplN;6LxBS-MQQ`1*Yv+4+ai~#D6^b;KMwV!Sa=OZGdO^j8o3 zQ4)y$C=%=^S^H=FelOp_g2)MqQL`F6vz77>fr>+`Muz9O*W*<){@*(Wb4}P%Dy#fR z0h2pkr==K&=5?oDAB+eFKa;1d;R%wTP3Av$Z1xyv(DiY@M<{u!kXF21bjMetpvA|h zefI0g^;yXS$j9n58;YI_Hzf{20gtQgfdF?AGbt!D0@K&2%T3qhUE_WSNLv)1@Y=ET)}F!XxZ7>wl*ES+H+d6PhHzEycyveS$%CU^s6ILo zej%Dz2APVWA;L^WK=e~Hi+c>{3pq-v6j3uBcSgi+9~>6P#Y85P&9WO8@VZ#F4*#@J z6H?-=$l9wtyWzpg7W_iq!9i9a#c79J=}^c3#wV2mQ{?VH1HvfzR#4i${AGNYUU#jZ z!COt0JYQhQ3&q5>89z#o-?E)2Q+q4FG+@tG6vM!C>cZ8+*lN4bA~fRq9yg?X(O0jb z5e4l|a>z^(S8#oC9Wj0-{dz?iUnKe3uV1-)1%)L-o;M8UEpfApwY>28vKm%!?83Aj z{c8;WRTM$rZJpe>7fZ57rfz3)uk&;?x)YTg^YJ6a^K|m`iw9L*{qj^R^hz{PL;2=T zrst<-CFX%ybE$KL^bK#b^Go_i`lt+lams+q4rSZqnmrP 
zX@k^M)BY{+K8i*0`D^T2znTCegf-FbJnVK05Fvl2s2+CXL<>8S& zV0m({?L~@KJco=o3@?I+lWK-6tF(jxvLFMv#(G!I4}` z{MoRlkxMtU1=w_8C0wGVojz1F68tf`b3-*^K=G4q3gZ`p7?Bstd=&Mw-XJ$1Is%jd zS$Q>NrW6zRcVwUaf|V(O@R+k&|MM&12iGGr^S2D?lHVf@AOQz!=$L`O^942>ohvF! zhc1ugV;3U1g9``kni5m8wTT{-Lw3P##CSjFX-Zt`C*YC z=+K%49R1EYWXo10;9P!7)0O99BU<)U1KON-Jk`)4BeSKfkoldctE6xYL3f1ADbUAv zCV?L-u(&#Qh`0*?5X$ctAiFKDVXk5R7^=Q}f(i;Z%BPE8+g@pQk|}NiBNsnpE}8zl zQR}6Rx4rgG0gDI2@Qh*CJNh!?`;oZ%Di)=tmxCObj=NQ+jdDkxxU_Nh$%SyW1_k_J zZVOfY!10Ufl3H%?8Rgn0n)%HA&#H4-qZegkI^H6>)+5$rrlr=U9dB{`Na8vY500i0 zGC366V^hYb4|`I$!vsC;AL`t! z7njQP)tm{T1bgfYhf~sIA)-l+hQz~Gn#m{ujxUM$x-n#pH*8fCxvlf**q@`wom4*- z4J%!%EQ!fpHdP$bTW!uN0hO!1cdn=`)W(|7kjJn`O1ajo{GRci^)+H_4fakDbl4-M zCiEnarOv)TZenr97Zp4g1)!P_7H+cgGAtbJ$+c@Q1HvjDyU5t;t8&&gzK$Xg{`41T zc2ep@XsPxYsh<>6k|Tv;1+=a1e&r#M!4Pc#Fgu!6f0Q=ESs89hgh_mTiHH&^O8T*> zlp}BG@P_GFnCt105pjv^8M&ZIfneWozgi>>G%fDtr?2St15@dThhPa^75E8VF%{a? zl2Q}JUw2X~lx2+xy5TAS)WN`8*1u*1*`gF;sx0QY1h%1b{Kud9$#0)#>3FO5`9gkA zj>>tni7Tpf54lx&u5v1rHm`K*Q&6?K9|PYl53_Y>t7Qa#xY7eoe%>ySieL?shKc6h zBG_hV#uRCaO|GY{6{&ecPEQ&pe9QMxkRZyO9BK{PJ&wu&QN1_~td<6WJ_OLDe>+!n z3st=TlH|FS%UW|DYe_z{ofD+kh18H9c;rwy8iDOTJr)hVsSC81@l+h0-7aGnqA^0B$O#D_Zt!bJ^J__Dz z^wFc}LD@0b#Uot$+nxH?t2Fr1ex~h>-Z)-Q-FyEOr8A)_f&ygKSh4HOhhOkYJga5x?{Q;Q}1|gxq9qWi#qc$FkL;)aV;etiC$d*JNxs|2wKy} zwZF?DRhf7uNUR&HCIBLg9O8We0ob`4K*ebjB%+>e3$qSdSZ`chuDpiyPA-)+@BzCx zyhBQoVG!Hdr!U$hu{%~vhb}jAhR2Im(^8RDleJ-~6E6v^YBD0cK0v#8T^37m*!K(2< zop|d%od|&VMhX)b9%vIl9{FPb2PRcyw#68j7mF3;+YnySEbw#>t#4>9X6Cn?hrW8l zOfbJ)$NB!+3&VE{Vb6|FcrCoWkiG z(?Lz&X3sz7xXX_6UFtzyF0)-`^54v|zqyo1bP0qHGowKz=bL)T#?%{}aEqr)?cwAs z_$ru7FC!upd`2A$GQxmQCW4Z2Fh&Ykc6N9uzR>y%`$Z}rBILju0G$L&0jqijQW0e5 z&2`niDFd3i0?D|6ctNWP##Q>gm)>fl0D#?#U@1BQhK6)!;OrK=qxeu5vex9v2M?RO zv=buoCDqh|7JTvuEplHYpZ5zj4(;u9-k^<&R*GrPNYHrj^4D4%XY*F=36nw9Gj>=xljB zR>9RMM|th(1yIlTn`u~e^L?C8u#n-xRH9sKr>GUZ@S|2A z<{%Qx;tB9IJrY5mfzO=C+K#pb9<2wS9TiM+fQwki&4xupR1#C$4|-hpm;;i@JwF-P zpqK7#d=l`}zR(zZ;7QSyVvbNOl%{l%Sw 
z|4!S$K_*iiK(NDVD!?&)=q>166#=ckcMN@)6XYd6PuO4afMb^ndQU+o!*Vyc9z%lw zy>=-g=z+xcV}%I-KxMqR+uR%fejD7Gqlt3~>Qjvnnn)x!!+Wy!Uw3}-8!C5S)>Hs9 zWP1-dcv7o_t-5n#B=p;t+lir$1F!f1o7uKV$K9nqA@8Xs(I=`RuB4A7YfP9i;{u{flSq$i=Bn(iP|Nnm|aTWtsfH!hr<-8ce zzkMS2_l_UM$?iKu&^v#u2!k&D_f^RV^_?RRWE=gzzwYutL*H+p>H!D&IK%(zmH4Wh zi^eNLaSnscpC5wR-w*W8GOph}*?fAk)lqPb;@05*f+N1_6L4G*Yl{Ifqw)7k`3_BI z*wspa9?u6)pSzBL{?dCn9Nf+Be{SVJQFq?)-;k5Jn;C#z9vC)tGym66j&*@rM}{4+ z$qlR70$H~a=&E4G3jub;4M!-RcjlJ!-}1Wx+4YMl*iZPkCqX9_e<&@68UkjeXaqeN zsz;g3nqUSxCTNHkNkDOA@pqZ~JsnK)@N^w;qZs;OGO{-|huhx-20%1SkxC2$sQ$ku zTfYHlXu&RczW{`cxJJ695HQxY{J%>^&;yo^C$&U=rr$~>NX!rayQhQSy*veRy{C2& zH@cKKCy@Otm`BDY0cZNy*Hnlnj}NZ~b=Xec%6geV^+(=Umq*dw71& zv(~-tb>HjR+g>(P1ID($ORR^YuxzQiWczZeQ;6i{RAhlL7hPY`{Z*V*u?gBU}h5UQoAarH}nVl zE?m0#5BDV`nNto9WhD}^uo4fMOf?o;l%PrINdBeNZ<376{Sd*6#es)gg}FbOb{`1DB*r>*u#lo z5&EOq(77t}t=2TZ%Llz0Co6~4zxq0oL_OL%WU9OF-7Kq_>9iF4!F6FX46>1G_wi=t z%Xj(sZay>jh1{TCYxbFfQYep*`EMk)ybji62h!}4F9gkgFaF4Y><|DvisB_JJ z&O>v8g%CEv*WJRhy9*7>Ti&*R!;SNDbMWHNO3$%n5L;Nv#sP_>B4Zq%L8dJN+#s}; zF4c~NR^;c@6G*omxgjWRvxw(|HtL9%WW$Y-VesUnQ7zBbqE>bph?(0orO9-`#?LH5dU=d=e-L(hPFL^w}0^v!#Y09M!#y5 zN8=mzsp`=ug$G<2oju%G^VKfqSms@QakL$qKr9D_9xr!-d*XkN28R01^?o5<8$k6Ok>jo3mUgwKhnaNO@M1WaSwqNY9&nAg`fExPYIAqeLmeI1z&TY*8@Nqj7 zo{Un#kPRv7kd3jkA24?L%_;}Q}Ph~?pA&IvkJO1NF!R&BE=C}X?@F?XE(weS9 z{?Kwdid9-e#_{w20z7sB33b&4g7}Ja-}aN0S$~P&zbKOS((+0OT~#$_`}a7Nk&8ip z(S*}vTBgNcMfV|7vE!G>%LK+`sa}46u6X;^jD;c<`gSOu*rrTaGz%=-=q!a2w06DK zmb(;}D}|pS7o%(_BGVgJ084qTXP3=(Z_IZ4EbPaOY{=9F!KeLkZn)uHf}Q{ars+U? 
za5JBw205yWk;E`0RLdrzsjKZYwoa}d@`6mXZ~-^VGQX@mXq|lD(lMim)sG}@XYRyv zMA#I2-zrB#P_;%W0AIOrR%0%Y9yZ8aR_k_9!g%HGQi4Eu^B zQP|};X##x=`i*liX}@DIYhhw!9X@0qgsq|R0AxcoANokQvhw}t_rD+pTOkzyeH^~Z za6mO_74st6v3=2Kl{$V3AGoA~erEoeUO4zs;0s|s>JX}S>FKCU)$4?ha^PqjindXY zC}>n>pT)S$(V=oeqAKBvxhcl2s@n57?G`5@bZ%7j3M}Uy^S%(44zr)!Q!R63oh_UF zu`xU6vmnK&DeO81cx*C(uRL|a0Q2#EwrG?vY*l^GWtt9P6`lc@@ZM4cEjRh3nmL&}OxSenmlL!6?uCn2@5sDPYV0d*;mjZ6 zy;0hW%9s3dCs$iA9fFKvjQBi9VHY)tU;OR2O8p|{d;;TWOAQZV)aonyIPX9f~(se6PD9+q7 z!d#as#)h91vMUAr4Ixgbu_c}ye$R!#HEr%ws$|e_)+=x4n$Sw!Ukv1LUT5D{gX#E95PCI3A1cVT< zQja^112i!1W7FDBydxB=U>n65g~C~;2)STiv_pu_*}HuAbQDvH_eACh41?w$iQ#Uv zOJ8gIeWXCVr4n4NqA|?fof@sGY=p4omW>j9%}rv8rJ#;m`w&#OsSCxg+Tfh@)YIxB z;7N*Z=8XC4RMjtVDT|GpNb-K#sfzn&gAcN2Y(w#qq*LG-YlkU+ckdcK=_^VAUo|n? zy3b@z1l{S+N^&PoVSCduI12647aA)~p82g*H)O5ewB$1PA@$IxU65{X2?7Jc>p5!to+%lANv?BVTNZ z0w{sreH!z%!=X|OdnH@-dE`kv4lUf5Zj~rPtjH)nH@psQ`v;IR|LNGxE!l>g{iS0Q zA!gKZwkrco={9kyF;?;;MA9LO{_7wPmmtFCP8&4_A=RwrWp$I z2ZNGu%LNe%jt(4B+B5Sy$k9~^l)e7;*p$Do&_e}FOMHwuv*CuhkpR~~5|=JF=?2@- z)+WTVplb^!RpIjZA9!m~>y#jg=8$tn;usG?XYy~IDSr%s@N*42HET57;2ihioUG%f zyQZ)Eh#!sRK4I4nm-0DPkjxx`;xu66%-?w?%Kh76r=%fOk7;Xqres=crDDXl#mI}=Ims9arIH{RPtRKpBb9qMh$15R`I9v^ z@-I*#CRb! 
zCJm}RKGXhpENW#`BPv2Y>NID(v~%`jBS~czPAQf=-ED$Hf(&T9w&*>|cmXv=jJzhs z31_kr?kz`FZHaeQ~wGp4j#r}j7|=nE|${O;voPkP6I= zOR)|(vR>fj;aKe1%E;}zQNSsQkKjn^5ussFV)gOmrgh*cM!@4+NceGs*VhAA<6P*_ z9XO8gj!LNCiXg_91{xoUPng|-iyNw%Z-BV(+HNeuLCM|i{Nd-Jnfnhc(d>Ivz^RUl zlEx}o0N*Jz080&`3bn41B>u$wL~2*ltYP$F5%w?bx5VzZz*fVKwq_D4!E?P*Yz6AK&3Z8zqPE$KH3+dn;<>$TU_b7vAoRJh_F%YPAcB z!v{wxVtN>PQW&%6?Q4P&2@0P07i$vs z?eX2@SdYUye|5}^@p=Rp6{HM+GKiS90if7_`PlM(tHIn4yo1M(+OgX{j8 zpDcbdqqt_3n?_Y_NQW&|WbQr)klT9q>e!t_vCM}xtGPr)q$6&2mZsOmE4EREokNLJhd7X{K)*k=-oYqOUDiMbeTSsuX71b!Y?mmD~J=%!ot1 z#koVYs-4P}W(Wf(dm5Xp)44m=<>(#&xLalQ$-iqJ5`di*7NyTSLj1?UQUblK^v(W~ zuL99$CL_+>5N%;CSfyyxaZrd)OhK;#MUT*ytY20Sb)K%Qkm6tV-#hfTb?@v#W1xEg zH_g1E^Y*dKp#J0hS7X;&Rxjd?RqQbyVAQc*Lp7t`uDiBSZMx&TCttnU908u`SPb{H zDp|QxOPM1W7PsoEpKTk@twQGuJ-eRwQ=18kFE4uyi7$vU?Lwtk-h2XyhbvcPwG$Zv zUZzK`F+iOxiP^E#L)A9zR>=z8cz!5du!=MmP%3rAlz7oi`27#LJz=<+uU_Nd3O?fZ-)^j(biOA>pE zvwg2OJDXSv5j&IzUQWMpCT;_yQQoyV(a|Pv60V*>hr1TuYY`l^(_td{ewWr^%3tZG z<0bU4zozMKpVZgE<=$^T4EcL{ocr8Flk}?H>e6oTZ(ZUgbD4)6v`4G7ttH;!pyq1}t^H}) zxR~VJLUuQu^sAY%j)A6xWJ2n>vHdZ97ozNA4;!5#lx@hgsfmBgG?MH^zqKbHtZ+^1|R*p@!c8R;qpb>RiyPoS`ZCA&>p}t8fTxbe|io} zu11;QKSlKO>VyXDpB&xJ+7a`^4ZkArYx3bmIQ|mUJ&k$nOgb|piEdR6P!)Wdy#_E! 
zRgU*`mQzp@>IUA?@PyGu*0j7mW*pI$Z2P2a>2g2n5lz|QM%hZron8ITsQajCU(X9A z%Tg`rsBA7o7!#giPs`mY6BRwxYD6mg>(=4U!_r%3~vBTJYAMByd zn~Szp7rtd5+SU?(u#VonRNr=s?9n-LxNy-5n^qY>^e_s)^-n_hTB35}^P|ur&>}=xPxlzGIn=+=D_OiAD2;$5vjKNVISh8$6v{0lSyC&-0YxBeaF`M1MkNV7Adb00=)2$hU4|8(U8RjkysP^8 z$SOHQ2cmesxKy9Xu01DEQk~+pT0W#%Wj4^HKP9A=Z9=!olowhjkRqDT{#%djWQzVB z61uC_fQv3hM?pFL{G^yH|CI9ElY&o)x5M{!(xvL^D>mny*(mLcYD=IyRv4}OVf+nd zmsH2$owf5gYgc?xH7o0V@WCr9@+E6EO2ltC=7DyUE=T#Y>eqn3pZrp_?)M6?x&6$Y zM6>#2$Y0C0rP<6NE^A7hCP^HY_;(%6WPR33AV_)cpbyeVU4WCb_FPpY4(&!igay zm?scgWD^qbAQjFL)H@BcllpVUw8<#5d}}G58i&xqk&BH05aisSrQiQ}m6xZ_} z;rO6kV@z%PZQ)AJ+;jZ|dEdZe)m;kC@IB2CuJ0#Yg!!va$}HndO9*5niL}G|LXy>l zcQWF9X+3IFiF)oI4les+?`^&@wmK;oaB_i2gU#3l5{F*FMXJ&d#vx4uQzgmPxq%cS z4=7C4bhD)S=TVaXYTU@}6wTJzw<@S1<*py9K~XgR3oyay(|NI3HbTKa1g=DVXtoa) z0|O_zM)MFWBR(Se5}6P<6AYXsQpQQq5|@?eoSODN*(cocjypYh&{!skPh7E*3!yYK ztQ&=m>s8Q1=(GlTWlrFWu^jP1E&GfNKL*I+%aHL>Lql-|DL2z;+$tLBhlajndDM6@ zueAzyYfj=5MqC|U=;Q1VMxiF?Z|(gio%Z*?-09uUP(8`hrW95r=IB)D7?z!#Fp|9% z+#on&SgwK|KxnqNr;9z0xQCg;D2(6=`jH`-CQtgo`w9i~ zS~|9uZ+`i^qb@I;yU^bmlYlhBRE^Q*dD5TEu=}d~b&1;7u6xzpcx3}#IbE&;uOj^A z2IEbZ;ay&befVHXC@7v}#u%1f8ZxX(`nDLK*zN}>5>8YR>QuCodxviHLF2*(0$ipt zt^W_#C6}ZJ8bH27^QR13zJK$se0!@@tbLZxPyglaH2z|o4(gL0z4X`>eVMV7IF>}7 z%BBO-Q)|7pEQX5Ws_J2ncIEBcJn_tviAWE#dNQr2y>}!lm&%{3@bRo$#AKF@brc6? 
z5Fsas-Mzs0^_@d6AYBtsR=kp79gAuoGG1i}HF&ZzCN56N9)R=`^I z_|h|AhGfw+tZpsLRm!tc9k;OwY@N50j(y-^qy?pkbDhAG@;YBx=`XII$P5DBLP*ET z=f_{_^QvjMVbA+l4^GhTb0(5e5+9-S*M^yAt_tKYaM%{cK@q?d_e1Km@xpz_ z2?N~svubfSK9~?0eHBTl6SIdo$c*E$Bl5VJVYr@fV|Q%p*aK;ir9H|fqZWA-6{Eymw$X2&lL-{2wm&ep&AAUmh_v zB22^67!=WdCyObHcQz zOz4_03e#6o=iT*)U#_oa=4caQ-9<@o$3&fuqQU7VK5e>qm1k4xV7`-?+-mR+U;Yf z^-k>S1Fr;W_N4J(;1f*PQ8n0StQ>Hss#s~4$%|Eu@V_=-el-FE!H{Ggiz zfrwcnlp4OEH{AuUS>13?#iPPw2eQH2Z5!U_$*0eT356bSB@^Dx-s$~0c~uKjp3>cP z)y%>r6DMjQbUx{3=qa5Os8QR{3i;~;k2L7n5a9CQ^-;p6#>zO z%}JL3;6l3Nkv301^M<>37Sl9m9OA)E)?nYlynFXrQgq9>{rG5@J&%M^kdu1mok^i0 z6INEc7s9W1p>fi+kYEz2#+&oV+o-Y1*8c>n9r1UHw62V^G7Zf)_cLF6{}2?ef9S25 z(I{zRzhmOtN|3AJ41E;$Mg3+c-NJI9x2;DVMNZoaf4Pd(vRmDiwZP`{llbgt@@dG` zo)gvgb5|}f@~xCrMLYB=K&q1|*lg?v{&R=cmX%R#Ov8w`b(9Rc0`!&hOTYk3FCdJd`?Ej{ z0hzsN{q6BzNd?jbn)Wql84Wb@4F9`qv+Ud=7W3+H6>n}dun_I5Sn}C@-vN5cN_b#G zTjgxEQJdYA4qeX6OSgYr#$EQjAn=5~GW16HFI}_yzT4&`<|GLc)8Nzjp+;}qnb0Ua zlqO!EnF@L2Utr`)`Pol=Vr$_HKhWkG<%^ThZ#xK7;J$p$3<2D$50_{OkOQ2K{&#UO zQ7gjvk5Cr6#Y3O1uc)w`9{oiypz8-M?E>utyk~ljaMLx=6boiItkfyW)asjqZ`XyVs^EOVv(vNG+zGFAJMZ&LvuPrxStL#{s0PQ2Fe zb^djV&=Db_q@jlA2Z}M|ERU{@hSNP>zvW-jWIk}KY~{+bDJETUAp{WSliANZW#P}v z4TX4f-C!f`rRXsM`}fzi*|@}h4m%eD8@8U6q|(ljVcff1NHYEQ9>av9;@MK-Cfhu8 z5);W4iHtgB2n|cz-8=h5IV==Ud}h!!!2Xn?sC8D@F01}Fwo@lI_ncg_j+ZTNVJ&

q6)X zH&EMM;zHraB0C!82jYJS#j~;BnUv03VIy7H6!V8o$p2!Ydn5QlkKdcdXfxaW3RL*J z%R595^b8{>j@@%+YR0=mP^!4^QW+nIo4%+vYlZV<{2swZTk7b!bG{1|;VV<#MQlASu!L?6D5^3;)!psZCSO3l93`zf7@t8-yYtmu3Xv^=u-J8MaGDdHtmld<^n{6nw^}sW7m+UWu4{Nm zdcx|ozxEUI+pMui#s2JM9w*JMGxw=XUC~?C4+`&F9aZr#tUz>$)uI|gxlF|vmN%1X z_ew%61Q=8^=yrafi)%SW4v1%RS>qcu1Uf{7?a2{*uCrHgpYb~w*Z4c%TL~WEEhB_o z3_YvsEYa!F>nGM}L9o%q*_$LfG>^XX&r@A9H{?%6)P+T(#`kGQW4xO-VwG7YnZ}=z zH1ma3Ex)4&q1if&O5ao?p|mk1t5_Y$iMrZvkDv6ZeJ@R}4_m&Q+pJ~+F&ha~pwa85 zGA4v|l#=p0kQIG+J3Ki6G#I49Ts4~Zqv9SNp+av$Z1#1dpJF@-4@bapQB$J0XK<|{ zI-WzBmRXY_60;Y#ArL_o5>~VAGGf+pvK$Kc5zA+ch-`p(NVkOF=;r|G)<{fxNEi=; ztjK}(a~Ah+h!liG^kVX&*Nan7q-oGBy{)u`iv7bkp9LnOM4ckgROJbv@sT#%NuCQ0 zZu9|~it#W}nwE|DTfD26J<9u784sE2DwUIuy79_6)4KsO{L9w1Sk5wge-($WSDGs+ zk}T{q(5ZhvbONVV3JYebk-N?LMbXuc`Cd|!&pPx}mdKIZ5sXc@lNW3x9u1n!P zPQqKsuq`^iKlNMzBYkbo%@c6@`7@xyRDcyJpKN%l3TSKT_N2FLR&(VZ`#UYj`RKv~ zZT1(C`=(-P(e0QY>}JEUQGuCnBQiMl?>gH6k*^*OIl1|By5y2g93>#+m27v}2 zIC(8$W&o!p#O(QTIV8YPNiu6`o26ZA%gj=ixa5DhFzM4CoPHUmbGR2{-iA2Fc8#s> zdUh1*pk0vhWF}$6x#LW=S4*_9Y|TUSO_>*ZQ;+;E95#K2kSot#$)jlFMEY!^R*RhFO@{#lln2((U{a;JIUPbeJ8luH zuq{A6nfg@;cP^d7;OByV!Ip4z!kFTSUodtA(vl|n>B`un@i&Aqi-^ab`I9N_HSaaY zS{&-}YSnI6m2gEoN?QieC9jL=$FOaA@ndJz9!tFmFUTy7{4Dhq((X5(ATUT)h4DJ6 zcRy@K$)ZH?yU9OH?43ncHb_h98$T5;(IriS#AtiaC%j6KhSf)*m|f_)?xWXGMR=T` z8GN!^D&%X0G~{c-&(CX#=Ult@7Wxz?!evR9l)lDC{F>Jy-`|n)a}XLh9^X-sw{kFp zr!YwhJ42gq)KF9G&m%%gm$!FM_6g5>?L^le`KgiYp7qV$snh}!$Xh%(`hL_qjzIFh z+Epz`^uXpp7rkcDQaMT(Fu09?goD>PK-Qyir$V_c9AAI%D6G{ol4n|cqR|6Fy@QDM zsP9DjhqMX$m`-;Bve!id7#JjZs&UkwGr6*PGAQ#^hZQx}eo!pPsegHQFjtP+)aJ!g zx)8@PnGr;*M@-n=j~RLp(qA*AIl*Z_t1X#pOWnIZf4FxG6Y{Pp7tOJJnz)8tQlT+C zVd|z}9OYWS*v7ZKify>Cm0+`niS9U*)$yq{N?R-R_vCceIU zfi|3`1+9R~Ox32=jQrl>bj!;%xRle^Bm6^O7e=$nwR5Lk_u9lQqxf51Uwsd>x7Yk| z9sQy&P1ev(xk4N`sS3oXCYT$B{UYS!5;^T@gu|D+w&nuDsKV0!=?uIZ#f+qGkT9bf zxu*9Y=Tgc#<_X0y7B}{&U@#hAEOmnY8>UrCb%3K-{-k`=RD1N9n!M~Yv}L-*+7hkNn&?%!9B_AtzT%g!;05u_-`|4k#-~c7quPo+ zxxOUvpbR#|G9yJ@u+Bw|W1x$Jc#VH(VRL{%o%-NlO_vumrX% 
zG%R?b{aBkOA9u0c&s)w?nn<2yQMhOipVb{vyg@2p;^xv7cC84uph!%40uyiPY)E|D z<+v)lC)jeIFxfUOz=(Oqe!1h^Rnu6-Oy)x~@1g`#1|E+Z`v`XivN*0PZpAc&i)P)s zi23v`^O`;;|2%hlg5IbLHNZKyKkEc?c)pss%TatvRHb}UcM&A~0d6}8eBV0*W(n79 zaw_6|sL^0$Ns1Or``I{d<|8umE=4jw`WgJ)6#*cx*q!;FztXYS`!@OK*C*lJnnG(k z@F#l(M9teM%r)yX_V&D{78i_OM=~>3UCx(i1$Foz$>qs^wduNu?lkiOo8807y~TFH zKjB(|Hn&C6p%~xRKg$;)`fX3t_XFn^B6vS3_t5Ve)^ypEM1~7-bNH$ddEo-0P)>TW z*rT;KHIsmlGOq!B^+VO0x<`?j>#@z;0tVVA6C5nO)C?I6h>hMg>damgWW19*P^kaD zRJb1~B7KtQxHU+fp4BYA_}|x*mT`3sKE>5O8nk!0UIsZ~df|wKGmWSQLjC?GCb?nB zlXuT3b}P-nr0?#+`wx{n@Ix?oWO+mMy92bP%P9I#0g8GS`E`*jg{w{Bo|ADZMTqV_ zsa41FF$~UZDvcA+zOW4Cb!v~Gxb@#w&V_$;qx+CkPWkB*$4%FPMjvsf86lOqFNC%} zGp(KTZbvR7eJ{@sItwCybiji+2#kz#=V|NeR5TUpQm!Jk8$su&i(mSPyN zq5vAoJ3h?WK%~$Je~c*Ml>RwyQTW#dOYt2LQ2u}3kEQ_xquDl~!S{qEE0U{Q&kv;N zU=Rl3&rb&x@{FPUWlyT3$hZGp6?h;Bn+h8e`EG!(s{0Vlxc=eGDRt0e{=cg4KT_>U z(21P}ip4(@^r-zn{ih=;l>eWyZpAj-G#lPlA_F`P;q+9!N>g9+d-+1IR1oS z*H_~aN+7d!BGCLfx5ytR?f%*2ZGhL{Vm)sVVfSn8ZCkkgaX-Mv|5wR=(>mbyKhBV0 z;QqDp+9jj^ia(=H<^0**1gP|iK>wq{#0S?q2mJxw$SOay{gZJCP&I<4yrei=?Jf8L z{v(f8T6wsI2zJBwb5I2N4;lV|q*sXSGKyUktOelTD*{3KQ;K33>Yr)Qn3D>v_tWP8 z{S{aTE6PSXhII5qZcf5(Hv#F58JCJ<`c-;l)!}P~Msr9M^%fYCHUzlvY?y%u;H@~t^k)gv zk6+bQ12GiEpa4N6=kT8|wzRhap%8f3tO58(m=K{ko(YE&Kuj^^=^}0(E6RSP98Mh7I;e;HKULcEO|gon}j)dpr-YEyMCa1&dr+u z!(5{5oB1jw!bHn9MHeX2oC>mK(UXwML5>Xj-53C&+uIB{-_I!L-1C6B0}`DxVb)L| zFnA9rwvA-X5PTItbjpT6hVRdTCbRsHb%5u>2W@|ZW146KeeB0oRfyajDWOQv@a#PH za`+xn=GFc}V!wL?-4NH02@qTxLEpo8cl*$VixidVzlWvyz@{ryOB{2cgEob`DK1bX z@z0|Eyw>{SWIt%(({{l*i`!=h=h5{6mCoC7P*p0PQU2%rMl~{>X$6wlAhJuKd#Eni ze*N?XSj9ilDr*T2R#8EMrdl^VdduK+B>1HMZIgjKQ{R4pWgtU}#M7*+%-y`Z(>M zT$lVYk!9p>zr8SA9EtVHz=l>IinI-bcAbC5AS(&j<$Nv?$W&0qq+0+k9%o@M! 
zHQ?xSxxe{Lsu0oZWaYSuaY&ig*$by#=#*g8g z=PR07og;W0uVLY0(E`#lWV%fsqJ4j93AsuArsghO=A(kBU*q&YBYJk=tRw$$Wl8S@ zLIXiF{y?K#+%HT>a1*zZs&*sHXB94JwL%V!LMtVT9wQ*BpN{KdZsj#~C@S3(;_g9& z%OA2+S2$?{ovIfyR0w*-(r3aW(aeImJ#h_L$Z=oG`7WLE<}(-x)NnUr1-0ssl#9Dd zu^w4XKx{RAQug{f*NKbPNdmTVWmfDeC@fOywvmzYWo|M->p!-{gNf7IK;nLb{3dNR6kP;UARZOU7mzs_Ez308?L+QgX4RNvKac5WPEHH z0{XPl)5@0Te&yWK7mZuQCH5h>^M@k(K zIV$l-ms8SW?ZjQ_BJjXG{j(?r2%5Ht6@1rnn{r!p{;^}dhd-KV)r0~9x*3OCrOkCC4)42Jp*I+}L7FDI* zF#|TtqeoAY(+f4-$pqRyF}BDF#56YL%d*Hs;G6 zj)Bm$;mvP_IRhC8bMITd0<`zE%j~E4U371Ie6Wzwg){KZ8_HGrpc?+QDf-`)q={=O zP&&j^4)UO*hRLZDZIJhe^90)wBM8*tWd^jL!7=7wfkiSFqysZo7`aCBurV@yTG__C z*ZDekegFU;r16fsM=sIK5^jV0LfJ<2%Ldp^e*6lA-`d#w^UZupm)f54fZ8vXM|!uc*RcE$v8w=f7#f-~t1VwSUlE5tooER~of zjP1wo9CnrIyZB50+uJ{H5fbQ*D0<*jWGvOD$&kU|TGxDnb{82Q5?8ZGaci0H^SIu5 zesPV*F{8P`8H1}<;;9Y`c|a5T?MwkPy7(Z{G) zDZ;G5G{j{za}$8b#}(K~+tMd(k!OL!Xs@=;_7`d_U5(qp&(?6s=7Z?oox$JLbfyn zwg=B(uPb9?V5rfHl50BEEmcYDusqp^Jt?o@VxtpXP7p^z(qL1S&NW%19hZF7Zn)1h~ zJxqb92t}PPjr3b$xVs`DzD~0S)gx`yKxYNIgNfPeF*4A=$c0OOQvorCv0KzU2Ev+C*BsxMZ2g;Kia>N;>} zeMt1!Gv(Dh!{MAQDbin( zJhStZW1Ew!vkrZcuNN!$q0pX#JYWQ=!LcA|ozmKfJ8|$Z&qYbUkZM}8_!*-}b-|!* zg4e-r2zLmiEVI3@6G~WzON9!-;;D4A>fI4lU7kyj?>%;Wh=D1`l)gA#@j+~IPv=&`|av)vUiFjN-2mw zNCBb$HP}bN&Y+g0u?Zk};TI3=%B4SiQ;(1h*7+#$JW@F#HTQD;>9;|Epk2gd6&TO1 zBBAEp+5|l3zw0%Ggo5vl|Awbi1P099z?!EZyMvUSNGau$1IH1RV|McQd9U+3-~{{q z-a;wmz#EF1!VNN#5i*?TGX#P@c#xTN3CI|aW6uZ9ZMeDBE*k83%^4!L2I6$fP8Br0 zesAqaDTwPiwt<3@1jVzRlVftX%JcasV9mLsb6$5~0F4CW>w4qmbpHBJBkNa*?VytPRq^htkHXj z@i6cWESl0EMzaat?|znF;j0jfznr2w#@Ko2VEUDt*Hz1^}#mOGX~rGJ?TV577nhEO9b5!G6!l0^T9w85Dz4qx&;7gbE@8Eg|sHyhn;7 z#5vyf_9encK^I;a88#7Df2^uQWQUhg2qF>j4^^Ulb1U_NRY~RXW(0{KIOLxPcRQ%q zP1j(sq0HXi6%aKd>LXu(EZ@$2?nH)XUKu^q;xpsN@Z{yYDMs;kQAVtl6c4dVl1~17 zJ0MvFd}84ulm;&e3*A6LsXj>(qTM}X#`^ONqzGpgO5x80sL$*(*v|Wqp%RCkR=y%f z_$C~JC~LT$Zs@8)V*GCAx#5d|1NwQKh)Hn0WD=67kP5rts)pR>UMl~JRu*Clj4ilG z%9Kh-nB;$?V42J`{k)h^wv%>5@H(OSyQl)*AJU{Vz?_@IK`X6y_j2sxRo%7F|NA5q zmPs~5&zTD?K2U!+sc{nGTDS6BU 
z3e_LlM2Tw;0#_d>Et0b?LmK%T7#?P!@!3&0H`Skxqp-~b;5rpk-ynQTGu;K8bcBhD z5smPCT$)H(=814hK$bq+eMQfB1gBXG$YG-%rVw1;jOAe@OaixIf{?1bs(|a9vVE51 zQ`@5&-j7N#z^$H73PByC@dJ{|9QY&L{ONm6<&)pX9zxqdjTX4e_xALonq7DW&QT4_ z2X&e}>X3Pe`K0zLpcbbm+jRm#nd*6sle7xQ_zah(*N6{UGlq|NIPenUi;v6v?Qh5u zcDy{0yg6p)Rg_SgCqu)PP5A?HpuFx<*Pe=FK; zgMMbbaW}3Oj&%Py<727i-SgEw*$_!;frgti^e*a+dBmxbFLr2%f)psVE|@e?#Mdb= z8m{)m`)*>~>wz?kXb27yEcX9_b9?}O#8SPCI3yGvi{Zhk5uvi1gB1VXk_sVsB6}ot zWX88-;VC1=>Y1h6!1EZ(y4|VH*qj!p5q2GhLT#whR#5N-g+0E5C=8sQ3Y{ODEzdLg z2OikqwJJ@71&2sb4AfyU!o@IY!RQgae)}<^KZsH-PZ>}N$I!ArRCH3c-$(X;%M$Mm zp8&cH<=_RXCy9@x7!FX%bI5a>a>9|{YXPl$qKddht;=C4+;aqISTc3zLnY;TR_+FS16`Rd>X_4**9?<)?nE&Z+Otj#rh&;&+vaX}}&R zB{+1xmjQc@b2E!dS9vPAQlNfb%YGf^aChn&S9{oSOqeqIF&-^UA2>9evr;!v*D?{{ zAoYB=_HXP%RgGjH*%V7Yk8b6^Nv}Aei28^m5-3z6#y)6#dsI->{IGn2-h76U9n4Pd zdRH1O;}U;C+JOEEzGY%S0{Ye`;~pYX(ptfkr=pMR{#utoe^#oejNi^_T|Fn zZ77qwS=T>beU&Gf zk+YkYE&kBr45W9GcwJGJ*JuCucM95Pb(F@dJ1hUb-zZWY(I?Aza9*XoL$wt=xNii~ zv||JgtyGL#C1r&_YOL!5th3=Uq}7tzF{1tHihtXB7Myf<1Gd35#+#%Km}d8P&|hIF z10>DM{0y(S4fG6ok%LF9du*HVwZ@Iz17%SGbHQYsGLM((4p)pubc{7Dr#o3`NcgyV z-QN-a7yI)}pyr~Jg?R5HtpT5LBQar9TC_R#=R|P+R0Gmluyx8a5u1W>4=GtFY~BRk z7d%NGVj?jFu6mhW0ODvDHT> zMU*5){0jyb8OL8}*%xk4-5w$JNn|f#i(2B4!4OQV|H((R8P*#%9=5=6i7N0FpJ)qc z-)Rpx)grql*0%F5z-UUvphdYYplinjtJ%9UJUUDsUXW=JH6oI!SXHw3Yw+P$mWa7x zOg+r8*PQde`NX4YNQ!i1p`OENQVLnvou5H?+T5FUyy7j=)vb=U8qk2o+ric-4lXBj zkNhSV;!fHaGVnanQv+H{d37s9?LYtM3!t^?$p@#di%j~k7;2fWkwgX}`3eitJajxY z2dSK#wjF=%VV?N}4)Sm}yt0?UXA@qh#3NxrW$*A}=nd<-oJGTWmNLc3xY_f>H_yjD z9E!aWn2d~5nkUY@xd7J(&tGX1)`D)1BPcf(6hdXKu2N>WmLPGPc2|Ru)i)Gibfu zlR53n24o%-Nl&2Ne*$z{vo>OBq)Vb359x8(OHXbcj_`}Qhj)p@w{CUJUKNxYYOBEj z=lVc1AziUIC40H&ZL?G2MoUe@D=!#u@@~1o5x?s#`%#2rtqY+)d&s~R`6@Z;KYV*! 
zLiAF?Jj+i)rz$YJr+NVF@Ys#LYO!pg&b%XKUV!zQkYp^dlqOLT55PQR{|fE|I{ETR z+NCAN3O4U7P>L63 zZr>j-fIf_2#^c6+34cywJS`F;PZU8WBg6}|4r%{zL#HEZEidbFm*(CU)uzf|j?hzQ zU~HGuH@}b{=6?@0 zm4u2GC}n!vVT3E9%9xu8d=a9eu?l?MrNc)`w_`V}-v_m8Ua7Rb`a95`U~&=dbkT=( zVi=!|aU$xGoQR{xNgHsj!##c%Ynpjv9?u|cb8oA1Q%D@48;|qY!GF7IZN@BduuZwG zr1kfaiPRej@~lz5ZQOj=j|!%EF?q)qK8|z0k87R4sP_Ze{G_ia1Y3*Lc(g?Hk!dLhY ztMo!ZslRMWn__`dwkUTGXDpeihesw`eQ|!mr^Q1mmdx2BEt74_-J>XzU*SW#ia8J+ z+lHq%h1oK3*F0<{GdjpsImFv-e5`KRf^o*s^DF9OR>zOj)H$#*6DMO;f2yMU3X_#Z z$i@tcli7av{IaXg4ohQYCn#GnMwauimzQsN%L+|OVXk3|B_{fvPNX@9PoV4$(TBpy2 zJB&@ssbto{WG9cT)PwOps|ikWrXIU-+6@8i8xwmQ%(NX;!ii4eYX=vmx7giZ$H=r* z_7HwqlPz2PYS>gwv(^0t*PK&iCsi93%v0`8EN}2n_!54=n>=aY))yMnz@9FrVo&pE za#|W5`{ZpST=x5X*CQR=#9FZY;bqC}IqP=E47FkP?VlAnP>kpbqf;jfEkDvW(NFN@ z^lYg{`g7IiZC_Yc?{IRrwFSO%kGC0P!gH9YH6pmCHHe-Z8B^sR0?;oK#?Yhw^G0@e zcQ%p+`jp{+>CtkP;rIBL~1)@vaFOvr2Vds6SJ(Jj*Mno1;jUfTeiMro#4ndSelVK_~O~C z)xuPJ8tL{k7h|EJDIb|SS^3O&*zWfy*{5}E&vxZW%J$3*f}~5t(dO=wU=P)neX)cX z4juwa604rf_fjlthIbU$|GI*!x%2Mz7*L6RFH+RSYqyB-6|3V{C%DNY6Fi~0=N!ex zGz`UbUX>b)q)yOJ=mU`CY7cEzk5}E)UPPwe7EA9*- z)}NL7{ISHA+l$-dYVA(KWr?ZHw+xO?siplMd6zw|Av&>CJ4FBDcV=6LYOHHXdCgc9De zf7&|gqU{qWkG<69UQMPa$1!I}7T-DEs64^labMKpzF+bvI&|MR$9Y`U@jxHBN$R|U z;RpghxtPn)TWd{Z4Og3U!|0=#5>DcVtU}yz;^XcrLntPMj0c&$0CA0gSU`ccVMqt3 znv^MX-d^5{k3=JK!}8Zeg(x)#`A|G-e&>ctj05@-Hkw1nxJAW0MJCjTt!Y0=j;Mtd zuR7jd6DP{+gkrVhnj9aFdt`j3KcYBw!j4f@m^ z`^`H^i4o9P15qK1nH#Ups`bhzZ6v`JPW#P1c?e6DRn8~wvCxvs|Dc!h@NhVu(*@7|4u(~_sG|1begsM>5(-@@pt&u$mWyimBNbF z#a~1r^~fDNe}(uBjhUkfatczUzE z5TpA~?&Mgl&eqgl893o3wH|GwYpLz0xY$7X-9R23IuUgXr?$j9dZ|9=yAMq8S88Kw zLsxUkQkrEE2jH|Ot74ol4o{qVRPdT`e4;ilmMu=#(uAXt`}a}-1ASXQF=#S4`t_#6 z&7DW(g37A}q(JWJGjM0~z*n|{pP2al`P2k2a}jDM>U@)df7+c-O`35HU~itY$oafx zZXrl!NFriJ@Wa@X0i81A%ni;4E8^ljQOBp!2`|Q5#Fn_)8N>}G<3ed7mMl`Zn`*S3 zg^BiJL62GLtQKT5DI+!P0!ezQFnZXtB(NbHrVwrN1 ziL6ReLC?k}R1I7Hz)DP{R{e!}&*OyEnMCKe9GXlZL`^<Z zN8Ad3?cncNMXpHxHTvEL%#XR8=78oMy&wL*=TVI$#W3G>pQ8-5& 
zdmVc>DVfKLvP!n>W0RApV`NoVl2M|RJ(As!l6A65c8R3l-l&*?)Te$&Y`IHH16p>73r5P%k(a$M4OefEtv2I=Z(n+-R`yqJI;jyE z2G&UNpJ}rpqvrO^ie+}E2#!S=Y;y?_p33QFxfAV}=!u-}%w!MtrVH?vdU*L1%fN*K zi=BQ&_piS_##TOZk6infs~xYvH&gpFf96VAr1pS!r}T1K2=lKlO?+8%gGBr&gFAkzj&M6W|mKHeLOkv{8{bK!#?gazV40{yJIhGpa0RU z-o|t+hhLakj=Z4ADSbFJ`LetH<_A8`$E&w&&Th!Fy7mwC{=6W20b1gMsbNidippjw zTIZda$IDhL9;QvqyLYY^DaWvyri2{F=@ezDzgl+l`_Ns0TlUtgSxvfDrcg;WgD!;{LAd@+h0#)BkIT*%K>u<8~&`$GCGkp~8 z^>_5M5`TX*Feq>d!d!}5hEnHAwn==^exFkP`~oBW=X2tyg3gL!gC)XyCWN`xX0$FD zgrBxQ{)1EMirrJDmW+wh4!%YvLIuL27exZrQuo#;a(Rk9lb{UFgq_zdWniM8R=4ya z^zAE9A@MLbV5Ica3lkO0R4a=#r0j7q^rl&*m&US>4qm5^y`UPks6N7O6hXp}wWn3> z^mO!z=S7=?j22}%BIC}+NHAX{HShV6?-REcnsZ5_^!s(`khj{J%uEwnpcTu9~IC}HERuKZN=)wf7ft0i zlLgc(xgjibi5?oXN^XN}Wo^vCqNLkw4y4EnVL{1GSUdc4eB1Yv-B!rsw-cX|um5s56Ormq`99GTxSuy-L^_d=S9A2IF9V5u z>PPo|fH<7m?3MjBd;R(P7l(k$%Z)Yi=T1kg-~9fH8ks_qqtHp@`>Mef%}veQXIN#% zHxR`Zjn=Q=)Nk)%CP=d@@=&)>34Up-zc8Ka8oMd0K6|O7{GFjkQ$3UoSI%5u$hhRr zFdrw%am9TnK?yf2^!Ra&MZ*P;<@=kr>)tQpdDD;f>ry!TEkgS7iT)04_z`>hN+bJ{ zdUt!HN|}+|uYx0mtIa>1DijL%d{d1&00ioLc$VSDW>%{&a4kqVHnmW)7)a2lOS_yw=ZqJPZi?UGasvB2zmZ4effT z%jH@Z#*VOh1YP(Z^eL~~dXLdz*1lnM`f;w+3wPV9tGf+?MV_%3v5JXuoLmZYFlui$ z(I=~NoX2RBeDJjXv;JBnx+rLH)CMQr?HAMF7hHc9S1VWJ&Bj9*bm@%9p+t0wS^8BOY9TIAl5z~{3Zd?+N%H{(4R{Ivz0*7bUr^sO zTqzS=lrn-Kw;+ig-KA)`=^2`;&%;IUmoQxpw1*Co{_Xk$SMpKZSbzuE&em zeHyE13AZFU%nDH^Q&f+rHf{2$?dr0W*tPC8X`{H>TLVto=f(GBxyJj+tqYA?k)zlP ztxDxMLVDhC*-e~oUG2LfCYec_5F8iFze-v)xa0vf(7ErsPU@IZ(g}Qp>M{n`zvqXr zhl(D`g^9z@VGftnbMQWUt8@SNgHLPiI%y;hsvfu;7~Ie9ww0T>vh_H)qoI^RV&)fq z`7)zbo^Bk?o5g2l(pg=b)tVf^nAd#_9J6|3&r-!ZEyz=Q{bN;mT+UsY7~!7g$MGgr zePdgChzwvR)W-x;W8LYPT)5Iju~rOl#lVae-xrCb zGyS?DFZ@4K-$?XqC1>)$=ka80 zvHjV88N)5J-E%p@X}_d7LDc_&N{gz6Q~!!gHQv)i$q;^@0wZ#J(@< zr+(ZZF%IEh`IoPUC5q_W%f0sg3P5p!zl%d zn<{M_dCBkr25alzoeN*G3H6AC`uB;qRcJA#$}9;iMHILagA{{goZDBq(=cTY^~&}h zyPdhS9K%#-%4Jx$gxjMU!1~xceB#!bl#Ye4O=(3N9e>-)j~HE$4iWa zH+4tvWQ3J2zU_+O`Mr%weN_-K7B!Y~c|vQ+Zt2{T>ypQk_tNV8#Oa*o6ro5B;b2R0 
z*-CIEL8eify=&Qte2Kcn?e-y9*}ze|=N|*dCg}`#>C6eNOVEk_JSX-ndxD!zcI)|_ z0EXqMtqw*1N@^ta%*ykfN*KfOBKsWL+_-0t3|HYBo4Sn*lJTFIw#+@=coSPp&dg=T zy_i69uJi34vAhmEYZUJe`S_QGW!h=_^b@>K@vT42Q}-NV^<>y^|1A_OY)RzpSk>bF zymO>_{%|~s(h$y(;)@xY@xI!YGg4wilMq{Szq6-VYm!ZJjk4a-+61FPBE8>>>EDDG zwewxK-`f>(=`c6j3{-P@!A56-$&=)R(~YT&4g*GDZtooP+ymSYt(bZYwbdqOeczl% zt#=}hj){>Yqh^Q44%2wb@zy?5b3d)EN}4RrJFNYwex|E`@Ug9al2fJAmpf%SuIeU6G*7zR5Tz+w*Ys z(@t98Rabr~LCcGk7b2`y&`R8M^`zztGu!5sesauO_|@X*u(8C{v_|IXmWq z;>p#q)N8-8dlfsC297c4@qb*m|BQ}b=?NyZr)$xzut9P=n5reZKqvT zHY&SKQMf-uZu?DK_KToFBjH@XK_kwb`G$SJPZT?vF5H#E9S*&v@S^w%3-w6oHNqdR zPS1_~&Wmg69l`GQxEJU-4ObR&bIU-4&a9Y=LSEXK6zPqIdVSPS&B@5=7t@>h0qJAD zV^_waG*W(Q7LN&UN^c(8l#94$`!P^E&@r3+<#Wp%QO)(AeZ|7#pKb3&|DF!6&*1VY zZWk8g8EGoW0Lds1@&s_1lSw>D#>|qT=KCbOUZ#OGFxKO+1-e9qw!bPmnb2kvk7M!R zu(TIr_V2%*b(e57C+(lGy_PtN3x>N>@7eWhnlr!DUd4Fs;oZVG5*Wn~U3zQVAU}z@ z=;r9JFxlwa6+>B%!=a?G9Bu?wiYorN+OKfT4d%Ug!!!P^)Rod5`u_OnOL~Fd{Dh{6 zQo~1Yc2kuk&vX07&Y#Atj_H424Y-JZS77f!(ZQR1?G>cRgbUB*2BvYRu@$a`S2dAk zWN(!}iAC-3)jps^wKI0&U89X%`q3`z=_19+4F~XAnD9kkO-8&2{=wPjr_BJ6+^O=Q$ z%~Pe?SeIlXZ{<=6){-uoaK{{gI|5_ayi_NDvoD9a8F<&^hQAC8!O<*g5+(9MRjf71 z=i9@0eoMw;Vv=dQNlIX@feU&ogX^~CBXOHp{ZERCZ8ztgC=VXfhvTo7BuaZ>dYF0h zRkIVtvJCV~Ww}Od&Lnzz7rSxsJ$=QtAhVk9Hp_uVu64a^;_uINt}$DfV$L}>BXygX zgvWuxBKD8gDHcg_^Mug@1$m@aa)oo_pf=;32jGi1(5@MItAwJAPIl|O3rBpUnVHv2 zmw%>!tIX`?gA_%stmt@YeT+-id8-A6V$qL#Sao~gYm^iAqWK9gMq(Hf-s>*x5&N}p ze9%!Wh16{w@1ab`g%%L?$sE;}PYwEU7J8&nZJ4}kDO@~+Dh{;1m>4~%?z@R}Hkm_@ zu7|#T(|Obz$X%17*!(kpG%~n^@7@|aApKV7ockBO*G|{4GKBpq+)4`kr;lY&Jbil% zf1VW)8^)NY1latbS^y$s`uN*sDxW~~WhOzSsD7o)7RB)k%)aD2T zwZwoEN?nMqd<{oVRlj*CdjWLz^z?5fJ$L&9bN8LxIDu0zA~5blx(YP>m|^>)7<*7y zhVg13?oizmMfX%@EEH;voY~KBJ2_+&%ipfVRRCItz?+eWiE*p^(?389m*?I2y0`Z2 zGU9Je3SHJP3psVXXCQPLeiN%6W3(!+AMY9|B>!~2c(h4W3~LQyL32jbDCjTNXJ68! 
zSDkhUk@y=!;SM4s9Ut&Lq%kIeVCld76SWm^e#|nqyb&xX?=Q0{PCSM?d;VV8Frs=B z*qc4I3d$4d^RN_{2h0kdDOJtnB_F>4&JM&L1P&e0u9-l9iD9IWQHTyK2G9T!u}+{E zFv0aI=ohdJqOx8>k`$YeTn|{O-tE|rg9~#QB$4*M*Y>cqCm~Xazo{(kBrdNn_wMZ8 z$mB9COat!>_8h1MRa?^}Q^_Y`jTEecfuJ0;i;q^r%dCcBn(BxM`RSWMz?Rt$#| z_alukb44OJHjvbXST;1j&2HRF8rGwdOz_=s*3WGj_1V0yt8&er$!Vgw$8K_zvq3`)U=UYZsscTzTZp zSQnJj!qMmmPzWF)CJVqL>mHzhwozWYtqP)=rL^0S;Xmc*k4^#KQn0VMm83EoGOi*j zrnCZma<6`71n5*=gXZ7{;-2yWZP6uIop%Bjv_c&dNP>R~epZPc59p0pING&b06PRYv#{M&nG(|~6el^3%b{>Q zJAxnM1uo5wPkG!M5KtBfULtx8n#D5K<-2(e#$krGAyB{srqfp<<-mp_mirJ%%}b7c9fc_+?n@M=DCA@J$2KW9cTfLQTP7$e%%2V{X35pl;`1iW%H{C zKquXVsp+HCy!j?CSj(xgq7&3R4j(+KD!_53Qhro@!>+}~Y(Q?Y>CbQ9r-|O26K`n?()(4-i$Uj;Lf*&vNSdIEg z79wdNkbB)51fhvUk2R1rj3{(&f=W048$@3BSb8iXl=~o9BzF+Ff_m}GxZ+XiATZjx zsRLvr&`UW~($Dc5ENJ$R@JtVbKr|l-Eqeq;>T(2U_YFi#>;jshmk?QM09vfZ@nz^I zxV8Ls!0> zD6N7lyT{JG@@*(Pv$I}h*VW!b1jPF)kY3$(Mr3krAUtkJn5y4f;%omNr1G_gLG{E= zMf2BzC|ubGl*0oNSYA6~)=uL&M3T?QC~-z+B)M4wZhI-*A=y4)WO)6Rr2E02eD)?J zD6M;I1a7HQZNbgNIqRcQ5^`kS-osK-42yzYk`0S@x8=jK6fq=c)4;~h#Dq`B7xWR` zCv`qgz|s`Cw8GD!!Y?yz+pe3;%`0``N&@#2CCt;u$Dickp5$>qximgq-{CA^`O)uv z&}_xGUXzc>fpxep9}mmXVbQZ{Zt8*Ebzf4yrRmqunSjhENha@pI>7s|n% z->FSw0;FtY2SnX%#iY(*1DWWKz$Jl{t}NXI>3+`yQed3t33#0CyAL&c_18A+za#mO z>e*JEy#Dx7|Bue|9v2q|zx5}5%%j%f5-&u^-Bixxr7Cnvx*qM6r>b^dQC%|g7hlTQ z7CQ8R*Xm65Hm>Iv_QjJIQdF(f-x;p|WQ3UF`f2A90u!p@@dW?Q?MwYL1&Ajl;AA~6;I z1ICdUPDo&yEAD+mGEeP(I!WmaIgddf#P6gAkZ~1ZZr${W7rBtH#YKb$~W2 z;&Z{|O~vtsN_+248rN7K$+$fQVMK35DT(!z3Wb`E?t>+9d~Ti6`iA$grg=6?snV(?yWiC-%vKl(h{yUbqv=>NZJ0SsL<*)+vNcl99~FP1ez zf+RFMnp(Th!^y+av|x&?-O6GtOv@%E5<&0BeC%d&z%g=G@g*LK5tg+EZ_AB=VyZbQHkLjOYIx{kp+#{E} zRKswlTJ1~I{`<8|YPHe}Jaqb7%l#F*sXzNeo4)$QEn(wss{WKx>57aJ=z6&tIBvRH z)Neldq7ug)%xH~G#|CKVlR{_mblYVIxVFiIO>yIr8=AMJ_WqVY?U&w)&Mu6UV`km( zz1_sB={UbfKZ9?(bMSbA5RRFR#JT|?NbzTEnQfxoKZ{Cd_8B%|wr2;85+l`!W`pYK z1?n4uf8%#))k(>cYWz}?77vqh%Uh17J@E1rw5ae8AFjA*_!h74WiHrv z*GtRdwbxAxDqceD*1URgZApkyU$Ga;B5)q%2V8n?-rGXa1e#OLbfgum^?ehfAwD~Nq=L*Gh21H@Pax(U-NT`+4E48~gj@GP!@9c>Y 
z`1!Bi8uo`NYob3UrH|mwiM2`8oE58;s5#TUB8@v!&zqIidNkSm+PtIQmPn(RLnMg#jh8Aj<-ISJbB+M#WOvI*mgsWPFj2E7?uP*W&H8qqA({e3Udg{j9oWvMx< zDGEA)kT1r6_qDZP^wnysSz0mLO@_XY4HI5VmTgy-gqOq_=$B;qUj`Nqc^1sBXXRZD zM&+?Q4z_leOchY6)OV*FpuuwM8BeNmxzN$dlURD$&2*h%e#mY!1wI@rFhIHL+x~Tz zgEV>^bzm0+)7z`cpSR$cyLQ=~4R|3G(xd4%Y*I2qgNw51$1!*DC{_2=3CZ&^RAR@dDT7MF z`eR-{zd;w#&y)2A-3#WuEu#VnRfwop!x^8bi0R<7q@qCHBbmq?XR|BGmKeyaFUk?G z!;=up-)7hMxYPeP$kN@Vl&85;f=s`$9d&I22lf=^TYB!*`nbW4)egljRT6ZWWcmeo z=dci+vb@IMemUtY&&c6QSNo6k?`%~W`M9TcJScHAsF@_b<2J?8aWtL6yfHdNJ_QH# z6HzvWax;Gsh8Pq}y_BBxqytO!)nC$ZmmZ?n>zB8GjpszQT{h5|$xHd!xdYG)#siU@ zTlpE2EVBR(p@&4xZV~-AqUktET0-{+lnsHnqDvOprg-48e&a9D-jU=uffemGBtd7l zZNc!f+Rz*!l`7X|a!5bX&*8@OkhRhe!4jT*L*!H|K>v@YupJF%%%0*Nurm!xg4?JM z@#{_39=ybW{|Ir(po!A=uToQ4Xx#RZ#DHPh5`Pyi%Zg7qk#lH4f^eDJ#1egLiNL`d zLSA#uGJ2#a;YYHU+G|fN(y=X}oJf5ICP3v3*R0!&c537rSIlv(v;ZP3g`Xl938+18 zG7n5e|M#Und7ih&(H4!Xy6F;1eXZ-__F>5fxE9zEJ<8H2)HH)c@J(GA0YW+mi4TwL z7`5O1b(H+4{2^_jnIlWjJ_j&eG*qN#T^OD~ee3_3T5bGrzv+=AwxiOD2}LdVavqV0 z(@-X^=#CX#K@WIr``OGM|KE2+&S+o`R+3N0q8%nZC*bH8&=+Jj%*LjqCi$hHv!@_% zft}RnMppmL`1GF#8QHm>E};idTvqfxSS{165@oUUSGySa15Wb-qhBa%JVwXJ^*djb z66H=tT(@wDjTNL8OI#ae z%<2k$m0+oCp?n99@OX252=EpK2Hde^nbAXH!-zMR4O>?4j}#hEOWqqNB;XI6yJTbe z>Kx>(@u)D8P%5O^(!ywCt9u=3U`2R1GL%ZFu`!I_cbUwH)FJ^*?3jRa*DLiHoP|9J zFCc&8v%kfIkgRb(_)Q*0^sXsdd)yztwBF-Sh17I!4cU8N)AcP9I4cs71o5$3D_}vq z@0b%g2~|;##L>J!WXFxPD?OVl#m&Yu;BM+m{fheE1%`U55iW4px5o?MLa$WG66LV8 z+15r?|Ardmt-P5wYC0k2+C~l{+xD<-r%Pk22>LE1qIPW;gFt4Ug5di#n9mE$co0g* z=$5_8!LFYl5rPXsv-!(A5tbP~Ojg%_r?b2 zxejsU$Er}^{&!a=5)s7Qzkl`wRJ&;&>kzh)kK|Q?J@ap1Dz+P}Ur^vZtXE{aU8!mj zo7Cz)CGwlI--Wjw$!^{ThrpK`22b6fN)rsdI2QEr!tY^v`rF{CdC_erxUnFz9Sqh_ z)t-VAyNGoJiHo%xYwq5uvX9$fEJzvp6YoJSHOYol-2?{w3ormLcCLqj+iJk=AL37) zoN1((Hd{boBHv#PbH>%{)@^~~%BD#UzVNk@rqgJO{$#p@g28`V8cL;Dj>X2q@H22$5MTqP2!=i)FqpwLJK1)}B z!<)aI#-D9`LoKj^N#kr~R9~)B&UfPz7RU_1wR|8d^k+(!SRrR`;{< zKhcL0arV|@nJ-%T4t1m*wWiZEnq{x1%SJ{OOWlnIse3%7!cEdZ4p}yFBcyA ziw=_1&pk7y5C&FGSo5!;)9?>my#zJ{!}HT|I-!N<|Jf2?i-hZ~|CC!r`k@ZPG{RT^ 
zdTkthOd%MJ?{AKKT|as1CC583d^dco{=0vGwMFK}i!Z*4ah+XYMs&W2NAOP`wC*Ah zg!024eD+&m)$r1&Td1x(J4RYYKr4ar36OPk_v2SleF4fLe2lCA4a`A7FZMQtxD-94!25%R8 zONl$QRA{!?`401AcO`e5}r-&~?(1GxSct1hT_q^*VB)9>9xR*e_5DPMMpw`C)aXxPuf(>l<4AO6S4 zqVGjgBk6U|Go85-h(fD@L}cU#SVRATpp(e^T)zAL!XUPJWB-!syXW6)8#orZxxD_a zQMRFNNMdkV&iJ!Zk_Wa^Obgwd%;Vyav}=2r`SDc$AC8ybo=mI3zqz)HT;Pn0et>Io zi;lbSvQ#I|Q)LmCB4m2gny#2qzvfH%1FHMucj(TM7y^40jr1ux2PgTG))vesCv6cx z+M{MVR^sZN9S0wv#u!5ki|sZvB?XpS7P!(E&W!0W|0Q$S@qS^wvOmo#$1q&}*K>F; z5JeNURGCf4Awzjs4bfZYVGkH^C4C);0K!}>=X58uM#r6xR6*nAFAHt_M1u4Kk^uf# zp7?P14v;DOY^clvpm7|9$Sb`kqZ+-mTWq0p8&CLJUkqh~5xWz=be`kGk)ICz`r-0< zR(X(rSiQE`lAE7Q2YZdI-z);FUk8_? zoKfoA7=}NRtn^)4A8~d*jCSnA{Qy4?+ep;ubRqSozMlhHji!9EpTbH6i3k4+Y0Pru zv8B!-n6^!^>v&6 zWnQ1gMB_EJ4=hp|b|^W%`3b!rdZ$^_$8`=KoZzCPXYEzX&%Z`PVkMzx^io`jE!|;B z$HdP+2+#LnW?%y1cS-uwgE%o$ovh0?!cpX()4yIt{L{hC8%MsEympMlz3OY~ijds| zwQgt$ZH!F*Mx0ezo5UQHL~yrgBC#dORqRRLyXy3Bf$-?VsJvrtfFna+_}^f9=OlhaQjD`CpY~>L1He{{2^?FhMG7kM)sO ze@d;+{o}3Pw$0h9%`OSXkFd40qS32udMt^WHs|_!)V}XG)v2a&Y4Tzi{|6^L6z!Rh zazGvVvWJg#u$$TW84Lw*^4(Xz-2g1#&IgFl$VDi8<*fJI;oXu_CGEI>rp#8vsFRho z@fiM42WEVL?wT>Kcno#4Sm{7vN8N*^Y_`Ls8ltGs=6!(bbbqGhIAMyqzWz`55_w8j z=Ayy@Vgc_cfA8GSbK`z(?0+PT6_l9?w>@}2E&*3e(19~IDqFE-?utar#-7<)1`yHX zZp?-;=iIh`jkG-Sn-*n9e@PHei~-dw?S(Hsg)0O>Q<;@4!D(h|ncRRdFVq`vG$;q= zbf?ZfC)%!uRP2{d?3kF3FjBCxE?9>=ofE;E_tie_|;16{Gm$4 zAv=7Jt70Hn)-TI?R-;^KW5f$OOqm%<%Z-S=u%5S;P(>livKagOiU8L8e zi!s-+=Ft;$z4@IkMkjAUM}*ropDI2-3#UQfMn<9uptOE$U4F;ed;3+OLHYM+CZSyV zhx3I8;QbS5k8K|{+U3oAgBgIjbnWTmIKw3b-8i08;o0NiFSjVK#~B#d&*Z(gr>AEn zE=(r$ouY%Zpxpm+q=aH*F6lO#ye%<4n`{_C+CwI|_+NcZUSuVt^w%8GJLnW9HVBp6 zbIRrB)|e))m+!JG5(iyi*F}Pwq#yp2l_+Jpk(UhEJtV8`)tu zXUlk0a_<~ZP7WNHBb2kXoUkeWXK{Z_uD;jdUCTkCIh7v&j-#KK8O+|I<*DwiC_zM$vgAti;nHm<7FI}|#OF?q5wC(tZ2XbW{47b)t%xMmpc^FKv ze^CfLM+&Cm2B4g<#2FfocyPU(w&5ZMM^jV7(yLp<&iU&nuxgzCCHvZnSR^7MiY-*6 zGX$GC^div3E2*vi{g2(!jmi((HTKf8~-CzMXD3&_8pS2FI(ibK`3SkX`2P-UW_5m+e7v=EZbw%^r!(Vs}0D4DUuEu5TPha9`wua 
zkXvr!N-K$7p2n-ITR|+qs8w8qt4qJ)HO1BjY1~O^`Kx?w-zVR{pG9!G+71Z{`2OP4 z98}~+(38H9n&8ypcxS*M>o!eO1A|4o#?=tsKpM2rO8C?7@6N72PV0p@w1!#yyFA$? zn4PPlqOZMR$R!UbQC2oe#Q73c4{PF?nTvb(EqgSQBji3peOIiphX`yn1lzMYdZ;;)>c!vrJ84Bi zvr~%#Wp>m8y}LRh|KQF_v+_n8Z2X~rcX*>OCF0(QFm~o@y{_ufPL%h-cUJ$~Y`D`; zO41Of3CCU0S|a9|LRRc!?#~a|Dpln=bHxod7vHw~!_F&A+*`%xI{n_~*3|-D`~=q_ z(-fWvwrUBikS&A3=snvvD}OjOH>?wvorP_6hK&w}T9XZZ_ohq5vGAJ7t=MLM0%(F4 zG`sAaoU=|ERjrOvm>9CudQzim=cm7KtCw&xKJ40D8>Ps$qj}@b8OXf$j;%TABQWvm z7uQwksH?fhJpQrn5os4(nn38i&H=f)Z$vfS>pR^qcKcoR@Oh&lRh#BPeB*TheSRhN z2)&fwG*%^Rigw+k=TX1~nJfa$-aw=FQ7FCcC^ETT?0hU=%S^|ha5MhK%qu-Urx%|r zb3F{Hi&rb08Il8D7&GpT(ZT6A>P{9>4hIIz7UGja*hQsoX5*GvYDB-r-3{I~T;X&< zi725~xbrN+IhMoP>Ac<$^Q6b`^+hlvbI|G5|ak>P$2Bp-p z9FXc!bC@L{bBZ=v7QfO^M_S5$IPgQ?T|BXXvH( z9=g$_C`Byj_7KxqiJVPORV5rKc!;#!j~wR@)YD4D+V(!=a?ZVRL(hya%5}-YB~i?F?-eu38V{FcuyiM_o$PyYBSNpe$eu^yAxbAW zZJoeqfn4xTW#BTtX#f5u)n)5ztkBGH*DC3!j9`vzViN-qpc|%D7H`%Mh%PX-Y5i zuGziv${!6UQL4GGvh#;Ny?Byxs$aFrml;F!BYzb06SNjwh2k*~f|%LzF*S+b7U7za!`-W~WOQ z_HFAVvF(A0NIPdD*5>A!NG>O=cw0CxZ!_Qv_eF2po;*f8#9pg)~ zVg7^Gkh4JVdW{IB5P28H(Z0119|%OK5ojDd68x);;rWD8@tiIP`aJe0 z0K@VGu=XV5E2&zJ`37n!=1b%}feoJb#B2+wUP2l2m;s>Wu=}lx#mZ64^ikb3>CPPE zzzNe+TlUvw&`Sq;N>_ah7oPz?^Z$-C0Ezqcxm!e%Mya(Vp#pT0N8sFF=XQ_yo~zFR zJwkRIVv*9nUNgt2tH;2K%sXT1b;~&h+-ttdX-fV#h^z(d@X>bmI1XsJX0;?TTM8t< z@{3iQ7rltGMnsd)2w`IhHaw1%X7Ci7h1hpL(fjGC>;-)!3{V0Iz%eua_;+%ObXS#L z$<=;lp|OGH@DDo#V$Y@^*AIp8nK3ba!7r^w)Q`Z|Kdgd2fKG;@P#u9)ce{>0U4o2f zrryGmrURqJNKf$p`A|q6QG&MnEK4)Az3w5G61@V0A9o{#0r%oqm^hXe0TEHP%nIk~PNaf>dD}NieT<8EF=teYbVX2`H9=|h%3wPmp5@5QaQ+K@p zgj5>o)xbc#awOVG>2-}BY~Pb@VH1+cFV@xLf_uDr!9(5$|Of4bGBI4VFK6%9rfcT7FkZvyZJ z7ycyy{%_^YB1w1O$MR?v;VQ$SOe~ugit;OGfaJAa8v-iNI`=9z)^xQmcMS_;U=$=D z0TA@Rmkrw2flmRawvI3F{Q%qlS-3)Cu{xuiUU_agCQ&=9M*nwjJm*bLC~(tYya?*Z z-|`m`fApQLxb`g>D(oU{(NLg6vdC-mwA6z=27^sHL}}g5A(#`vSI9F4#o)QWD})Z# zY*5LQ5s0=w3mGND@_$E}$kq6(!DDbLyMC&N8vV9hLvVGx2yF$Ko)(hd~858Tm@|A-%m!k@@GH-xXu8f0KCQk zWIOZkN+qE;4w9I!gu&4QnPHj&%iWVCkcbH|hPzStrH>%#-Hks3mGT;2dEGBK-TilQ 
z4qwa_JoMkwgaht2GGdrvp^>2=n%c_mh+3)(PLp}O=PnHy604>Y*qHpV6`*o#$Y667 zp!EgXEG(Bl(-0VMf#_Hf8rN$W$K)Y+cHO`;FzxX0DoIm5PZ}sO%0LoXvUF+uU4f$e zYQO`4csl>n6vY0GOi(G7mIowlqZ6WEph7FNpZx|rC-wdxyzU&`zKA-v1(_*#Aa3OT zxOpipdm~IP*2kheVJKz_xb>52aE%d`agG+Cg*TvE|JHZm^o!l?ua^KRsZ1A(^rdyV z&j1hlPton*UPXkbyW{)6y%%Q7h$=J!=J0f)>2W#y4Z-}gU`+vKwdx4o;HNiDHp^dgH7Iq#EC0hqbLKr@A+~RuSD`c#Ae1= z`H!y_rVU3SFBsaryUv2 z8HdxOy;n=r~Rg-7#6EU9$@#4di5 zqdU+5Qz>{QZj|1e7xZ7HL)7_n0&^B7(rK4aTxh4i2yX}?QrW)X^d{&J2)n*m7!XRG zTH{#$O51hS$(3gS8mRtlD0;9OaC@KG*m`opVG_HLp}l)oyXH9ZWITFv%qk3xrDi`4U`&;oFzq0=4@_#}q*H4?TcDJ}-DO&mX#k`t%HzSQ+dK zs?Q-9#qQujr9J;gWP?u|C{lV0V*eXWp!i`F3QbzW%;uRhZU8*&81jNcHdJ%Q9a!Xl z(?Q^j;+WVm4nvh}q(t2SX~a8USjx)!?>hOtE3!)Zrd1pJ)xLgquZJI2XMPg`4}O6A z9=$G8*$3oVcnx#tYVuwF5W*-7L&cC?v(bM5xB7gyp6&P1T=g3+EbFrfly3Y4(45gb z`;mg+CKO=n>%qu&i;USJ-@>>FQTpfoee&QO{7B!`-v?fceJSb?)uioL^bJP2f}t0j zpwoV48(`#L`V7jMyX7KaYYZF}F?9=&w{_9mru#l3j9_K~fl0jWilplfd{Qfk4LC)9 zAa=`jtlt06mp)x5RFoq?Y1K*MO1vIpAh>MS3BqY++$FTs2E&l>Ws5HI(tCdIzrP8M z^u9jxx6e$c$MfIYhDf`R=MC^;Whst00P=ATjymMDcf+_PNfIj&fXDzmgF^)tK@zi& zit9H5eu*efw`{LEfvzClsau5d1epF%5XOr(&~X2asJ^y)kjsRG1SkP*kvFu7}Y%8X8`_ z2|@RSHYgpu5Cy(QB3K6cFM(v^-L2$*Z7kVbcxDn&{A82;d#)j5NL22mo-GBa;r*Hu@ndVtgqZ zdy1<$7ABsWx$xniHlaIVRvjj$-h&lS$0sUNe=ofB_hWnbV*5N|C>e(RvOp+eV~EnH zvmn_|g=JWnzpFG?-pkZkr^zMU{ip$=MFW1I17Dq5M9dJskX(v)!XrF23drQ(66|t+ zzQUDiXBG?ps$~egf2$GuKns3wC7c0a#yYN^=N38<-a-aXpoz1hR zU;-WsjzW1cv2XDcW|6gZz6>2|Ad}W>n9KXt<(OZ{9+04#A`%2f4D3z+-XA z8>#~Dpz_VqhkP^Dh>;^owO1WSS=zWbZ{n4TIA##tju+Txc1E;RaCQ1RMJ<=_52PSb z#;it^vo-s9&v@QOGB)mTi5q?}Se(jha2Q@r%Cc27_S&LbSINqU5c?SK^PZb_qS&=mU z{HxQ5TV-HTp5ul6V47VVXw9AFZ>WX*r{{E4NEL36gn2R4?ZiZ`bSv_ImP zmlv*x^Pm>2wn~?fgoCxf+v=is@@y*a_Z!Zqx}y~z@VT{~*&mD0j-$T&dv~Djs&NaQ z?|wA7^R93xU-WaP9k+-l!aY_En?Z=r(@2Qc*om>`-MOe;DeYJE?YYSQ>-g-wI~3tRE6378Yw6l#R4>6S%gA?uJhPeXL2s9enkiBhXQu8HP})ax0H2? 
zZKx{m?Z>x;j!XKsi;jH1FkPsw4*$;TK%wbIzjTHE(-d`dxZWF)v)PLlaj9#g<~aWz zuaEaF35@(u^1bBY&2jBY@4NS%#INY()MAm{0km$J5oyW&WR}lqW`Yqq@xrQ#9=;LB6qLTu4grpEtm-YTPSLG8S5ge}%ihnI z03E~nGO@NRvC;x~-?s=4gv(Lcn&av@b8}hqmo7c4e|SBkYTkO|uWF!jd5ma0OJTan?#Tp2r=e7=hgzVIt+drqtSrAB@sjLx5M zXEmCF1>SV2rmT8KsIZt=6>9L#@IC01>&oebNq)Le z#B}03E0FZzWag{-=&J^<4R#k5F+DiX2nSDdpUp5y7}aoNrECldv(69Wt1O~!O%_Kp)yZDeS1KO5XRIq^D=$HcS5ox_ zkan--fzX_%?+M`&vw(U>q{nKNd5G=l2;1rnT; z|FaWoJ|QStk8(Q0on%r^a`W2>Cu3^N2}yNrXdjN=d8iEq$yVf109huHP2lvv?om#$ z^%t$#(Pm0=>g4#8pSwHY{9BMP&6wt4j8mWda9KfRTRqI8faBPMPw(;;!VFmZZnB;E zHM^qCpOQngBe$4Z?qFxQ8ckA_I_H6(+0-chgheaN-}dU~L&22Pie^MOOk+X3`=Q zy@Ta}8&2chGKBB%mDaKfWQ(>K*93y>_Wq1&iW`QWLPZ0|mQAX%D=Xw43k6qt5Cd(^?Kx%V5dI6mWN;Y9^`m^6TD zX(C4qJU;8Gv+(3=H6g0-H3abI^ z-HoWKZ2UmOTH}vh)pPFEt@jBzo+o~dR+j4eEH73z{CM?}LmIlSm@|IL>@Z1HL$+_f zA{CwIA^vSJQXA~r1;Jko=Qt6oq%*)l@EWosg2ID#yz&SP@<#VX=phPugPP5fcd$AA zZ!w=li%8hvNZf~yI*DTumo(T2MlmSSeTB&;=xaP#315A9j-NNAv(=cskj}EHZ6D7@ zDFFKM1e^|h75K<6X{?Szp8Akew%Z{4F2L{1#)zxSi(C>bWtv-vp-;6(I@fNJ^%Rx= zZicYEb$*D)37x3a8qh9U^3Oevo{D}R{Q3!u=*Bk%^W2et9B0fYh(r02{20I=B!*Rh ze21(;L}C-vWWOH42+8}OBsWKaokD<*#JIewsD^~7fBg{w>Dz)1kJ&>koYbQqtq(Nf zikB#1!$*|fjh&sKgXn;8ImM|NP-t$7BwTCVQ4Pz&%I%s~%jJg$3`6aW6;GoBBSvd& z{q(z%vx3l|=IP2d$2Vuwm4I79g#p#p2bCa1H@H8#XC~0dDY}S+=0`ryk$-+e=en}^ zjt+79@(rty0VaP&UJuXX|kSgmk<0-yP-=ZA4?3_MCz6!xb%&+4zBkU{ni?t#x)U7;Zz^?9D3|V74wxydhmNHiRhjLa zn#ZRY>r6k7((%EkG}T~s8u?@7isUHg;`*GSe=QfG=yQ5~>XX|t&&5;7us)-|1U^NH zHDYWd^@{~FDLJ`Zae2h_W`v`v>G$_3L_bx3Gmj;O@yj_6RIL{YFTU&3V{9io$q(Ad zn>)zII>=wsw;jY;TmPX-mnfTcrs-!Ih~WFA%M5We3GJ+}0r(HqIz+{aJ{J9>XDDrd zhCvX1%RWk!ScO$=1LKHSOd7A;xLc{=>~j1=#*c?R7b#aA@lAi}yxp!iS&j{l#%SsE zVcbqIv>m?1^&5Po&qe{xkCll>1O21EwZ{w$MG|4@ceRt+;xPiZw;su~91k6oJfa;^CxO3w)fyLU zW2a(()UG=0Ch^mLd(7LsJ>7oRNRD>29ovCcMR`HPCm7f}=lW}v8HSunVCY!DteGWx z4C`(N&BJpGm6Kl^w|0!14FLTecuE{%x5{qcMbYb(YKh(Lj8%G;a-{A_~LLXS50`2=%kD(mdwbB+X06p4G1A zpleW9Zya~8+^a=P4)*v(na+~hnPf4GnoJFDYxa?0#a&v~1$7r(;0h8-{=hZ^8Ti)l 
zSI11JfBLQ_LVJ5zBl`~hR)kJr?EQgm?p*iuud@N4A_v`6iKxuQfr9Ik zjHa(eCiTt6jrjV9sIr6SVyuf_@+pO{i(!p7AEKt2O}^-B@dWbuUp|9$72B%Ej{&AQ z?Cw@NQ&ZiVQ0!@@$eLbo&1S|G4G|tR{5Ti{%9E$Mn`4j;^h+QLqX#($zLI9DQp#<0 z;2%9#IDVmhfh?xjpJL$T_q@?7>|eU`Hx9jxgA>W?v#Nck;I-(0S6ZT5==W@-)*3&b z@e&uoK@P#B4&D5%(-tDK2}#~7zDeyPPEEuYt8r24hd2uYKU^zqftuZGVI=-q?X?0~ zq0L5o4l?4p8E%CNDAco=D$q7bdGDMk^CR@=#9rmSt^=jP*06g)4UPk$aj{2vrD>xc zhF;Ww9%$owT~G@JA6|M;9h&66ByqDkjF#T6o*{n@qUhNE+Yn32TI%pTi~bC~E<>30 zUQ+&?)jwBmS~aF1!*$5KD7{ypn#s%HNoVr1i7sBAUfGLE>=jDdw2%Lp+y~4!Lyzzw zKeKAj`sD|g9bR=-RIz6+ewo_lI9#@HfWQoC-Sy~np^ASq1#qub`@ zmr0>ei%|IyMR0fy4*oY~-6R=CTfH;Ynrfv2_KJ;YkoJiV*9KSw`_Fs9p15sh{CWM? zte)silH6EleL9iV=sjDz3j*F05P%b^`N`+(vhbFX^(e|i&yfFBQ*TIi(e&Dh-_(h4 zdVM}Hs9_7;q)iuH5jp!I^{GXsYrdM8ftmL4O7wn>c34Ry^n(GHK;Z<)=iJH=sxAtx zLEJM6_or(BEkx3uG5E<@d(QH_cup5t)vxz=W#I|75C6Hh6VkyM|9kp_wdg72n&Z9; zaq>rBOKeI#BDK-8wGDkYPz#7Fv}A6Zo^{o2^_)T~e|mw=KfoVk-7y(FwDe-*DgJ5j z_Ve!~0F+nqD)qqzX!3=tA-f`H9yrkarjWumH{3VE7IEcBp{f*!>@k(;8;a1zqY>?f z0UW4bkeF8;0;uEG-xJ_C2@hS}b^ z0IKPxKmzy?5F*K{xFfuiA7eupO!#xd(uCYu|22bhFlmK-`;X@2+fI+;!!jA^hL1{~q%H4LEpR{=ctT#=7JIrflp?$-Pw(V0Knc KxEf3ERZ4wRh|w zF(us#f0IM;v<`mR>4-b0u;X{Rp|ckDP8fd1bZ zJ~VByW5*^Dt0=GIY&8BoAXZ1I_1XF)i%aItJ&IWJXQV-T!j?2g>Vs0)f+R;0*g4f7 z1nt}@Jo12HcU-A2+XE6Mn%v^~R<{?EKT|B}{U82(m^AY4l7RiN`r^+oT)z!-$2Rxl zG1%?@^cy#0H_!afA33%E{sW%(WFXt0AHwT+V`={WK9yWOMdj}YE}Bp&{r!E$t|;!m zzsDYlQ;huksOP1apufMz?fLIl2>t)B_#7?|MD!=%QMcBhNe(y>ZETsV|^H zO|Xb2+y$9|MiV+vTqtXtl+&-)IBCh5PwZ#k-TYoY{OA_5k;nX3cWta~Yl5U>c_p1O z(?-+gvYhGW`Xa@bG>!cw8!I<-vrOdGAFINXtQoh=!MZ+#;Z*r!`TJ&eqPg>JR|M4` zp5!pr!5tNK9Ih9#ZhhwLKABZ<$h0HN)HL_&rTvd=^YolYp9IWQa=>~-@yI$;8E4_^ z{SEJYBk~8f5Y=L*S}$k~1*=*=EnZ*fva97u|2Xi$vMI^*i-vgUrjdE5sF_2hWCYrX zwC#IST9U4h+)cj_zxthGWoSX&EuXfq;i&s)x z<SeMN96W+hoyQJ(FDX>M+xmRP1sJzVlhK=L%1P*TypOBUU&tXS(WWz=z-O`8KK1 z_J?jcT}GArbR}J#2ESHDav(L=FnZJB`iImf-jAjXQE|ULbWG}_i}uFG>f~Az^1fG? 
z?QgsWKV&-fqs#(J8OS|GP_!a)|fP93o>n>Ru?yYv73p5c*XuR~`84higk{W`DH zx-XC`=@VUa>t5JQufFAxWQz6G#qjFIUoSG5sfm>s^4a5wF2fJ|z+f9T4d5+XRR3)3 zaKj_IL$XUhaxml#m>`{_u=PwgiPK|wnF!gO_T(dUE6j3 z*O9z$*B)gJ$KR;hwDe4!EM7g~#ek0a;Pz_v!alnfU~1XJrK#Zg$;h`z*lUtJ7QTJX zG%jheZ@4=1{k_xvqqhp}*EV~K+yZt}NDJA38BcG%>$K)mTSgCiu@!@H2N{LU6cbF| z$01l!=X-KplPw|R*4Yo>Y4=#RGfD#I&E~n1ckd+ zyY1B-CVG;l(cXVSm{}eOmzz=I$Th>LaaUGLb2C+=a6l}Yn){+v-C{J)q3yXm6l?d* zBDS)`vWM`dQHn3@{MMs{{=DeKyIy9oZo7({AHR;>?WXjET?5y6rE70Q-T9;Jeg$kO zv)5w%sV4rr2_sk<$S6dD&UXm46%Yq>7u=9nIGsWTh|YcP?_b~D>V+qbHb1TCuMD0= zD+5ow+2PnEGa%dzb}4}$gv|~uaf8z;=X_+7Q;w-cNehh7a7BjwYl4XpJkXkcd*dEB z$gQu;^X-34rEB>1x;EI}r69$9QPp=Rdt{g^uGnNNIge*otCn~{2(P#X=?tEh?6tu_ z{o=;!f@=oD5oZsPfd^jrb`qH(@1JEdyN*5?@_E6joto!2cHJxbfGqtBrX|DBZ6eonV{I9-r2Z4|LEoq0vQo?+ z>Ti9NK39-&A>ChQvHQ)qM6X@N!3r?9StT{dZTE$or+(!4swE|$Q<~e{PQ3E<#H`uI z)3JLlXBMyg>iA%Y8>>#8&cLu>De$}ni#=|F<8PW|AxqsXSzmB(#w!KW`dztMz@d4f zY3AH9CjxsbTAu5lhs~XDs#`N~`g#%UN9jHaAWizqHZ8 zo{i#m9((mp725`WC0w}FAH2+ltZ+l{k8QggTIetj2X8rY*?@AmG4YWgZ$`8!ZuuOu z3x-pBWpR?FDuf|Qkji6iy1K7)HwFLcM)A(;PwlFY-5m^$VSVV@Alm-1-ST(pR@s7j ziItK+2M+ZJW<0mu$fE3P-H8wUYcq9%Tl@I%lx3Am$j$sde<85Jwf5$>R$t1?_OTQi zPz^Pno~)-cY>YMb1oz?paJ2@%*8b0Wm#Oz2WJCp-X|l(Ho&@*>WAs*c()u@+v>SUJ zTYxYP@5B*nKm`?26ueVtysU?70)4S_!mYyTsy;|VwRF+25v?kJLJ(L3-64q|_uykn zbcQsZke;_HUpixV$}q{{xSWB)TfN175IGzF{h*fII*tFKWBRlPMN{SbA=!xzzQ`;l zuV(RDbHEbbbEcpm{6ERZWp-FVyl)P2KT8xJ-nv4(*PU89jLB-N^|Ds}vlwvV*hh9N zkQMer-3edVs@xDS%9r3iQ)5xR3=xY8`myO(n0_gI`$taVY?k0CArp?2EwF$3P z9%@$p6AbGEJbb?OLIpVXBSB6$V)0u>`TaqBwRrAFH{V;)pT1*L{0tVAs=U?y>O758 zDRvxbB>wmX(T(X^Lwxksr{ugm^TBaohfr+&_lu+;s+@(gN44xL_!X)=Kf&2l%(ojG z$0&ZYdDBpyZ$ALJ4-VpQG38Xl5$lVJ0iPH3ktd<&aTm^IgM?;mgFDM_R4CDX`-=uD zMj`(HyR(KaL(xy%7JtL~K1H%4%ZL(8yVMW6eCR^D=0OzDH~2DAAalud-|4coz!IQn z-EPg}BmQjrL~gr3EUtvc;20E#OHi!MR<@EyoGJMA{6)F-i&wNzPEd}#kGM1l0rkLL z$O>SikV7Smb-*xi>|3Y!T$`}`^XrStJo&D0jE@wph$<-wkSnL%^dTNrfOW?UvI&ep zge5nx3gJ;pv4IM{FY+8S)Dy7Xe_g5-C1C%fH+9Khqslub$eI|0CFHj0^Qk9D2pcQv 
zH|AK>m3u6He`i2t0Xg8`orggEp&U|=0r~4gEV9z>P(du4S?_@-pmNIK&Nqu?wibsC z=s<+-jCVk#NqfbHlG$Uf`QJ@#K3im5;t5+ST+KvX4Dk<6W0wonP=Bys7%~1j)PDju z*3?zGzejUz^#|_4|M4m1(4*ofY~KG&gU_Ry3TlLzN*yJFIUCOmxK047WIVOhPn0xD zjrktC)4}=@I%12RGhp~KHW0>_>z*BZdb^$R#!Kzab9vnkbyMJ!Bf;n!tp9XyL3r8v z@Cct55O1%>L5M2W{Tgw`X&$s^A*5^V4_V_D@6>t)EQz&?m}&~Mr;ie{^jd(8@7lm$(hYq{l6zl4am~Yui!BK57^@c0u^%Z<8?9y zX*ORA?kpb-res(Cm|Du0SxDtoHG?``unbFy$->$4J#+?#@0VdNI$*9ihnp4fZ4{*} z+TMJ5L!WCyjqQO`!}JHJzHJJ8vBLHWCWT<9rYN66{C0_xc5#Rkv(?|gS&v94OWpl# z(0Lbn#~04ysHyr37C@ybv(+@=?B*f#P(b12QryAIZYpF^OUP=HUHypDF?@eJkXT^^ zEplAzGlHTC&E7Err#9EOsm>djm8tT=4@*abuiPri9lzz$0MS^430#Zla2azfZa-U* zAT}skBP}Z_x=obh!7n$Njhi)i4%4~O=4@Zat|wZ_vVCvAXzn-mT(($SWy|}vb05>; z*r(aYuDjDJCo(}Z)CUoeZ}n&fo-5h?n9Ch2Yn7%DHj-R!l;{8Nk*Uju|FQOuO z@&sg=5NxXZ(a*Z?PzQu^tcBMs7)=0_T4D#7B!p0>)(6fMT$lb4-+DNTQ-l2bhr5Ck zx5ihgxissyYN-G5Sb&9e#M<{hJxX8JtfQic;vunJkW-FcUg6~ZRMMS2(vb=?7Nz%g z+CLQGBCR`_gW=H7l?|;r?gFUBt&iHwKi74O0s6@9{)*5dYB55oYLw0FBxIg<{HEn4 z$q+^C213PSJR6@Krcz%!IM#Sat&a9w_0$J2nSwceGA}KwoVS`*zo{6ze3Pdov2iri zJOv_B0VO4n3ucYgjtG#iL$GosMhcu*K3ffr)oIGi<^R<4-_pIEd2IXlL#X zJvI)nV^l}pbvvl>Xh%_TycEPTc%onvyC)snm~}8%H_SGkikm-Kb~iQ-4ncZv?gci# zNcm;%I1GojbMipg=@Xl|I?aFx9aF0&t`n#{>yz2R@iVNpD>!X%F3k?7c0;wYudHYN zeNnMw{8C$mfXZLu<4rQIga|<@wX4GC$jC@y7@#fqc{ik|<5tPK>Xs7+R zV(>IaZL;JOv6K-JtoG$IA+CDOBPQ#p_r>-5XmhDxX_RvkTz_TQ%LcwZ#N#dB{}aqP zd{e93|MA?3592u3Nxe7MqI=TwjY3j0FdRJ$v}k1rO%H}VRK_$ty*%G z?&$?sD@OF-AN$beg1iOk>^+3kfN;4D3AEEb34ppxJ27|*o0j{!M?M4R`c}@wzTAYp zE~Mw~5j0fzOZ5_s_Q~L$C7wWs$|ILQCa1TvYG(t$Q~B}GrY5_RN*axtoA#klZT@iS znNJSMtVKrgcl}EqK^bByu#xQlax)0e9a1F;N8P_Bh%0B3try%X*09!pI{enTpI<5f zDusP4yT300fDdk18tG_y`w7kX#%IB7cn-iE8y(0VNCzrdy;_OzH915e_&&Q~L@BiE z+H`*<(ZK*C-XhJh+|6r6gMe36IW@UnaQT7JI1vVql>?cDGpav}V)*rNs_%uYEtaPG z>~E`P4_%}1(x4<1xd`_4Zp@~b}a6Uv`5m4@js-^a^k(|3~y^oGehN*npe9GA)B!+_7}3(^UU-GunY%_stPm6 zmwUIpy7F!cvTYR!nY>pg6f;Kgj=m%$-WLI-vGT;PM8s|38FjtR1EJY3k6<5}4LChK zEX#3TzE#Keg*8LF0TSA zGqtGJ+0N)xrvayx`LW9jr)A}yI{jK*)pr<-JXba8KPeo2?}7z}lfeVOa2BtHS$`f% 
z8n7KG-3k)aOMAlhrQ_`OYmk4(ht9#p8jcYpm@?Zno;0 zsU+le!s4oo@{!?+lC$-vX5VpFd^)5YEY!)q-msNBD@9y;1)AVeUT|w zTRkP~b;wP5Rs59Wu#D;V;H3`&c^zg2+H93n25U)+gPVFyPs5~m8|xf~qA$lNKbdl* zx)M!uu4{?`TC^*w2Rz^~h%M%r3hVFk`;O5>S&Ux^vBQ0*w}@U4WZXo(%WS27H$)dY zYR{L{BZ(`@;i%|%^>DjPoQJUF2T=!JZtmCSkNXrem5fA*cb}5nelu`saz&PJrZ1|6 ze%x6$)ekQd8F3v;M%repL?3F8koO<1#Kk9(q&sOH))Q(%c7s*D*>V&$|oPl%K55gqdlAOcfCkfs(?ofW#>GGM&hE5m1 zO^zwtx-(oO0j-7ka@L%Y2-Si5ePyJvYwJ{aR*aNN4o#hVw(Og04bM->mrarl-+I#^ zCj1Q#MoYk$uhZ|zf`k{_7W%P1?a@ufbE&P-XXNAEh&Z1I$zP*?WGL|jmRoW$JbNEm zI+(AvN?y%jG5;L!co(Rr6T6h*C2V81k3y+bHu_7-M0PcEAJhkRbT{~38gs}KK$kVGu0%KmapwD#m|Dp71i`j`*jWJ0rI<$u zFP@GYt{lR;p2YhPrlDA`|EC)uSz@q_^m##&_~>+COHXUGq7^^>{OoYUe(`R*v;6jA z&{G`Ye5(JZGMFv^P@a!X_E4f$k~F7cqhgLmmpTxPPp^zVHGiD=!;LpEMp<--qXoQ^ z?%b8!yCEmclM>z?>6)&93#!2KGuDV|RiBKQ>8?z03mG+s~-T z)$jNB57m^SZx4!fm=x6X^OVKJ)a;%RQ5F=6J(#%+D2U(BFX;i0Ve@Qq9Cj^**f{_2 z-q*iD);54wJ3&^>w@8UyV(Zu>P3~z=FVw`hjLuDe8cBV zZ(sTiRepGHH581v&YMlrv+5G41)li@oJ?ruMV=?}Jf~}aXx|B+s33je9u{?u#X}?} zd*C!CBhD*;P}XG;Ygqn(lD!ZhP^sh&`uv9p*mf_75x_vEFaQ;2YL65c{Q zD@d8sk8|#<2Rv#xSVUDd+N1@aU41GWJUN;Ynzmbhr&OCh8l0aK?Yj<_bt>fuA@M3ekB;3RC3!YWiFxeH@Kl?=;cUwwLZ3Mpq1s;+CttkqBATw6EOHV)6ok zN~n--@~n?{qNEA87CdiQ`lHWoz zg{gOci6#E7^0kP=rmGRHY}oRJ0>JFeeb`x-n?Ky0T=JM%){ULp#BlzG5Yl2fIJvX| z-Tj&b-m*L2o|sC`evUVEM{T6UH^5T+-3$SYw;01C7-G6;IZU_sx`iBj*K-7JeX~ps zWxwGpGaRN$c4kA(EMx$YcN4MOp-K!`q85_x_0gYour#WElyjVMs-NE!@rHep90)lz zWBl&4l?dtTQSUKSVR5oDp5(o+%^7m|c-%Tl|Gp%d6{G}z6)fTVoi2ee3D!@V8Io<@ zBqX6!=V2>u86;6sU|c!^^ezuuFGB&*KN^K#EKYlRkFs7J1=uu-jN3;tl_HXI%rT7g z-xN-=YB5Kg?||(xm*m4^L#}=^4LD}je{cTURWdm)Zv`LD2C@pNY@4ulo<9D($cQnKaKYi<=B1D`xtKjQ} z`~hy&Y>R^QZ{|zh!CzMDy?dWO^pAYTsoE+jLS7`9YFUzkFTNNZrcWEcpvrlXk>_)g z&gp!*iY&x=99O^w3%h{_vRr~J`L@!DL;be|WZCNnA zs*WmbN9IW1A8}f^a$c>J!5K_tP^vcPT0@;+iNQkB4^f13w5X&@2j-kirnFj@(8AMj zdPfys7=CEFIxCaoC4!;khN7`^8@BWpmQ&;n{+QaO5v}+nUD_hn+0fMwbv4Uu6T9S% zc73$j=bGgOVHW;O9!R>d-*u9Dlyip$&d=e3V9^}zKAshYeyJc<4q4nu??E+YUo91b zh)*Vx_&o$-zs$o^4|4Pg17_>tr+?=U)w&4R-tSE}aZ55e{yEuGE5+MaG22~FUu)MQ 
zLI9*seABclS}qJJSJ~;Yz_(F%a`}e$Nvrzx$c-eD#z(h`_3{qj`tMsnqBS(E*~96j zMaCq{9(YVOC)-0JT}btTQ-6MD=2bmmY;#0`QcIrcx{Bu@wx)kkxh{&EZk+ z8`Pv8_?Bky+M)P`FyDO~X^oH`a3=Bhq<4<_b6&Uv<9npp`h8uZcSt7%lmTTldcf34HB_*&Ak%FOx(fOkCm=c4Gj_x`w$@&m}G zV2dI!AVspeHx$Cz9;z)Ur|fn~<1})UAoomNn@pKRAAs=tAGQ z8;>Q4`*w)r-h4?7$FRghV9xSKlHAPjp+D}Pjp703ku-#QJniL$?7|~M$+(;+#gtSb zLdUU@MvrFn)uog1yoG}7r&Yp+YL51t_Gm|_*V^x3(z#P5So^DF>>5F2-URN)lv7;Z z<)5MmJ9s&HRA^pZze%Yq7s@1=Gv|rX=56q}je0`|Oa2SJq@j>GWM1#@^2XVONVB@& z@pNmCNcK!#X}^JiwQ%QFav!Jqz{3!ecQ82?quY$|?spee9j4#AP!XRbQ;zQ=+)(R! zg~N?Mdm0whOGzd;JO-#bZuO6^pwLY$Jz4bO(aAssirYSMg|cNh&cU1EIIB~;3hQ^K zIMlC}xw8&bhlg84#l2zUxbPd8prD#CGb#?_VMw9j?B_^RPgx%i^?fEdjngp*BO8C{ z9bb3znnQLe5$SaX|q|`;>jH1uy-T}~i&KcmYx)}LskJ-Vq+QK!bfaQrhFtDoD z72rk3Wtpj^nj(bb6bx223%%M5`T(`wTZ|ftRL>>o4h^-bxZ8(BmSV}$-HQ|aJ(Xz| zcXtB99@Axs!}5A!PAyWh*fX!k?RNM3Ax-Jd7Ed zM4E%$=a}pj)f*#xYq_3tW!Qt>A_l(`0xH3GHyBV)U=J@&pedym?jpLV-BYWWc`mcwm^cfaTJ zKlSX0ooTu19)JnN`bM6q_1bcivTZkEk4xMYM&d%ouiD^8g;jsvm^w<5My4>r@j)fx z{7v3yoY_e`WwF5)?!8new4|pMoo8ye%y;>fu$Lw`xj;`U#K^8Ch9PO=X|Lp_9A16= zQpd=em(@`9>bWsH@})G5TS4d9Ty>3hrIGfXy;9 z3;4(tm}pii%W>>F6stT}&M7x1fEI{|sWoz&IC5@mFlbWEfsLgZpCv$_tK8x)4vT;vMG1O`552>gXLflPA6jg^)2ltFAy71r+ z^YD~TD?d&??#^h4R~Dz?y2f=+3oAiIh1oMhNm0l(_DQUvPG_1iqz(11Su(Lbu@*rb zZ+T7i@Vd|-`wOM`lI-d7S0>9zyxSwmS-&JBp!M6NK2Dg}%*2DvEf_RuKKN-V#%zgc||o^ZBVje}c%C4x!ca zPkmf3d?pM3n60e)vXKjiv7+@!l7feI_sI&d_p*8;pZ(~yQrs!P!J?w6!#lEv>KMt- z;J~02PSKG~vY@Cl%a9NT3`Ctwzo7;SYS7br`}L|to?}A!`$m3xfGp)lj)PVRjp-ce zY~lI$!I7evQ%g*hB&3^K55<+A2=V+k`no^yNdjTyS@vWsJ@{u>*_?rRH9)zZH# z`vqDgoxB6iws>m7My=YbA)F5@$32yvh&7e202hUp!QWN{6{0o^h6~7Hojif%Mr=fO z1Fy&AajTCDSBvTJr;ZVD7Xr!Q<(vo%$WkHzxw3) zXrC{IhuDuehel~i@q{TKk2CB|9VV$9i>HyTz7g^1u@0@{Qz2Eh=q#W?UiC|JE_OR= zEdlLscBZ(+jq0uC`j}dWh&cWLCI*N1p*(?!PP<~8xT=JSFe zYfDGOe6j}SqxI@#*p;!74QEIk&UO7~o&0R2LmtONi-|68G!>4Wb zcU^$}to>tJt+VMEpoKHUBI^-53my8f!;{o_Kp6gtHT|`pKfo4}y*^n&LM^EF{McU| zn;w-P)m3;BKHmRN3j$DW)={?(=h}#M;smb6x}UhNH3o(ry2jbgia; 
zXpN4+ny3oElaAeg0n#mKn!nNy^B4R~d!&jb0PI>_)3wFl{y~e!p$j^QBrFGBFZ}*~ zS{gb_&`fj+ao#8aa6%1BfIKRPe)`90xD+h6t zk!lJPpox7zo!0)lv>Ta4w0AnK(&-HhK}s|5i3Xc%{v{;)L-`;boY#;E{ORUCPr6q17xNMr(M0iXJ#l<*f8F9(_rsSHd}8k8=UY^0Ex`#c5f9tuVhmcQg|hzT#X z2DQNi6e)2cR=BN)gLEKd2$@=Af+W1we=Ok9>AN=**u8)74ta8FCd8rjvrEFpwL2%U z-03jw1BR=H4v8fG(rq1}(KBnh?epT8)a?f{Z5&@|Yv<<2znx+pfj4!I0~(?D0sQ!1 z`9cMf#o2wNJooX(ulO6UK{E1O{z95M84?y&OIEJ}9}m3ze>W9`)BXrY-g_A^l6fJ- zbAVBHgZ!BqsqJ80ps3o?H~sS>t8hFOP%?qPAWa&g0cPOf?VDs44Mw}(-I8v^#^b7x zTnzVR8!jL4r(j{e|3V~+8#nPkL=2YTx1VnKpAG%(ORY(I<7Kw;fh<|j9rc05*3m__ z`U9A+W(u_8G(C%$jjnE?v;9ABKA;@Rcm#wYhj+h3wJk?6=4%+rQ#PO z*f->f&}qm1yKkU97M}a{m2a;uY7?_=|7>xO91^-!3upCcQy0fK2NA)q*kiTI-2GDo zt0}DYpYJ?+*1;TzscbZeu$&p|2wb_ptjEg79b@#1P!M+wD0pG+RY3MmTPs=b zM)GHbr&pV=-P?VL6bW7ad;G0i;&@ZgpdJJ*;1%bQ#JlBy`os$2(&7{gfaI-!ZA^lv z0@{TJ0=9t0gd6g>u?4{FJ`O~iyMsjD6m)F=YoXZJBAaEPBRV?Vl$;Eo2T=fV$C$vo zZYo*mT#q&C3i$Ca=xKro!loN!2i)brus$-fDrnIL5yO<77ovr5&iI_g=&Y%{e%7XAAn}_ zwfd6wPcX7Q5LKKEQQS!Ky#!feR*9|F=R0T-iP+Jf%^O=R z9M;+*2|o21L><@90o%pS{0=gmCDSXUCjUciTi*whpgshV7nM1F zH^0BHJpKzAk05O*!zoTw*6-C7gu#X>u#k~pMRm<0Km;p?oDGL@ddTzRQZHSD(TQp4 z(rmSi5~SMvuMb&2MXJuvp!TPiDEi^EzZ?`F1VO6rkoSmzqfH&Q$AFau^xtwL zHZyy!4b4cLOF-I?Wi$K!Wx#9{R)BL5GsZ{*Nn#2}mtgeF`|+H*a!>%{(UQ%=Qk~lL zxR^@-!7^9R`p>9MXBH@oIp-i{?A1aPG*YI!Mlq{u-c^=N@v zz;pbO82Q90vyTr@ffBpE0jEg;Y=u?^p&f~6WI=nJV}Z#v0yNl}hwMH0X@F&t3+F$3 zUcvpdl9Z;_h4M_%AYQE;ri^N1TwoTc9AwnAUS9RW)xLtoODW@vm4PEXLrzHD282bd z0DH!K3X^fbp=15Rg@C>L*FDgu|?LHA7TEdr&l@HAf4)8x-G5x zAN5mbK4j>aP)-I(hpS7#)2WuEw>H7A%wFPGKSc=HcD`M59M&v=(0De}s7RG8ZXdJr z@#3V^y!*}9cqnVL8=+hTZ`Sw&lhL2LrjGLSfs~VhYe>oyZ<1v66E!Pq-ywC;$%26` z&59(e{wiy{9)pNLHw)}5hHA;DO|UI39`*O(b^p5Y^PS1=GuRGL?x=aUg1SnS6Fc>h zTI(Z~+**PKN?uE|03!<{iP1)aV(nk~a8XWb+c-pdG6+)w$5zoGz$Rc6|M#gRI4#K` zLe@g(jp{SM5P40_7Y9;up1>S5P}R&iXH){luYiQ)P2vy_ry$)73G!t`UxBGNM4CVa z?zz6=2R|+NPlZcaAAE*~Wmg8`u28kMgsPY*Em;O6I$N3rnGcU9=O&UyJvkc?vnvx+ zJMLQo$yh>b?K%|oeK7MOaIB6n{{f!8tzG}C=`6d7^nPAeVNt;+-g9%RsifJ!(8{zW zza9uNyo+~9Ip+b)24 
zes^jYg2_7}mzhC0h0S8v86?0ZPdt4$Os)(1gL3%lnfy%5RPIfKuuSFn17|t0rSr+|eu$SKA@v0M88- zBss`U9sgP8S?*pZD7s;OCK19LQp^FP7^N9bFU)3>T}ewc8WSv>QrI|&?|dM)vBVxG zjl~im639VedGMdsD74_uftsChg78KGS@TW@qM|l$fd(99Y=P(*MrdKB3lU<$*qv+ZJ1fQz>S8%vgkwEF1Xqe-w^qTEOLM)`1Tvk)DW``V3 za}%`QEz^*$7wx`Yy!`KG`3I&!Xj+osBA5RPXcA{*RA~u_Iwo40C_n~21;xpT*NKdS zdNALZ577e54v9aSucMOxZ$B!6iasO=dc4)P=-+sCWHWJCRS(821`K!zyYQ`dAnx7j zgNOlhSO~(igU0dSZf+EceNr@A$Dy$?;Yj|2+~{ zR79cC3Ii%XoDssk0mG4-k&A?q0H%tNY~M6gw2d@T%849JJ^nYH={$`QxIzmh`NepO zN}LgH!WBd+x;dQQc;``XMTB*2sI@Zmn_ptq7Lay8@)ENL2#XphRRLK^BnxwZ+l8I>$h z>{HOW`=LP*^<-HYF`xc=`?4`OiObm@P zL1DM0PzThK42jQeK;gib3j;2>FSCDVpkpM)cvkgJF%~k1wx>@U6>?2BNM)U{N?Z~Y zc*V6Tkk>z=QkPny5!zlacV%N433aSrTCSkcnFsQpz@)2p3?euH##t-isYg)un1a>% zLQE6fWV|9HJHQ&igj0Pi)Lck}#KRYS!=wjE4s)ta%dfA$vM|-rrtI+<~i9Dcp z-?Zjyrf;TI<-Y33PZ|jwbI-P)Xgcb6kXW@YpM7M~Eof0TV`x#may6qhXp!yh?aAoh zkMb=uCX0E#(|Ld&X4{ukd$26t>)!96r82M);imMP!Kz!%fu&~s>ifmP68?%CQ!9ny zV-1!gtDtp7vI}c%>B89vwQtcdMkb3;-K`-!cG(mCzferhP-9CRHVyi8Fz#jI1{lN9YVd${*R_?Ayf`UzOLtND8 z{vDW9j@@vBdZ23Y@w6xSNGNRiXEddeulbU1>>X6wRZheSn42Ny!30fp{CdU&%ti3o zAQnQe&db>fl~BS?IVV5&aQ z+#cAP8|xT?QpxB=hY_?KK@*|kJ-V-(PZt*}_Y2)3x%2Br{bEi^l{6)vHELIiCZH>L z@h)=8;F~h;wnZm|qYj%17<96JC(ui)@pWc0lU1Pi+@-Xk)vJaAz3=#P*OsoU)P;X2 z9IC!)C?L0w=89+IW1^#)XE9blPMjE%To)uDw@IuL{ZL%B{03*PbG}ES_mVYai*kq1 zSw|v*l%ho&hoU88&H3o$i+r+R*C_1owEQvhd&uLC!v#C zOPcbWLJJxszzxJJB4EhIAe|XibOW8yh$Au^h*v%eBOf*fBl$f?x}XW8L;X%iKC^?N z`UDKJgoTcAgocrbynZ6`IvwiS6E_SjL4DHZ+@LW5MORpRUn|NN&bDdxiD?k3oxq{e z?2n}zLJY~aw@tTCEJ5&P=nc8h)g8b0VaBv&aD2d$YpL zAF_kMHH9v6{!Gerrl|JNcz<`GIKb`6?aZnou0{3kMTjG^iySXcB_~gYdqmFe4y2`$ z@guIPVxLOA6>l&1Owe@T^XU^&8AqHo5NP zZaxO-)dpy2{#=x~ZL-StKDnG}M6B{m)=!r8K;suoBeyz0DT3x2X{jGTQnt|R0`S_5 z{d3gDqfVF>zhDhTv2BL1pklTypsz$RXd`mG((_;}fY2ERR&CIQlC>=)9HZBfOZA&_ zxv;mnJ<9mE9cZrfNS<_=E>Df_Ps>o-O5dU9Z_RSv6vu3SfBtsujY#U(+=&@m&AO6D zssfu~vmVDm6PVso96jk(d8zxd)pD^q-|xxFfyW&|)hg5Uwc&#LQVT)VSFJ0}#f~+( z@G;L4Skyxu6|8sfJ@6pZQHfaP;@bwDQBIYlf0e=cjQNSKcQ5Tv@QO(Zeje>OGvFSr 
zKP?@jFI8>ds21vYTp*G9i%U+2gcog7#!u--jYV(!B_$JISp8gUD3H!YHHV{THt~l1 zG-H`4tD#!UJcps%(yi$gg7FudiOlR<4$Q}!6;1|RWqhhf@ArlF+^8zPm3}LZUL#z2 z{Ep3=Zx`*)CvWLHUbhB$hpO%jpZj1|%IU&pYRmc=5J?X8w>;QxdwN zCiOHGymcb7sCcs64?GhJHG7!-U8dBF6kW1jPwxEM<{-dwqy;BnOtJN)`YFV*^UNJh zyI1D-%v?BJBB5$;v3p5Gc}-}AE0o+U^)Pz?y%Is0=j1M)P!%&@93xSeDKbu(mG<0u zVx*rHi76w4F4J_Za-y*HE8Rl`hs!QJpQCwpt4h9luj-8x;}hJcYIQm~lpk zmQgL2$8R&9r29YMrr-dgp$W&Mo<}b>(DEM^Nuw=Ft;*{hk*x zuJ#70E)sXR2&)(!h@q=nI&x)NH{-=yWAf)C-t#mDf_pnIM9A#vyr5R?E=7^*c$RnP zCI0;}jfp&mlVcCo@1Cus@~*_)b0I4=D<~->7#pQ!>Yg|eov5~RO;vu4=2O$iJg?Cz zpW)c-4^P=9y7ubZ>w~VfxnhPTWW&^-v>e@ryw}VQC-3mIHxk@6JKr!Cy=$pnsk|Fg ze?j4R5Q{%^9>Mf0_6Y5!-X7>WWKfIn;)AMjg(#%VB z=BCs!b=9W9dG(uG)eoU^`H(ie-_*8_|3C$0FRKAhC%_uj>s!y$3TRU=TK~a)$yr+td%W&z+uN2)vbGFsGLCpn64 zL7}%ZDc5H74nPI$UKVH4bmH-kko>83Q$pbyEU*Xj>{XFTnGb#3V{K-i9eXCAo(T6Q zXC!-0leGl(n;E;EJZh=k@1=`lUd7a@O?_K>>3&W2bk}{`iUqUPY?*JxHn;L*diV71 z?>*9cw3n-wulH21+$zZ`=_OlmPcfX5=iyYkw#k0IazjNZxRjQ9H}yE% zl+xjl$Mj{wyDaA1IqoS7g+691x=L0^40IrCjbvhfc1|e51XHH7t6@1?;iTUgfimna z*TT#8xPB2zdtyz%7vGA?K>sh83M+>`RcdlNE-K9^(vhfr>Dc>YkA&iyG}u4IaUM#e zbWd%LiZj84*7$x?7TI+{#C6}7j+>~zv`OIW@UN`8BZ-u^>{D2+VR&$M^OuKJ3r$_9 zoV%jxOM8VlXxTIK(UM4VU9~)~GDi07#&XNZiJQUhZb#l(bT%$8X!UArBoZf2&TFO< zC#6J-H(Og|%RI|B@UB_uM&mrBn&!!BtH!^+Ch3!hz1d8w$-JPo*xE5Exwi$=X@#4( zosfE5N?_i9q$7p+SgP7kYVQTR#2C&!Bl$ydBNCe4cf#z$oH&n-bc`@XH14zNpAmT? 
zNqv>tH~s$ZkrS_9@X$9chuo8H|1GlY8vj;CV@C5{lH>9AUYgXA?zmm^34#Tjj=u+) z;$K()?0oe41CO0?VIys3=CY{ho|)OP>+d~jGZTNl*DJi_5Xx_KlvwpcHe|qGktwiN zb@6R?g@bXQCnxr~(pEw;BRMzf5?^aRND+^@G%BwiF1yQtB$;&Sa0F|d!kLdr?$)ub%Z@vb?De1W3bOTwjRkQErb&z;uzRm$FwgewqOW-5v_KF zXHTRL5xwH}-CJg8$*wfmPoPd|4TYsK5Yo+M3)YHbDN{pU=Dm!?WW=Butc<|(iwVrm zF1!3%3|ni`8w)51xB=~f-n3!P$!+ixWWX8IJmKs=Vy7C`Tc^cMdqLyFK>BgTNcrVaJZ{7H7Iepq50H z5cARhQz)WyK9NcdLHtprGxU{_977pm~YQkuH@O1YXfGln~} zXrc4-i_4kL_47bxb6$!gfHXBX1aQX_;&Qd1+GZXRiNa*iq8+h1GJQ{FDlhJgsZK4- zf|^lCXFNpxGwSk)uH&dJ+MXx|0^yV^%v;qeKb;$E7euWyf){ePnnZmi1f}Xio1;cX z!_3UXHz>`Gt3$FpRqBRYz&rsBNrsr142EX=>p&r8C)mE{+JIiZ9J*SV)yLuL<%~V{g^55}5PLEaaRG{7@fo8f+)xjp`Xbc}#DSU7WBjNKc%4aSEoLVl{Z>Y&wLv2(iyiezj(pLJv+fpN3OA z5KJf7h$IFL*MXom1ptya`aK#K8c3amg;;DaghFo3&Wd?&Cv2|m z&&A-m*W3^#21*4$` zI5h!2pAvk69KoAg?>`O{3k6)J6x=?5hChJO0$me6Rjd+;k(>E=&Ujh=aIm! zgmXe588OfXIgEr9Tpg9mkLN`5idLx$fYKxY&-eELvH-M-^~noS%ua~ak%3Vf&|@$n zqy}-rt@VKGlzaw!)>-D-N4Qe~i@RdEb#7Z1iuvTJKbE{!9|u1wFj&4{kUzTp0|;8o z0+D~`0A~{Z86^GaqYGQaR?Bll71_`6*08|cF=*hUg_r*1RTVwSOhg6%CAq%<$aC|P zQ3G4Y52cD50_&`=0aZs1r{Zaql!HnHrO*h#azDzmyPLscV_&)IW`c1^#dw2+|0o zURywy4xxJ!@Z4~>+>2k*764vcUIOf|pOis4VI0Opr;BA?v7jl@9-uRjB^}u|33M?G z2n-P~!%~d7SPik31t`2CXK7|2-TFWY(13z)!#I%ZyoTXcz_;B&m*^GizS+70BSk5= zKXn0(Mppz-gpU!F6F=t%7y|;Z)%I@DpD!`k=2(~1&CXn#b84w^d+|z={XQmi3$47-QpxuXV^?(b7f)_`v46lbM{DcO- z1Kww#2n{gK#)9L(RZm#_rJY6JFHrd<*=`~8;A08`vpV-ZfXeQJkhc9oA(7vpzvtl) z;GR^%z8WqinMUUy2SSuBs1)>T%;-Y090$XnvPB?nKsRrZg58)Ha1Eb>6u2c2drKbV znDQCj$FgOVFkQZNO_Ve(@SnX?a3jlL@Z0_B2@pei{XwGR2bTp5mfZYez5E7?dzBC5 zl$s-<8BIjRoG^?RY`b2cIRmM}7j8PK)J`YNc7y1szmT)?BbL#re2pL7%#jbgZU0O| z^yf&-*ArV&+NWQIhLQTp7j{it-O~|18Ffio0Q4q1*mGsr@)a4VYhw>fz|2eLj8f-I z{S6zu4j&r7f?FRD%(E5ov!fNL5+m7x*t>xwb>-;t2}F8H-5Jj}==XJV?2!wVasfPz z{~HHZf2)po)q|8UH)vUIf0oZSpp8;`r)W6j{|QF?4qvZLx`Su@4fqA=mb|7PfG0{A zg${L(yyLBSVnida0=Sf8JpgyQM_yDo9(=qF31-kuYisQd&kN)p!Hs*(8PDPrO;fL% zt64_6KZI*W)Z=@Bhn{wM49b8y4o&X%$|jz%0-$O9&;>c};3r{Hkr+`j*pk#s6VaEA 
z$iRr5)OCauld-&Hcyv+TH0v)okpqZ_-+Wm~!xfWm2n|B0rwZY%HJXLn3OxM3VSKd@ zyr7BBif%@+0!XA7!5r1N{8(SHA}^lgHhlcAH<6?(aH8d)Q(;1=ngnGwkaP&0gv$gs zl4q_QR0_aQaUqZ#iM__(est%Y=g@Q(lYn8La(IoAqI^t74RXL-Yfy?9bwJ(CD=QMI zqX(c`Il9&C&i~=*&BLK=|Ns9;p-l#bP#8N|vWIL9GxibLi6mS0GM20*J7er)A8XkP z;YN0e>^s>RQrY*V?|Jq9{C!MQ1lwR4!8Y7(^p>O%S60PKQv(eI8(}tAcgdEo4b^Dm4v(_2Y4XV z$MZZL1CM{f<1I_iZQmV0;ey+82&({~qATF7w06#{wN$=Y1Ok{(Ex>lz3}_veM4mr~ z+cxL02Y`1Fy^V*Q<(ozp^UByEJXF0IiM^feO}j6#$Nd2Cahmmw9reohgm*VF2JHT* zjUC|Y42z?ZkYL&HT(4juyK$4>AO=k1F9?Yg1Mt_T0i62`^tg%P6^3vP$bCltZvNBn zk6m}4KmEUnuLhI0gR6jm zY0UEhV`u6R{Z(Cac52@Ce zo|iM3nhEcCxOsr=FqCE)6i$t@kMi!hjK43L=xjE})N=SXSTJXa+v&d3blsR_;wh*# zps7gI7veg`cSNBQlA0H4%pyW=-FSfl=NL5$*n5*2RY?Xdp@yVYZ>5I#I9!wS5Y%(+ zxA4o2_;cWZ1U$uIhxK9ogX?*AJUyWzU@))&&h_Q+3MrWl4M2m5@?sC20#5OzK(#}% zHB5F^IY;SFESCF22UzX+zqN*9!!`>ZkOwLZiD7&|Jo*R<>|ki%9%VFl#Evv72alWs zXq9r^HH(3*UG`pDhHBIzan5pK`ZX{;T>+#(UG>Zu2PtsGwg6@$y{6pZ;#mJrpb&}x zmOT75@@`#EsOSLWm;?u4L4%u~RznFBjPWG*W>bT^@ic`)c-2Sf59vo8#K_rR1_(&+7) zrX5ry^ULmmVl@OnCs-jg*BrPfg+*Q?g~wO7_s2%WR*Ei@c!Pt+k}DR}CS2-ZFc&qE zUk~O*xzt4wMG)bs1)6=`V&8oLlow+RR!2Lln}%U~VBc>)4gq>V=_&}eCDJJ>odY(? 
zSLK8WQ2A+>$F>^jSUuJ8{SE97Mo$)wCS%0GmE@vC;E$`0WF4v5xfb5`wr#uLMm-)1 z%pfpbpI_M>zDh7UOCug58_enUmG*S@9`Bj&5rH^^?GN$SJ~*($JI;5I_g-LFm6WI0RBskhRWs)nG}Bh^m-|2z z7!);x%=;`Zp{9WubOn+Tz&7=Kw{Wqr-g44)p<9Vbq7kjeM*p$t;;mLI7jWNjdEoG# zVryHdg9wQw)ae%$I2c1CK-u^H!-$VA2jfpgL0Og?7)bqg|F6dR(o2V4wh~1}VDUv?-O*`0lF8^!9snqdp~Z zqF5u*l4z6g!)wW@Z($jQ!mvjHK zCDp7YYHnr+G4*;^oShzbxW1?W3!eW9z0e)Gb(gDJ>>jr6@=soCFfy}0hx+f(JiQt0 zN9K0t$*)C>9zwJ?@xVDz>2RT=&tvrEwUBZJF^flt#8iQsSbKZDA(;Q+B>v>07tYu# z!Q%LgG};s!JYHPWc4*sOy0?Kja9I7cRby*=zszwA;UHMWJSpU;RV6r?@;klZa>MNg zt_Hz|2MsC>x(&t+Rt-)Kul6qNUEZVHyS2x<$Faw^C$@J#eRKTgcGSb-%KoM}M-)1- z^-}0}R8&=Ut> zHjDG1BgO|g)Q>bG*Ej_(9NF?5WCWHJZcY>~t=oSZpU_ex2DrcA=Dac}#ldlP zyY&f*-nEB)uI2BiD}LsqHE)!u*^TLQ6+5*5I1L{`2kS5`j2CyB{ffLv7&Uu+hj+Td zS*a!RTD|!-a7UBGH1y*)jBeRCk5L9*See7wpRqB%y zM|4cNwIFlBW> zv$16^@jmawp$&eCsspY8tGd^T!T!bt%z9_}uy<|Mtftw5%L8xD5M5KNyPwHp*ah5p zHu?RuI~3?;1(m?q9sPWAnNUaRHEonl>pSZ5mTSai0KOG5+dq`u&%%we&S~^|d-)B9 zr)xfPKQ~)XAKFN9p~QM8!bbG`x`LA@W6tg8?%z=L3xnL0ff=IM2cjdX?Wg1dksnoa z?yexUtVAzH{#In4MJ*%;BWB|h5UxYAyBq>xq2ahX zZcd@Fob-X``97_>xjLE=5l;wLh!+t3&Ly+>mI6FzsS3WV$a23~exc{S%r<~Fua*bT zf-RY`1OOW)x%jL}c(}ewg?_Oay1L$Rw85}t72EsO9$giE+>`eqDJu`U<|M=R&xhQ*E>vDyV;vhFYMRht}dA+Vo(%;Xv?HS4b{GT zRF@_LYS5~@!WoxFnGiAPKM}6j?&0Rj40LRFjXW2LSN^?kaWyyTO|{Ar4(V2^A0FQO zU~aUwR1^WR3Q!)U?Ls8gv^%F!5O7P?<_1d$8xJ6NuHN8 zHY}82DZ-=0OGl%I3FOfdr86!MHc0p9t-?hVmD17ZY4nG&6SfirjSH975W>Ip4j9`iV1TtnlEx7(9P{Qjjrom7kLP33 zpPsQ}^WXxK7bW)?-hB7HlQUMW#ffRcbI6?69!WfmNc#M)CNwGJUCrC1^KURglFw>N z^FDEOh%v`jcf9!1dcmz$NDCHf9;H%6J)^bB)UmITd*zSV>SE5wzvF7z#j3UCx`di~ zmFoI+uT)w!aza1PR39_AY-}R(S*fhK(%jL&;S$2k`Q5%06o@Zi$#^Y0o}F_G3el(0 zXm54?@f*%1b4@&v*>fZbop#A9pM=rb0U`HT^dASqEXe{Hr9TE9J{~xKo5r<@5P#~h zJ6B18&K)fhG;H$-KF3J4B6F~HNGYk^95Xscl0Mx-*3&4?QHEGoCbfXCQkD%{TYm}C zYdbI9B!^Ypjn8~qFBxTIR}UY)h`{8p2}cqd0ltXRPtDc;4*zH}suSm)(Yfo7Zg97l z-%T^md~MPt`PZ#JM~Q4Qg8IZ=+DcQYgdC^B8Kc(Btudf6@Gj~i4yi%oey>KB=7*c3 zRTTMh8a$wNUj-Wu2lC!HRGh~HAV}PtetT{obj(^n|KxV5f_popK%U=?Z-q5EBe-HM 
z<|k0tP;WR)!$b`!PAx;k`~h8_u%;a#^LhZaY=jLHVcHQei;{1;no=C%q#GvDn3wkf zgT_ib9T6hQn3Jmdh`Nl(f(@q2!9OS4+D3@jKaA|B@DO|5yGGcbJ@qu7&e&DWDSmFt zY-m1`*4t<^cW-n20RL|m;_s*+66M$;K5rKMyM52qh9E)7(k>ed>GoGAI zy-3n1g+KcTC8{rAe%$SF)jOLqj+oN_tsK+2yht-H83&hbmBx(JME3Ha5 z9H5xVn!%lJK~}%UxRng8AQ3NuzG7zRp`PcR{8q{iK3RPpOfyD+)ni3UBaHa`Uwg-^t}Yfrk779 z`*q6tPrTKq z^H}<-;>&&&aq6IB(zor8d_=L7a5`QN^v`1lzVJK7M<+6MThlw27Eh5veV0?*^jvP0 z`*J^4m=9XNX5z-3)+`mhNLep|OfTc7#Gv$I-pxt}>48`D@{Wb7NQkm(E*Uvd*lTZ# z#pjw?TspxgWQSIRXuFq%Ixvaodm;r>?v?^n`%Q2=1Ld~rVu=HcBWqM#c1=pqEscNr z70{!>C|`IDxKTCMB-p%OPlH$LN1Rx?!V z0t*h~pG$s6mMhsUAVl|V*+5)us>+L_m1KR zif2P>mjCMeXSZ4XTji8C9t_WLY&5-o*o%IvP19(3AbQN)Vvl4Kc`AK1@mYcBF;@#A zSu}C3_R6G2OE$wqm-BPd37O36>RBV)3Q}(=Xt*uuUCOVmQ>!phN0ZxaqL0EBf+jY0 z-Yt1P`9tlsB(nJ;RWV(e3laN8+$5$3-b=8Kb)0Csa{aIqMcfHh-eNN)mN>&v(xQb?s3~-G6s^ zD3#s|>h$iZcjnJub#?hKe{|M{%qRI%hy}*CG<8E_RgBm771Hg4Q>C+pCVN-4J`veF z^7eP9lmzoFd{8(gZJ8s3*khT5u*zcH@6IS>?m4=BI(*2s^7$FM$)}(@nHwv}-%^w0 zFGunUW$Dba{EF%QNr@y|;4Y75d(Uo=H(SY7{F-gV^*JwvzIrrU@b0Wx6nT3E%g~l) z=fx7f4>vPd{~p)MO@8C$g#EMw@=Emxhxt$F(R9L%92)>8oMTip^(rwuNwsJ1(gZ@! 
zX>wOtr?B;NzwYCfRFBE_%XnfFrh|TWRo<4**p?8LGc>Rn|JnUyczA1}(kRmREj2$SNuMR`jdTTB z;lxK;S}sqdVpOm&%@f%3E;Z*^`MK)O&oq(ik&Z2LEppv1YTEHSK|9s9?{Lr6Lxc0$ z?Xlg@(j=W0*`GaU8id0i2YW15(nl&8^1$M8pDg?e+_%-_vD~fV*Wi-=>|e?Qzgzj49B0Ps|X}S83f9lE8Fm!s{F1F8`3^hKAc!1TLMZ)*N0p+ z6jnu2+Ojv>5b}k{*oKjEH2+iXd5XKjN#X_}0ryg63clKJ8xB(NjKnW(FTo4%x=YU2 zrIb)=#47X5pT!;P_C(73b-w#vFss?^7V#zaK7OV;LA1j+7hZg6xeS?;EcL3tJg>Op zhFvIKUI+cvSi@uU(oSrRyu@ zE6XryV17)yVxH{pXFr)g;l;;t`S7**RPSIz3QQ3mO0kiv%td8Hc8bo9JNFFbuyud0 zw6@{CSu}xCHjBn9>eDq_2Gr}~?t(YVjY?0>ERtaCWWBr>zS1PH`>Sk3ZQE>a$Ke;Y z>CP`kyk+Z8YDjfADVOon(ykX~cQfb!=tW9C2FHi6kPtIS=D`@WZ$Y?2X|$JCkx_ou z<{PFH3b|X)h=A?WhJIF9E~{Eses=hhXk(H(Rzk*o=zz=U{(S$h7OB*Cv6d}|(Ieo$ zcQ-up=3L>o`?G5uxNnz9{`k_eu2HF^QD!BQoprz?x8YEBj7Yh1FP4w;{g9-lZVhdZ z@04IxiLw%-Y2(%};;+XZ953jpLHN?B^%o28K#w_@mmGa| z8n9Jo5YD~6v=O(+S8{$$HTmIL!fw8np7;uu7*g48b|wp z_}nfxt1%KRq&92SErRl;^{%~aEAq(@``heV+Y6c14JMy&c!O=nb)UjL72lofPy{k_<3KS7lF?He!{Dop!=Y{uQ<3GJK z7AcbQyQ}Mwq^$EEL3N*HXtk*u3I9Q{sjU+8)+_?YGf#pgR}OR(4O9_5`~Fm;&2P{k zVY;x;B`y)g*}oml5`$aFCS0~Qp}5%hUp&7MI}bfW(^qoTO=^evfcR8#tOOZMb+7C- z*;C3X*LkZ`-DXZ%+_Hp6)r4(ZFzQl_-lda#c}J9GE*!ej^N_@R8_4a~_~jJw?8!`* zicqnqItiTgEn4+;V-)B4M^V5%J7YR^IT#D0Ycmh8t8RPqrOvj&-o zWF*{CSG~T{h;n4A@W4=SwGk21$7F_-^g4mju5jG-xdS$fhe#39AhM5Adn-I@y<%S9 z2ZIv*^`+(3?AsQkoS4=zYDCf~Oq83~?N5$}ueYq?)&qZU10i$!D0_jPawe45qfDpJ zu$+KpYrDmJ_PyoKWE@KuPk)RT9gVwn@XdN09|!9J!(>Lkh0l#W`^R3hT zFI@C0$z%vFXU)72X`<>`x^GM@KVNw7Ln93ddN?(DBes3Nx4d;l zvgh+Culiu7iAbY`%9(!O{^Ka<15AvQT8KAxk2$e z7N)mHQm>Km_imc2lUBAKxjL3y^h*AC*XM0jj4}pvq){D4N-$EUS8!olrnQLcY8i>^ z9Ff=pzW&DYc$ABagUhdxkXhPe9TYB;-rr^>EABGUDGEvY)cy6GOy|lRPIa!XsgC8t z1EMKxoF^cLoJctvH>Tdf5$Zor5doe3^S;$Ro8v2;b-W4{mbL!XN-JeE*2<>>8nK~M2HhLCNx zUadAs#d4SR-rcvy%;}e_s~8J#j~v~fFOH{poA&6*q%QAw6l>8kDX~p6lWX|0r7S6VZHJ%H&Je``Vr0KkKhmiYJ12snQ zcgzjG+BD~rewoyZ?AmI*YOhk@cicnP1NC!>7I3Q8ZjYuDKA9mATeFn4!1Q)t6Z5SG zQ_Y1I8Il)orQ+V9+)SO%t8GLcKBf3Z&7`$du_arYeo4-JQPZw34L^rmnO3K{>l^NHGs%8UMTMW!^rYTX= zt?i6@x1ZX(H0DmRzZl&koL`H7v#c0mauEkLTG#W 
z!v%#9D*+DD=S)F`Vq8(OdpEHtnb{?dbDn+KM1f-l*#~3dZT(yi+cE_reT4)u!%PRS zDT6=FKAgXck8>h~gQy9saC@-9Y@laxhL3aFcr3(960!tCbu?HFN6+9Z^*o$E=7F*InZGONL@h9o2HKr%ugPDsdx^N0M z8Ls^NQpx9`BmK)692|j0-mnV}?`fx4lXTT@482*G)j(+uME8EP$s<^!A7^LLHxiSI zoX`h2pWJTvn#!HqZRcc&l;6zsnqA30hOM_9$5m@~FpV~6(>8LsG>h2YepZr7DCw>G zD49P<&6&3T_@RqCSV-ez*b&7DmtqKeyOz0Bj{u9NtC0y zN|v7ND9X;S`X*1*qou>*nmBH$^kI6J=Iy)tgsE+3-%gc60$i*&dAKelmJn=k13By7 zt6o=DIG=7O1D3morv=oSUH>oii@oPhcP|_dzr3z)O}L^U(ZjV9q1iMtBuTvfh`kfR7JlmRus#ks3%VI( z;>MOUs=4v14%cI>Za>@6Lg8rD?h3ONNREZ~H>r7V<;7Ov6}sb>(sLUcNc^ zfNc`sQ9ou$Mj@|k_oX42u#c9uH(xtE4%}U>L>81@g5|=`4+M$+<&^r4Daev2oa)QN z8sKpJQH>V9gYT}BE;@W+3ydd|9^7yCx?Tr=qDL7jNV>$Y#uhRCS86)WED1gcpqO5~ z33PUyyTun_8LP(*myxt361Iysj%A3q5e|=<<&az09vJ^)e3`^;0qw`xthX=djJ{*t zy8!#?{!EpfbFfan?jo~{=T%Z2vUc9NPr~I=T`n&;C_8p>A&RdNie-}75u?^oupavl z635Oiaxwpw#K79d5j;n~?He4CGJjtcJrJa_3@;t zI|Qyd`IzWfY4`+Vm=rzeT$17iSOMNH`QZTDg`Pn#0_L77Edec5SbmlMP){ZUt91frT%iFcDRe243SdL&W_$a`w5fq=j6As&|>5$6OaT*-pzn)I*X;HA!{Qe>$psO7^U&LLn?Sd?81cMZB&YgSDdoO&+;xzi6mpd z>}!zL|ngj;rg_%x22ezK+=&>H>bX z<~OZMzMyP#N#`<5ii+3k?`x$<`FVeKs#}C7K(XUoAdYFl)}2O{o@cnzqb#Y+l9|3U zWhCGQK_OVem< z%B5aITY@lAcfGR+Bzzjtk&5ClyceY?uZvvytdGc0wKF4|pj7R*%9wqWYxeBqLC!S_ zp*Ts_%!lXfALeNCvU(bT-{#uO0Q}ChSc44@s_3n}+*~D@@L(Zusnr6MiYNb{!6+S{1LP%2k~$+JqE`ZuJo~W3a`B1Ju=3- zksBmAat5Dk5j1o(I%HbmIiW@64FaK=p1dk5&`T}z6Lw|&ejB^@AWEu1?ORITA&SJw~*|bOBw1fnjPvJnf|TL+yw*UhVMxVZm_^jB&o6*daFdLOZi#W zJtEK9y#p6s59`+jM<2euYOHzZc7xbep`P+Lc)%P!DCxB6OK|KCl9>EZ;-Na}Z_m)- zk@Dy@&>GqedFWsp-#zYe!COhrRE!WYJ*Dn}Y$QG0?nt|<^sL>o9<`5iETi|WhpoW} zdQn2~sgZ8|E5>@o73Vy)55aD$==)=fptX^ zY&aPdl6G2<%0VI#z`Gg|{t>-u*@Uqi*~pazBA{XFq>kU(CJ^$Fgo;VmF~o|x{H2V8;MU%OOkN2vfJ{W9At@*^Zh^q z{uSNpJ`(cLln04T94{IiRHHxy>#qpl&-@Ce`nYMNHsX@4-em}cpawx=9;H^ z%cmBpTWMwKOu8mEKR@x#kgREB^P0XwljeGwJ@d=%zPWT=7Z&Ddi~}fRX`&s1*2Py9 zWDp!h*u@gs=hJ0=gwFu`9ABLu4jymy$h9a*eyT~?$pdsg-!}#^SsSlVr?T8BoBR?3N%hHJ_>#==6e6R@MC5~Szbb9lI33q~myB48Z8`Xd1_ zd3btwlcd+H!TTBHj}htDhZc!#QeqSX)C3MzrA84B+LcIk#Wy;9xF_5_#X7eG_kC^_ 
z5)7z2`LOU0=R;F>3%new8l0_OCMHUd=NsGN4u{?W9 zzo|T*;jz4&kgjmTEb$o^&A!?E6$N|XaUm-l8H%FJN2y-Ao@t%7QY`oSV&;Jq}$ zrP7BrTwXpm^z40&VaNLbX^uKRgDkUZH!A7ygOU$~sqa$8TP~P%yz1pXA4R^W5JegI zn`{WVu~gc2N;4zXm&O4uQqm%M1V}HTGR0o@EA=`PrM+GiwTX&6; zCGf!Gx7rCa2j#$w&Oo^G^^Ed^^bf$Vg1ouBJ@<<5$l+?6IQ&VAy>o-H9^4CxIQ2AR ze^So|W)iB|v?`H0MeorT*im9iM-@8{s;0rjh#YaU;pzn(n65uwTYPI)#q)6^}q8^x<&Pz(q6at*r96pRxZViVX>FX6*176g4n^YL>@_qJ$2S9x!8O9x1b|*9jVI>rD zBcQLHO&+-sr>y+CzB7)e?sk3qp%0mau3}j4H^TJcOsT3>91I}CLc6wK!LL(t=967g z(1WRAv@4V3?N@gld1M~NsHz|P8LMS~;D_DinKh%F5$vD}4fXN+y6GKtOegU8W)$$3 zwgWa}S}oy=@wi*hH=nAbjk7w0(&(#))pD^PzS7*j+TXYCKe{~iy}Y;XA>V($f40=b zv`P{0eSn^nXwC4NwP*B?zbnP?75IWIGmx z7$F^Rlc0an(X>7rLb|ZP7-dB%dT1iJoKgP896hW68p){xxpsYGpMmxTE!TCf>UOU_ zUX32jXnO>aRwcC0Uiv~t4f2+(mwSDV#{izQ0+l0g@LBZZ2Ecqp%cfDbQ(HWRX_1~D z5*8y|Aw)WAhJ)Q&^<#_-a#Gy%7FzKRhiQ^C#+ruI;_n>=rDPF=C}fJDdqCAm3p*`b z)MRXHA?)BX7mv9N+h#^9Sx)ed+W5Mf7g#((nr`DBfv@2b zyChm9RDTUgd)-;0@5h<;NJK2JMzZ|eRL477mOd$!xaaW%y{2tnRJOAwB$eR{A?g)T z5s0@C*G6|w@|HoZrq>a4M=UEmu2a`87|VkV22w8Rp5fT#B30rg7|)KY)ed>+IaEhq zTonf(hhp@Q*)i^!Tq5g+_}Ns)hmPJ@di9uOtNX&Xl55>3)E2EBN>s97K#&4brPw29 zk(4wq=DBlPWGLWwRi8g%ynx1k4r(PQ@%#DbCRaf(7YA39BG0Xn_h{EU8PRW^USI3{ z7=f}=A7{Xl!ZSRnrW5U{+%n3j`VZSr zB1ehHMj8o6`7Tq>kF7%L>uC;KC{QK>@389ajx4$WtPnaFYtklS zZ+l!;{1dfNb}d4PbyHF6xzT8e=|VxMjFh~8^w9*QDPFSKnvo!z4BNF(T$?{@=d8rR`|hWwZ%j!FkzZO0=C*2n_ikiE2+{ik zOkWhrbFtjt1MV<#3{u48Qw|T-NL?5@lw$W+%@YcJ<=uol@kCYqC6YGL?2NKGxt`1VUFP7>wU zC8r)3IGsf4*IU4dRq=4_swH&RJbrdx&&@a0I9++pI|ZoR0W0hZ#~h?RkM{kBad84u z;i4qvWI+FUU+t7gsCpaq#MTu9<8^1#gIbisE=muz7HD_wlr20(V#6p5?ywh>5!dJ_ETs4_PNrShXZES<1l>gg!|vNCi3v#GF?k%+{szE2|#Wt5yy1 zK-wuP*=K$g{(|0A#9^%LBXA}jws14SJi#HOaF$O^Spdu+53VqZa&sny`Kg8SpfVsf z6dzu+Hn*a3*O3BYA#BII-uCr{!?5H=t~#egVuBwo9@=j!}*#BcgdPjIGu| zYodrdN3rxb?M9~feHwB#v~knoM4R#k@b5!(&I^k)UFN_f@&s~wSYj_x*qH&$#by$a zb^%>|95Bp8HZ!@dvLs_Be{u65`JxBD-*T+bkx6_BK!!b$ML&f?vx~Cv6g2F){^a#~ z-0K`j$2J`mW<4)?g3VW;zD$Mw5Xk;I2h#Bbe&`Bq)?0@Xj)ZT21C9<*0iuq40nrhd 
zfQGocB@+L{Qj)qh#DWI}#b`NzGhx%@+dJCj)<@8**At%OiyO@I?*N1!J)3$A(k}Aa z{agvx(DMUM`4G4r=4${Y!Btgp z|M#Rsx>~Zubdv2Z^t1FU?Ev-k-3u~NlJoEA$zlmrfs0OF@+QDl(+m`LSm?PqYbiJ# zxof$AZtcXH9&qoh&V2D(WjO@WWr+M7DP+}tC62o|7)h%~CxM0xVXMnOLG|%jzlb2d z1UQV`X)U$xt1$3!0aPpW`?>tfaN`E#48W`emPP<666g8B{ugrpm2x1jHmb_?s>o?PR4gjg*BQ5%JYe09iZ%g`82n!)-$mY``j483Vq8Qt;OUu9AP`9|8FA22pMt z=zPU`fcjDwmF>PZ@*R!>4j-N2SY;&1R|v)Uw-T5QAmiRD(+Hq=IggsYf;gBY>hF9( zAZ4)c5NzKgalAkuQly3iR+DBF+882^C2}Tk zg}MUU-M1l}$Hf&v_1!6p^cE;RBkf>uy*L4YLX9%2**C0Oe zszDuaezibB?~sM@GT0UbP#jLl^y9EpP^E?;ad%C@u%VDGj=PBWn}ZOg+a*s)t}sH? zGvMb0jNp4clI;HorRwN=fL;vjB46D3(*Y^)ANUb6@LlLDXN??zlA{F5d61FX>e=pp zrpEvGQuG9%3~eAB&h_sArTwogwn7alpy%$wa^v=)uw}3}KWlv#2Vq!00Iv?PCjxiG z<;yqkc0(z#0v6zz-~GX^l_b;@JAoad;M6e!LG?vN9BK){fE`p%y8)$_xBRn&aw3aI zz>|j}cL93@GM_*!M+RSDnAtQ??|1@%0Kd|D*Q2nHn8B5kKT;Vgnj%~9Xox>Y8~|XH zw~!j%Tv?zMe5iI{!*YQw)9yePjrtnwP#y$lwyvL9Wma?@b4wm_g-i_)({I3K41hB? z!1e>N!+-^=Pw;!_7BD7_8=|p{AY;oA_+>P*Ap@NrowYdRp+fgrK^!4q83fsy1pPHa zoaQA6M!YJ6xDWL3z;1IA1M;XD_yPUO1OQ}&1CpZ@I6tKU`X>}tY)1lY11c-v%&y}c z9DjEw6YR^$%C@r5$^e+s+t6A<_bsgCL|^-7^-F?R8bN_(kge1ZW(`FC(KOFdcCVp$ zNqV4G9~V}&74!m61{?)IA07f_VA;Dyv&cfQML_jMUIlMe`{NiK^>1hS4v4iTP|8GR z2V|j%MkDR`+JO@pLYI9!_X1G)nt`3`hT093om_V@WbkP_ry+gjSq()sxBMdz--W&z z;K$Vc`LqJqnmjcU2nYa0%m7UY#9p#;Aq%t|y$B{?W6y2u`2VrG!brPHGr%WL5(OU7 z=MW0O6;hfGROen#TwwfNDScXB$j{7m}!FSFmvEJknFI^BFO;i6^}c zxJjOn^A6aNEA7B;FyUzV#Hg!`II9bpe3t&ll3`ijz zdmu1U?aSXr(uukK+z{-CDCym%rV&C0S-AgUxW;brF~5SGpm>|eb5UT48FpXrgc4}~ z`D;bovH_Us^8Zl8@o+J1Ah^dj{oiuXxo9&~pp$#T)r+1abRO^hg(bcPz>RL!}I@`bRdoM6Y%D}{}6H05m;~k zQ3%04P#g@(56G{w{C~wv|NYd7rPZg6&gLi|;GAta;cCl#0le^PfByik`@dBNiSwaN z@|EMEuUi>dXt?;^oM~xfJUC)yfve^LWI!c6tAHG@z;yG^vnx&O2;xoWz+#N* zq(hb(C;xoFVu7X%sReb;wmSH#ikuCkdzlcb(k2C`l z0FY@m@)v4&L+EVCUy^e3jTr`x4r71hIW`3Xx0<1b4rDgi09(>q$fmR>;i= z2_rnkHg;{jF>>acZZ|)xOz1oOS0NBSDdG64A!MhXuwdKCo`C~|$#sBm^bSx|_ium}=C1^imkAS)R625Ek#920&)lJHUv)ioXR+Wr3arpu4-(lXCdI@L{VUao}ZS5i+Vx$Uf z?LG+1F<3lnj@>f}pCN1cn@DV~MYuzaja)?g{s!3(b_0lx#5{-Y_|XaD 
zh917jEyuCG1i!@HN}n$ULtDQ5;8()Zu}k?&qybA^IW;XRQu%5m<*)F_kKPraqO2cMjTwLh2}EK1XotGDHR zav7Hf_4Am)+NPTX)y_}H?zn$1Cf?|*X8SjI{8e|WPQ3Z5c1{Pj*5#bqd^u*KpWFKJ z{Ry)D%m3$dn{RFPj2lzmfQy1!2TJ-invcbp$*IjMrrpI*Mt^WKHEL;oGcD?H9^=2Q z__`$16gZX3h5eN5B2EaX!;|Op8>#FRA4fUw(r64HyQYzrn^WU>{NPA0WK| z7u(eJTkL<(RybRi#iyq19D`rKqH}OBLhF!?^rRC7>=%V%zy3h0Uk%( z0%%<6*?~rH;b&O@K+63)GMo-hD7q?O(XRSHT_Gs`P2t8Q@ULl)n1P)c?F~%Y-vG;P zdk+*1xCyXQjqJc|%E*wtQ{EdOQoc<<*>`rJ64EcEKFQW;uN=$MOf$0E5y=zYVy^^b zsV_sGZ}XsfJQu-rp0RREH%Cd!4gyH?=cX}o)I0}7XpE26NStF+)w3@a=1V3@9q&+S z%q#r=sLu;;S_^Zn;f0vK!yJ%X%V-75{dWpgOHBdmI*RN3&mvb{E@Wl!qL2a|sWa}r z1zg#*6ZTqD@B!&JTiOe?poS-eMSk(4bA|7 z8y+dOyP_;_^+yKr`d7uEX8CUbeRu-+c9al}+yO2GE*{ZiP;of62Vo9K0($O?rWdag zRIMr~=#I>=2}3I>(z<asDgm=_a3n@jf2BF znP6NInpR{qB(f^$s^~5NK8Rox4Mj6UW$hK)yvG{S3^B@2=s&`TrC<0Ih-rmh`4*_n z`zDqbCzzc-3p(XDytxPd6aF5KE=+t{PDfPfB+h*$E_@{}aqktV*Yk!8?3VHOjOzSy zG{k?-X{fosg^@&;b`PzjB^&+BP4=?OOYm~YGfQnt@Nym*UHMV-G4fn<7~@(C@yU-v ziFLhS4~@(hj10_k_HC7nf94uJwwfs_?XSto64(ti)lsW|`gn*wZ6XHYH2GHa4`0|{ zGw|PDJstv=ClP^NGBX{k7uD^efyP%I@#gwf{cbhr_v^0?UF%*Q{T;dt#$2)msh|4q z|2Od)T-8ff6a1$091i!#hXiJBKOWk$veYwr?K3jEweV#uj2L}_M>`y1ipI9wOabyV zWLelFEe)$>EAo0m(QD6**28VGi5Ad6tZh^f5W$| zmj&d;Y)<1`-Ec>iJ$GohutQs0Qaap*t4xoEpMbXZ!&8G7sKHMETNXe6L!rGNy1wbO zE40=ywX?(P64O!!3)`hRXnz)!r~iDP&$w3DART%16F6=28@$Tfr7MSZeRok}Be&4z zvcm>iyTiJUyD0GQ(;dsx?Ye19c?qV-+U$dI`siJAovYimMy4L$*NR+vymas52`mYYkUbXDv+AI-GekC{eGB@<|HnL*7Dm%1dyR%{&`RSHOz)Lxom!@&gOMeo^ zJv8v9qhu3@PscI#Ut7mLD(8bI3O7@_yPBA8|DrnaOL4gGbhtQqNH)c1ceps!hOFf~ z{ZMEmclgS;SW>vU_$9DZmjq^=YqYhwp)2Z|2=xn zQmw=HwOJ-z@9ou@!rqC?MG+qQ2l3vDS) zn?162gkX=Zed$uXFwx;5Z2F+4#Pu{+Q8A?yZc41tck6BRCELFV#2~)p`*)_I35SoX z&F308zW;5Oy}B_LhdGLTM*>XS|`m^}8vbBHwubrZM8R{5Y(u;bS zgEendc_k|W4y4GfbRGkbjU6ldYTSqh)9fld*H9Q?YWsAdtj*5=Iz$0=U(~BM{vL|h z)3dbbbCkRp1RzyO9{+w;0^KpLXRXxNn7VpuyX_vCM~*B_{6&*P!$lLERTgNsUM(*fX)~gvoBK z#%lHYVDAEEjxK&Z7^9V^mBA9n2dQG2NIn4>W;@#QAOB`q=+YZ|E>%%y|bYAg6lK2er~A0xZ`5IHJ` 
zz~f!o8C>AYAY?2tbujpEes@2_-H5vfq75yo0S&ALOj&Nn@xw6?n>@+k3E7pvOjlj!Reab@gMDYUerEjyK>DK3ap;6aN3!(=e1+5rk?B zSxPkX;xShTjjj&D?+8xhXaUp!KXb6wz7%1$YfJVGbkv(c&Le~O?7eJI^5g<4%^V1- z58Y0V5sjUoS*Ed67RhxCM{Y&vFDK7ZkpT7H|3xvnBGBGjP>dIpelhS0BVr9YIOSL9>o*`E zG5`jH&JVc0gZaY0+pX-@S9WVEDE8g=!~R|H(+f2ngWiTdBQt=L#pC>0 z8s{6E3M8uA2lQEmb(9XvSY*9$Se|a`rsI1GCy;bUioLKN!mBnzjf@q3=^u+=#tfps zAb88j8}Wayz~~;hxP63Qa)35Iv^=YxdzNnx)8}epf58AiuV8UZ7K+%Rmc_XEv&yt*VwV@?@lOS z4qFbonwth+<9Iq)yd4g}Q)vOW!THl%Xzg7F{U>n4u=rp9$*zFrFCg^eWXYU9|9JT? zxGx**Y44!k`7bb{_^MImuWvPZ~DB%?TFm5h+=9VvT{vd2MYl#x{^ zq$p&RaYQ6Cva?s&d;aeCspt2{^Ljlak8?hs_xm2#ecjiUJ=+eA?pe5}si5-}N7@H1 znsTPzd*qdKPB@JLIp~ptnvF59W8+jj^@SJ3l$o~_e!nTW9^D_^d$egtA0k81ioQK# zdWU=@10C2`ztfRvH3JAJrng)o)B(&(bY{vB6lRU=5$r&NB|&mWD0&JU9GB(T6|E^@ z#HE!xMnif`{#wk+1EsqK!Eu-#kWCydDO8sU5HflF12oA`a+az3)>N;r+r;yKlFbU$&~mH;a{kDl=!Vu8OsV(5nj zRk$VqDsy{~N1eTyLOBOz$S!=g<$5hT$>4(WnLOmJ)$S=+B$25(qZCvDztr9vfehbs zHD0O<1=cko@Dy1@l@{pT$zJnQGeHvZAR5=_0Q9QomdW<=Dy0G+!Wxm8Pl<>-)i{VQ ziH53!MwzBaN4`Zy{|FKP=T`s*O(3n$*QFfMI0be;1v|pEG`Sx1X%1XX>0wx>UQT?R zPKd1n=2P}r7Wy#iED->MCb1aymV6lqrK>dy!?mMdl6u}*c5luhA9@007LQc;mg+kWq8F31?*lW%1j#9MLkU`*a7IL4A(#&h zU9)G~kY)zV5T1Ayanxgf)`w2e={o>^6s#(>X%@&{hbgIf3v-zh(Av4&tpv40Mx@1r zR4p4eS+!Qg)%lng-M?QwQa@&vs*gV4U;67{EY~g2!@Ttl zgs=!!Cz8L^UyOmW6L^cj+qy|*1??8MemLfoV(VJMW7P|)?&l{wHUPWO#*DF3>>#y4 zME*d2WFVZ9zCw2q+=lD`scjH4>UZ0Rx&3CEvdC;8tpN!fQ&ox7C9|WU@=Z*S*+dAp zxa&7$Dn4a`=nug|(Zu=y;f*ao@||sByhoVRL!U=bXuSuU(iJ+yT=0R|d(@pRNJk(n zzWq9bJVdM$vBm9%|C zLMAsF95bHUjM(GtxzMq>2*(CogM|Ja8-^7mv)Ds~j13rxuEHM(KrRuHek$F}RFnh& z&qnKzJ@RDu4i1&=9s+O2P)3OTE=tXMgG>3M)%WKZrW+@1PfEP_-3>t)qa0YG>8D`L z#|UuA8yHd7wYx*Y74l&?%^=O80T;OA>6l_1sI2nssqg{gI4>ypmPf;cVjKdb4K-u3 zIIj8mHTG+~eMHHCuuh$Fr@K{F@3!H!*MjDg>1Mjqb+)o)m}>+=j+hMZ4ks-}@f@jU z2B0u2b=Ew>t)(sVS_73uTxVSGv56ySm~1LzpI_3Ydp_wcg8EGTgVkfXA@y66Lsnfj zq*A^0RTzrKbedK6Cwl5@|GSKYS0M&YqN_B9P+gt5bNc=_Bo^z$XcbFPnEv*l9bQMf zI6{kt0f)u0f7I%NcYWrXm-@GyJ0I?6moY2k-2+C5eb!gF9YyVm^tpVdzMQw);V_Fu 
z-Aq+6zQGnrF0T^n2AM7Ns|B|MP$!o0uwkj6fOuk)Dt3X9=mD=4kMWH+0ioZE*h&N+nXB`cf5?o z>lUB~CPbGkB&GVcaf~0r9Y)iZTwwy$hUTGhD3%iBD+!a$2XA1GDR2Y(rJo{>T>0uK zG9>v8)Pkbq2^d?@r@W5t&43Vko`Vk5M(JX?>$lbE8@0qYcHsW~JuMlKd4MJaQ*T1J z&*|}j;LQI7O9vGN8iZcA0E>D1;{{HC()&B1rdX=an*p8Guz+B^1 zoWg-j{TtvZCMK-}Ms`XNQ!lbT;801VsDk5>*G$fO>sU5$$d{XuX<)mAO#OR6`5Q=Q zK+*4SnaIbYnn8B@;_FVQJsW73a;&E|U14JRMDn>%52rcINUe|e+a0IBkn`H<#PZo< zykZx`$WaiR;;h?LY2S)bKw4o-a;L}PDGQtrN3eL1v%( znt8X8d?5xL-z31{R|CO6VybPR{8h;fC8JNNEiOrgMl_~_YRO7pD4e$X%dBHCDr+RmyWwr z1zI2GP|HJ5lLgdW!vNl%^JF6UR1t_e=RlVB8OXyX^NQ;`Cej9w_TBMxc(`;GZ1ik? zLMb(ZT|o9ORbS3!VlkDo+><2$WHl&G{&3L+uFWWe5Sp#NuD1E4^M_Qi4owi-J>p}y z(HI!HjDaI8SoOi9foU(nLD=MoC?Qin1vjvBaHxTQ!D?6kdr?Bb=eQbN4)^%D#J>j-v_z<=dWfuGpj->}-V#(Oy6kMkiLN~Q4-$_Am_0oA z3`~407=?YC!4>-EMWFjFqO0k6%}#^AgcF59)Njbbv2B{rQi>4o#PuQ}YC`h88$ZUX zMtp*i0Zz}$f9>54TJ92x9rlim?T9>gYY1~NUQEW%bvM2vHtt+dT3NpnP3{_U2ADRTgQaUu4L>UDN zAIfux{-3FZF;==HkXva~OuC+r2w=(Aa@2*!f%kF(FnRlA*Dr+Vq$LlezS}Oy!uH>J zPxD^8Ee7xLE8_)s5)mXAcMkLhO@4x<=hp%eB)dcAV?7@2RgM}0h%09iNkRR4;{Sv) zZ%w7_1E1xicV~gXweJW*R~`~Xfth(4S{+Wj+MT~a=;mfJ7igab12aevvcx9bi4nGB zNTomnLUqL5Y{AdM%O_t$g2|pGV|M`o)@NR7`D{(pDezVVuhwJxxUE#~vd*kTo|TqL zP-8XXEuoF8{)osBU@&VH&hVxB9bU;#^V|8uuc9Dd12X_It=ke*sdoa@rozF0mv^InZah;UN;#BanNH( zMjXktqV{7+I^LRQd>#dJ9r?#G&;bRGjv`k<}k-t!`(Df>88kZK&UiYspEhO*MG-@YLb zl^27*p^+IP+@j;Mneq`wL4NyHqKdIkx|6}kPXWR$$^aH(5Tdv_?SJu#!}(|t@-vbP zHEE0*sWn5sLqIV}03G;trF2@c13OOMJrF@mmJC`fAr0D(H6n}Tiy+4PR>&H7GEtw$ zP^-KHvu43XmJ(XTd{Gf{xMAWGCy`XcRi=Etj15>HoQ$Ul?>qobHryC|^q#dlD%T(? 
z`v6hqyMtmJd=(M&%!!`?fs~j3Rz=m#b1|oHibw|~OT)<47_=i7%MFrGXFx+W48oyg z&=+}Zz*Qg!*YB5UzC#2^I}qy#1|zQzCPIXsJnvOOSVIt@xF1)fo&du_0>Ul=*X#aI zq|EE-b^#r3UJr+PtCP5zb z1me-i2RAP?TStQTskIQNd(-8@Y2w+{Hr9c&@`V2{^s)?>G#Kt^$#3K6PqZh(L7kts zfCqKmJGTjT2se4FK-l`i)%#-Uj~2;ath|oih!e z{)Vo%a3KlSX_38e52XUEupP~*hHMBC^Y6c)34{@0G==%fu67^zA6{yTF*EffLBO05 zT${SlO+8aDRC6`|E1Y1O^H2~cE|}a8p}xS8OAz>^GKw?YhEu%^H>wN9SabxHr zfD#fM))4%3nc#z+VQrpx zGc`JwGuckYOAHwnOkd6_ZbSKz5vUj&&#$O@L&QFxj(o>o18>KfLnBdj92}(y2!>`R z6xU~L{~^mQh-jpokJQog!+?mx3qWV36Q8kWEp+Y3>M))^Pb=(DBRnM9Ao9~R7kkWN z6R#)^O#;?=5|MBP?zU$kLUS;*;}gE;J6|KbAsLQd`n4!VWJ9TQK3b*^kUf*P_MiO@ z6P~iQLMnC|&S7Y2&e*(xJZ?9}4>)%1}aqvK}0`Z%eX32kFhk!OoETr0740bO7QZ&W3C01R-VJqL6U=OTfT78 zTc~FV9?Ge#1O&K5A1*e4#S!DLI}wg&P2vOB)E5FcM20jJKpjB&H1ZAPlLY;1iI004 z$83%$#y}$~3<~K|i;U@*3mj!Nbt}kHl@Q0?ntuiMhjkZZ5XRUq5(@AiX1=nkTY<|Q zIdlZ-UK@n>-?28PNUfy(44Vs*33*R}!zR@w$uIR=t#2MsqOa(LH-k|jwF@MMw>BZM zjok%^BNnMS0DMu?OPMC$OCJnX4MUwuf@aDLpGT4@tW^`0tw27?e(pIu?Pl&3xZr`1 z&IV9*pnf`rQw@~dF_X<2gnZ!+%|b2QCI%q3l(fsHfndJ6tiZe?q*x`>t;00rry zIEEs<;1d$0{sCvSy)6{+9&%m)jL|QtJiEyEjdT&d+BiDvKpPZeq+L;e&66Yqa^R3W z&Z0Z-<^?!qi3t?mm>Fp0`Inp%pX9KFGb3vOW7?Bn*h<`IQTfI@-E_QgF6g%;DFc<&4agjMMD8kk^{6d0f5f1 zA+HGzjgAqn$}dxD>yo*vzY@(Z@Q?e4l}_rG+8N)AG}F73JjyHb>K24qE8I72!plI; z^~r1r=~gQVquB2NT-pJSHwFzhHL^|=95$#3HS2f;pUd#j!z77q_^c6EN1ar!jxw4; z<8&OZ$j#)3(L#XPB7X`yC|Bde`EDt_K*VQb;2#S1ltRl#)NAc>$f(Ss8WO`W;5;Ob z6U~jZx6`S=n-`~lR-E>==e0|#wU3TG5yMO_`62e9FS?J&|NS_0$8Sw-pl`94x+A|W zq?$q>n++B11Z%$RMH?VL31m4*nTEhD;jJ(!ieOSLU1BfZ5iW#*WScu)x0?iM=d6DeeH3 z*puCq89a%{1kWhFC;V6-NZK~ezS7bKzB$CXnJP_a=`=+SiI+Ri(sV(_P?vN>&I3v$ z9>UxXf&$-4{M#Sj;3&*MP7cwWY(J5o`nAj~@1TcgK40b--h(E?WL_n=lIan|Sz3IX z*IlWa3VQ=dFbp1H&`8i#*I>D{4j(!Q@FTG~n>c+lpJu=aE*lmbu$dqWkM=ze*eL+! z{hy<(3}}m|9KgV+V^rffp{&IR2RHg<3vo2Ek9K-T+2jF2@3oUS%ncBkOy zyK~xHt%uIBt4&>blVPfR9l=f(NP;tmi`IGXYbf%o$&G=%nYe@exhYT~4a38Q1e}42 zn_mnkUL${(+uuuVx(kMZz8IM;*AuMrB1ba)(4ov5BtevBJgM3mnVCNj0_iXLHGxk! 
zsr_62j28*pSQsg_Uud(X3v3|piLTd#Q3#+Fr_tF3hD&gZy(mklk}q@dl)fD*q6e9d zxBjN#b!?}kxQ0t190Vd44KS#`;G#S3Hd>Vg`5_^e>=33s^C$W=ePLA8Kw-|TON8AlP|Tpcex@*Y)jM$h7u0)P z`j*_6wjs^~x!yT14(kq6m6VgOkD^qTsPG2r)S_bfr))prEpod;I3-lozw!29MPAhK zwBzQX-vt#*O@u;`>59DpiQi#~UqbUYj?yUqIm~(p?Z&vJW^+0&>fMo$FTG~zrxz8q z_VVYkT}Z}_zWV%YbQ6-p^V%Yh4$7Z64uiU?RK>V1p0k~QEl+Yt?Rf3=9PSCO_l0F` zYLGNd9Qe6&`IQZHz)zN&hCa}&$6pXt5`Um(z&Es~t`CP$>_&euVVKndExf_g6M+Wz zlSvdZ1Qub^Vl)>9qo}xn3yAdniu$42HvY>C2xR>CV9=6%sYiVlwsVq&M-akQnc^*` z=D7ssKy?P67-*f@HBiN52Rtl%Y6*u%ZxrOYCGl1*L7eNj&m30m*x@0*TrHy_bHOT`tpQVyZZ^hh>hY~6Dj-ftb$_w94I zwK=q}oOk&=D-G+J3$OuTtQZt8ADoCX%U6h)@W7;2T(0JW`aK~)#-bDDnMaEFoCN2JSt z2S{ql)@>fie1O;tK+|j}GY(VZAI+DNo6jLvl%?O!h8>i5oLTP+XHlMZq?BGJC7S6E zqlvcuB4!eIE7_Q%}}TCEflsG)t*y-=40tzo2d)M<%B1{ z5_N)qn^4VP{9VSseFDY$@%3Z_->n~M$9H@r68TKo+dbSFsj^6KC$R{__hL~)wR*De z>LY5km?~yNPJU5tkt~vsCmfk*1|5Ug;6%}3ZEl~av}4R99=H2h{sI!eCK*LV_Us~u zuAJnlLBFHtCQU?>uG_ik!h|e5-GfVzJ~b|0U^rn-SufnTd^yRZBgp=^z=TsJK4L#SM`407cL^xHAC|ndc4REi;Srz+wm-3f~yI__||!v2JxY2NPqD6A--@g#)huzi<9vX?qm zaSIRg2f>AmvFTrv@D14%sU)BoHovN)N~5h~95@GE*?rOLF@m*kK5hHF%71^3_#kl* z#+8g*{ks75a@|;N65G4OHD4fxYO<(iG1DojFpDi2SxhMjns3Se&p6rm%ZiuTg$SGL z3dTF(ak_0C9cYc{NEIvD`@M_w{F`l5jEnSz8%Z-IQfERvPUFdgpCD$a9w)J>jnA_9 z?5;=u7X@Z#8=>`p5Ft^@+ZoI#-?z_W6{0^-+EWcpmUpl;+1;%gx!v|kaEJbc&;6le z@0?$|olga$w{fqm^P{gHv#|~vSpT?#+*SpEn73f=^tbv`5 zbl#)0jU$I?^}g>eM)}?oRgJNiRoevUwE_{4znbXxwqbmI5=N$A5ux zFJ)#T+LAv^_JP~h03Sr>`ovxX%Wgw)CckCc>W%uC+gGmV!WNh5uge*am#iwsuF3wP zEDY6JZDu5$d^OK``RR8re5s)P!QJ}|nJMGwl-xp!yGcGh-?D zhLmK>t^(hrGgp|tu$f+Py~V0PgWs!q&(VkF(TZyfF2KG&jB7CPhoq2=cnphDJ8gvo z@06ansIxToalGGDCFGwG$#x(wdD8nDkiE60zhzDMoykn-ky59={U90z8 zK)!5d@H0ur)1ztmMoYF$xCG7PZ)tI_hpg~uTvs6-?LKnW>!w3k_&uZk(@j6|7|wh{ z>RTTohR_PY_yC)_Ep&)8K5OsNTI>tRUXr;>UNNgv8QQ5n>t9o znSyl+D_w2F(b0ri+kRB;65*YvnIl*ao-!WneZpLhFZy@`*9zl`2a08Nzj!5k&FET2 z+_F!<9fC3@o{} z8(+TuIvKpIm`Th03MTPNx#>8_*!iAKGn zs=#?Xxq7{F-Wj23Ne_PB&zPVo8BSVETwprsKbqSRaVc`>$hTcUSI(yurT9cm@sDKN 
zUUTFJe?ozoFgAy%ZtnOBqDxou!fPM?NgmO`B!rShwpFrJKC>TnWQj+!OBc~vA%k53 znu)cl&lOVqBzzfpzZ+b>b-HXNr|%-Wqn}5j-{lX}7t6-8Gk6OKap~&?C@eXTAiWwHH1E$_|Th#vdSB>i||2d zuC^4Ne41|`VpGL9dKG?D*0Ym$;p?+7la1ABY|SN{#G%~hcE}S@7bS)7t=adPDzD! zi%cFqwW^)GDe#c;h)f3wQ6f#QsT+T|ZVq{7iLqLqH1VZ0b^+eEUs&p=2n!}P)3~s2 ztv+sC!RZ=93IjD_lS4l_iFEl3Ew?8Z&yKtL z_Q9x5j8}c`m*W35MDf2B+N;=zi+%0}KcD1;{fvssxW`qQS{4~4IAj?GWiN|MrzsKs zk){2!gbp!5#=h<3D~ro%`xRckd>NlyODAlX`#0+OecJxOx3BLSbvvVhEy6j!ZvL|? zr{^I3)1Y`8GD4S+dD-B%Ldkuj`-MMly5Von*E#+M)cc;OV$9VQDz(>_9a;o82^ae$ zYdz<{j)8+r(QYz}QVd(;Z#m=ndKis=Kh$P21|7!(C=jZc$Pd>BSM=NFoesYn6botN z9fq|1GH=p#0z+;`*kw)Vw;UooC^ODeoaQ*WZKmhSKo&vv6p1;7YEjVD*lBs547N{F z(;Sn*IMVfCnzLH(YBX<aCV@af<& z>Jx4Yj|cC)YiV)~;xabO7@tv~m8ao$v7T71u(7Uv(;68>q2?8|=y^fle2@ZF&{pre zl!ecj+4)LbzC=Z*W_asKl@6DiuL7gvCkr|*r*`kXkR~}QB|0duB)?@nYF>Xzo45N; zN+QK|>;4k~Q545R81P3i=K81ccP9BWWn0`LXer6m)jCJg=FJy6eaV(^gSf6tI#J5D zG2P9{N;#nLEt3^y;mVsc6xXhiQqHW za1jD?AI~St^m;JZSL#w4=lCsWeqQ}8a#44}1-;2rY8>xf`TA@bGFRm#-sw`B8x;Jw z+0h`S*`XZM0k1D>MVZo^P>dfVJ2u?%=@}ahJvTTx$28yI{m%QFZ_UKP>S|7i>n!&Z zw%q{Mkb}8oN68dv=2tU~TGkT(@AtC8x8SSd|mvL8bQs4QQKY6g0 zCo^?4nkM4&MUn$G^op7Xh+HtrbQ>Poq)Ua)r;1)!7_@%0Fu>*5A0JMUYN#>?Lm&c zaGaWt7{2|)Xk*2osN-^lQ7P@Mk#f4_z46zSlghuu0a*LGI&*9P+J#0a^EB2>8N$7o z6O;T)BM%i*FzE-21!MSaB_q=dy6x7hx8>RNFSI%}ux|y5Z9?x9zY?`qcp}I_fX;fT zR#0?Z+pBt3PRuq}?%f&&?~$S_;g1zq?bVK1so5?%rG_@lsnxcwgmEbv+P(XlHa#&( zN*y~)sLL%lX+~+ju8!FO@hR!MCb^;z4d$qupYG05TnR-%1HbCET$3y|W4-Bk-^+>? 
z=df5}N&Ls*I%@g*>m;?K3i4}3un)b_bXl)hHk^xO`T?c}6MV_*bCDe1wFqs|sIhdC zT1KK*HD78!mvUWCkTQ95T-L|yh=1}rQ(NOctGFyZyZdG>&r@w5Y)P>}%d*FyNG+Y( z=Py$~XKk#ijbER=KEpmuK_}`Rgc(>XvgBfBuR9<=nl3Tz9X$Hmy!1GW$tPF$AmO!0 zQRlN1G^Ey*cK2sk>dJZ&zQ{h-n3SDG2x{?aw^&_~Bbp z48{zz@%QuOihF@{ruY5{F~`yC4X!$;xtkdAcJlKIe07f}EvId$-%Pp>(vRUC21R7) z)P_A4n=MsVB_HKb5G$t{n<1Jlv3Ehedt5P0r$Q#y2UhAwG__qY}G3)^e076*MLi-;;nwzKynGAk*;o{3xboq)jEa}kIWJ#|)>af^sKinq zGzjC0RsDNYcrvCY)>-r|lFXZ0|3xxfFuL{)8Cw|sl(^QWAy;L!q` z1l($ce^-dE;WIArmeMKALE@I2ufy&+8%uU94$1FIm|1;|4Al=PJ{g_IO|jfsp|h0W z{^Zw7p`3WynryRjv=7Shru7gk-T4_41G;YldK#XM%Tw#Ab~{@&Vl3Z3?p3PW7+2tx zy;DdvW%c=YIn6b&8DzH_GNgRD>#Z|FE>YHC@;{_OeZi0S7{aZ&Q}$5`hqzfYNOX$XUY)t3ZDFJ72?Bj1+9cT?o>&pt?ix2s*jcVx|hiqZUg7gF@)o3!#qRg zRE2>|ZGU9^nsDuaA;sz>-E4B_`*2Gl%C5@!0c@$Tug!e;V(AS&G*8<6W)Lk%XcU;| z93&Juo*)Q?saa&n^(k545eIm8F>LRS9kUz!u$pBDOiBLmaJHn#1)ZIqRv#^i7p&$= zl2zkclL|Hdt}GhSY>k@!ZD9w(8U#!`Z<-t}G~iaFe!|EvI1V$lKarOv5+8XB)a`FH zNS~jl-!@Vh^*-M!S*tU9Mnc#4P(?*3G4}9X?paOqZ*ofX+hg_L7HCbboXJ%?U0?f5 zzim_?Y#SHSBCvCk?uOWKz|x2EZ=ZEqFXw0&3O?N$ywb!}|K3})jm>Dq_#V!ZY1mY3 z)D_-ddO2-0qspNnfI_t*TYef9(*%Cn_`-yEizikJCkun+?n8E6@3!>X>Fx9slTC3k^1TBc(bUZP7{smoO! z8RGm>n)TDc6x>}B)gi3kZe38};g~91bLDfqtR>L9eQeHMEjpE1YIHxfrF~=b{?PFJ zV)84wI(T;?Roc{5yKj7p!r2?`+y;wTngAk-W>Gq;0NCC!1Dy(?{v8fOvZIUtwRuLv z4PVq$N%)fK^gYIvZ^TR=BAVuYnXJC_{?0~sE)GMA#(3&pUEW3paex+u`^cDLjDo2& zFz4x<29(K$Oa=8i<*x+E9(?H;6MXrE$+Y?T@~I36Rd`kT4klbi<&0gi0F4mW79;+; z{G|+T!B0bI@)=XmBz138M|afncVT1unm?UKl5o04T#h#v=flT+?Hii2Urnup@1OdCdBB}P8y%$C{o zZ7|B~d#nCle5Waedl9Lqn+pAN`OE(ae88kloUd#Tzr;B5?Pf{Q%rfbs3D2UppWQn1&)TH;Lx`$`vC@;$9gg3M=nTJom^;~>#Akj%M&^nho2Hn{sPKY*@*P>d zP}5vFP172e(U$&5LY$O1jk`VhVJag$K{KpCA2*5^=u9Im-fZWnM(tCqXEgm-3}1Wx z?dFn?sbiA}$e$l=FzM@OLv=06t*snxl}A57&(0=%;+PtK+(U+o_F|@TOf$9HkJGa_ z28ow8{(`kkL@*PO|D;F8{*DwcXwa;lC2)hB{e=Klsk+9l;juv6qP3VcQOZ;UP;E*& z#Bb6*AAGWZl1R7Kql3aKBC^tGgOzt<&wwR*fBU?=YEZdB>P%6!xLiWqqxPeMTP4%? 
zKRy4l@#WXoPvJ$Wu2-Ix|02FC z?2#5V*&`U6-^WnkYd;S9=-bHm8q{r0-D#K<&&wSD`TN>(3JV(voy6np|2aqs8jK!R z=L|i#=wsawI5d-1|NT92H_a|N6IIFazgLUs^T$E~BwG z%a(LkU_`;xwC!HMuv4dL+Git}EB%3~dASKSrbz>;Nfz8>MJem9K0j+Yp2Bx3J(xxo zW7^#x!fkuQH_cMumR{fP%N}X|Q5bnVI9@AGRF)&u@M%Gcs%mH(cQvNr-MnR-=^Ivh z#N=jA>9~m(CKgZqHS?wJVk)Z-8mvum;+-77Y2`fo$j_>R+2?5??ZFHV3X6HJEgAFqBA4bj z?-X{JNSzM4iJ!K@=S&erxH)yGT|4E^pLza~p8cPkKNFjbrgg4@#M>!TwZmIa(zOP7 zCtn+S%b%f3>-KtSgYo7M<5&|m7-hHP#Z6Rc>|fibGz+iXudpg|sFn z_KtetcdG$UbA|;CgG&utj`)b}JJQ{E_n?tD2#)QP(TRtW*yu>tukmDr9KZfw|5qX9 z=2^PwPVN?3Usvx3OJZzEWeZGk^3U^68GpXNn9L@8YJ{XVkeX`gM{pY-{b+#17*MJl z1$zb0aWvk$+Iv2=dd|OV^yB@mV>rEv7jhSh@f!vKYH{}ApD!CF8reOFaOA^v&Xst@rK2wytv53xTsO zkCGj!KSXXbtu;P;wLidG8IT?N$+_FN(pn!E)cgHUa)==7sgcw^ABsiAF)O*M_YqEi zSl9<3!j^d(gzObbd!Llw+HzwRlJQ*$OLfqjAp0{PY?{-$u@893ow?6jKL>2oe@D3R zwjTTY-0AtY@b8`vOzITITH-vg(L%T@(?4U)*Z>+s+fZR#-b@G@3kVKpWauxpnz7iY*RegEBt6t>H+cTR(AYwm%pbN#wfHFhbuVapQ&<4W4cmx zEFmoXYMi}hn&5;`Vi*@iV0u#Zf>GhKFDx};2kLuWE~wGVQp&{qCd*(qNy{+64(6z} z)4RSDA9Pm_QVhM&3y_2lg(Oq?r;_Dnfo~za1u2sutyFs0f#U8Pg7#{M3sUHXsvIjI zKYdcjBMv6vI*gh8 zZqk>TIiQ)~F={uH&}Tyd8l|t&2ou4io`pjZJN0c@M;=T2To|lV6pA&c=iVuEtVy+z(j z0nzD^XTZxXK=<^dIGc>#W2J7un*1$J{q$L`>l!gCA}>a~V^s`05<+#=XB4s@#x$d3 zQ{pTiR;-LqL-qa5aIOlx%JY9vVKbKbo5Fz32Sn2iA7NMYFBU-9=LZO5@p?P?pLNpj zl#HhWkRBr~K`zmFPlhi5c;@a*g4n85Bmso12odhaQ<u!+ghnSM>htk#4BLJb{KWgqQ_uaw1WT{`za-4$M)zPv>`S1g&}`|>(%10k zUp~5u2?lbOnN(T74w;J)QZ*a9Hw+&+DSk4A&6Iim8JpsZqZ!tKjxJtxJRLoYXzm~H z3VkLZsVqlND2qx{%Jm+VewLLCgPcDT5RMx!$bY`!-Z~qarWr}5s~AHTWfB8Oh8>b& zi@)>(h_|9s2I|u0u#m@-dkt|vale%;PD$W>_m#z@V>fpXkF zr6^>oMxvV+_rQDh&cLM9df5TW?CXKF*PlwXalcD1c-(Ot$>>d=++{tZ8^cx```*{_Cs4wUC@Yh%~=eK(Z}p9fLTl?LRv{cHr~bW<;i%bj}z zwCHRM^WJv#{>R-sI)6t`V1j-X4_|)jcq7KlZK(>*N8Wd3>?Jr#%KCNx!dLbF?)*pi zY~}qsjKr{I)AcZ)!pCOJdZjjEMwerL21qo@|g)>u)i35Ld2uR)T#_!(^OgeOw1%kjO&#`ewvqQmPPXpxsVlClj#hZfB1 zY)QumpXeO=MBsa@ft@Bo&*@Q!0~9pmAzWzBa&xn9XwQZFF#?`cd;z^pbJ|}#yDw-} zIQ;~G;1^l1c6oo3M*fSV_Lj2ztec|KKZoy^Pul5Y%Ld>C$ 
z?&wdoASg>|%irTI=Q%kim>7(nc+eAjLp=A6bLXv(g+8mV96S7UtkMI^;W!myLR)Hu z=+U469{2#lw5-!{Y%dhz*Iya#=57B-+lw=S{|Eej@9ZZb8TeukyT&oX^f-u0t!g=C z-U(gwzrQ)c4Jz#NPUttkV^u!_#-&sa&x9iqIILCFwfZ3CrnFVjb*179OSlu77z$Fo zQd^Jm-N_^bL(DoH4f8Tl>+~Y+mN?*Az4t^M73uxW)`9(<*8O5bkTykMa2OEwxN8dk zRaj?zHjG{CD6hpm8>%$|jz0Il3Sd{QLt8h!3$qG?^5SP4ib4l;?g{SA!6^Suw!VO z4pB|89vw1xrV$1#M?J0o!{9d1H9JQN!3|NC`K0;Zd^$J)`h|V>Q`s7MgN>tTa}y<_ikqMszmun7*9F6OW6JQelPb60oECS6>43qO<+kZlr{?XAlB&&kxZwI zRz_u-JugJ>(yxa^<*+6CavQF7!oS0RQih*$9G>zVY*WX&{4nz)R8_K}TWhi#E= zJ&l5KorO)vgRlSIPT$>GcJ^p|$ZC9{S9ZlUOps-)phS{uNFkn*bsBhtLj#~g`0oDi z3h3M9R;WpL5HDl9e^N?xYH)XiwpwRKP7ip&1Bj78cw578zfA3CST5|%R`1O|3>lHS zd-&8m)8O+iX#f6uTd8oPj*Cw{|8M${BNqo7y$+8F<<9E?3B{P(#F?Au)Ck?aITlZ8 z3O1?dgO*6{RDzSh*C9I4E4r~29@_t;dT0D<>_7 zIDd`JLF;dw1mSBnN@U-TdFGXyoj^}<6Y~Y_2Fw9v|FnRLo0b>6^z+jhXc2pj=fx>> zq(7qd;jBClApQ19tGZ`hv*vUOyCn^u;?E05@>t^WE=z^RKEP>6W!nardaP<(C z{{~RRx>G<6XM7-rjWr#w9#V|YD{=+_$plU9cc<##(mf=Qyp#>M)K!U!s69~nT7NdQ zW6|7Zt|*kUz_^4`*XawfonWcm&pWt+1# zOAgaK)=$|QS0-R-jVIUP;*i&}$wKOB%wVUKaSq7=xxAb%3}rNz^j2F`InZ!izYo1Q z-9AGRjYvuAna1LuZx(my+MJSAmn@1s8d&7Vs>T{o&+?P$>9O~DU3b#^^6D=Z2%+?I(btgLC{T|cP zg7oiZo+}*Dxn>(0a<85`L)uzYwfGTnRRfh>qC`@mHLPA zggY)sZ|~!@1ao}Y;;-H3RZS?TxIjkc#Hq;?3M#Ki0-#CUe)yY(L@+Y2xetu~^R3#e z!86-;wg=a%v?YEq0y2Mriu2v^Z_JRDJ>4z4O~SYev{a&9)DMjV;UaFG@Dk^ko%Iy>2&B6@z2FCNBDJ(z)Y7{KKHeEEKW3)56p`; zKV{UtyJlTE<{K@gCYb?CQ!gYeJ0IoDJKMtjnMGXPCEg8KywMm_u{Z@L3kr&yic^J| zf}K=LQib)@ZTjtxC%n;#Vr%S&LWlBFDofK?Eb4H8|M)Mbo>cQ$3^y!(#t8)m38ne{ z!^hRg+h;fWA()Eaqa3ABdttgFuN=a)b+5c5kV@;dmf~!7K|zgMXM!74(8`+O^YJoM z0ODE$!$I(B&S;|?lM%`ImuT)h;#z-$lvikLF>9HsL}O_ADZLh=<>%_J221axOPqxk z)DDWHcyYVvE}pq?1Cy|hM_uA%Xa9>{R1k(4gku`n5e`ty5?1rw2Q4RkgiXF26ua)q zhj8I+fjy~<3g{=>dLD&VVbtBxht+2V5{0h_B*N+L^RIfxj*Xk9scZ|^#Tpy6i9!{> z_k3>)YI41bnwLkh=Uab!2sOm+6BrjMmN?R&J44>!Uf={p+)(fJ7_;VnkV+Y6G@Xi* zS%7hYm8jz0P=CZ?_q)brrKb9=y>9FaMg0{tl~zbt3{g;ynf7xJr8k}-?KDx!+Rdwv zJEf(QUU4m(LgnB6PKR=DXN*%?kuv)PYG4A>vDCS~K2U|cN3Wrt(%o**eex6$8&RKV 
z)nddWS_x*kVwn_Cy*sdkqb#-iD~zXrE7y1@g7SXO1%s<0T<_ERrpOzVvo;I&w+h!( zWBr~I&3OJsJQdG)%wk!qcR_Nv@2d8W^i!GlTUO^5;3z(s)jpdx>g-i3cjD56z*lsU zXd^?&fW+u%?Ro}{+Qb??=euK+?5Uji(9L>Sw;zBNMCZ&rdy@khd%^Y-Ne)0e|L~0M z6ibanX1AVW%cZ0DZ1(0i?SKaCf|z1MsqYlq`6WEHKq7qUd|_a$fk_k87rpvWtFrs? z{EO!v(JW0J1ek}6Jp+-E2B1amhvG)VuZ@0?&PUFqBbtWH3o5$hhxSq0yAWi0eVqQc ztzPO>(asVP*p@8n-+S;F?{`%6Ib!zRdH73z`LV~;lr5WSZP#t?!&_$9h=fsAC1$Ln z>};$|mY+ahYh5L6L{!dQ|agSb~$=;ajRO^ zVh4x7Q`gVFe%o}8-LFBX=m908^Pt9L5iAQPtlW;Cn1M}GzSf(=m0?mRd4j08@HHUO ze!IP(v*EQ%Ban7}+}E|!tBZ-a*qcLQu6|I_(WFAGTZ_Tr586PT11TcjHa=PX(YB2F zm3ok;>d^;{`|-gUXTCq{Fiqv-=3t!(UdSA*1z`jYY#L&oX7SFfZZ6jO$YE9R!dHge zP6&H6co($x#|B9Gbt4($b5~*UFBwtB=r|b!u?h-b+4H|C@#J4PcNIIsEjR8x0o|R< zbg-Ej+DB-Au+_7x=Q(Jjo<*lU_aJw3Z)Z~agJCqAFnz05$xR9EnRTzi3x-%lN22+3 zmEb3YV)Z&t29mG%uWR{~)9`6>VqJh@Yi<&V%f-B6F=pqkQXA)3aK)@c?V>$@{L{oR;*?@QJUL!$Tm(Q%F5`#@hE}eSh*g zaPfY6Pb(YJub6bZ={LphgD4gaYcHaq2E-Z+CR9CBdlZc}6z=Xz9D$&LYi(b?2YtO< z#W1PTmmZ*=hO6n;k$nzc^vfAf43deyyF1@cVp{ib@u5O-9hGu(n<4#yF=*V+H_Y5_ zyj-HKc_TS^6m==#(VieC`@3rQ&x0qUogGOt>wzVPAR#)PX{4Lhaj8~9J^$@;r5)b@T1Kfku12F$_=6)#E zFOp|piIVh^lyk%Y&)j5s!1vO1oV_0d0OP76Ei;vUhC$I^Vj}C0( z8NTbW1)Q8uGKl789#IZ{@~>>Rl8b|Ki1R`t22p*?=BYQwo`U)1=Gkax#Xm9&49*M)_m% zIIMpfv+DRQq`G?L5BFROHTrc`LNk4?O!7hWxjL$Kkxa=}61>IaS0*X?jL4&-w*02H z%KVgpSD&;!i%z30t`^J+V=>aSXr>7LRk{;l>{Ck5s5UiNav4gyBXZwxhrK7oNI15Z z0#4q*Bw|v4V49i7O5v>O<^ix$&%ldk_jU2dWvlJYDmMBc?1?NSQJh*KeN+Pl+M#O5 z!WK6pp3)Orl-MHOhLOb~`O|c)xE>aqH3=7RSLQyIFd2bd<9TyjDWke)3FXC`{e6p_ z+jb}14S!afI;YUhhhDD?I5nwss93dO(ua*i+>)3lI;t-0>3ibvFYy%pUQtq={g+Qt z4xh}3m{pTLvh!PPMf#Dg*vcj5FWYf;V_u&FZTG zyaTa$-{y^DR(MW>k7OIT(#S_`Lqj3l5*>Gl*eM?|UpRDNIxU1LgUYQa`08V|_DcfR zCy!vx-yvG5Nd9nGN>;2kFODzZ;ap^c8LF61DYThoQkI&X zlN=beI6X}=*4)-hg@(H`+JCfZSF&e+eZ*+H7e~qWWB4)-OFJw4X|Rgl$jX7)KrzmV2h;lc0fJ)x!c-vFo?ymIKX4!+$@@)e5* z-ahtq5Ns{ASw5T)d8G{W)K{DCG$yY3^!&DjZE&xTTbHH5gy&8O^*PIZ^R zZ(lbv<9iYEb0&V`>t)4&G|zttJ1M33HgA@q>VFUo#M+L|i=sX;E5`z*Dqmw*>B!uB zgp`E2gj|F$LDxRo)_!Ub`@%H$fb0#f&ssr%rX>v=uy*W==(lyA(>b;Vxj?X;l1foBPjeQ19A 
zttBtNgXYC9HI8V3()qxWybz*6$kBjjvO#f_`Zr5ntvD=U8Mu!l55sz8=YhI|ZDwk| zr^>n$i8Xina)u=iVjmt3-(Hju_(6fA`A*YNc2!<&DXuj(13Dg+j#7GNX3X9=OipBo z9!$m5+^&8tK>Dds^iy%jpQBLx*PmM5%d00a^@_}|ymT}+TLOj_R~p|cVKOz(YzohD zTo>tkV2pqAI4l~Q zlexLg^O5KRHFYPvO|Dh0O$q=hQqDMoxuVT7Vz!$r|a-SryoGgCCdovdAx1}FEf87al z_nq!^k7Hcq6Xqn6ylGL@;}eF#L`$7c%EQsQ?{bHGFwaRTasgPL*CkhI^R{Ms=nKCy zbcjc*=~!4xm}U=I@_NPK6>Gw_B|9B0Oz1W1-_J@Gh|Vi7?H_zwX;IT;M+o6ADaBnT zosLdcd3N7!*WMVj5YpzGwg`<|fp=Q9@T9Y%TK9&z1D?wmX+O z$#7GgR;8&9e>hTxK1B*zKyTRi1qxYBe)a~{{(h35!eBa_YBv%r>u83HiazSngt}*E zi|4TNvI_e7coGjOqtDgyw&e( z68p2l5i(|M+ZVmO1F0lC3o$1iHeU?p)Uvheee6VJ*pv&M5ZQZy6{F-CK+~x9o{LQ^ zCo(c+{QD+PgkS;9;G>u@i<|tyYqVCUR5F+-l&PX}xt4uMZ9Q{C{;cJ--d2~@*4Q%f zPT$+Tlhh+614b?tFh>pNax==E%Bv#viFrTm7H1Q$A0r-_ET^+}FkuvT>x8YdWZ?75?kK=ek}Bl zBjD!or6oW+dDz3;Kc_4};=$Z^rr%hh%AqQ;!g&+N`b6*xd)?fwFx$Jj)4Rwk83mOB z=!*MSDac^f%k00M&Uyz~wA@(aP4U29WI8N;{EG?Q>rXWH8Q2hqtjmY5)Vx>Jk6U`h z;>#Lw8T*ix%1P9ir=osXo_sK&Crtc8;(x=ER(H=ICEX>(R~+fHeQ6&ZY9%{+e-O_Y zw>!oQYNSVa*}l}*`LPaPb{Kr;Dpn$Qv9XW2%4Kk(xz{XR?3d)(U_H~KYiUcQZ=@C^ zy!6J#%GX8+#%R9bYjCGrcvLX>HW|0qM0ChFZ>r`|AGHYW%DO1Obwi}DbewVHGS0hh zAnVKWxlF-TT^NvY`pTiaY~tQbzNJmOx``^#!icpyV1Mvs}a+|Z~gka~W- zBB{}jc=9yii4NloLxV(vr0UtiDlXwSE|ab}dY$Pb)du@=M94D=`>8#TzC?LH%DeZy zbS?Bbvi4wpAX4m|Q6qiB)Fq}@VKaJ`kwv(^N3$s`;(ReJk}A>J)_>+{uW!4unF~7~ zsJ%}-xtd|DN6{fgCs4e95w~0NU3JwdoQUbB{49+cV|&;rwNK$rIi#7FkeH`}FxOni zOgcTR?w{sY7ufeFkj>_(tV2#5XXRwQ&bnxPnjr|P})dSLYRYOcb@%9&T5X|&V&H!*Gs3Et}r{MbMIwSth&kQzNh zC=NNtouL(&EUEiVD^Ssy+gLl-5lN><{2ro{{=3|mxhi&0wxfhq+sV}~Gki&3g-WhU zUG8zlI*0z9wP(4zHzJIW)iGlBF0bM;c{?lPs_c1(Oz%{Es1(>t8n&{wO|-eT4*B;e z-G8^~oWj_883^AEVp4UvRi702-S%y6JaKW(bSZ2gug_rMX=W+OszGMVszO~P!T1;3 z*`;T!Q${tAWf)U=X1aZ6nas!Q`C3L=xti80o}2WQ$+S{*Nr85m5vcDrPp!#v8R;-+Pu4bY?I}$kED=%w0GiMLb#`i;zdJI zNkU>1Ts6p|Vh)ErKB@c!!8x%>%9yW77of=>*d*qs%(=8n%z3miq!f}oZ7g$H{(lmH zr&{n{eaMUJH2qxB?!zZ>^7nB{6?-47vSXIMQ!xYojW=E3D4O{)lnqBb+^*au}UHp zr&YMCu?MBNnjBwstQ=cGmenUtiUVlsVAoXWt@OdF9Hqe5eS|`aOb*b`R6g2LO*Sy? 
zYA~|;erkxwLz#UsN^pPU&-ECJ;EGwdgQ&{;{PtNUiZ@otLC4#rJ%IXY-dr#BNUqIiWA#T`2ZoWMP8JG8(GX}@ja!Mkz2e%kQtRUC0IMv2(nKi*qNUyyz$vZFG1cIJlvuzq^P+`Pji}k z4_$5Fburrct^$KkU!}!^RpLIqfArz)anB!wA0POmUn~~5;T%UvTiIGKUbf87m~oY^i^Ujw$qzFT`zv^D=poh+aJkW; z(3wRNO-#CLQiiFN>Rb0s1fkm7Ozw|xKY!Y?BHDAkZp-1;qnxU+%kOJ0|s zX3h*}IDCF|*bGlM@gY-JAxrw52DL!mOWggN*cx4D+V0KaZ+#!%AF+Hi=0mpU$svZ^ zd7`-#=hP#0q2cH*VSWAl=QY&9nH+elI-f4Jr3@}-A2Y@v$;jgc%i#+tc z=XbHQdN_ATw&7BpJnnC)F9m>S>ajN_g@1X3Tf`dd=xCYwRg*W-ac5*J=jRe-^eQB_dQZu6y!>!*MB<(6q z;+F5YU2301jho~UfNJ)n8O zR#=QSDy-<-P=Y%j#kl&cQUKG$!X)F(*7WCe%Zi!X(VkXMs;<&a5UxvBSzd0O-O8Oc zsNr^PJ;C*U{LpTL!?ImD2~ShHi_WWGguL~M(pA;;D)AWdN|b9U6wz7qj&_h`XEG=< zk&6KFMVbrFsK?eoV*XWmR&>4QrKu@=5w5-kml7r-1q$oztrq+OkDB0 z?**?NbJ@f_Kp-r{%mEm#10Md z7>PuQlPgF4KjyX)3p5sWDVy#K#ym6Kv!?`R#lRpeac>Fc$WP|xY#*N6-nu5=i0u_i zmT0A??imgq*f8DORI97dwOC-G`%}oY`d7Hb?rtI-Ro3e^+Pn9f#O04Sb}|Qf&G0kn zUF%uoR~WgBNG*D9RRvd*O8gV$UA|#Pn1h*zGd0=O#y=^1###}~#Li28=n6Zaro7=` zp>g$BVe}}4)KIKog5g)!7~0S4k?$%`jCPAjRe?IprxmQ1%2kVPSvyOn@)zKF>7k`( zSE0n*HkQ%wsL1kr)SU>WrGSQuB6gSv?2l#b5*gc5?3@ZMj$}u-)bOK}14H-e8;R8G zeXI0jY$Db9hI_-$&+YyXw7Ir5E4;ve*i)yYU02yp2goxovAZ@HaV27_SA%0UH;yZ!6u2d(Dv@IH%OWv6d87#;NRE1T5u0YRivTv6KTj7r0QK*V&+R+ zIyBrM|EajEIFZZfRFu@}3&+{R_QD6GuO?XM3?803%(AN9ELve!;nyrXDR;Dk^?t(r zW6`6B@9(5wow$D~*z+Ghvu8$auaDfFvTcaU(C^@FvZl9Gy?pcAPy%-#nP7)K!RoHw z^Rj-Wm55%pGc9)T^+3)~YL2J7$4Qg=dE&n$=_Th5 zdj5*(eJ$IVvHqxG#p2l!E!ta-{0_NFqSaA7C*4m|8BhA=hG$i&EU7Qu9f&aEX7)Z+ z(*4ehu02eClsS{Q#q)zsno59lrjBF7KtAM$viJ4$u1yxJt1_ZqyX`v#SfBis4>nX} z_NEC}9(Tdhk1^pQ$M;)0{J@s9o0x5m^t65~t{o%IuN+~IY>&x~s1v8-R4iKovoiW` zRyvq-CbpZ?f%2otR*^(7wUDv(^d_dQR~HN>F0r&J-F`&HqiQW8rtY4aeT4UgZ&`_@ zUwpH*LN)mU-%&McgVoU0G9Lqx9!Cxu@~P#s&I83el>nV9natNp(2#+H>K)cYJNyIG zRU{RuFEQv%s(uC;M^(rC(QB}9>^aqD)UZ*(8dUk6m5ZyUpVX5~4({IS9^okYN4$vMU6 zHeQ};c$|UbR103ZRB`iXR+1@kExRFHoXtw5t{!U33U^b=Z=|7k4(USorJyJW9H0?*@ot*ieDR_4dW>=N1F;*&S=m-a+v7FVlAlG_l)8SE z>?)QyP}g+rw8sx5=irVz7R9})aW7>us2I)`Te{Xfb_GQ=odBR&*>;3HaPf$HHvTl7 
z)g_xsGd_^#Od`i0r^@J**7I^MnW4t?agaU3#WRdaf|WH8?iXKrtLvriLVR<5>3EZPfUaNF)b*hukFXE*OA6KmW}<* zB#Jc5_-j=9_^eFMe*&^Y;+kAFx%$-rHu4K~{L!L0{%$1d_I3XT|}pJvB2@NMm^b(AmN(Jnh~b1s51Psqf1b;fm6uP8*}W5$Zft9BkVBG z3C>-ix?j5vYvml#qlx_bUhvtL8orGd)~Qe8f+b#|d>RTtxCeqiznX z{`PBU<~#Tsp4W}J>-}`t-lKbp43*AL!MD6ARGoW!p<1fV)?qAl37kLnIplO=u7H83 zn-}!2FNvhu?L2FxCdS5LhaV;iA`0&HZx$o-Tl!{i+nCE~*)=;{M%ula`QHf~3Q+8Gc_@T`Y!UUOt&K~+2$Jp4rDR@%U6j^7 z*-SB26FCpi&c8b4({{B0tfia*tvYP&ZGAn?OfiLnJg0xnbdpJS8lu!Dz!#cfZ*f2m z75+L2NRvb%@52&H>OC*~e!+tZ2~iV@f#@FUy6iFyI7)u}i&AC-nc|c^uUs7Vv>-zj zyjG0xB{S5*xU(RC0q*ioUxY#zHT$|x_FcnCWF7^DlnELzDY>lavVU@JM-G_%i5P|jg zGSjgSyaHfs?z#Ud`g?I7U@(=Q^A;jOf8P3G``TNQ#b4sWoPG}yBQa`E?f~>x^zTXC zz)zFK%F4<5e6dzfy|vqGKQ@?Of$-fXhK~0=XOhRb#2a3+xjO^?B+dvJG{3b5teXc?3lOu{a=(+ac)7xs2S1go$<*S$239ss z(r5U0{T|C^)Bl>J2iQkZ#-l%o-yzjF!IRM(e`Q|QrnKRKtTSB~enY2rf8d$;W~1hL zG|q#5*AAtyhmvhK0s@R@V%JxDK)U_5x!zrUd^z-gAZEm4COz1Vo-nGIS3?5Hs$%p)t#=Q4_g4k~yTI_2%R1?FzO474o~yDV3?KF zUld)&9c11cz?9E%@eCoZjQTj3i8o7;r-l?~Wy=sZ3oy+9hic@@k$CMJh~-a-l@NaX z7^;nQd+q-O4YLnmdG8w)ZlXrqiFsq7t}6ghD8QGJ|9;(J=-K}Jb>G&W^WD4S7&IUP zY~D}X`e+|11eP&0vb$91b=>$8R$I?}`57v+^#Z=Ed6hA<63z5@v37oSZ~*YnQ96C8 zyn{ZYr(lRiA6%gCH4IuC0%!jI*~^?eC1%JrDzERtc7GeP=2-=JF-dT=0?oLKxBdgd z@Uw^xPNWjnfN!Kp#Y0v6AZC0FkjP>`K*;_ww61twu38NzF>B!w&s5lf>}ybkg1A^- z&Lg@KDz|Fzrl- z(Au}%7v|e`-k;ucf3R}>r34_`A)U7h!N?nUCVl&lEh4d-o~bD2q8nH;z2JzTB_|qZ?CSCistJ8)l)_A$v5t5nnhsEoV*Wg7=fFSi`JZRH8Q;rc zEjO(VeLGqfkn?I2*cOBst8k7{Jp@m)f0ZmHF2tLnr^#+t(CI!z8k&yj86pA zo}$;YUv&t20Qz?@@j6DK`=_n*>KS7=x^B{*s{0FJtK~51-GCax41@#+X>I;w6f|K4 z!T2jmJPgj=tbjB8UQO!GbGU)Jn>6O+^xye?%8{tJY<-q2>koTt$ApowQ5>biBi9?w0-B-)Th8zv|+y_1U29jXszSuScuwQw=|gw`mUU z$tQM5(Cy#p+uzWDxu2uk;HhPz@cU1*l_p#0&Z>C@SLZi;V3l^Ky{%6Bz)kcIy&2;L z?g$ROh6&ML+gr2SO5t}hQ9$T}lc^mA+ZlA*eRkq*^HI_&Lh6taxE2h*T3Pc#25v$* z$qx_$-LkPmrG{m|O95K%A4-e{&%Z%n)%QQt41rUZ2iWNJOkorJT^&a(lFeV`Ml;iL zR6K=G6W`7gzYbi*Bsy5*?~S9z5(suH!JK~{$wT`^IkZ-1`j1v`Z&dddYQ?jD4BG4t z@`M2A+$1%Jpte0~oI}HigXxfFFqm92L!3~#*`FbnKA0&O|9AOFK}$VQssGnfAM|IK 
zQ=6*nfmwk`J>_NvbSB?GEwF!E8e$*9RLeduXmn7aRKl%*wQr9zx|fo63zZkV8j5s= z22!E6i>9mjcQW%HyMd8vQjfuC)g>MIlrv&88Uxo-S0U+ee!I2^HdUomaj18N!}3FH z;8NBMAVDJTiGLHWksx~lW8T$&r^M~h5#YOIxibABq6?m)S#DjDfbTq9n{ZC)8|sV- zCi#*A5kQApr-~VusgA%o=M1f>GpLtjrL9Q}0X%)M8FU@iQ$9*CmZ?S#RcrntD@9rr`K(R z%xusF^x=ZP{yE9d&@EaaIJETt*U|H?Mh~MRT}>$8`#9Z41B|9O{vOFDZjk;Z z^$3(=X5pNeQUVz2#%Bm3;;0)$>w{33hy8^FpD{=we#I!QkH^OoHS|PpWc{vuLzmXh z5x%r-ZD9rlsD5P;mPU0uGhlq*05bdLPs|-0j!%<65+32@&Zs}*LSr30me4!LZogzQ zpt^%H*1V@FJ4;rg7n)uEUKzGvL*64id@#WK`#&O8yEl$}Hdfja8enJk!Th;En|H^C-Y~&= z9*s<}XHen|g#AFGRk}u}m7W(G-6q=0Zr`CbgzkkGKI|8F#{l8ALcdGMar?!1w|2`9QCT+6< z=8JivMxMXBjwM%idDz&qbL0feW;V%*))Vch9SuB|eq}r5VzNkZIhG zqZEB(Op{Qp1kR>?;GEeZg52utbK$S?|8mBXsfPH$X9@rA zy^P|O5Ha_GdFItqs7;}s;W)hi6lq_kgaPP#cmwaWyiW>df?}J2KJQt0wgx=1H{qJF6bz=xyFkr3Z0Uv`7 z5-@c;lP{J$7_gSnKWy|p`#uCQiCKhY21hn&e_H@LyzBw&3r33Qra#sDl@P-;{^#1npP06cCN4GWX(}{PntRM`9wG9j{A3p-`+cj+H#x&6U^&> zZdO1K+1Tgri0_c&@CiCggtW5KH{S?auAi?Eru8|jDhk3%@ zzp*;Zx&4ZG(bHnu4j&6nFGZctd|T%_ z3`9dYqmmaOHs(}D=4CG+T)PMVcL98c)f%_^2ZV=fgV8I+_VxqYWuCwG+SbJfY@ z%byC!w$G->>7gUY3nm7^~l-T9e_FmYk3Rf`Ml{n0=ns6>SDl5cB0<5ao-rt4W(2bK zX>?dU-nqr-TsFr1ymR{CKQKNT^vNewg8b!iEWW7GGj!KZF0lxMbUk4EeCNvQg==en z0Hk231&=ovtBJSpUJ{mtcI>E(pcI>X6Oa%TEyx}er%j5?+~~WWJRtr-Vl6glTvbN~ z(z(d@=1SkF>A+<{292eN`s9XkRdJPoJ9Npz3RK6K$#c>}VQqpAm7wByKogYcgUVP@ z=+iLyR=t%!W&C9t_4t&*2D;(+tVk`-#ZrFab@SsB)lUn99mo7#;Gx}S5ZGNqClVPt z?hYZCX};>>y7cClKZz#p*He_~*r-<*ZU z*6t7`9%T*5Qt;VgvI?r+8(}Kbiu!Uo4`W!FtUdPz`l ziH15mT{8_&=TR@_-xezCp7uyVxF6W%{F>Qmcs=dfBI-X#V{?oB_ag{e8}YR#;YG44 z((Ws5cS5bvl+R%6#9`0kx%E+NHNo6m_k=Bv5gOT0Z{GZ|cdYYeai6Z(r+&AOB0uG+ zb~SRi>y$Ssa9?OcZDq1`lr55alSZb#6<;G{VzT&JYfTd4dKH$Ce`?rQI$N2I@nFM0 z7Iy-m1X7N!Hu4kX1g)NY7=UT5VZlYaK7BG~`aMsquze%*6_0jj6g7v=b<#fT-Co_^ z408W-Hj0Wp?&z&4FZcmTjHc^-H(G7b%}gkeHgtHjr5aXV;;h@@<>+DeW^OFS`lUpY z6}{}EbmS@T=C8yM<}>l7GdzWgquAY}0Kd!1@77-CaoEsFdNfD5Rb^SiHee1}X*q~s zW7vJWtQt^;=N7Ym#iMd_O42cnSaoqM%f`8}!<*iIj>EQ)O>t)9uAy<>rjZ#&^9FRU 
zGS1g|M_OI*NLzaiy>2Y?bw%Or5pzS|P7^hEuBs_#kuZ9bQICJ8&)*epS@#HPBAIhb z#mVQ;X5Kf3xrn*J^5W84xCwY-Ug)8%xs~qjNMe-Px zBqQxRobI)cy3hu;`tm_D1L+a-9qQ*WHg3MYHeFAt^(Pn;UppnS&qDbsk&k|jB&FP% zLdWyL1)`Jai5#4hs>o5X&YraAKg@=r?BYzvCG~F8R!3U#tjsR1OvqrLvCgza5!^e4 zK1`}%zkxMqq*eS>{pYH)gLXmCvDf*i?d!f^B?}dgchH|}Qjj$}&i72F8}(fh_r$bR z1i-OB+XVxTZrvUTNn9^pPIfDx?e8~m4O~CZpm$mZ>*CPPpV>G3eba1GyIrbQMTf^) z?8B68cVngbpY)LH|TRL{=Kr3Wt2St5yCPQT2U|W; zTs9=q6vMq3X=O3T&ml?N8kMeQl}TzR=Tq~%SoApFTKjwF?hXTRE-~lccP}gKQRWn? zt5KejbSM_n(7yOZpQS3vS9{>8J3ZwuJKE)fh|PYmf+YxAoG&cPW6$^rGfsY?5;wZZ zvWd~ko6hdfBm3?b=Z^W!p7GYFWTJ~Bc(FqOW1*t64!z2s5Tg9EIA$6ufE;T&iaD90 z{%PKW*sOB_jd*NPK;hK*pI=^(W(wlcsqWAPl43Omy67>y+&Q6hS)5c6A>6C#5AMPF z7I1Vqb>8~>EF8{!yMNu>jnT-Hd zF73F8T9PMDbzhewIPCb_K2%@QB)J9%t=BEP)HB+5Q$1f+L`MLxK^&uA4khY7uFVvq zw(G)05>S2Q+AwPMr^K|WnpS3MO5->cgSB;Xu#wnOw3D=*=3dHJy^S4DAd1D|o+Qn^ zQ$}odQk%q`)!Xk3TjYC_S2gvd;_$ncA18m=*Xj0VerD<p)i-;2D4_60v=YBZvVr`* z?g+8jK1;KemeWCyHXha6N1B;GyzYe1`HkrJtcxBEKK8A+5ACZ}TkBJN4z~>w--o^( zP;I8pXejlnVsLu4vKew;qgGA%6U+KrPx{~Vn?EXgjipP+>4welYE)bedxy(s&EU$n zFQ99=4+7~&pT@2I=2gWu&#ancR%ORBtM>;MJe^*aeoQ-*Ih&j1O0=0-eY@s0ZFvpj zM2cu{R>{FNR0)c<6}?gGIQsONeh42fqZ%}sjm^iWHYXk#YwqlhyC~VOdwAsUb^Gwh{fM|kH&Og#z} zlxpujfJF${lCtF=epvN4$<-Xw4>cpKRdn%oQski_0=e(&G!r<;C5p2?{*?B++k_y;x=Vw@fE&C zF@{opMRRwbhp}_H^~aYZ&8b!Gdd=OJv)`9+$F+X!#3m0Z`%6v853_)`4<{v zC!LeW=2y1h9wt9t%<%3yF4Ahq$&@{e^~-)LB?dLmBY#8&zqE}{emmxy&AivYjCeRN zCG!v=tg}-kqsfZ(b(lws+K9=eF`lT`~ zI90MEkDA!Y#@l_>>xo7=6Tw7XJ$W)yDp`ZNR*}0wT!#{$CGzsCTfsRLBsI~lTt@So(aelSy=8V}1`#<>?8TYFG;EH^R zDlNY`2fCk%7nC0)ZT5@AoJ(xikTKjl6S1qKC_*X1a-k~eR-dOWd4^9;T$x=P!z6yG z!ES?zdl(xbT}j$5NY%LiN=^JEA!VF%A`QRUDHE7!nYu3Apt@O!W}W|jFl%RUeecfe zy?h$eX)>0(F|;}}U`%#b3}m-;tac$NC)$`> z4s~;8&+@gP|-QW zQpD9X4PbM=e6%|-&OI)pR{A3BbSIG*i>iow^k~|9?h7f*e_5C-cI?R+aEP$-Jm{eF zDscVn)>6{wcBR8lE~Qfs+XHcQ_Kc-96#`5*U8d6L0*DkZ_hagR_kr~J(~C4g8dK>M zEpp~QyOz}6_p&aOZe+6a`g~<2Hn!;eW9>yniZ)O%Ty^ia8IyS`@-QQ-F+nm^TI=r> z@c6K$>q@rGY_>f6pBk_(n_)9?ZUXCC18fWwjVB%IA8Nmm>=sGA0B<=ykn7FMK$pC% 
zr2myHq=ikhBZ=WRBV-Go!oU9Z9ty2;cOE=YAqhB$o`sP=D)l4qv6tg-LzlX>skt!B zwsQZt7(ztDxEZauxkEACx1-;+yQ|}G^_(F^?+QK^tZ>?q%wS2jT{@7#6&ZJqlC&Db z7wNb!W)M~5$AnZfVpeE^Js9a{38^2NWSwD#2jM#AHO^E2iu!3^Dbru;r+JmBbSa1$ zwKC@`Nitg}kMis5hH6}>)i|r+bp@AMWNKG-W`OP`$789pm12}+xdlc2sAajXK`KoB zShmLl;mjYtB*OM2gHV;sbs;&?`b+pZERDV`xBlnfe^hBi&f{eQj*BGC&iQp4j65c; zV`Fi+-&Xt12{{F9$=#_QYs%BxMCm-TzE((INY zJVA)qiSvmq9&eMqoD@DTxqm-KQ4PmK`FC}=c{*Tjg1ga^3sV;Z{#xZoq(Bq;?}*pN z60dhM!+uXa{3XcifIm><;af&`$u4Ra8>2` z1*=*~!qT2S;=@9*Ye>5JWD*@0w=1o^jK5-y=Zk?tdMC3k*-TZ%dXIv$x`vojiVMQ+fOo)_6*FByB*U-)U^NVy{IJyHF9HZbb_- z;?w%5V4IvxT_Io@L_1xXxohcw-j(QmV?(}Qy2;M8vmN7FoN5YoJX-#6YkKJ!Sh4E2 zUr5ik;T3mth-+olLC?Opd2+W=ZdRm>EuoG$W#jKiad30k?6#IkxSVRe?3DZfL)eN& zv`zI2Lrn$@GBS-4D`V9kfeSFA`{NK}h#-V3b*8~3Q!2H1{Q!mOW@+D|P zscYc5T~x_#w%9zlj1~5QOO$m(t$7i&S{}p-V0rxBah-=C{huFHWqVpszIPz_dC6%~ z2x*aaXxE-&81nIxPF17ZBD9gT6&7FbGB+;T=gJAL^s@cjJlnUwk;hW`D0fs{@Ikp; za)>3}SVW^>8@38mgI3I43@&8&Igsqu95=pm|0+q37>~^z5NC|*66#>LUyrsh&X^O# zAE~+ROXGOpvG0~1lJzBNX^zuq|#aj+X!z5k~e4!i=j<_+k|*M4xDTOK8- zpm=|Jy+Fg|eb!y^9Pq2UqLVg14xQX~(xAPjIFic-+eITYm=tkGLz8i-j|i8Ksdn~a zLVaF-{QN!e;@jk7yKm~<8+_p$*eKW{cJ@DuxfANv-#Z;*ulH8NMLM{uSLq9ree~zF zq7KPr`Wx7EGbpj@KPc>8EDM`|@b!lR;g#5&WJOv1m+8|Us+|Y(g3gWG$7Y17ba_c6 z?t9riTl$Epo!@g2Iw0}v_$xFRYMc;uD9dc}B@8d~LRE}a(DUzY%7xJ}5v|~^tQ?!W zzeOo4wlXVo+y9EgoR*+F!rO-f_!(=*wnCFA3l3&(xrN_PidHAPvbmTQPJY3>z1!I z8V00BC!{nWub(u`Osb688HX~M(UaYosOOiH|2XsN9gV@bnJIJ;O1Wr^2|a!G-&dfc zbi%P6+9kvsauyw@C-I5il)lVIPqLe4?>bHLFk7yRu(f7R(2lpRFef@i*PXDqdALb; zT;7)z%vsa2`0I7gbW&O6C+fE2e-Wt3ruMIQR}WtaLe@69yE9_~BsdOv{vx$y3QTH{ z28oNX`i51opZ6<~Dv=GxMC|yniE8gghBux5dC~I}l$e7!>YCxIY4>S_NQ*N|K2e|E zw`rLU6{zu>GCvnrkc-Q`5_RMkjrnhK>w1M!?=eLiv3@IVJY&0hNBiEGdYCD&UiP)s zwXMZ=ce-nT)~3m+2K^|j>zjB(A{V_6_lE}0t^wRD#@EteJyQyw>6rL7(}@prFFeMu zwN!pL!@ZjHJ$heU(6}i;x(9lk;hklHl-Udf)YYaDglkEYpU!g;!~fee#2wDjv6K8| z4XWU#&nVC^<=*{}&F^-j@g&I_Rr?<~L0rk9Q5a{PVtvE9Oppr^P~!0}AvWZ)iz`pqRhoS#*3#6T;pJGW3`W z0|NN$^sS}GHqX!)D53T%7GGZ+#zYd29ktKh5JUOcXqX|Mds2O}&3XYz$n4DO%xC85 
z=yKYgf}~-6l3T!cQ#i4UoJKc4gW!gJTdl!g7u`!}7NbO~USUxP&7n>+BfaRL#>BI3 zs!v7C7N^Da*rf+IJtTT7K#f&9*`l{|oSs_v z3%pH6ahYciU{^I$RnNBICX8ZXSg`D5-YYMB@hg`wtwOk}z!xDLo%N03rNH*LqD^IhClV}5GBhbJkjBOiwfEx3p6ytt27Q_P`x6OLuUl)*NgajCnlRhl2uKL zS~urv4{0Y}*kRAW*(XvE$Nf)WZh+yp-}6g4wDug@WwE+bjNN&AKOwD|LeRynu}2;l zmx_6{-w#UoVd(du@)75#c~s@cA2P2G3b2JSHa`x596Ez+-H-|m)|p%)bs%+q1O4kp zww*k8CX;>%Vsv_;#8?gx&||#X+3fq0wA4^Tzv$!T)}<96F$CbsvC?2-jHg^h{>z>x zdRGhZcq-#<2(Y6}@}~CrK|@{Fq<@aEH^7nv5(RX5e-<4I)@9 zE~m480z{<2nUc?EkGWU@RNQttb zv+?`Y*)FsjG4ciEvqiZvi9!lYvmmlq(mBM9 zrGC{P8`x^~sxsg|&)NgbYP=5P!~Z&aE$22nlB%S}6&X?SXbP3pX8>+GuG(Pjr|s*~ zV_Udt8?VFMumw@Y^x@tqs)A5v<6J!LBJU_Ik3M1K{GZfMgD#nO8Vz)0p?VVap95#i zSZ37mNyknj__7jmaH9t{^Pe7ffJW>{*F4=C)TuA=C%~F2zJ;dL5qaln&mZ~;2;mfR zx;gk~1~%^uI86?ea!w=V50~7@AmlRuqqHZaun5v%i1CW;+wKo2ETfq4!p~>(!UP>Fkid~lmygpl<_J#D}#;rI5|ud$=t02wpXB!hXGxjPN5z3B@dztmESKUj&{a z81Ps^?RFacdt`Vu>M9_W%>#P#*236GD0)SSE@zJCU zI&u0B#E(}ERCm1H8VlO8-3hT01S87`KFXG-*4gi0ckXz!NsnCUw||^Ud1Utv!v?x{ z@!vZS3XS*nx+`qIguNTz%8fG+wkrezNM+(2W2%{b_FYPmH`#iayYYK>)^%XbtB=Rr zV5qG~UgV_l70<;ogg}#@`~hvz%s~nj&4C6MT8-@CEj4-p=}6M=blf7U#AEoTJbWOj zwSVo^DZv_*jFTAMcWUh&7dIb|KD@=8(ZadRGYF?^I|eZ0=pk@!lRCK~144N9mSPn&k{Ctmte$ z5+;M0H{0P-Q8a@zz=n}?O24gjCEnNJP5+@~kCf>`pY|eBFrE|RmpSe% zM*EGO0|y#~0eKF#%iKR<7q?n2!Y?qmV~Drdr=kA0pV&pPq^#T255gZ&P&EQ#z*V`l zfVlXNnMM>JfMNN4%XSb>QlaX?9N$U`)$I$>bm5FkqNXHJsJl&0oa!rZBpA&y8wylG`UG)xA@Sf_CIQ7## z{9239vMJQR4D{`5&>Xb23QtCN!>iWQFU#dz2Zp>*roV1r^H_N>;}r+Smi*)e&}@_5 zKd008FY6)veFJQC9aFCp&JSz@tO7f0KR+Rn>{0=YRUbT%%p4uvLYp^qmq;WGp1?6( zbW(^+M^K&@aL}srA1(+y*<#=G8EWe%x}%2CMw^h<-MuOKCt}X8?H{ zhvtFGCE~fOd?9puM^WHVi4zzN1sp+r6{^*55M{IDcFI?CZ0q>mUm(hU--N&_eKS7?};0Xi^vB`Kq}ps4i1yX9%)+lWU20uJbY{|1?$B6V9knn~>^ zZ;DeclLtDqseZ~Wgw(d|+Q*@bQ=>|VJ1)94vyBW{SK%nRM~rVLa*sar-0Mp1KW*X+BMzFdUP)*A^&woYA*OP*NC*7*n=JP`qlAPeuMzoCV@mJzhjeO&e zEb*yPpbJo@a1qITZ<5hv^1r8Rz%!J1)A1LaU=-!%=wuW+0IJlm5gG!p?~ zK+Z^Oj61qKkp0OZ{JbG?aP$F%_Hx9KJC6KC@WX}(>=8GRzAuT8jdlLA2=iv!%VvF- zY*SCnadBk|rh)69gnu@d@X_?uCf=R9JMt;?0)6t_z9ST>BnT`u 
zJKK1FE$$N;UwpHZEVBlOv*~@@OK@N|)9$~~;;wBlp88I01NbZ>{Wv=X%Tfdr+vpwb zBR2HLhKt0iM1Aj}TH*w&cfQKof=s>5P6@-lFCJ{liH?o?r)~$TT?T)0EBCz}RUh)3 zo<8Vy=tD6#Q=5D{qPh_7ZGEVbezXAk;CAa=Dbx`6TYL+@!P;%^S^{77xS!+dD>@5`*6z*rsW--aTs z8%9$&1iKDt@3u2)T?TF>F-CQzpr zdZb|nL5FwXy1$f9602CI&#mt0)dvk9MeY+GUM7JYixDxLhhW`BJQyf)QuLF$EzeUt`a#0Rdzk?*(+j9|$w z?XxeJ5qJhwA)5j{_M2HgqNpJ!Teq^r9H*6FeG)myCM_!;yO--UwDlwL1@r^n{cX6p zikQT5XrI>(afQ;yB^+BbN6cxh$h+``7|tEb{C_-scOX^&|9?pt3D@4^+97*pyVqV9 zA$xB{l97?DYhAO9j9e6j?5xNhiO9;{vXZRKey`*G`TqXra?g2<=kxhk>6?IBdZ{nE zp-hpmwg>tQwJtH37#_`Nk?_Fo||HU=*1x#mR&+j@V zL%iYwSB^JmJ;>Mj2E0}BlMHZ?^M446Gc7?dDPq+l{BD2QVDk=Fci8hIQ`y%6OLXtm zFChHd0A+A_rQbs79No>0uKe7gG}Uu>ET6udomX|=?f0p*U`p~bG1x^2f|4rBhnFeV zk~ohf1!_R2@{tAu=J)4|y;RzSS8UEFLA`CB8IkVTN(l)ph0&@m*qQ|K7~L)zrS5==rqx zxx8Vl+sOO+$aB~2ijh*;sM^sr%OmZ$mORs$5RZQyUbItPb&gHl6TZxK%$F^81N9j% zR~@`_775o6D2&~0HU%sn&F~nd(u_Wkid7)5R@L*Yv771XW5|qO=*q`I_pdT)bbV0u zTKLlVLna9*KofCB&!YaKR2^RVLQy&#%7_w?jTTay zG!xYTtp+t-Y)|cPs3CvFk#d>t3sd2{o`47WItkO5DE;!0}0)AY7ju0`6JLur{dbk0YhhrBXY5o6`rV^cHjv z2O+uLQ!um9D5cbvzNM`pBxb^wQw^@O(q(?*Wv!*sLFKlamIf?FDeup~u#H!npE_R8 z@Z;Yg_camIK)Gh#=BZ%O(~(6Q(e45%IeyG|h1)>>1FvAO=7Do?)G?3jN52UXjqP(D z@*RSrdxoJm!l2Fi&ACkR!|KVtYnjD|)dn3yx1&)egX%*D?C6@=k@CLwzVXaPbKJ%I zQ@#^8W@zH&FC7tRlg~ypD|@vDz(*QAtECt$>SF zZ3!SX>bcev2WLilQ>NHE&nzVNihvrWzDd<@WX=0-hB}%82Ez)~x&)77_Ihx4AH@IU!9YU8LNcjN8Mz&R$ptq0oi6Y-tzNBXey#P(;7Z|D%0rVK;i!^hkYf}b zDhHbV_xc|qhQXve4bE> zI+=PBk}<`8@fe!Y5(@bL)wa#2N}>?fw;6)%$_qYsoe$W6#}9Klah^Mtd(N9trou+1I?i#ifdIILcBAk zNFAF&4$Vt{UHA694&VD}9Zr%@qB(V0!Et0C2i=x|Ys&P-x6I~F z#u#{L#ST88`6Z@3m4y&a2peI>Bn=~1Y83kzFebS5H7$7CB(tBZX1@T;?@F45>9?~EP$7Q=naM*eefD+oN0FLl1M{xBEa^Wlqli|MqEC3OfHOG_7|O_P zkVxaEcV^DI8DP`)6uRXEBIZSA5J2X#%yP-l8@|s6rdgd*EIXs%6N@9H#){wOm=d*? 
zN(71elwIZwf#0$^0)=aH!Jt}l#+7SX(p)ip^h;Qm(oc}2g)+25#_>Vti`w6TqAhIZ zj$iY@2kin_Yb39a2>+ZFwBaAPvpjIt6j_KesDb!eg>MW__x%Z?+;efy2Gre@%#%e(gpu z0aTe;6v3n!d;tm&)T(u6%?s&dV3)7$0`3hDz}6gpL8rD+ISa%lNEt|tq4|VpZTsvX zLarWkK$`#5=1zbH-{&%|1Tf+Y!1SP7gZ&RgM|AD{xe4Vj;a8W1x-DeZiI>41dX0ZQ zI0cHIMu4YQ%jlY0c_YV9A@Q&rP-oMU4aQ6wRfCz~i~aoG%Ld(8YZiME)>Nt#lCs-s z-`}BQY3`rnrqxmS8Z>=AeFb;c1+aRxKs_6pst{KZBjJd4Ox7Zy;rGLhuu-(R7rO>l zo&5$@k2k-lx;Ty;6d-d9k=9AKu|_muv}*sriCbfpqNV7!{pgWkx1+OdzvE9wLnrIB z00TQUCp{+<0LX2|CS|wE*4m{yy>=>Z0noeVhD!#yPZc zjRbdqGC*^~VGkf(oKd0o81blEh=df9-yqRjXCjrGv`+|RPC=$^t5XTGJW?}b^e0Mz z_5Tczh?~=)mR03XP#@a}c~eCTG7qqvgUz)-xJD5wlzAZCt?Ck4_-})>4N&JUy0&tb^WTKh z^)Fyk)a<{%7#%z@YuOALZ$0qzFlQU@^YK16{{m%dm>c^Xm^+P6Nq;-wGQIy1sJ{nh zAZLNDC@U3&)EvLA5p&RIvM0oSa{jHo@j+@^ciyAd>uDZu^Tk%AG?;B#BLU-Zz=4 zy+-39_dStF)ShSOM%CfyNbB9dpjP8UQDMpOsmnO*#FSHfDD1?QCG4b?4e_tpXgQgT z)BNxtzu`9(iO=%2Y*&#XQBm<0qZ{AQ(pUvFER5~>4J9oVtgTQulyWHD+lIk(!yCB& z$h~}g|A(Wa2yw|wJb_(%z>K7jrA$~HqutL~t20|#-L4n)Z-qEbd(uS?FYn(9XDzzi zu6IIiXLMg!MQL}sT25O{vrU$*p6gGObg&;0!_c3Vn^^fw->=F@sB=O?Ui$txj{lD8 z-xrI$3ySl|5a2xV@m0UHpI>;Ly}tdhJq~3GSIT4C4mDC#xX`9q3QrItDe7!?IX0~i zu^2k&{U!Vx0yA#>Cj@_O)nj?Gfm0?ke9Gx!n?1vlRGUx9?$H-EL9UPBYW$9ubaS;> zrk$42?tzg+mj(<6OWptW>&KJ}7BR8}f0rMAC$SVPex!G3UQfBD@tf6Fh%FMIhMsES zrq-7|cFF--cP$1630D|$B=cwa|Js{BRfItYEb}#N`;W!jJqO+^7nQ5W1XtkCA9v7) zM-h-vTC%f6?9{+xWXcrzv09IFzpU_XOoa>e&OG9G3^2B*cXJr0Utdb~(nuPlF~M zWgYIq)7|>;r+HVt>y^%m>#Nkbyek=xjL{deMBt6UZfC8O#51JfU&GEk#^}&$Qbb%7 zvGGh?->1ftIe#m_mZ`+Nxv929iK0xZ<)G&>_*pU7Z!Vf_wpdBOeRYV1xH#0Xf|4ntR1?C1# zytQ-A|Bw~t{RlHKUcZSKgiV;B-l*6JAmUO`X{i0%vdJ;?1I$VcHn!};Et9o~e(?`B z)*Z>*EcmJW-(88F$fgc#3Sar1^;OM><3q9f+ulO+2Ey-9ZdT#5*KIzWI-J@u!?#rV zu|LgXn^=Hs)r572oIpT`9CiANXvk4I@7Zg0r-FDvlrb()ny*Lq$VHMJiG(SfPiuqQ z`3*Ip-+R{AZ$h0KkNw>Y?Y~hHv_9*3owV-3NhL;)<|118zi`U3}haC^e?W3As)i#pkuT2=3}0gjkDRn{Z!)w0fGRhuK{ z6E!Pjo^MbQGItC)S93zwKoSqrcEpQk%QJkK*-r$&po|)qeC$ft2_<%2izg?#8^Ot6 zbCBW4&lIr?k->&3QpX|D{P0Nc_Yu8!Q=oB_{#Nkg!4x#H{#F9s(UAULl}BKg@9~3E 
zZz4>0g_KJ7_TTJ;J@4iL3;CbqLHRAv%c?8eC@N}H3TJ)L`H2#T(563#N+;Ky5J9&j zkYAo+e+G(dvE+##|0}ZnTK5+5!@dKN0c}ryr|p?O{~A1^DPA!ZvX)yVgTT91v7jZxDb*;`0&=kTQGg~ z=aRl{d)rRGPm}#oJk-H%b9!4p_Xf^e6gz7To-(GeLgw`*tv0`|aBzI>JZ#v73Z?ME zO?oEp12GNY47ni1$B5AK^1RPeFQ0aSe^Inv z*WdKIfg+*;4eMegIgOs3d)$sIqVKQ1c>o5#)sTl&{BMRe8q!EH|96F^YG7kI62loJ zgNv`fj#Myfd>&OxhpW#LEj=K8b&nW@2(Z&pm6@^o%Du4+=^&Hj5ii#nT)-)Pz4iu< z(5nb12G;L;gR;d;w_b%kO??f&t)=mYUA>F=ZJa4$*OKBO^S{QUvrWm;3|zrKENiPM ziyI+-kzwkX#`tCAilRm$8n3wb3qX}*e*1RGAg9l@(T#c!P3(KlH;!BjdwB1nMwBow z;eG#XTHi|MMZZmSK8{drSOrIKP*XR}sLURP@QV;>Xx;g%`FI0Gcd97nlfKq*c709& zEG}AzT33Z653#WM(?EsYjR3_}vj-RhDR?A)#JTR&aNX%waD96tYpdV?b^_P#BHepa zl|g&ov;%ICu}n~^m;B%AiLqU0W%z9`xtypxsqJ_RNE->7#vvlZ;h;Cmy_eU2K-LsW zF0@PjEfTTjd#p zST+4xlAqHxfo$W~3^2qQtD30y4Lxp1HGsCkcNfRhbY5>2(ooy%JxHxhA-aYJ4lw^v z0vn)+W>(YHR@-HW1KrW@`h>lVGWJ0XFxWeiPJnpvQg0L-CC)M`na@mBt>p+96>^VN|9P_+6L zyyPNIDy;xRTtW0cm`dK5b%SgoAfp>~;dWBiU&k##e@wwDJ6toSZC|%v%HY&LH%g}# zU>9T?%pj&$K->VPB9#ML$y!1bubsYe`8EjC?Io?#@_JXM=k$8QOIyD65%OAJ~?+RDVq3L7{>q>HOvUC zNF4MPipsSGmK8WnF6H)iHUPro{xk)QE+Dkj8-o zXR%cmwBk=vydt>a0=pnN%aBSOqV9c!6va5(pEy^Uj24H~ZGAk#FD=zwNRNTOlLXKFatM>MYK8-%@{4n6N=LxImeye)v&T}ZOeYvWS1 zc`f%|;wLH#h6F>e=0V>r(3&a-GkHVL6AZ&2yjEBR`Kswi)fD^4`5#N4$QCO^Z-9l> zyQNe>;KYDItyaTOsE<%1GIGSytU0=S>6A9!uOa!CzoLVzFrmL&7jX5o7GAI$FccGx zUatZdWUEH>6vVC8hTt~3zy$<;&2IB4M^66Fu=SA|6P=b==joMMi zgg{3S_V@h4IkUi_-<2N%?e%R5r4>y&6*U0scuXSmTVeMN%j4e|vOIXh^SoU26%unC z{0}^*yFZ{-$jl~!vhM<4;E)nO)I<3q2vekVrJP@A9?~Y7Bb|s|FqKfEx?%*|0HpkW z|8-XIl)=2jk6jvhSfV=K*NY#IEs^^0;hf6jtFmLG>2Az2D?W3ORTuTMbm zdvLYj$8k3q()cRC@gOmGV~VZ!1R?WRKd=l=`(b9l^vpj5VpZ$+?@4_r^5w>vZphEx zy!fdqf16ktKI7ErRIJ9dSwotQ*tfs^8ZNFVaYj$)SqNxrZlps@~h;f$qkpGyblp8!=pTj<5pSK3Vt*cjWnbVIN zn|ahM@@uK#+ttsFDrsbseQ9$7P1K!~tFeq?e6v7?Gz?`L0d?rrFR$VhXPxi~p>e}* zzaMDMV@<5B5RnrJQF9QoH}5rir$kGELE<5Cw}uve#T5juOXtYT;)RMDKmr7gNVAJ2VfP7I)WpC$&WK8 zct7~^-V_*Kdq-amfk9GUG}QU(yRH^(L+TZ)KU^6WOghxHb3IK`j)H0rk;z&T{l1Vr zTJlV*?3R?%tsN`W?QK(;3`WwnA1;#QV9kE>VhBN@d{D4i*-= 
zT$QLRxS@i{dor2$%k;D0<;J37Jw1VVgi5)dx?WTtOTlnWxoN~W>7=*d9I9fE>Dd~u zW6(EqY(K@>BCaAyJ>29uVM4nf*c1>M9qs8+!a~tW7tjPcgzE}Ad;*M%H0df#t%r00 zpZf1JR4#g{4*6&#ybdr?lqs`v&9cU!sSuUq$-N=J#cqHHOMioq)6k#EMhLI%9niyH z)fHJ|=^O>WZxT)#v}jfA#aFbiYThCZFO<`b)s5xvwy#_@r8Q`_iYWtVbKtypdfKKK z6RE4Y@?7cf9Q?OXAyR{0WK3Ha1ACsp&o%e)usX*^204n6@xyWU&zs&>-e<+h8ur41 zJ86?rt~WCCb*aP5lG*>oo|=dgX0Vr3umy=ZjgI9#dI$4ytXU@$5@OUN@1|i%eELxv zui%TVfi1oL>88Y11~|OhZ$35s-=HrUQra8EM?DN<)vgf3iAE}(+F}N$5>jn~9hbSt z1m_`a2$@b=ql6pdUHhW^Hfrp)qy;#OLl7bD zLghE97+b)bZL_I*VtB~bpd{)$Cwgz6@#2mvf_ZQMDWE6BsJ)Tr@7dTD8`~74(vdwF zflEvIDh(Ru>VaFxSIvRcvMbIZ=@+n3VORewas*M)bd<)ZJrELjOaq3iKc`M5tjpb&ap{@?XTQtYtxie8BLv%CZQ}h^d@3KE)Gm7 zu0}q|d*l^3vX@7gs#>inGeL7oAdKQ-ZxX^M{uI+Sj#Z+sxw;0!LGt57pq1F)adDFK zU=g{}s_R7s_HOM2+qnv)=cpq$((baKX8?_m{rQj|L5p0swV_Rt^vWp{XjLXx+S)05 zzlps-2a{rG;|>DiuiM7sr>1L>f}NWo^}8URy9tb6WWG~_N{ijw`j@Wc1UP{-`4#sT z($iE@R*gDj@R(InixXc3VXd8GML)y000rg{GKqb2)%9+#9xG#j%oj=OUVXsIuPXFh zMD!AS_VK)}4$)anB1K8IK^!V*4?|c3Z3|X~fa+Z3>l}DY;Cgi05464w(Tsc%K?vD6 z$m8#PFCEsYVE((@rf1G3ig8k#Fm6|qzh;DGD33~<&m9A#2ZwgoxEXjRLyN43S!_9C zQlg%*QL}%DH1focBtt~9!IfyXHWt^xqZ$d)_(r;JpzJ(s98NI)Lb8x#gsB)=Bx#kG z%%*L~$IyN^z6X{w)6Ay-{0E-bB5-74Ks&=2y#Z(f7^}ie<8GD}0#f4Jn(_4AU^Vo& zibOe8JT=G7GXD!Lqv4x-S_1}Iy=Xkvw()qoVIYh$S&du$GsZ1)ON`w9mS;1=2gy26 zvdJ!Z9)sf4@0<75vuniMZoi8)(A@609-n>y;z!DY!p4d2ixHE2)~+@~bwy4);7}w3 zsI1=ho?ysyxrXMfm-{GO1ZRJTOD@HU+=yEuG6RjmIYUnCg)XTxGHQtK#P06ghdWWf zy;E@Eab4)u1qt&B3z|a$@s0erNa^es=K0$@Q+u=mFplA@W6a6bfyR#GO}ud*U<1q_ zA?l&2wW~yk9n9ME#GzY27Q}4x@%caLmRq{JNC!h~$QdoM>vzFSlsae=HpM>tBys89 zva0jUjs%3G^+B*B%$g>qm&_j2JJ3A`&m|dF;kZ!P2=c-yyYopYHfjMLMeW3}Lpe1_ zfk1mdj;(fF-BU*X17P97ZXi5!WKifnYr)T)ci{`u6%f&TJ2%DuSrFuW!54JsCu8XCBAj0n$LvT5IiZh) z{|zMs8xHHK7ZwPn6kY3H*zpNH1y2fTM@oREZMw7Su&Ddj1R&k9zw?Tgok^1_KoOK? 
zxBr?%3E{#f2=XwUMFq|+K)1l$xwKC{h%+Pz^H;JS`Up7?9%mbx#kAnHk*#Rflu|f0 zb753}IT1DjhkS3?T?ARN*l*xIOYCk}u`UUp$=rLt`)Iea_E!EXWak?!53MYz?dtD0 z+Fp%5@B-FW{S1)E_fMctLhS3OfV&3-4d6KzaIIECO$6jZM(l@-T0n>}A3lJ69b9+1 z{kEHyfB1#h4ewxVN8wYQYXBR2IP!?nf_ktFR0dBp-g*Q1EX2e^H@fo^O$<#4R2qH% z37piw@2@{5dt=Mz?7DJDU;Bv^H8B9M3HtBIXB1Oyd)+@FKYPICI`j;djheL$uVe9e zIih#C<4d#wG9KlDxVOEJ-I}`~o-Fc#*$^4lB7k;vdiJc{EHzK@xUm%$EEmIfFrJ((S(adQLI)oD%uz6eIa!-Ps9Jp2rWdl?$>oWK_37S z%EekcnN}sca_}xvBMdspGU#b`F2zDaKeLr&23bN|} zLI*Wy-{T-tT8dSS6V=_%_I?Ld_j))Lt!r1;p1EYtIi{CquW>#loFdqLY0+9yspCRq$13W*~VtD{RPrfQmm?R27{7w@#1-9lW2t00z1 zIaec!#PoGE%xwXZmGMlJ3UJE1%GHhmzm~cYV9te)veswM2!n{Q&-x5{b#5=E@`L#ZSybs>lyw0vOa0 z(QaCS-#Mk>Bk9Dj&h)H1E<=uRs7s8BQbK46@tm@JfJe(vD9f>LEZyc z%-v4SJpOOXNY0fsLrXSfuOIiTeY52W<`)wMp`Ndge<%wu1rzN-j=+Xpswk8*HHF2!cj3`ZpDhah`P!-fr7gOX~bs&m0|CVF*{BTwSy z_FTVpA5?U=&+T;Urw$+ewIsh1|F+XJh~ktnrjC>IYP*L-4yc#D-lU3Yx*z9{gGuuQ zlwLe|Ik)stU1!F1Bj|*V!((WLD3i9YQ<%o+-ilucZFp)xuH0Xwo!OU$4WHRje|YHa z$s-iyodZorS>Om)vq8?mcaS`D#=ajRo-mQ%L;&vCCn}zUz2eVtiOvAM(~*;pd0z7rww z1zpu4lVKz&3y~*gB};13nd-?caq6}E^4`jxnTVp89-9I)?PatQda_`nlkBN|2N<8_ z3{feP*q!QY^4}vmmD{DhR&$A1RtXL(Wu;3&|d!yP_sw3U`_cZub zJhL2Mv5Aw)Vvk#v=rqo}v@oG|Zjw!r3M*LzY}lXxDZJAXA+@4$E4jT+fnYC|P4quE zc<9DS-GNswKy#7ql{q3WmH+->EX#MsTB0Kx@jGZHnl9Y}xCXP85?&<~seRq!CCs8o|h~QfsoyU|F*ZuurbZ$1EeycIvj7Uu!bd)~;A zaux3sSqE1@E}PG=35q1<3Ji;T^_jKc$5Wdks_ToAw4!6MnBusl zR^Jo5WSE0a;0472maar-^qEf?0=K&(&ybk`*x!P{JHSDlNY^i~*n`!WQ!7zVsak|| zHAkx)3dFqqs-hn&Etk(<{u$>Q4key@Thb1-i+-#sD+R1$F^F3vp28a!_V1*{ZK2Nm z6_{&|-)pEr%T<&-tF^bW_pRrh;8&cq?SmofV4Q&$|?91d;RD?<7Xid0Tkn!B~XCOxUJT{m(P4& z#K85z>VO>J=~8OrOoRF4x-wqG-Q^X3z(AUc_5R77Q&xMyGIFw#Bo(&E*HkD(Dyg3; zJUqc~Ot>kwcK!OobDeU#tc;*Dzf5 zU1Vg>(xZ5?_YUU!8nUx!=nvGd%-3aq=wXBjHn;C!1xTcp6aAk|qn4Q&Fy6q3m~=AkQ5c5PhqwcX-o1DA>#}{y`E;TCN^+n-+ekOMn%#) z0ytFQK^HI!J{Ah^O&aX!`e{fX+IFSGdurOIXrCvAV8_k}pOJaceF)&|{R|~R{yUA` z1bE~X4{WZay^Lke8OqX*CeK`(aqdh_LQp70>=d#CTr>8o(9^YRVyU-UeE6LFD!)y#&c3~$Vjl5uZ_!gX%*w75@wSXYZ(TPv=Y0_Dg=M3!b1>`%a(aJC#r!zpt?NqLmL 
z6ZlrELG=%~NhngGF(c(lsd}RD6Qw zmWxk5O${`1fN-E!{GsHyAOUVpnu@-G5f~Pb)>(fec^)%XeCyzlpCez6otvA}YH4Te z1!@pkrBlsBpoCx*F3#t8(R?0d_jYa^bq8{u=P{alFR%&_ou;r6j&%;f<7_o}kRJ7; zw0HW1!a*Gb74a8|wYh&O>@*9|yObI9k~cja_dgDe)e&sqvz?9$oT%B~Y;_f-<>(^M z;KQcNI^nzYC0_v1im-#;NUY+IuvX>1FAqg?Ci6o-hT#>Zz2r_~U+1EY7OqOlNu-XC zX3%9F6hZNA!fqGhbUwdNv&|ipzHlJo${Mo@+el>7_B`o@-(*{VbSM z!e@)vTBGBGcseL<{IPT^2kev%l7cDQ@*^TjDcE4&p@?#}*{5u+>m4*aTLe_Nw@^2> z4D}U0CJ_${2+yAK2|*^$bbsl+RQSoCt5WEUPtXzRMZ$($o-Zw&?4TdUL3I zM<#b|)RNW0X#bPfH_gwFh#5$uSx?{yisBN2ExGq~PvbYa9BKo&2PE_(qgVaG@UTzZ zN3<894Eh}jNendC+FnvgzXu^e)qG$ z9$R7kxiBBPmUow_r*Yk3jpK8BvC&G3edX?Ayc$JHntygGyFm(CbkU)}&z3(>0bQxq zI7AWt0W8=x=_pS$H|HYoH0KXriCRn9Op927*q_0+7c6vErc@|yB4-=P?~vkB*vUq; z#2`xN#&9x3ExQ)^DNm*zoVqz`HN z4px*gg9t1XzqBZ@;A3TO5i`Ifo1-LI8#pi)@2?X9H+->`y9Ba(bgX`$`hST}EiMO7 zx?q(vx#m_zt;ho6YT0LKB(MA}Il?i^Iil&l@{jBl{88i{mQFI*aq})19)0oETq`7L zyW#hDAu26cLMKpns_9gT#%uZuUzL^=CyH+?3Ld}JUXNZy2`jL#sWJx{mUSYeS%3bk zi^8|RZ_*4E+FG=5A|a+fY*uGgBzt^6T;fG^?Iv64D?>;V3$n9lo>DN+LYXG6iA4sl)j8Td zd&;|tI`;yEp>Nr@D|sWQIYwBwKnS>y_pUg}pWgonM6;<+0|7e1gr=Yopy`k9AXC9|(4&86^2z^5Iz@*soV&Ag!6RmA;63<$}kH^Otd>6l?GTbvs~ z$HpTX1x(~p2C+=d?e**4b_t{uHCAB1EIys&wCezy)%bAx?sj|gLG0$jBv}P~Bf?`y z%x2t6mb3D(vV#1OElCKB_ntJW3#&t~frq=LDFHs3lolHG%g+ztPk^e5;LusVc1K;M zt)mO^nFgXHb9K;jt&@~F&oteOvCjeQQ{1ZsF;Uqbv zUdNr8i~F|3n72&}rwhx=VpM0mwO)yjGiEzf(kLk+O?<#3>t-rLO?%2V^FoVf3=Gje z&p!;J8abunVIj2TE_h4U`dLtSUJ?Wl^?BW&6Z(5Pz*C@D zxQl!Bs}PLfaN2t?6uapfs`kam``$eJB%ncpE;%Ks0zDR6SKaOZ=j(V%R#ztW=*wkhO~9oRcj&yd#amh zi0)IOEs-rzv=?a-e`rnf(Y*q6>>xdF*%U&NC3ezrES(3tp8%$?JgVFSyU+HY@6FTm zY=2b~)FN}~QU<0R6`yZ%wh7S=Mr3k|Tq74B&sB zZkc*1K_=7PHl-3EnN!$1()yfT<`>$6@IEw1=`{>fYcog-mZ#F=KB)TnDq(FaudzUM@Cf*&J=NfM~s?uc`SEbWhw z`AE9jwd6)q*9^&(KXl2YoiBVrcbn{bEhRyZBIfF>5O6H+R45_6uVf#iv|bACsboajj*yM65UE8MXYGa zL+4qMPXnwh`!w<=y~ihUn?(c0ymjc>g)1!j-B0EH<#DhTA}uuXl|uHc4EGV_jZp zR+(Fj=ONcIbovOtuuf#P{GIGbR3hvCRoPErkBQ5#+T~Ffp{{%98@rI%JgE=hS_QJ# zX+3z=%p9B@ziR20a(xL~+$`$Vuy_ASj4V$~!rtmf;v=n_Vg9GG2-v1n5;@$A<^qXB zB$n$W}Cm<(4 
zhRoLY(SCEcCas}&2(ZXUA0%rTfPk(Fk;&(wgkQg?e(_W^th$0mr~GX;LeSp%YCIg` zX9_m9?1PF#XulgUdZ&@=Napz&?w!=!xY-`>A7#ij>wat{JQBlwNn4H8;DiX(jY z^jC~ZhI4+P@F%?;RnrPTRWf$uAv?s91@rGVV+1)7*@eg3DE5=IOv><)Fh6*J$O0D zS=;5z+OH03o!a=#w@OLC3AfB9%GHCmQ50~(hwr3yJwV!nt(^Jf&F(inua_jo&RUFf zkBJA(Ee&@ADPc=8``OnMc4&0ld$o`cmW#GI-U;Ue(jI^{a1*SKSJ*7nvnN&ZZ1z&1 z>e$*+jH*8rN|g%T0ovCk@9Z>o5qx4>XuWJm2@(Q4LmI-TLVXHV{MgzqiNW?7%4$hz z!(nilyl|<^egkVjlK5;(!BG55Az6##4$wM>V!vl>IQ?QZ5b{HGRQ= zKLS{JRELM%&U^xNGt^-vZKEgZ+`QLB2mNCyBS!)B+2p53{ubihJpg{sJUkh^h}p9Lb!_yXF4r4Y;QoB?_>wbE21CkieY zUUFi*cQ;ljEPcIs)$cbh`ZU$A<#M5d)=!os4K!m&L4 zq;~gAD0C=}$BtDncr%rH68Hk$?|R8ib5z7o*7a?g?`Nb2iBT-~C!+nsg^c;xnCS*8 zeq5#-q(^Z=7ODj!bH%ySBuZaAK6J&(eOtbL)T9yFRi(P0*u3BL=j>8NzmCs_IWEt< zlBL_vVOy3jSSeLkkJ*RsEy^sOQfgv$Aji2zARMm))WMtmcPaI1-_Y&AXANlG+y`7& zw}8mp;Jzx?dAI5Krc`(}}LthH+5IoW-^A_8PBjbNTDd_u_<*l8d*anO=x8f0eT6M z7yOuOqEUAwCtU6_`5g?pHE^k9{PGkPdb^_MFhD+llUWaQ;w9$Enlp`hxllf^R82Ye zV!f%0m`i$3)s6LB%FNnC)K?tS?}B{Kbsj~|Kp(T&hG|^~`jn)4z+1xmA@@UZgkvc% zaUX)eT_U0)Qe}zsZbuV^eD=`={A$Hm1L6I_%*IaCNWQsk*-QhYBssvYK zE`$up`}XU(VMFFglbZHa3mtQ3Zg~PnwM8#1YZRB)YkohvwXM2vgw`vLxM7)wQbpp@ zdLbGFaAI4?SJCcttasuzFkynq6vS*{+;^s6&s-6P{#`ZjKE3N}tZf}fQ@M2d@LZ*= zT>+aPejIAR>!gMGZ&oS^F7+I37Ys*ur-DLTCBchJLm)>NyRs1g1-PgSXWJC5s^6i@ zR>q5pO{ex&TKx70SnR%(Qu*L-Byb3b6`?z%{JEC3dzkt{xws1%5VAF;?Z~%!$S{%h zvHm0acfmiCJtgXt~F4}i6+_UFCUmrkT>EB6X6{CKxFr<;ilVw41AD`Gq9Yi0M9Hqs7`g& z4NpE-<5&G$2h{Q!_ECnE`j&$4$hF<+*H~N!oC*@(e-HXvie9V&oYsE|yHYh$BNAR` zZJ`8yCkda2Rj@1=Wi}Jgz6J5T?&rXjuoF5##k7OP0t4S?VN9n#JIIQ-aHR;-L!tPwbvA1YppK={du!vWQpXl6|SiCApw?W1ejr{FPf}b^0UZNiU;XQ9bn(;*gY0$oAd!HhjAHGz|JBx8Bz(i%BN!pH-8z zO3w?gBoCq-5UKe~!Feu6C8mr3F4} zT=3V`;3@L<2Wh3Z@g3=}R_|_9TPG#+4ttHd)G=6PKjGDXfNbDj1KxO{&GQQ%680(C zb_sb)^qO`L-k+76yjqy{@zgHxPp<7116nlI@sWdj0$nu{OmNE7iq4|aUvbiOU0MIk zaQ}?B?MaKIHU`(^Nz*btM{hcYZU@wjoAG6tJE5q`@|><}O~+q5_b}&qE}a@>hiIG_ zOBWS4_dTlnNqH2%>*{V&P%Bx678a&*O=|OwiOM2KDoO~zp;(NlV4u8`ZU&36ww)40 z{I#6T#>`dqinQ%M#WaCK*&AOym^oK$?E80zUp9t+_@W5}7W#dlJ&~M&k4J%_UDaAu 
zAH&%tUhc5{x*}^b_|Vi9)_GQ(oy(rz-IP|PV;=T{9gF5nC<5a3H^j8jfZ9Z2#xa$3 z8X44nQkiy({nEhCkx&2K?>@&s2SLVSp(n(n1)tYP`EG1<`BBA>-xI~rouL`sblgQr zM1E=Dv9^$aN&4&wde9H<)e4;)y2gD~7JEM8oN1f9H#xaO_pYX!s#G@jBx_Ty7|8gA zPb5#|%GAx@*F}0+Sgh4ubg9HKFKHR9!Gp)(w+!R=^=w4BY-_{}lBHN#NZa>mueR50 zUx&rv8^;pYlq)uo-4~|7)ls?`n*7>jS66HIzRFdcn^z5!{^m6)KrE-Qh0r=y)6Ty{ ztB^k|jEa$K>C+y@_K(mkeiW6!r*L-LvOUJz!<@9V;|bjKT{o4aS}(KR9EDfs8O9N3 znX!T=;k!|6 zs`)@*D$)R9o+Bo(`R04UFUPItu{ThUH7d}*- z)Z0s=Ky^i#B48^e;$PkKs8Ib5mwrhp1tgwYDeq(u5c{}!2EcY=?!=V{&nKz3*l;Om z#fgD@IvHee(t&0YTr9n6bDa}Aoz4|S6Gu%nn@;34&epby2%%!V@yH`=CiBL0x+f`o zpmWSx@2}V`1q1FCN^_20?~5$@d|oT>ha9E?+xUU zNq*gImF$=m5Gl<}`T}2T;p_ca?Tx>Zde&>uK!>t|TR#O58gUzd*jL2fk7+)X zmyDZgcX;rUhVFQI_wnjgVbrij$ZXrxa=4OFjCn|cKfvYe^2>JK62HleuL=dxWVwYG zI{hspWG^Q9*5!%X0oc)-B&$6ZYmV2&DVj<8!2wauB!3sS95o{BJfX;K$uoshJ?lnEbNodbr9nm4~c{duU>bAw!#42s_TLS@1ODMzQE3ZU*!u< zHCD4hi2LjQYX@(eIo}a=TV4D~BZE=Yut%GEG)!BDuRR`FTW(rzR0$i@;CIVWKcgY> zb7h7A);)pZdtXDM;F!6J_iOF-MLI&p?L98R59Fw)JVhT3z!T3c<>L;|oairM90;`C zgnu=S;qGo1|muz==@x1$k|krjh?*RA6~Fu;hvFV@_Y$ z^x)jRym|ila3*i)UkzAK^PA87;^&lURfrarmb4c7De{~A8azN!-!$XeqeI%hyzVsu z{MAqJ3MhM!)YuFs;c*lEEu$tv zB_Z6-d)5?R2(XGC6knxTLRWnc0T!dR%={zWgCL?3Bq-JpK~ABXyw z|EPM)fT-SRd)NS_#2LB-W)K*Z4hbp8p-VyqM37KQO1cpT7&=u_q!bJg1tmpk6hRQd zpgWY5kbL*y=Y9Y8e(;NqC!VvP9c!;=?SuH_l==>nQ`fo{C;dS+?DNA;-o5D1ZzI=I zActX(oGK4pB3wR09T#u0Gkf_cR&TbsdD!F^>;3<{`^hvL!UOPR^l3 z7d&Uh2fScmx9_@MN~poo#FPmME($dg!qrbSOBhIWffl05hx%Zsm4$L6nu4pp-7@1- z_`MKeUD&Bk`N%mgc+UFGx3a>q_uo}xPzHoK;}_~P7orvAujb2eE1Fz0ZS5V>QlDAP zYMVy=KK~+m>q>rt7~=Mj4U9_M>^_^k@&VZA-OfoyC>nyYsP^1xbXVhn#Lt;l`RZTp zJ9KgYD!@CM7j~{QAbmtB_kO|UF&pcS3fRCj0&i2fG-Di<=z;93{q)Y!mS{4c^k`%@$C!SjX3TqV zvu(59%~g3c2=GA@NEc&x82#D z_hY}DK4;x-no%*xBW3Mto*o#d_R2Zq86Vd2)SERQ{wL*q*UU}Na)vwpPIb&Eb!ca+ zZWgVMn;cLzS~K%Ho(U2N2(?3Q`fuM;lM?^e=$7OnwfzQRatt|fI^0J;-PN}A#Rw%j zEJVyir7Dt*Or$RY6957iLgiY5#*FUXpR~sxo#ML{_k|eiofS3#!)Y(qmJEmHYR-(B-qmf>nd_05X=nPpEkBJ0yAv|yimhr&s~>(duXG?Af8 z^xzm)?cruBW4MjRZA 
zF3A*`!o{hQ@x&Q&#E=9!9zXv`kBv&dx34x#me~Meu;vD1%Q)sw4QPS1WJJ%4@T&qy ze}4?$-EQ~1YpFbWU?v)op%w2SIdC+Syn2PZD8kbni#nyYP$wFVg{lO0dCxfX64>7B zjEO!(F2K&3RS@Ka*fiY~Fhjq5vR*7j1FT-g07{s^5z zu2eWf2>7rs_*e&}XH6QZ4J8kYgxS~$-Vl6ESZqC(e$o(z2`4^sDzhIzc8|?#jH#i= zGw(<0BnJI()eonzhppKU5^?89BtAJS0Kh~7Wm8D?rFYz8JLwe(al}_@S#;5&+_OB~ z4x%?pUer@HaZkT8=o9A?H=xt~l`Vrg#CMH>H%azJHqVcnFWw)y?!4e0lVH%Jd4Iws zF8alL5?hBRk?UuamD^3M5|i<@+vlc=IdeL<{6)}VkLAanEV#yyp?2s6>R1d4eoGyR zmz_*KmtF2P)~fQX)B;P32}@LBWeuN7WuL1vB_t-^9;uKLw*7d<@XF~zpUKLjJ=Ymc z?jQQl1Ddq(b{dn29=)f)Xj7YE&M|hqBMsD%wk$4EoT3`y7fY2(zW7e;Ai@JD&o{{O z#f;T#6=-Ts5{RYKR?TO2Y>F{e6>G02+w@{S--j0YEE}Fe#CDXrA*+ya#i8p!BUv0b z!eH+NnMuR5jo#6h#OjLXHqz!Y5i$lTVL^$zhCueVQ{(>E)A-A=bW*A$C9EaEtB~t} zRovcLVfz-6bFZP>V9iHH+clG|#JIj7$NXN|*}}XdHMKi}R39p%h1Ubgur0=F-P~mN zQh&P5eyHNiYmtk4QLsSxQy4*CK}#k()>R(K@In4ho|xR5@Tr^MH@mC^d&eZ2pDjK8 zdzE5fX>W5SXo_{pWYRaUw;N6#C62a9{>O6BfC09}0)u)>;^o)W^;SpFRUfNA*$`5N zU0$9{i=yC%p7>x4tNs3{hFY}S3q(W4o3A0sL$NJ0)dH2lGVZhbMju7E2Wi7K?(0nF zv+TZA&0iHFZd5Z|YCI}n*8xWlZp&fz72syS8rQknEz7D*M{?%nFmWT2OqNP&B+THq zURllihgH34?HYOXg**cZg>SH7an>s)Rv+M)khC2;TEepRzPXxYJLqCx>bQF8z#*#h z)1WVIXt3F%{G zNP2(nWP%+JttXKMEo5+~sC3ki-ijh3H;m^znKF7?r6H0ZF%$J3PCddhk#UOB?IyGbEjGLC>xtU_k+F!({wqdeD3FN zVjpShHw{$F<JX$lF^6ay zD*QjmxG53szEDZC2I>bgy1 zd<==Hg>#C2lt97ImCN-cp0t+mZN{&9zIYEZbi)U{$h0<%4Bqs1_q*>Levukq#qpe}gG{e!mpe3e z!15VwX5-C77n698P2iqD{&CX!pfw#zwHJcNQ-26Jpox7%grxO`rxvr<_lcQcxv%*X<;dKVT#El=i7Po$2MajC2W^COZ zJQYa{RCy~KQ#w%VU11(`FTqh`sp4J2KO^n9oAgpz;dxB@EG0ifmky5Q9_opCNOr;+ zFJ{~5MjtkdW60JwGo=y_whg#`bJe%`Vz_5dxjBn*!CsP|wvvo@Q}$PHedQ{~3qLyJ z8FZ4{BaQ59R#v(60>bBJLdA4r;2=e(W@EBcp=Y%uF)B}@N^3%^(I_VK+%EJ!MAmUU z3R4=LlJ-X4b=ZJ`kpv+aVHG0u_B+PGLKCLd9wxud}R^PPooK!4{ zjL1E9NkUOYnq??GTdJ<_8~cw*_qPq}AjiQi|Lt*LG|FJ8b&2#$36x?#xXGF461Td2 zWw=I(Mz_hRMi~?9E9VYl)9p?ky6@jEF{FU2ChmTThZ@HLE!^+;z;^S8M0m)EBG*$= z;>j_UfBBCFn0o3rm{~5)mt1qj=T$0c)pQ()Q>7K)@59qLW9MxMbXns+o^X#0k1&v6P3Zx+^0Y)*kKtpWcl1PQwz@umWjO#>TKdvxnEE z?~pH)BjHYCk|<^&8RE@nnU-0@wYs@GjQC3K6Fb 
zrEG~q5~hF%DU#i>Q7Zj+n|b#W@IKr%C~1bP{Ndm3MO3LX_kD0v_a>xp+w|ysVz4^0 zaLcMi;BlrwH~H5V-H?&@omy8Jbn?fNdLwW~kLi;YwSS0Ve?l2$+)Psvgcim;BwZV6 z#jbLAvQ@jJ5%iMHvuP!~*&0Zk zeYyKD;fZdLqvHkgEUmdC)5V(4`E0G&n{7q}pWyuGp9Qz~Hd>VS2g(4kD1Q^}BuK6; z&QBH!$~|5WgIVwK*Q;OUD@W&qC*)el z)PAp1y!#r*s<5e*F4d>4L){1M4jVZDnfHB8X+6eiGoQlUj&F> z^o72Tre$A6-2_FPB-?(fPNRzX>nfbo+$JxZMlbN}PweIiD$%*q`n>X6tQ&Kt7;YNU zk;L9;9I8FiKZTt5?fIpTfM$wS_rsdXD2ivDn>@UC^Qw`cH4p1d33Mjo3NvxksFpa{|B3nF)o z}X(Gj9A0im=3X@{Bku(#x1{X;bi27h3Z3gB}+=Ch3l$vcyCFsdXvS*Xpl{Df7~ zBA(EzFQV+bn4*|j@zaGongkORGoeu3S7n$RkE#sc-1QB6mv<(Lq40O=g#^j}1P ze4?LHlPoSrlxt;u^IPCiw-#UYLkhlJ6Umv+`KNd~P>!#^-^ef^tKL<#ZzOERcn7z8 zP+XgLTCX7Gs*50k=>z}ZPO*dWW#nv~Pji-+;ZSjHf|v6b9qAJlb#OF%)a)k63Y=_1 ziIODj{B6Z&C2sN4>#^(S_8#R&**|N)8cuKf=q!QbP)d~;)`ieX3YUd$jT(b(Ql%PM z(llm$0}-a1HG?(MRznatbI~x zJ?O&HE@&4OI0HDae>Or4nLAnJJ_lv7vrn0}`R(mqdqzC%}L?b_H_rXVito|WwQ{B$by1U+!;vWSK zGh^YcWI2Gwb{Ibc_*p+P{^kGJSQ@oR%?lDgUlNtwBX}T*Xk@kt;ynq#^qcIqt+9?Fl&s7c3ElC#e(czrQTF^6G{T z+yooVDsH0=)bNSlYrlVv!CkPY6G-s8f_B&=qqAS1PVpy6IrO-~SR?!W-_Lx1Ws4`U zN;#yQ^;x|3?>R+QFFiM{^qlK$%7EudUF5Wlt8t&|@S(>LTd7XwQeX%I^ijBrPmJ86 znS`RG(Rdp%OPv%-dOlp%eR$Et&_o`!&A*}x?Y+;hRIkqUwd`N!0>KyNZ}k^ntwvt| z|9`OMiH}$B967d4ScW5r?(UQAZeWbwHEAfU)=YoptFw^E?I`R*a#4}Yuh$G$BBJVL zrJeg_F4y{b?se3_v=D|!EFtbT$x2{rb9HWNZGM0qSsRnv1O^NdcfIxS&s-I1rNn$h zCwP9kD{turo+LO9tPEGc<4d-{c*g+U=0K)|RKdNa2epx#bX002^HqXEMSo|cfMA{9 z>MXoJfr-9Xflc&U$QFFK|FgcO)-7fcv?pp3@3z_O?d}XbQ1Bo91P{^L9Lqicdt`FK zN#60kpb6c7pKb5@k23pWSmxc@wk`LjD8?l|c&JG@m8L_&5el|9=StvjC=waf31~b? 
z{GnPrXJk61&vS$(wf|F_*`6Xw>(8JoJlgxrg#O=EC*VM*7f3f`1-qQN`Ry}^A_{^{ zB{mIeA`GQ-`zKq0W={2TNA zmBr?@e!By{;Fhao-6!`in<`3oePeNC{&UXx|GgU5*5?O`1Gm-&KYt+79nZR2?bWn@ zX_%+icGazkarZ&7$*V^ONyuj;e{pL<(;JQ#YZ z2h=(mY?$;+85@Pdr=YEyW87emEYfSt!8X{gUV&DRr^JnRS2&2~vTwhXDhw*HTVAs6?`YKkSTT688}l%yDZZ2b ztk*ANol{}0__c+h1w%%or!q#5_G{iEEGzDU%5tU1;lPVK7gUzAnS zEkk>Io5DVR>j_r|UZ`}G2N2AQDkiVQi= zCnDh=17mPcLYI*{j2OSV5$ksFH$Rl~_QWsR=*EK1s_d&+@;vs5MC09CPteA;^LxZ2!C4hX>$j^ssK09HYxA zVk-*fd@`@YhVVZa#xXBQBT_j|XE8_W1iPWo7rpZOrrUGVauArPxeOwpOfG!HRs(PY z_ftudT{9yHsrU9hq#XsHq~G6V@uP{#E5z9z>N7 z(b@fPEtV5-HO)Y^f~KqWm@!xf zA&P2fPwCj#5oHd?aVgCsyAJBg({Efy@D`$+Bl)nq6`d~Q?7Be^UlS3`si<_H+T7Ab zjM=s$it+UJcJJUT86oXV8U6BuVXaoWnKC1J<(&{Zf-UH~J_THXt4%x~*ss{I$el$n*D)udys*^++%pJP@PCCGZY_1|I3)-GFtlv`z9; zgGv@be+zzAc|QI5+9_iTw7mxgYyE5c6e=Gd=aiq0tyhcVSfak%9U*pb(r2;kGDcay zTeO+QF-EqT^H57yp4tFVh~``Pt?!CyNivPy$Dx*mkgySC=JKPg&FR~V_s_4q>3D8l z`$NrH$l`531c~+`!~6;ScB+))y*j-L3-f>_mAV(mf!YD!hYz2+^elDCJTDQ{;=g3y*5MAKFO6VQ_MO%evJuFx~YP= zRvS^4_`YW*W6klPR_-^Ax%|fY>gCE?6JN;gz8I0}#HN@Y%O^($`wT#z_?N==e9elk zMgMW@B~bjE_4_kDf<*V)`J>0!#trE)&GKL}ib_&-(fr)1V=$|<2@ovD!D;!xO%EmX zqs1LChTk_jGG}7Dp{9JG7fejt1p4crL$ot2i}R1UrLAbgZ}NSGX$+MmhwoIpa`<}6 zWl?qf{e})3Z{velcIiz9TW~H*`iEFP6dhZE!0Oi^iwD^YmhK{rbCwHPXT7T+jcQ}B z^j;YJz4q(-SnNClIsb=x#!G6azNO!aozn*usu2 z37AB`d39qeCv5A@(4>r=mziW-^4LVhZ+RL1j@Xs)09hBdK;_`i(tqd!Rrk`SmG`%)Vi;wrsK;v?yFV3;Gd>bl?09|W>+^rMtwUzVm<=-+duuu~;YC+E-D1iHt z=p`13GU-8|hImYKQn~$?M}M1Hte;^h=$yjdy*K8Ny_L_E9dof+u**1R2qvDitA)7+ zUvq`oTmEKXepkVX??<-^+k<_D`aB{sNuhI4F?fS_Tun50OVRkYnECYL(|dI;IKPj+ zqPr+B#4RT(rYa4$UR$^}S=!E;^7azx*PSh6Uh$b6D~-pnSsu-kL`&o=AvHzKQre+V zWH7u1gJA-@*wOP|4dKTIV0z3f6oqG8#&1}~zGt_VYF;?w^us594?OJFXf8zXY~~#n#2;8)Hhfm^zqH`AiR9u z-n3RTYDY^cV^R67RcfwiH0tJt_i{x zY!b<2$%sKWQ_aY-NcYg6tGOoF#qy>=%*#+JC`0U%n7etVQZCI-MB$xx`6*Z={I}gc zOyQpFs`yU+xGgJ&;B}klUct0=ai`o-elf`PZ|vv#OriYpxm5Zm0EK%Z{fq~7O|<1K zSI%ueS_(#%T8;Ar0#zt=vop;={-K6eogS)ir_O$0_Vre>aF@52Ch zvCchFPh-v8b;r%@xv7-WBeWaK5J@h^lPKS;P_K@gM0bU`MwA{|I`;n966Ht~i8a@a 
zG%M@7Y!fg?2|hTNj2kHDg*A0fK_P~;T^;%B40xNPgpXtGVaX@f#pW)E)59yh=0Vch zTQp4R`QBJ^CV^D-cx5(hH$SE#0c+o1;Wo&F9LZoMS-m60Oeq|>p3Ahjdp-tZdRv{G zz?!R)e#U7wkDheuJ@?hex>-c^J6-AI3viZd^4JfSH)0;vf0_@(iE$kEHBmh=uBpW* zmpY7!q9xh15s&7oR45XTcRBPFj+9)%DWmN1lIXtsd)Nz$-Ue3ata=%4Q*u3tTxrrt zt4x#%!BnW37EhiiLU@OgBHG>ltR~559m_E-Nv0glVKu_2Mr+c8LZ2sv$6O^GZx-uP z0NPWi-cHj7?Uzomd92}@kBfc<^YIS*botV&V~yN{NtO^A} zD!#h$bLFbzujuBW=SycLgTAn}D3AHb2*<^bO?+#Ryq?K3l`eQOEs_>$2`!4__nx#9 zj?!}^6`ZCvylav!PS2^%s>&v25C>;Pf3p{}n?JPNVP8&k*O66aI#2NAHX7D%mU(DS zh1nvwB&Anrx}I@15K$E+vQf+CR`NjoU?`M7&gQg`&TI4T*m)XLsZb28!i zna^Npn?8$m7+!l2I!*XttnU%^`HH0nk@Ixi+glSK|9j3p%fft-*X{fun*|) zl6-sqyo?o>v11hua4U*BmFXJ9gLk?9T^M;1aucO zpWF*>jA`gwybmtAkdms0r{@I=c9UWW5Cm|p1H*P=E`#zG=R0# z;*XACkcPF2vtzC(VGU2DnR#!_pr0AIyZskQ-;kdiw#|e*v-7d;v170DFBEXHb=AuJ zhQc%)%DTVP^af0ZE?s1Lv%)WTd7?we^e_Zr46?%sKXMM>qRsfa8PXs3hPRq8V)GuJ z?Ms$#$^Q-k2$EP0^}~c%28RZSRG&J}19xt(0tlJk`W`fF?I3wT5z*!uW9~R+LbNEp zs6;)m1rq;Lm|OeVpY*M9_3YQFl8q3`wDv!BqE{g`Y!bdNf_?%wg0Q49liKuRu;s{hmGnY!te^0%h z>wSJUPgPRqTj3nt83#?G69j4B-66K-UoV^nYh=6BvmBk^H;<*dvrV?3NDJJ8Q&M!X zJz8amP*j;};Q1e5TG@6KSIU0>e6#f$Vxs(SD4+j0O?6~NuEC(Pzp89FHml3uAcab{ z>lu6X+GXf1p+TAJOm2apRAz`>yS3J^qkUDbPue3<}I_0ExM-9ZYBm5#Lsva zAMCs1*iwF7Q1lUtcg&UM#h2GV_CL-Jnnh}=7Fl~=)~)`-uWUQL;BZ!yOa3IDj8)n> zlU*^OVh*s4xz9Ow^om*nJrUReQYMkkKwHOJDB6A^;|g{a8R28>C$FTk2K)*zlr*)U z9X-y;;evfhfnNty=64Cd3Xg3^ChI-|L!m!ttiws-7KEKk(9`3ej*%2k`{!!>n1?0E>dRwZoY?W7BYQIW3_kWYJ z?`}kxxo@SMew2Ot4Xw-0HtY^#?mJl+jfby%A*y`>hD!}q?qkf9U;o~xwMfbzi`uY zJB~~@HLp4A_s1@vnhHbE{kK8S5iFC0(Y(^ig!gxn=X_y=RRv%zXAzJSqIu5-VA6q- zP4!{a1Ms>Nc@M4((ozr@2?KT0|FdXzhSUAE5j4-s?eGA0x(vhMNCo{D)rx)*6Q z3%F;A%U{?{TRqhK`TxBu2}g{P_-kvRC+v_|yS=-$;5^zGx$F1xGfY3< z&%WEZi!ZQb$kFwX=k!ua>D?2tm}mFYD`b8@bYB=08dww`dy%c`lWygNH^NT*72DF2 z^e~(XdrXSg4q)EZa$aCh9~SAqU>#can9!0iNs|52Q)D;*$v{ADB*45CFdE~1G`NM> zTRduUe^&WL4aPhqU5AzX>Vkv&N-UK69_w*gd?7~{!Uma#0v4oXy*$C;6iVOpp6zK< zfZ0~_@Z9V6cMB~n?eo;huiIyGjr8&lHYn>p;g6Zj_s4p&3S^T+U!>^e%e+d_ 
z13}e0(k~5VINnp9mp_&#AIo4bNx-qWtE3l1){}zyKD~9>vL0Hy!bkC0hL3mzRit z-0ScWn#H~poItVI-XvS9a=n{4=QB>qRX@}&Y4PD(igJIJ%aYA3u}e3s*Q{ePp3U;s+5REv0s+l+NY(p_(s+k!S1XWlcI3j?1OiLBK#q;O z{4czu`neH~Y zG}aRMfyY4=%DBd}k5Lm^RYp{9SHzZI3|BBR9*>TU2x}C(Y4|5Abd({S|2bV)6FRz3 z@0<~B)6{cyg=W{NA6H8KX~tPT2`K7bDR;OVFZxj5{m7Wyr^ZJYTwjLkE8b)oYY1Ob zJR#Koc;}K2M~>*V(GW_qOzPN`Q4age63Qcg$anC0@zbX_W8UT+pV8V(c?&MtWixAc zO3qcGLYTpCO3tme!i#}4C3j0FGHH~J$AMcjjTHK@!ASq ze_^iyPYk;qiSsd!{1ry7WPZy`PyX=IBL*bQ0z5=RtAWg*ePHz)4k=e6TF9e10MFUr z5D*NXpJhyd5mC0IIs_)Lyx@;VP9XRltTtypJI_QPIN;elMuxzI4Adulx+wjH z!U%jU!m50C_B2!)tpWf4MjK~`x#8>yH$uo2`;xJD$6c8AAEN1wM-eE`cds`wTl|^H zBsiaYBJ7Z9=K7wT@?cZc2g9dfJ=X9;aV`W6!gSO65(T#z)SI9p(;~8llp)~O{_6`+V4_+<>LVGYpDt-Xdg3x+ zUitP$Q#A9|Uw7QYR8oDpOJ8x;&o?d>+ZSPlS;PO!-KM}sZ#niAd%_;~0GsplU;72g zj!9!AJ%2yoeA`wFFl5;cEHdD~1d)ito{;lUC*ktz?~;pQ|B9x$C8jd}!=34@k~2xsrCDm9d&}rIrmx z`YPP4g9A?+6%Ob9`Mn3z5U_iDJ(@EBmTm#;I#xmr-;R6#+o7-)03S~%j0<|Nys&IQ z8cwjv1T>{%E}|(%r+hG5l080dxta<_AfgF?h#MOiF-&$>9=f&J24L$!CLuC$b<_s zDDHRm;If~hZxC`8UC0;>xRK{H`=95;MO9vVf2gjNXZ0NbM*r`UNbhSZ(|w!csq;V; zJTPhE<9$TmPCW7L9AsAR^nBXt1g|$vKhL1kZh<;iCCoYecH*p;;9x9lC7S)MM1i5Q z>B?0JO1xa4$q3GQZ^IhdLKOOhDvy~Yls-;Wn8(j~5f-l+dM5(t>%>%X=t@ZYE{_vV zYC@MKFv=|2$o|`l9B>a8YfdB95xQ?3cm@hAm25`ai1w*N|6NjEeccB80Xn(-XkN7q zNCNv|qHG3A9Wgl`Lq47K6qG(>6XRbd#NSN1+&8-w2~dgA|Gj2w60g=aHoN~*t^azQ z(tmqRg=j}cuU`xlsYsmc0ee?wVY6vNFOw(2-LH2bT}5Iakiu~(pE9JY?+IPmtV5TF zF5W^KBlqFkh#%PBf<#;bVEHf`RuRJSRX}(~$hvGGxrd*=U7=M^7qOMzTKnyO_$aM5A;%1wId5sHIOKgk$FfTB zVRC6Wi%2fNqcw7uENXcQeb1*q;5euvXWh@$OcH<-WJHrjU#3UN05httHJ5{zYpsq zZSu*@fFJ7`1S?44{TDdE=&Atd<0}o}CImtccj*Q;8fmGR!9@d>zC>{P;dYJpV-V>U zb?(uw^`7HpM&|8l@h8qsDk(ww!-)(e=TItXX2!bgs6@bR`D*||-zxHp@kan50$Mok$Y_9Lhq~g%4R&%7BL%JFk zi%F)^|FdQ;GJQ zXZ12v$E}UnQJ}xMf`ak+nzU9dQY#``G*|(`2v@M+e}1uY90?)7d01Z6PCab_;c*@` z=i3eY$j}pDi7VNcp(1*$NQ+4-u&4`)T%`}C(bYyQ{_wxJ8Ajg{!GXsD7zLv)_4;jk`LXT^c$?QwbNhWrfU4oa z2$j}Qu@^(jJm`V8FB8Em3|2#fp1 zqUAcw4dx}JAl03SFk$Vb^MIT;(u14({z!*C`}Ka@!;_Y2&?mV97H>a=S}2?UGF0@( 
z*w3`@KnKhB^?3rTm=$qo5_&Nn(6IZoB0Ef40~_V*y#T_T$y=bBH1-LeHRa$xqXsCt zO2F{ZNu~pEkv}sJ0fi}Thoa}ZMJK|}fN~rmc_446U;OQjOE$ZV>$L}-$5lqHp+T0d zMwt2XSg$5>3k;(l{>*>5U3wJ?m;MCId4%5Z|L%i0C!_%r*+c-(H2_WtS;-}ctec1K zo9P)aK#qFyztRuSkQheIylEQRUM`v&{VQ~?7vj2-+P-Usn3F8W4sL-Fq2_goN~<@rV}<^`oYLO*ms)VhlxT>w&Tb#?2}MiOn>0cjA<*=4~XE6lk@zWca8bKLhZY zHx!hAoirXtf93<rdEB zE1DY%o(cW9a%Xd$;WfC8wrK#Fc?DsuSAiKvhPht`~D? z0^$B3M6nMz1_~91?%>s5-z5f59>li;CPJ@2+{3GV>6tlfz?pq);X??*A^~KrMK3Db zcan7-padWpCFfk3{0WAEdR~FLJd=s%emj z!VQ%P|FNc!RA5wR*%dl=(vSst_nO#N!tJXe@j@6(M4SrZ#tE#bC>(6P)xRELtv2ds z`>H%0m2POvmH%T%Av=lep77OlHNs!mm?v{>C@J`4>M{8jeoiiqG%WG?!u{lZUetA% zt#B&%HD-+8c2jm@Af_OUf*HV0b7PRz#W5q46J$EXihTj7l)Tv(GvH^?Z;jXj$PBh@ zqCVuY(mwV(GTDUF12WWgrq&?sGA~;wVnE#B;4b=SG>~(GyS;#Y@W` z%e@L54C9B&JaE79|L)hGD%Nln4&J`yK4ZCjJr<_gISo_VTCYL+vSR&T+{={hspKwC zj>JVXi$GzNhuCb=oLLT@ybpTH2onXx8Uy(up$O4TtQ>HmI&7mj_x5C<=~1O&o_nwkpb^+Ow>onB^Ee+h7T&;V3&6OMg0kaJh*xZTLc^>F!Y|8G3JSE=V8z{P6p&n z3UPacBS>RP{}k%UAkezCRSKHd_T2q zOuJ*KiOa2lE3)LMbytjgTh4QmN|A*)NadXA4L%A!S~C%ThPLiOvLqCxI_Yb$rI) zJbD7OO`vGZBVs?mF_tko?|Fb&;VJLz+oqKI(LV?Q$ZZrvU{=dDqE6>7?f?zsFNvYs z4+w^x{vP^Yh=-O5DL~HXQVUG|TptV&W#YygDPk6++<$kL1MBC|7II#{ro-;vJ$<=Z zDY&^kTWM(E)<+@uU>gvo0$BFo@W;%TK+K;6Djm|x@O0Jw>X`4~&o)xL|5@JmYHybZ zQk1k-QTR?BSP-OMM-Re3zB~%vnPfoj7Fwp9A?>2;IO1VoiB@^(KBenD+DMtL_zMy| zZv^#&o3C|=9|54st9HN&DIiYjIz7*%y^`3583uWM|!+nD=`A! 
zDJXku9r8*#lV<2}PXkkJav>W$+Ljv_06cXEN3T%6{Eg?gAP1=emsa^-j2b_XL#khG z*3bj1T|)|-L@^E-*LP}G+YoUg>%_k2zuN>F`_7rL5g==5_ee%w$vEQ_a7qko ziJ~PX2ox)a$`BWCTl&s)7dS(>Gykk{DVtoIfWd~3;b@3khAT}5iD=I|2RvY`$0i#i z5`xz_e zsu7Y`DPqG&)VZhYadh%ti@j8(c8pl%dbZQs&5-y6$_dOqLY?YVtY&8A(rv|hb|D6TI9@5l zB}VrM17&S$$k%owBB<~7c z11JNRVtkjYC#7;{?dOizRUn8fVpVKJ`qlIlR#WO8Ahd_|48&w1TwOEPOTz!?E$>-@ zoFY*HDNIsMLDu34OMN{zq#7NC^MP}9vriRs4_h8;P(-splkCr-CMW`y1DNAC5Rf3Z z&rOG(hsmDG@H1!L5=;Rd4GXa`U#Gx9JY5|nuUCQkH85SYk=DRt>1yNQw|WDKDB^Lf zdQv2gLJ$F>l{zj2V6L)s@mjKpFZ1z)NCMk-ow4~h#Jx=;tG0KGIzNQT7&c=_EOXH> z;!#FcBaaI|EAg43Wg9#hzf+GL$HdHObj$-~5+SxPKw~BvE4# z@`NnZg;W%egYh_plmY3&2nPy<3ad92`5TE_V@>2MB4A#WnL*m|bngTi1Jy@Gj1&I4x^668&eb!sjj~RzCys88T33*Ch4}&l)oa zzVs=>n@i77mH#CUR#>Vdo-ymHxLem6TRz9*lub`#u{cfL52qAc64)R0fz6FzE+(!4 z48=zc-mR0TgE(~V8U_Dd(kTjBr13gr!SVN;rZ7|d84W(FJN5mlmIMZ@>L>+M0~v$X zN0pOQd%C<24-~b1iyJ!^gRP2C8x0V39<3* zCBaU6aT8!U$S49}S50R?R7WW3gd+U0<77!tJo+A|xMQ_^Z!=Q$g9ue(22R=L=ph=B z4dalU4yvK+rK(*UHk^|1v3_Nwp93)EIANNw%^^=>n}Y$&fNZj&3!i(uk^qrJbam=sE1wBZaBO1>(k5YafRE*&K`1tSU7+C zD5om|s6!BotrI5hCt^-00n%q>ZaDBEO&1&tzOu&h9wqMs6a={ro*N-5b_wC9p0y|+ z^fBKExNl?xNk+$t0!5xC3SIx_(7j2jrAa6iKCV>f)1Eh@3{Gx4vP#%Eo2=td=2}DI zf*B*3B2}MkVJX&B1|v6yy=7ImuF~r#6>wWGBdGdb7W7Sd`9exVLKDvC_%ap#aRdOY z*6#iI^pmH>%jKr~_#8yOzc|LT%zXC(MCOM)?zmp>BX_wE+=6DL;|hGd)v=5@Ur1`Q z&NxSZ0A6oDRC@tJIogZ$^t{JIr8=)Xn4qYYUtc1fch$M?bDD^nIF{*=>Mv|shT|cN zzCd_Cf~AksAZc?m>ATWstNjUfvvgMV6uhU%d#wJ+Dg%u?6<>1&c zpKpE~QZH>H_E3FxZ`an9wQZyh;>tNJCcxwjCD?dpvh~C|+UbBb4_a7I`#TqA7)VR* z2z7p)uax|BAStmLkfXP~l+?y+=kh6or=$fqD}X_mkEhycBV$`&Ft`=wX%%&-*)3P^ zW=_bv0!ov$dWANhaZ~IKx(Oo=QaW~h8F~ba;^ws6N%4-KBx1-pipbP4ty>|sqMhuN zP$21hAiS%EXWg%ze&(-Kb@MUki>rJ?W9ikQo`KqL=|%yqyQtPu%J2HaaZ%4&W`>}; zwIzH?(>@rXd6P4LvHrV$=M79$+`gl-!c(Yzdm$>7(}2*=m^lm>rCKWD(ad6pa+(^+ zhlhN!L}k^lCzLy(&?px5g|Kt+ZDjiF+ErFd$lyP3U{oszg|XBv0Z#9Ku!$*3a*e)^ z5u-T)(BYy!cSFSSb$yHlkd3i;Ufpx5Jv6x}dX7e>JMIlnn}a^GHT{-VC8Li?Arx?5 zTB>Gw@;rMZHfU#)w>uhLj;|0#pP?WKdZ0}gwIRohE?LK8+!_-kZfjJi?0ueI&^GRm 
zmbl}lVZ+#$M8S`xj5-|3+j-ESFY%V{xG=z$cbGMO=0$^i;;sfPHSPMgxpzIdQ}h5} z^u!sIKzj{pD%peekt-E8JvBVS^_1(Q_Zu3ZWH@QkP914J;$y8h?vZdRi`m^eDEY5_ z)h6!pyU398gS5JTuR10Uh<5IG3!I3Hti`q;sp$1(9+}bU8$Y$d=j%OEK4vmGtdlP3 z2FYwJK)wvfc`uSdSY$g8^>@hl&t!_1=3mkXNm2f1+YCflL8B&%Sb-aznlZ98J z=iX7O94Dh7V~lBNtG|!tEMyO@K(TKWe#hHT+Lu*%{hlR{f?PT4(q&?s7@pj|&RBWb zg{E8Znk*;r?u%lo7yHGVr||(dqQ@~G7kd5BsS%LXo>=*2aG94u+u)p1Vie6`KD2~( zYUE2xaTO(QDieKKtSRa`l(LKq&>7Yxd@#1K)f}{NyTQFH7G8Oepl>O9injkOO91MC zp-TL9YWX+Q@d&@@@c{ka(N27gFxmwju5rFqBHmArFtS9$L$w4za8IZ%MdDqQPVAlI z;b*bQz7~%TM@zz)sC4fq8octUq)@kKqF5?j+4jS-=9Bg_XMXV0>-)SlFxzW+uC&O1%ls;tf2Eyuvk`&J{h2Ra5^}9Pi*?tCqa*#=#Fz*TJDc$FW9CwiuD_L@emhk^dN^us$ z+aY(jXQbUi3&LjrNl!zsW{4j|&{>Qo8B+&KOVI-Un`lulVXdA>RT|PJ&3QGy@-!{U zv)r+7ZHwot`5CC%;Gk0+G4udUw9gKv=DL3)aGCNCP06%!an*6pFR@3=q@LrIGfA{=H$Ui4Cj^&i$)CnVlDrDC) z=?2a@IKO^R(mQ#klbd5@bfIZDCQSdHtzH#@Iqtr!Orlw-F`j2MYmD#gA^k=9r(-cJ znMXTYA^dMTlF9S&%)F#a*|Cx{BZ)RNJ+dc94|6}XD-;Mg;f6)7nZl_(>dGUBCBGR~1vvdiXsf2!B} z^L}4`zkl9;ye^k_p6B^|Jns8`z2%FS?{}+=3!gD`^&Zo*Kcx5jkxk=dwvN1PP^^Y< zp?$!rmT)1@2^Q1rN6LMbS9MiM+J&)-Y|xfJ@0T=jK5yRlK$G&8Y`Ig*EHdpCCtwpR z<~-2b+`I#Qx!1pG z7gnuRq{vLzLeERpIIvqzX7t4E8Lr8qved*1jmG-pe_z!J#!&AGHXY=9ta+11l;M1Y zwO+SU{h1@Lw+pXJd@wne;;}n(Kk1>q@}+8b8(TUqx$6r34LS6M`cihPL;egKXHJ<) zNxPRiU?1{cFe(>j4e_h5R8dC@-bOn$&1T#8sz^ix*B&MX+6v5sq)2^4w`+dCktmR5 z-J%pGdme40K}Pc9q{{OJhO7=nvGXDP5kx^2#hsRgf8=DyUKmM8d@8&0BjnE5NPWck3H9TpGuC$@3(?ifA!I&UIGgX8BY%_EK3S{!S$J%W5uOigj%n%h?JT252+g~k z`F;ed5Yy3rOI{;YQ_55Qwx$Dui!|>`R0Z`I@n@W zrSGUuml$D(bf-g3@}72ISW%*+jI6m+nn*3XJ#@&aT;4HH0SX>oNWZR0!|a$$)+6hb zP;}1mO$%r|ek`^53NgudkX z616~;CyS@vc7P65kUgI>Tl{(_j-}_eOa$w%4v%ET{=}>mK@+KMSNVq>dTNtYK`jla z&DnEDvP4a&7+V{$*=Fv9k>lNdYU*5tEDL-+U&2!U^_3BkwbI0Xig=p5>A zq&BD~TU;j#le%CM;*esG)T!NN7R^tgYN35F>!UEZMD(R zdO3^}hIQ_0)T;cfyZhKZkT$ObeN8JKJ#jd=;8E)Qr4VS{+&p|lMw{}8%x?aQjsVo} z)J5-XwkX8`=u=OvniuQ>Cv(%kzqv{^dqWR9kx65> z&N6(chkxYN$qdw1SF&YB3x>K!t*ftlrn?Sr9WD`9Fft#o+kL8aNZ`gfQNx=(Dc?YDWpwkkc+6*wUbDY*R11P8CnBwz>)-&hM|-=!G1B&wgLL^hEBq~hpDwBzQ_^C5=?E-*BOGL58(ZI 
z&HqdhqcL8-kx#*3EIfr?++A0rO*-3GaO;)rs*K7R>{*KGmL-o`G|sQxcHqH8u5(+H zUc6Qfw1$}M$od{jL|LT-xsMS9T!~o$5?LNEUQit#MTcrE1kt|jdXT0`?x@0uggqlY;;EK3^Y*qiaf2wq)TJOy;`+||DXP|^ zxFLp7h*c{ZcS9Rz4M|tb4VLH2zogvqD&i^+o{c4L7CcPUy5hGwGGL=D=thExXM$1+tOr7`c3&RS2Q zPUJYKL?O#6x%v_IRp;X(Fe5E#0m{W@GUGZOk{=AweP+Uy$;}c;GZ7y=ABG#Kxp(2& zaqsOI1bkI@nln$7-m}FpYaM1iiQcXb2LJtR$gc4d^t@E3*x8D@-)1L!#Exrn^S^Jf z7^p}$UY7}QI?{!!Na zjf;1`*T-w#wQswNp7*DHQ0qk)Bb(RnUw>vutEJ5Ccf$O!L_o-jUSd+5qoF7&a*SBl zxDeeQgF0rQ(H0Qn*uWksA<-t%r|#F2B^et`ci3PxpeFec8g5^<13yRVfUIz2)S~d{lCO&-=Bre! z6k25#N84T&PVIZ-oO*rNYNW!xJq)kTe0Kd!#@)odyLMquC$w8x=i_#VpV?@rf3Q(X zjPq6Kd$e@w%ILq|wlVZQSyCrkln3TmtOgX(iN~7`rg^{bKD(~Kbf)&Ac_2a7nI;!f z{Ee7kQHCHFn`mB@yudsWF=b(N2jR}5d*Yy{`txL+my6UDilVq{L=t4!Uv;AuufFFw zDi+^FNEH;dmT@E`bTpCWljiEZIG4-aCQfUM=e=+Tr+Hk9L;0!LT=ohz8?_kcqRI1| zcD=V$kwUkrWepfA2$USBWlleP7NeXHlIBTKfPdkh!^Y&pDz#KdBY&}tBaV|cNX(NV z!_4$W#*#8GM;fK(SIh3Gm|-i{e<(!lGzjUTT_M#Xaa?cBZA0o^$*epW5IOF8{< zTVV}eNrC|v)T=|{1tU?$b4toWgdN5m+ctC_jo;^}FN^h40f9X$3MWaYg$jop$yuUb zy|9?eA5%NsxlofZSbfTHUHoRJ-&&urL{dd+?~tQZdsJnVV|fC1qHM53>`3TV?&qT? 
zRmbZMrpApv`oa@t6-TW=2<&?!lGjV6U?J5 zQF68L`gH0TX`2L!T#Yj9kcN^vNs2VC=bSUSp=fQDyqolgw5jIhw(PJR9T_H{c)iZT z8*{IQxRTUbP%Yfj@_jQxtP}-ors!W98f1;tQEWDq%!eoj<6>@^#Sa`AWN~vYbXuFD;W%*V~Sfw|U1)-ugK&aMZU;CrpcX_ZK`9uPr9- zc9Queep%^3VHInc|WmozbQ-;^JcHx>`=$0E5_u92 zm(`1vzB)Q;@2>txv+pwweXe142!CTnIG_V@b+(E1xCa`95a-%zvhB2l`@abllC)cwj4}AjpPZs%JhzNN)>2^r9*IbPK$&N438c!yB^}Ue3j!BQ*h%k0ocP18+dmm8i`g zr^wTUBNCLvK4flZ_l+t)Uq3-OXU3399u{N1%EGh@sn>eN1;#=h%0WEyDKYDsviO5I zp7;{V)A2WZqOTpHrSjmuI}=(Fw!rt0m&Y>*+Wl8TdgeGhL@n{lmq*f8`E04}!j@9I zoZnR2#k8{9h0pL68krf{$rto?qj86XcDS^NALu6(Rssa0mM$wBR^K`IK$!uS;z7Pp+Pyq`RQ4YnmR8^2arXuQw>%Idz(@ zksVX+aqRNQ`X{n?*yux}X2R~F8)3N)T9s?+~XOIKlcmF^v9 zwk*0=FfpcWMdoAXfvsTC`$0O5OP{wA#tbj4;e6vheu3M%mZXs6ksqaQYSw_0W%lyf z%=TqjaJpO`RWjky(5c;SMt#3stUvCDD>05W*1J@7{ZwKN&r-$)X?xFh_*yZQ^fsHM zV3zhp%YUmbHig~%DOBuf3jGQ!(MH&cC=^adWOP%H@tpp=fXSksmx9 zC-#LcP8Z7}?e{$Np-*%pS=Kc)>tPqSLK+&HMBa~0JS~O%>p`rg+g$B&Z`ET(Yf}{H z1SiG3Ych(wr6SPND=?#Vi^v;Ap*&jso0ER@gJ2oj=jgQ@p_k{*Zhk&!*Ht@65T+|* zp%GzK5<6`+Vq+~KFK2Q&D^z_92T@cbP4e#asD?XLLP3GH*7HrZPT_g8%H7t?@IJDX zF524}cj=m`;#3m{RqRmcQMPSWOFyv-vrIj?9vwB~k?dptsM{YbdSTPWCxu&kq?zU} zZass)uO8YVQLjtgx_F9#n!jKO#W-j|^{Vk+b6v+md(&g~pBjD9TfM(oXcReLX@+}p zp)Yh6lO*H3($A+#?p zDonEBwMo{2Uj1X}?)4OWYPLJQ$-waFVAmn{v%=2!3$hVXE7cX_3rdUKXqiS=074GFgArh26UO^)I0MW`x5O_JJO z>$sNjT0btyX^lr!GH1I6{XiyHH@}bD2CHLc$A7+#Rw;O@qks}yq}b+5mTXje&n@fy zQtAEC$Sa4YEGGhG;#0?iOg<>*t)bLOwx}b+n_!NE$XmK#(*Nu?k@M$W3Dz1*?8r(z zzhdN#ImJb}HqwP-y86)v@b|QfWP)4?5t=@Wt}i#FdO`)wA~Dp?-%O3uXVJO^`0xnk zA5}(0cE`KSssfU{3rfDT8M~zt6LL4IU-{Gja|Uk%tle$<$ocm)`^AKGbgf^Oy)+5_ z)Ll)VMzVMr6}{R_vsAY#c_d;o8+!IO<}guGj52dlD(h-L`>jhA=tjzfLr2q$Sf5l( zJmQD6ZX#1+97iM2f89LMCsF!DkE^wdqgr$$Re`_>nO>EwbufGq5fvi*%*GO~D*@;a zS$fi_Gx63pMXbH`pg@wSR&ug;~MHnCDB+-ddLaQnP4cvmNTBpp&ughf`w2#8IYxkqCTJJFlD zRGf~lyHT?%)+Et#>AM_jiP$FpL%a*F!kJV}xX@jE$SWot*Ew%Ee>ATgZPc`2%Pbvs z!0q$;FsY3xl~$%Z$EeQs$%Vo7V5z0s(+_t9y=3#QZXdN%>2?3^v{d$O!&!@elm=Uu ziJ~?h2_D2U$IC7)dAfb^WytTYW}*?Ujbr$_ais;F7&(2gS135YZSQi}EhUFh*@||L 
zlW(`*<<+6*Vy~An50fnTj;{PxnI4TBdw!K<#bHW_lZhbG%bZ6gug9~R`POmGRm|Xk zm%^0j@6!8{+}7MBCa64-E}@t_{hQ_4yjjEt4gQvrhluR9ZL?;=k`Xe!{ic?i%5OGK z>@?PIl<%mHy;Pr6PIH&x)fWw!R&cS6dZ3~)t}T0}wII@;w|7S3Y}ho%0^8Kb&i>M+ zH_FZa6kc3d`KX&R)pQq%aWk(>QF0VbhX#|qtOJYACUkj76c?2@lg|1aU9TIumVY8O zAyPt?!|Qw;dcw@lcWB6(qyJpA`XSpCO`E>KDQsIq^MG0H7xjq`m;iOK-Ts03X!v3_ z)GE4E4oz%>$IE^;FAcjzk!vRPd9|(MM{y)~S`UQd*slwRcakMj z9cOYB46HyE|2f|?;_>P}^b#ZvsCT{+46PIn2km6jJT`^Rxh@HQLO<>fZNqXWH5t8V zdE}r;=Tt#*Dn6FilW*AejwUrq9sSOX-|Hzg-^0w%<|3t4Qv-726FJAe@ZJ4To{<+y zv_q+Vm&4-i3z_<8(LaX|wY{`&dzTtILqAWmiPLm&DJ5%rsrkh0Cufx%uPssbcbAqC z|Bb8%E%$g8adEcsZW)$`P&n;U`yxBBVfi!66M;*U&Ckx}GYK>oj$swsShxa9urk(? zJ!UPT&2_!Mu9ApIYTEN*wIqTiui<))v6Nc@qW4)S&?}3y|M)EBRN+zmrynj)Ro(E* zSSgztZPj*gYGQCB_!fLrnW#Ypx2?nwA9z@K(PxK^nYb)%j}O0B!LySTBlM+lF3Lm()65!Uod}W)f3}r_d78hsZ8<&5rRAR6{=+FZt4qXR z%+r@AGw1cp^WAuG;NTQ_g>hbkB)6J<>tfGyn)c(tX`kJ@)cj8@=8Vf_YP2-iU~s1l zCA=@&>xtU5(-P*S^=@!9?>O~w`5c{n1@P5d#U}!^o`>Mv5@nflGkX`~ua0taN7zvK zOqos8en?DN8LyQ;gOlHEJwkX{N1vJ2hT;(<@FcUPi%Ql$6vQNdK73HwndZo^!~SO$ zFLN?2uqa)8(%86``paKiv0Xj$Zi1uAPWKi+^Sa4LuL*S8jox45W=#r`Yq1a3D?7O6 zJrW}qmh;|;CtUGpPfa*9`sAeH)LJI-`EJ4jjr9&roB82Wnxx?t!5nr5qvnd%n^kL) zL<3~ikTONfZ-Ot!%rY?kX+@ZkJ)78KR;KiZ;5QSGxAJyXi-obX52zwtX^`{InXzCgHpL-Ob3fKz&FhtMSw zuI^S3HtTORE2RX`Eq$K1i&yPz;ZV+!#2e9w1fE=>onYr49eiPbQ}dCkgHx!OD6S@- z{Ze&tvz^v_w5OiLL{?(O9Y;orhl}jG+%`DUrB)2+wxC5gX5WP)RaW_H%P8gclYmw=4R%7VQ}fC8Pg#Fr{`ah8>P zVrTq&<#P;&re66`zc1+E2o?HQM7M%_SG5+N zneTlF!E-Lv=GfVeb|{wKvpcXETf^yP{JZ<+GiTdDfjQ!bji?p-gtir>k0Z?er&tM_ zCr<`@I8@H~mg!V_9GLjsHNBCjnUBlt=#Z@n>MNG-o;aYHZeF?70udtN&Gzx_!z#qF zr8W3pi|XDR9J_JCvBb!3j?{#wi;8>$lfL1+^f&;?vZjc9tfV$M!&@5!1sBb?>)k@9mEm`qk^&I17%5ouE!* z70VshX{j*txDnBMM9Zls&5J6jleQn-Kqw5Spx~i3&0(XtSVj`0X-YEPJ7(^eoJu3k z8-Hf?=)~m75(i!~pkwTBEm0;w%9M zw8Y(;^jqliF63xbM#9`HKHOd7b!9*4eUUgtnvM461dsRfLf3H~Z^Pg9UzJcXZ^4z! 
zR_FN{dXBCtG@+)|#xVWKf{oOcsj6fBLlCr$I(NoDNKUL^jul3Wb$(0yVB2<~h2DfW zGlk{;zvSTrJEMQ zI)&raf0A-Ucd;;7Nt4XC4{38o6jbw&yp69WS7Y|MmZ|?KGi{t%(>p?TN~tSuUD^E7 ziHK>6k_-M-vHTFn<+&xRZ$=xp5lTf4N6y4O4%&&dT_8&&oHnasdG-7=3tAUxdDi_p&V2N~cocn8J2BZ?`F`olg9K^X zMKLauZ|o}P7HKi0nA^PV@oSvoB?c10Hp)$JyCfwylAOJO&dF~5j5~gA_0ZOo z64q(F^LxBq_lU zO`%}L>LC^?5gw^&B8k7;XkVr)Wp@Sdf8T@7?P0hO z{#s8F*KT){ zgYXE=D}~Ct7%AJt5*bd04ra1R^_A{bqh?*{iT}r`mHCAg8R|)r5woA%B9}o`$ZrExFh6(sW$bQd=CvK?!K|nblVtoQuKkq05IcS~ zZmsY*GlOv^L(Cjx;CZ=de(iE&P6m4kvghI$W|!2szT35xSVyx8r_0ycZfZzWj(_cK z`9k{!d&!zpM>eY+&B@`m&LL9Wp<&o^;MXa673D#B*8Y_74a3hEWp}(7p6!O9x;v+z zlg-=4(+O(aM5(A{v(wLJ=o6J1yy>vM)-XeUOqx0+DPvXNVZ3cd?~+XXf(@2-x@nBq z6vyjeeP)Q7Pks)4y!*&>V<9fM;y18Gw;o|=mpCuZop-_*1piW`EfbMVul~I)*xv1h z^BUh!v#U+kq7FM`9JY72SXMpZK_qeW+bh4b(`J5t+24UIib+LM4C!?5s#fJST%LR<3mFe+0IzB-^N8eUEW3>IcMj9wJ zSA|fD6GQ6Ok|=>;>C==;A&WVutM9e;RxOiFCuTNzMfq|hl62AdspOW(^I;WvOxSpP zM;rXc&kfWl+qU^lIkA$YUZ3Y`hQ+04U*ft1zuJZUqIG7RO5b>T5GT*H-%u1rWqz>M zdl5-WA$^?`#_qMrXzQ?1-n0DzyVcs(9d8*_tlx$fX_}gw5}ro;qDmsQLm1*1Lc?$~ zEHsjBnOqC+dA?El_c;v=KhsK4U!Ee=#TAQ2T`MGCv=ul@##jDr``ccpGxv1-lsp5w zEm09+FR@y`+IWH~|{=*&gh)$zz$ydDD z>GVQgjM*jD%^~VDq{JvmjUajx_9DeG@1)4;`olZ=rW6FyLWVrgjgMYbuY^dW7kIcN zUDfH&mhpxNY0?{O0tr%9C_M7DQVC89{I9JtPmxE$T=ja;X4;tkr!lmYCq?bC({}R%qLNa_^cR zZ)_~gHRBirNVTMtPs5D$nsk(6N*x3F=>alzHQ4zfCdJWGToHV=5jkvEDsY5Hl_YeB zK_R(I{%Ck{?Z+bRkN6{M{5;p+-hFV1uww62LyM)1UqeSNm9~FK=wmM%H+!!3 z2MtlyWjZFMUgAqJ4UURhgE8{-XW>HX8Sn>9Q%jpo9r`4TLuh)7hC)v1!K23UW7xM6 z9tUUMw1#FXTybWcpUu=YTw;M|Dtx;`06pX#TJ%o^>X z`S~Ai3J$rMY=PVT(fo3Jjp0-5xgm+Kc8yr(!s6vPGE{NIy$Do{jK``1fp14IgsD#5 zX3M@Q2l8?5NC zG@G&U7R*IyO}j)qVxS#EP#Nln`sgBmwM>ubM7#jP*9&U$LKp2dGajiZN@_B}c>evp zGTtuwOrfFAc+>WxcP#2f^qf!4%2OH9j>6xcQl^~v_cdwt+?736Vf5%?c^+UBjS==m zX1b;Sx7(v;@j?v@ga#vA?)LiuHU&`A*see}*4`<=n#Zq~@0)~!9w@S6>WnhEruDJR zEzJFc)c^T+8H}}e>}Vo6+-m#SGT;M<_&zdhEK0W}0^6kj$whHr5V(7d(2DUe#>`Oo z2w@`ZqvrE96GP7^=PkwdvlZI2EBTZf&T1Y#^I(;6_76gmG3*jK^JhBEbXWkD2mAmE 
zmXww>>uU0tw^`M<*{)ZC>_9hL%-5Bf`@Ec2l00DXBn)~GPt1_!~Kw4|%rK)d>7cF9_BTan=XiRpm z!aIt*3R?jPeu0XpXmCuPFJ^=Qq$+P@ipIimYG7r$Hwh7NNGbU9Lu`}3_`@0yvP10( zYe6dUSM0@q43@8HN|>*|bp84{V1SBuH}0b*@6$n`y6jU&@I^)zz-0lt9zYHc@t(a2 zyselYfz5~Is)4;JfvAXJqxPFZ*qCKlj;TTRRCb?tsN?w`TQ$_fqhJ5{?&zsoGNV}F z$)sEYMVib27+@L!1jXM!0ILaXu%3)Lw+rjTfWG<0Q&pM}V3P-m@DPU=g(#etKKvr8vM~Vg17l_CYprOEGxr#)^2` zL7!ZpeEmd77(m*G4gNniQk^*#jzJB8o7N+(fUNPgG2+mF=u|wZlFY`!C*g;$-OCv7 z%93UpL~w<^z}Zavuh+_pEx6ayjEthN1tYo#$TspHTq=WRsP>oJ!hn=9 ziU<+F;kzl#UpUEyW$VsWlJm#~DQi(g%c30wOK=@V|9-u3k>Ro{N)3z7pI?-Gfd!|( zYrevW{(&7A>Kp7>r{hy2VA@pGb+pz$=7$Av%GN$Rmm|U_|Ml+#0w`i0X*kj}AbaIN z$=KHvg15ySW)-p{n}lBmB^@AdsO{4P!3hYKtpMU+7BOM}{q+T?d^P8q;9b%$`}Rsbfn(1tI{}s`t;! zUoc-f8h8|M5Z3Hk<)!+-nwE62=$x;xjvZ>Xj-XE2?gGRJj$iA+iAkW@dBR>$q!M6G zwktYNDxd;^!+RpQBbdb82iE+3cfyj0-Ejq>lLhlZkojpi%{=@bQ4c``2H<_d$n%#M zPUrrxL8Q?=bfSH6LA32&I$cLmg#oT~-$-|@76$g_+jsBZ`r z3@;4B0Uj``nt=RM|8De`J0iRtAO!xsoHz!+wv-qZgy-u5vA-FiZ2ix#TLd$>zIvqq zFXc8`i{P~8Mr!`~HPhE@)eCyStxwG=Fn^CRWeQF+jr^7p3Ww3`48XYuXbZ6?e+ps4)M!J-7q^$LLs!odTdATA?bc?a|a5Fwn+ zABaBTX5_v7GYh-p?x!Cko<|n}@*O^^2)6gXthD?00g*L;Tuh>jS;fp4+ypD}|1!Zm zPN{B8rv2B~-v3&=#@c;WjcwsOA!JxzCIRU$%U=ht*;9(GD8>ZP|{@DFZ z(xPbKQ@w%J(qrHgcwxh93Sl-*_k+@UN&pa{|3Cj?-I^sAN;8FKxEt`7gi}-1d9YC8 z1B)<-dJ%GSH0`IlvdZ9-AygPw@WS)paT4TE3U3i|(+OZ^s@^Fxc@WDXVf`0@@b`R8 zAodsV%K#<=79-mUkTxGAG8`iXp6-D&jgA0rrf`eXxOGZu|qHyIp*^#n)%MvgcD*f8X zFHmPr0>WYc{`Rm}sY=jh^}cKro-_322LuaIz7ol1IRi@w`ySxmtJX49?Q!4v>+6?E zmu5abxg)Lz$UC6TRl~C7M&uZ*#r_x#TKoF<+Zki=1~6i+@cjDyNkLesUiG?GwgPH) zO-DhzA^#zp1!eG(q5eEie2%*nFnDIx@4q=r7W+b1%7*-iY0VN3|jS+|$Z|H%g z=)8tc|MPQzwYhTB$ZY+J&+yfHl|Pf`+vqhLmF2Ll5wc;+T zX}}hJfkzejZyQvw0WeM^)cOjS_(k=x=p1>ECS<5weES3_eXKzQ@F$8^$74JJLw4)F zqE{)h0n=N9ov-j;aTtVkrjT4zHzHK^-?ln3yp@w_4-tC{J9y|dETp$*=TOY%1DHU` z3cCcBX{p_R9qa@WeSRgh#_iN6ah(G5`wrdq7Q{@a*@A%!UjcAT`~UnlP&mwDGr_Ba zc!qyP$EhmUuixP$-)1nL2T%V$f13c?{T_%pvGfG~4FYZH9@IssL*v9TBpv#5T?352K20U``fj-^@ z(f!YPc$F=ij{rpSzkgD_9S~{=lWbaO2SI-K36z|c;Qg9*oDuX+=@xA6#Df}G+gGpN 
zQ!zJUvfY(1LJKQ_d&DAb^nL*VjGdb&QiWYmR@8e{k8soZZtJKr1L&iMQ8^U zwcL}Gm$l!07JxW=Z8py(<#cQrcgVKnXjiegAJD{tVkN2&9Wsl-vMWKi#8b?-;jp;^ z?`2o*qR zx}mBn?Q4C7k)UUGu{NuBXB!NFK$S+3d8NG$q{j@8&NtswU;PeP@IU!5gh*6H>cN2I zV*HT@IQl0PiEx*w_U{t&k?Xb$G42Q!IVJ>NR;>||IJIW)!-+@^Qt-1<0p7TH?=s~U zLOT44M6Rz&5M;cm_EAA8CbBZG9){ZP~~jp&2)&Y zx-^h>BOX%`wB@q~>ve*Hd&o*Qz`Ww+ybwYk&| z^RGjDVB6R2QVNW?mjMG{SegXronp#BNFEsfJf9T8t-^B;oI&(N*Gj23VAV0ODEv4h zgEl~?a@KP&v2Yz%oC?-!|Ke)>fAgN@YAJ($^K0;37$>Z87g(Yx@)r;>z6NMXV#Z0XiY?0-XSO<@7YbW3mI~`{l-~jSlOaEC76+ViD?P z^qC)-ogS%Dx&Ec`|XMo%sQ4{(PycXSu|cSgC-PV>s)i zd0jky?Y9E?Hysv%vuw)j=AmB4x&6k&NZDW{cf7Uf=)${gYvc9rPs%rcymyO7Oa2Xb zG1I3f<@as%BcL)k=?}};wb62;T)bw$j$Fi?%|M89R_7$Vr8Jn4(!s``OJUETiR1El6W*wdIBK6678aV=jk1z_*yfi@e{3vLiGk>6DLZnKHd1A#Ad-6EY1 z%L3{S16I+%TV)Zp&zgaTlug(6J-;cCDVbXfLc$HmasNo4!n*_YsoyJhQ>Wp+FRqMk z_upOi2jRvmAXl}`sd#=w!iC3{6TtxCJ#y zT(4oziI~JRE}{U^a1DmIlb$Nvs=>;CWL`1T)+$}wpDIieQXE-`K_WFxIplvjk#FO8c!ALi`5QXP9R1UPE9?mm7lX z_ToRRg5~;(NPL{(MFOp*I>5+$E?$F~TeRdeL};ZkPt!^LGv)Ve_frhof(AKgw^VCL za8zF7e)zyVZ7a7B83=aAUXz-L+&7&Q&Go zPWKy~>}(gMlp4rX#e*$iU;ZWriBweft!s{?-v9#YQK62EJYXyWWZ2(%o-(yhA!{Ha zz_k|AuvQ|tekPQd7zIE{(F>Q-F$0f*gH~l#`w#dx*ERSOE!qCa>NrCC<-QWmkU?E_ zpN@KP9l^vH{~$#@Uz{4mSbU?sCVn6CXwdfSP@$R1JkXc$G>;%)w)kK z`8_9_bWJ?xkE+1DV=W^>5V3Q!Q+($W7}YBvO}PU(|I3w^9=5@t+ByhnS_^4PVO9y| z@_xYJc^l5FaK(qxX4m(*%cw4MZKRS$AFj(=i1Y!B4&xGn$&1LSdgm(={HE-3lVuX@R)TwyR?1zsM1n+84TB$pTBNiX2If>^%kejtJ$W2+xu;;+&A=;$M`R9b9H-&r>bgoEV zeV4BMWj{MgKjhAXshrI5>RDRB}oeOmpVwrhBiW*>pd;#2A z!oVlR{Yf2x<9DSY#U}4=elR90e_EnL&v-KBJ#3pa67RBuE02|1v89ncH>3p*NB`t% z6DGImff#{^FND?W^&>(S%?RKbuF2F!AYA?Vlrvm9VsxELb%7hp|2`cXl&0)1R@k<- zj9debbnyd1Q$+_h6|#nDl(8i&$zsn+dnLMzU!jmDC z7HkW<5z(;^(lq93bT>Fv^DNlf;#e6lt7CdSw5T-GAL?fd7Mg13Z$TQA!`g|dJeZS& z6#H6~YoMT7p8=H-eAk)%Xz0ahQDNojCQXu!yN4evzoRqD1ijHAD?T#u<&)9-X$z(E#sU>VWcR!~$ z{>>oy0#uBxUZl4MLZNB7Xzp{Z|MNEpD)1&hc_TanQ$BA|3TYQ>6xiIlt?>wY#{B(2 zBmGSM&hN`e^F(k;8yXuOd{GjPAa)G(?wDSJsU=(vxF%vzm%<*|vs4&ea;*sT?6~Q=b(oAWc 
z62y~OhgA1#mXG?5Ow@zd`fpe5M1<64nFh;BF1BulMmhto5d5nogdInBkn5-Dr(`AH zx`7A*T{0e<0=kZ#*z$h3QRR}wpo7Sh?iuj+gM<&`U^Q5l(KL`R0Ba;K=QX9Ls#;mo zAI~mbdmlCa%o7rYt$}K&7QalQZ9_ePcqQg1f^dK%;kl2@hGI&KtibC4s4)HaA?~Lr z$%P4%6f<;kRVZPS@)!an-{bJVxA0~`WetkuZB}gTEf5=yAbN`B?Bx17AIZh$pbGkP zFHzg!rkmLTqz4h>9P;t}#SL%r{fN40LpT*u9Kn7c)K$;zb&oxEr@Z}~)qVuWg3U1U z<|SM%sjcKF`w6KG;L0anBVCcd0xldyT$zwGCRsjfZQR@L-CJ)hd>~v6zVRz0`^+Kd z?a|CNMbAVA=VFhQxkFK{a!;wGUhFfaN9(e@I7p`_y|M?kXplW<)48)Yde}MwrV!ub;=DiO!M2bR;>^#X5kfo5Mh>{z5j>e z?QajR#ZtLK;U6S{h%L?pyD>b-{^HT?YKFQFOQ>5WSMSR6J6!>@_qUqm6uD|J=~C6I zHiT#hY_2Voo06H6((E9hQ|B{BIr_DeJ?}x@y6d)I->3cw4Fnwv$No^>$NOglzjrV? z9aPB#Vq4)<{Hiar=mLkFD|CAhw7X`+=D`vQH_|6Jw^W|YV%LB-5-Vdf(@ zja7D5|5-nV(HO1^cop!yqWUw#IC!F+BfaX!@#u3}nat6m_tdY*6w* zNY8)FqxUUR?g}*Z({6o<>B152K;>M2a$b<-3zDTE{eis&gv3uH42thMl)@H{*GsMy zh$AlALlJu;xyW7$UgQ0~=|N0AU7MN4;$KvD$GX|`TZ0(Sjg^P(?CMb;{pk zAfc5n7QTIPUY*$&OqD<(nDvamVs67ASB}VuGezAr2{3lCqYv%F(2)llo%3(bDSFdr8kcp->~lx`};Q zYFVea`+EO&QohyshB)Yf-1}PZfL=NRpU?0-Q=dUHifU#_rwZAdRvlZ3!Wk!k})2$WI{51Q6+%b!W_bbMQo%)Hfc) zHbevDPoe2g2muN=kY?5(MG_Wcy!tHY8YY?j=?g^O2OArcZdu?(-SiG3)FAIy9mlnU zoD+4B_wWY<4!;V3EDhjh7X;Yjga>N z*bj~%x_=J6c|WIma(^KGG-xPFal0DaNJDZZ7z}7W!)Nqm63+kPEV)GKIrO>o;}!?6 zr?~4Q71wflng#fK-A92=e#@0hV zBkhd6guyRJvqbb_+1zFDR@=VUAVs(dHfJU7H6nz%P!2Y29$Gjz?_BZ8azj#Ow@J^3 zA4|>u+4F(WKaf|CT9C2e^wW4aGH(!HVVea;VP*6pr!zd<6?m<6HhG$4nllSf$3%o4 zkka$up*jK*eK{e%2jug$TtL)~Sl1ZRf?=W7Yh*Atvw5}dhCwuQ;IQP>Hs zW?-=sZh>uR>gii@cS?!mJaucJ4nn@snZyE4jU}iT-*F_lkddI= z<3Susm8HHGl>k$(N+1cql;A~s8Ja4L3FO7H(wXhuxFYWteq(}q<}^tOtlUb+m0zvb zI@vVff>?h$kE{uNC3rJcOxstT+)M=qn?%ssAaq)25!rW1 zM4L+XHR~XI{O)ni_w)Ju`s19(qvJgAp8I{@*L_{D*YgF1nYd+LNx6E_nX^A9qo0Df zSaf=BxS^pT!np&~D?=b7t7m%W(QWqEC+YJl370=$H`xv}{KfbCMY{IgHaQ~MN#*l3 z0_D5TE1ECyYkNh?#YF}uo%Zkfw0Ak4yV$SvOehnR#phMMK5IfSHwtIvB<=u=jaC7r z;T;g^mR;b6oj#8%ba1WaT($Ws->sT!rr!HMl_8mnZ5->tz&dBE*OZ3LM%ZaNkI*3*bCXp{1W!yOL5}Y=*cyGEGr!M z`Dt5ohR~-E5Av-HEaha{JnZf8vaK@kY|P%$kJsWhG`+C~`Zc4AurH&h$DmxD<^w;0-Z7D|-jFZ!_2pM~J_`B9b 
z+Xv;i%KA5Rgl`EK&wuNACj0Gn`?aaNL4SQIEbS~rQhRU(E$ebDvMxZVF&52)BCkpk4Xz?e-7JOcF0hc;89@3zHv;>OyXUkIo&pM1R; z_U;8%v#es_ z6xf9P0_}rp-Jq#W^FS&IY~>3-L6>(BNWtph?66jUaFZ3%tbZRtHY%xD0?nXa?TEDD z`$um))8|R`1}(gL9DTC4Tfz6C9vu3SJoq>8>RPW4!r;z&MBiWfm;kIrr~M2cwb)A9c}}oFh?olTa5?eIu_oy~0Nq|rW|CVlB4;MBC?)$+VBj+Ur))&BSZ^uwtLPx>?J zbjIL`rR_OlA`K_1%)OM-Qf4cn42x9bLt8m0TVyzy55fT9E&2y^cFu%ve+09_&}j;N z{#sHZD+XdTU49w!9-E-6r9Ks^J#VbC{&BYdL*^8Cluq@mq8T$qS+ak9Ui=_mF!4X_ zpQbBTcFyAa@xOZ8_g>Z0BQR_>F8cae?05ZnI3=zSTc&GadCs0ANX$ars`8MiA6*)= z0nW&sIeNMnJsoB%@?=BWu-DU$Vd!mWhWW*0_g9|p_*D5oti+G!f1zDKB?e5NiSWU0A1gGEl966~JY z^Il+BsTOv|ag&~Eh*ED{PL|5q&<^ssunJ>03_o_E>a5r?dqum#n71L_)i4z>=Ud|b z)!l)8K%9SM>I#zcYBR)Ov8A5}B;EQyTv(L7_(Wu^9V>US+r?h<_mu2KovXJ_C8x<= zrs=jS<08TGKbyRxRnIf)v%jjpAM|55>4%QiHXALWB6->PDcE3V&xH?0TbnU>BDr3jx-unW|JxN zuRxhU`O`Iclm!%Zl@jV+lzm*P$Na^K&W!qsCD0Gt8Xn!C8%gLBoHo_eSco`E53>@K z2kxymK+#;KzYZkBRiK@=qbuyu2(*!Nh4>#J(awYsWoCY>JMSs@215$*C{$2QMsi!ZCq6$J;=9H`Oqw$;;<8h+R8}UTRS#-@t{A9a^LFk7 zH9Og>`(PGpN4p@U#w9?R`0Dkrm+{^Xzcd>wJ3oXTOl2upJ4QERq)OdK`$$t4TaY*Nh3?gi4tknlkeS$+b#Q<)Be2 z;O>9p`}sQ|eKDrnN?-LMM)-Zn3+X-c;PBpn8PcOU(hx>6Mw{ipbA9{w(oKm9(%%ZV z_1>lBFp3Z`pjSv*2i_D_>_`b{+X}c$zT$kNrasUu+ay^!Gt)#oj;JNxp>>&45mBN1 zR|i!7kxLd*-3rgZ8c2niyWszFpj6Dyk%)LuVf+Fu{)24o z!rSbceXd~(n)~}8jBuq7fw#rjOox50a+kp4p&twp%w^5yaMKC41``d4LNoLbMvqwD zVc-u4TB6PEp^}$H!v-)D9VsE^10JJ*Bi^Ab5NpqF9GDzObx^j>I+ZQp_Gzp#c%S9< zrt_G83si@I*`CwzZ@nP+8v})B-NrD`xZ{0(a>cgH@YP4!|5_N&|6&Zdxj96)}yB& z(WP|Lfru9+T0cSK{U$cdp24O7_bXYSYzWk3Yh%2=uQ2&pTAaq)AjI(IT^0ND1S@YG zSrYH5cM+B}B&{K3o3Wm}=C&p~+lcv?6_bNF3j)A46kb4+%{TBInp|^5U8tuyCUKG| z7(1V7EGa`0|B(KVGGN?zEdY}uTMO{>X@S&Fnm2({nlMme(!6|*Aj8$s92j_0fPtvc zucLZA%k*gm=9F-_hshLuHjTfl(cdNI@$t=?x=}p;-RZklK;(6FGN5iKc${0+9Ef?? 
zE;q%n>}^NT8^2&j+S@+q9s!zL5jSm}>t)mgFyb4aqNOXhq)#B3oTc&nQ#DUc5$#wE z$@Muvh_7l2I$PBT(axQRav(1q2^AeO*%mrkr)%d(XkW+j3P(@Fju;CB_u?n|oMw7# zYVW+=JJbr09H(j1Utzp(V?gbdNlX7uYq|W9`@P-}j?ZdvBLpuAmh>K87MEjsM2q>B zqm|U*DKLP_Oh=JAb?L1!JS*nLqVv+Pme#BQza+F5^-f;riDlDP^)!z>dJ3w(KwK0= zyw-qbJ%N++*2>QZ-wQZBbW}#dZ9G|AX%A@vL=^=xU411n3ickMD&EA~!eIQIrh*!K zpU2=QZbjnmI|VK>iU$i%EglnYG_9PV9^wPCf|34mz(K&{si$S3{lH1wnsNH6Ci}7@fFWQ!$QV>KKc;^P zc=Ir#LmH~@C}`R21=25t2#E{nb~Vxhw&+VI7C3�>pXYc@a`hIS|&r^S^T`5~VU zgU>9A-ep7^uP^?Z%a7Qe#UnUaxV*b_>=rLP4Cp~zk+~&=vR5jyeX#uK-sP3*8`w<= zyFS+bl2Vqze$tu64r zc7TNOY_+07(C2rov zXqcfU-;|Ing5HORD$zBHoamdW11~@bgx;`V+c@~TgHA&lIg@O!2Q_%t;tDkE`mUhW z6AFe9-3PM{BLrVR!-rgTc|!K<_&Z_r&`D5Yje$N?7gThJt}Y{+6U5$t@hbhxI}iW+ zS;Pe1(y**V-UPjX+7g$LLeS_19rB17aPzUDonQO?<-H(!LOn^~nOg&{cf*7R^q(yB za9;XD0lgl7XK+0420sO=1fV>-`tJ;sqY1PS7U;4+AYjQgGicHY10`P#xZbz9!BEOj z-RP6wK(y?58eYmBthJ1%!{#^6iGQm$A`c!!Z)d~kQcHToMV6)9ol}n;Xl0MP&K*|u z0dv>plZmERt#cUJZ61BxSq4zvKOr|vLNPEC##5n<($_zi+9jGuXPl74wUiOc;wdt-45Q)L) zHIJQXdd*g!nZyYt2;Jw=7QMpiYWZgH%JKX*?9fiI7$eK%?7iin3&95ot+#ZH0T|m~ zmWq%d*=}cRNUteuY=H~2C_VVohXzs9kR|B=smeU=ud{X{hJ|1BAs)s&eXQJ<_Lq{f4Ks=X>QQekm z8(Xdq7z}<$xJjQo((?p?jEZguwFogB(TqnLI~;c;7Nd)Z65bpjK>0C+Ixlgh6{>2&Z}_gb3Q~^oG};@E@R`@CFzy z|JANew15?U^Fvku5Bbav$c*LpKaBtJ&{SHX^m5&w(;#_^x=FM%cQkXp_T^OK&Kn@E zKCGrn>a_fK@zH=z>Mgi&$qB@fw86boaY$*pNyn`&^x7~RgcayJY5Vl1&yLW+jPM`m1)CrY$ca9j zaWo(Y)7i$A_(^9FqrBGuVv&mJZO4_rD&>w%K|#$VcGsx0RRN>E9q>#`{HRWqoRTC+ zT?4Z!w`8Euk~_w_{R9-JL+>dIzQM^_7e06T4U~gu$IN7_IvYa2L^^-3-YD&i>4eu9Td_rrs@rc(`KWN-vzRPook0y9TU;s zJag<{r7KelS;s*;QHZ``gbtJnXlc5}M1IOju52T^uU1bemzx^sh=j)(*#+$!Xg4^! 
z-9X-YecEPCqH-B<$HjuracitF_(g~iY{0COol$%Fk7kFJ*?U>FfiOSy!ss| z81_XNIXE#tCB~XT1O3jcXPgbgPtR#fgv0kL|L)QIpWysV$wu`6t!dp%)5iDP!(;op z;sQ}Lnh}7gtr}pApjg;H;2GnLd_XN$5|S?UApLgAMbOp_)oPyC31omVz$BgA;(f zxCojoJ?gw3sUT|LeekEeNxBrA9?4X2hHp}YQ{jmS+1`t%5;zmDRHvdXptJxoA;pMP z0RltRG1gD(WzVveYD$mVKr;OPeS=~E_WBw2I;eqGX>z(Yj!UFYIAygc%1DV0al5yV zaw-qTi@N^=a-mHGFRk9uQCc`>(;zw&rLZhRYydm*AI<>bF)r3N+fMTi9J#98N0^xn zWdQC5_e#?U^!XnxejTd!8mgjHEJS5)Q(AcASL^83HoWltWy!{((V9W$h>m$fT$Et} zAFRpfsDM478bLUEHsVDtFL!hYF4qCUTBz|CP}OSHOMmQ7CEGRj4!?sI7;#=RRzA?OpS`7iD$%w?*W;*t7l`WO$v`*mt(pch|Z-? zZ^NbTT^OloZk|0vW&hT9~jc)sCmYIQq=6KZNS9BM zvlGvitH1U2Pg_RpRRWlGqR-elpJxfDjGODIHqBUe^{+5rj19n1Vn2YUYyM_lNKtWJ zG4`9emOsCDd?K2!w4cfD_moon@-HxCqMt#XKL4F1pcs*p%M_rx$9xrm zuq^z1vjG!PLel3(4d|O-e10>Vxlf{K3Bn$CNZ?rDhg4|NEs((ZG?7I?;f0dHC zT&aA&Lb#R zu;+MA!FT6>^cwX%fN&~gJ1PWqjTTis1f&_+@-9jloq$KTXj zS*{|ErO3M?r81uAgdT3rCJ_8Gug=GQyozT!_p!a1b=uS}wCC89g_s+~f3?ILK<(DE z8dl~W?ac^*Nd|s~FtT^}wvGBCE^wa=jcewGp&?Rh*qy&1c6k+|B87itD;>WSIJ73g ziE35di#Oi{D!#3}p|bUi_W@$CJ{c+*NqqrAjq|GHk9RF!gto<3e293imxwO@^{pC` zLH+!0>^nV<7@ovIB!eAhmr2XDE_%i%#^OpLZ2YtMB zJ4KMCD5Xl`X_^m+PJd8PAi%nAsF(^nDz>8o-#nJb6wvdq!Wx}Oq(}T{Z~0AC6+KYw zJWJL+@-rQEXFP3szM3av%~PbD)^)ch?;U9GiTw9i9l>k8&mHDnE%WD&*LR0qH`UCie5c?o`^2WMuncv7De6xH^ZE&M;g>9IjbC~ z|J_fLnImh>TUlCPrhL1_a?zuFe`0ol$L-XfZeh$V|WDa#2HSaXTE<7X8$+CZa`CgiD@H|O(@f?iTX zY73TU8r>ruKkSaH1*3L-iFx2x6NCf*T*MDysXL?2`e1+Nz9-|4#h2 zG@%BI4m`pzBuZEqh)FCF0^K%G$b_b54_0Fy-y$Pg+!09up%}8^V$elK&hLd)~$H}yi}KL2l{XvTHF z+EaWwcM-*raTk(Fop7($1iJZF&Ekx`YrtE_Ek`ixJ38mvG8UxB@?>!xZR}F9e&WZ8;LWfX!&Y+Ca6Ge=Py%16ZQwt>ws893j`lmky? 
zHmvAT^UG8V8BNV))5E;o_q2OOCUpI&(ok5Yva@{?V~PnId|f6r54 zoCCW_9WP7eHIey6@2E~!VC`&n25XARf2fq8jGv@***HU|P zIFz`pVcm^7sk4drR>ReyB{7 z*;Ypv4E)$7>-X4IH@qn6esI1(68edLz!bfteSWR8HurCU=p|#Pb5k2qat|b*l`98@ z4mEWDIjiXS+gY@$i<45gockdMTyQV_cY7})ylDPi)Ztij-LKZd+NgAo-o6*)Msz4$ zP;oXIO^hcODjc#=LAlWLHXCf&1!eDfV0&9~WGaof;kx0s0Vs7|k9BsaM3u zp%tpkiHGKssmYK_U>x>5jT!6zw7l;|aNl>vTl<+tjZ}+y$Fo-Pf6NE2gUiz14g`O* zy&|18x=oZSpuoqKnrtqJx}fwnK0crs1tG`#4%gSYwe~|@FiK#^(q-1_?gP*sx%y}9 zV@KP7@A>&1&gVz%Yot{Ayp^gJjwGwM)S4$g_Ta)!!6vP^y5-0hUcQPf&Jc$b3cJ4D zb(MSMdDz0T=6MjqqGmhSk`Qwi=&@{hYloBj(l!$Q{&YI~<+wuG-20lFpv2wNZ3*k< zD%0S4UJDO&g6tUlvF~R>S^7A7tDGL9>b8W+Q;t!#fGYerS@vs{U~ZP0^_KQEA{mjm z5r;m{GNat4xMcI2b8NSzHV@*9BMuq;IQ!3^JI}&_VF9K-=q7q}N5e3-I$X^zdPXMS zOyFIUR=4`Xn-fI_4;*WQ1M`>EhJh`TS|I&%H5np7bwzpuQpKN+_j(JUmie&y3%JQy z1Xs_)Jq84O#Z#pz=DdW6X8(XoYbUCnF5DY7y?R(RVDEHC(%idHGtebr`L|2r-d%as zA~-wFj=!2m{G*aXW+Id&+NU`t!2}T>d1W@&pb!LRg6yuJ9_v{dr)(a^RFn7ao-g7mQmzS%({!g%0EIj{ z0;am4wueIb+x{j@YmXyy9GV+92RU_^T%oTV_u%!`6r5O4Oi zXs%0XdQZ9hS+|Qfzi*8*L)YZpL0(t14{5`q7gz4U}9&Ynw5u&aD7nlP=?Vr zW<-SiO}TR#+8ub3PVZW}EuI|!-QUb#1f-adO5V{oiFu=2twGE%@<#j@&(Rt^VsL%7 zJna^4WV`>4bJ&nHIFFV8JCBV9=}M|B^V$H%^5hr-HjbebNW+HavX^%qOoYxBNpeWa zl=4t&Z})O13d!^)r0SfJ~AHqyZ0bEwPLWnI%#_B?T8QN^sy$QEqepZiI%V%iO6FUNULiW73i zt8=+h1^c41&B1zTTR&Z(pdT3PeW-^4d8>MfhM3Z(>#|Sw75!GsR|s|-U+Aq#@6@bs zIi?=5yIJ&2UZ`zhj+%e;uhn6Ea>U4P$a7*qOHS{OSfPYz-Db^sL%T($Z5R{bW<%em zc>XX_Fp;sT`T-^*ZWq^Upx1H2eO#OKr*>Oiy=^veWNB)KPd^A*sN?8Pkv+^miutn1 zGrBji8GL4C2jMt%w-t3dgw0!sTJWT|nkO!tXM(Xi%lt-L^(q(pMN`amLEf36(e6$j@CczGO~tEDbMH9zcC-Pw%9O>gz83j}PB zR<{kquFKoxi~jYvl5>ymR`YIjy-`(ShaHj@J7ZjAt`j*W-q9(6V=}j(oU}9CQ0|e) z(;p`6HP;*>WWLEH*K5Xy*tQ2;cc0suegoxEtpKNAq zQ-O&V=RckqqSQ-`2BZ=h)58>dXRw7~RBm_70dS^Lf7axtK$)R|5HLoePVo*sN50w; zVKvtya;`*l_#}BN#S7IQs}?8U49*){Na|xIp18!-^f9qSS;4IPQ?|E;DU_n4v`gzc zG5M#b&H(0#!eJV1=Y?AoO+_gh$cMV}sMk=>F>(d9ImDy;5mLR~KO)W4$*3O-3v@?W zJ`P0>7Un6kift#>H#J_Kg=0PYH{LbQm}xMw*6LhfFubJ^iT8>dz)EKl@QHPQx5VSM 
zdYEiR9hN$;uKmf)tND^UT1BgE$D}s8+i^fJYitcdC1hQ=QjARM;^+YOs}!k-@1LYl zi4onNy0&%vS!lFRy8+5D>klC!r!n5V{k2wf%HE@;m70W4f#DBJI~sGmx;-#cKh&+U z+x=(OuGx)-wEwj@e5_rdAbJL!BX9X>p3NElc;;RH&uWg|HJ~(A>f_Z*05-vM|J|!hmDPnSmOHZSAQ?v`sg6gU z?=04+`c|CG5awztl6$z1!KXKf5;<6B9pd|uTubE7NJp4H1~Sfnyp!w~lNA zD0pL-VqvVd{h1_9=%xzmvw_XT=qQa?jtzfqL9=Tbk5t$m8AJ}yrsRh$QdQ*y< zQ_nC-tHqnOa<&ViI)T2S?ohjbMssSzz0H=2)t|^yAbPtc_=fjY3HMiWMz^ckK_WbR zS-VZdM{j2v_&`+;Qn|X}GGI@rW)qMnroQK=&T)EQ;SwMuZnh#jh`%nh6m8%43|JUY zh2@I+qhACnV{gM>w^pSU$WHQa-wzdbKJ!e_=uAlTPRa~l&;29DcwD=r7N!6acRuOP zxiy{`BKk;MTi>esF8nW{L4t#Kc8G21k!Lfjhu(AwQ`DCvu0lhy-efhx3@;!1s`7N0 zG|sEuW`c7`Xwjlwr~^^5Hl~~A(}fGxhMs>{NiM4$hF;A)Hf&6bTL}Jo#U1%gSSxB4*Nx_```eO&DJJ%vT+pz`+Q13 zDm(g23uZuLN8wC}cp{fTaFquaD@DAhw=pM)z^pn11>q(+9?mAvmP1Jo?=}!eM&Gu_ zdpDLESt_c(a^E$yEjRUq#9yy>bBMj9W)z(;-h!qWevL@$UH)BJTIy*Ufbw77HNlVM zEf2i$;onOxb!V;kcwEp}ta=7!jlt7g?42sO|E4hr6P@y|r}S3czHWtFpA{tWCwsJX z8oiZ!VoH z?Vm-LT8w{EBQZT~c6@Esyp(c@&4uQnSm%F$Tzzxy_`W(TS=7+I`GMtqrbR_?IQO&l z2Zkqyam6QkRY>MY2rwWx9Q8gyr>#8coyc8H6Nic)M<(x=FKe(~*C;k zT(mc{0&Gis!%An^0NTzT-HU7)Y z?J#g${l8i>{W?<%3*20Q?Q}5M3q&LVm}p$QVBG1uFdfS=b|eQAykS6i<`+AVWhD)zG7pvLJ-4{^8fd@#=O=P7NtyPY zjRO>U<>40hWE$x$4Ef?If${<(!lfnJk!vI&HI~yD8=VwMRC+Ny?%a$x99*AZ#2hct zUl>@shHY0wDQt>&So7SdZ`mStbEUYhiJ1D%5m#yx{$O=*T%&I4kTgL~1+?`+CoEccU-E9i4q<*GUR9H#JFhrMuq2S)G z?)9FueKl4k>cgsyXjr~tgP0k!Y;2%`MoOX}o)4Bm_}fwe<8Gl5)A5q*(%64jgVgmC zn@>HBJk_8~uz*Zi1LHkA8AoF@lSabt?;azR) zjLz@t-*;2@-1WO{2DI&rcC|dXzQHo0G~FKGV4CiHN#1aI?01D5kJxS>I$n_jrenDW z94)kTb11u#Mvfk*)D<>LU1evxPC}pof}4vYxU&BaQU!bZV~Nt+RrtwnbN?Y~usX38 zTr{2$5luQ~!V;9OiqU2~LcQKSr+Fv)VrM1ZqFjROCGrI~oL|TEf_b?MB%Z+Kax-nb znH-ye`>u;%?Z*+SJlv2yXsSp)=Mw*wG`K>umIf*me1R=wG+0NAB)y%aab*6^>O=kI z2fP(yKi7D7B##tgNTwmRQM!u`?(RlWtv0d$ zy~edEzb!ldC@*432pOt7v!{r9Xab`=s~guCZF9u2`ue>3*{-S;j$RlXjw~QQnPkXg zC0}<}g685K-0U8xz!m*TwRk3s)oFZ~aExy~&8KB2zqFYhF1I;c?3Gmf zSVs8c16R%s4VdO=#d2-MM(&r&3Lin{Utk3HY5%rK>#~3THS&gDa==G4@1IEXb7sSR z-p*{ruLMBc+P@PLNZn`JE9+Zsj*yJ|?jtYTG+Cwe#7z1|zmo|pEy`s#md!gZ9@WRP 
z5EbI#Tu*|#Gmms7`2-zP(uDSo4wSPOZDYojzCl|8(YEP#W1WPM#WZV83?HGj83Gq} zJ`^jkBl|cme8nY|tG){D;KAcn70Aw!bC~bjV9X5EnBCr8cq7#GyYM8A_xwaCr*+(*xu}Srow=EU;S%fByXS5f^k}ClKSuEToFUVU7BR3$x zJK_17vj?F*5L6ElrJPB+4TK=&coV%AwguDYuNHZ1=yx|6KB_KPL)pFE#)^T4juHVyM z*svhtFFaV`i4TI`K^7LtE2+BU&8&=>6HUl5?i2u7Y1`$ql9WvBg;8M1A+tV3lg(n)zvY@^FbiR52L0YRxI)!P4Pwq-Ml2#@DNR)HU_i z-wx1o(e_1zOlAH^>$Tb(y_p+HD)%4a{#-=m4gM+_{Bvtnx0fk7baVqY4P!|pkN!Hr zEZO0=Bs!(-{o5hqM8MzN;-RKY>Qsi>b)SPTov^<~9*f?P zD^n}=jgn#QE+iu>HCyER4i(LFq$p1{rS>t6 zQ^LI(L>}E|{3j&}mN$*t!E-gP*6H}(>@H6ZlpA6*%e%umm5sf*SSJ~OaZ>+9x z5=zE+^w}WEn4X~d3InFKhb~Ndm%nR$zm9zvEtEFr5VAk34U!42!9-d<(f8~d`%KD} zGLMU4`%P!3))~{ROHwV=3VjZ5icvl~YjcT0cmnHqA|!}xKS#d%yJmZBPCA;Kv{sA+ zrW%SfwER9gvXF$m=OZx06ujKtey15KP}8n{2OEz~zPY%@*~fvrOeQs(M`Q02+lj-q z7d~uSU*PV~DpEtp#?E~zG~;cs(YvY38*yj2DN0N$f>-t8GnT(*kKqw_a1Z@;uJ8Ei z?Yw(qbAn4RUfGEm*2J|pJ2N+ycNC;IzyR~?Dol93I$~mP`sRtw@|jXwB!ndkc=om$ z_@{9Y_Ku@{e1y6<%))6Pn+zxMq|i)3bRI7-+)ZRT5|@}Il#Y$^C~tAs2LeR@QrRDI+k(h;Vr(G9)V21T zu}z3NdevF}6pQOZQE}5qO4H|lRoF3ifal`N_dQZtF2`Ydo@$7}d~(|rXz*nepElV( zoGR4B)WqHwMQqQKVA7aA?z-ve!5HqBA&gJ`STGN7YPVks7@8Mh5+B+pwD?fW>V-hr#p%R#nO?dVgzfW`d3LT`%L-c>oHT! zMA;I(_K?&zjwgcCIC4c$v74^e-iZPfj=(jJJMT&y`14vrtH?t7H^eY{&&w(xQaXut zX(F%cGKE3p4rr~k9g9!L2q(-4GV?FWMWp;d}G< zLfM^bJZIl`iBL@N`XFVr3WyGk9pQ1_(nghF#Lx8a0R}{KZPy-)4jPf?nG0p&?w-Iu z#Hg`4+67}M_LuluPzspNlhzK5*A(e~Js)H|w(3(aG6+_TbNuDR^~EEa{=fsmIb>pAp4v%SjJ~&HeCBXs+t(3%+{OFh&e( zeB0B+iC`e_TS3+*GY+jI{a~ie!!PS4xdq}~|Cu!M6B;^(3;^b%iIna&^$gtY9>^$) zA}19?Dm|PES9RKEUkXvQ?%l9pkLxt)D&HLRg-9X4W9wfkOE$WDPKBb%9q;p86!_j> zVk05J0{b>^ZssPqF{pfBsH3a8m@_%FaiE8WVHNuH^Ns%oRftu-lKr9nyT54$jaZ|= z#aJYqd)Dm&3=^@bJx&?-d=mGKJZ|VSn$P|GsYx8^npf3uX;4(tAXV?bI+n1OT@01! 
zVzXm~K^ZoHN~u%29-0zk9-Gc5uHUPc%FhrNmgUXU(D9n!=A7)>Kt* zVK}AK1n~%x-(nyhLB0CFL)$}~UaR(Z$SZD`tL{GS?j$Upe2KLM>AK*h(*gpKPT2(q zTYF>|OPr7y2}#U=5z95Xs5j#Ya3+jtV7cBT@;LTEKW5^bCn<#V^9FVR9_`VP5~yiC z&L0ykY}^Rwt9&MfTjHz+_9xvPW8EV}BM(cz zh(Tl^AB=l^{aO-SCk@SE$lcGpz)AEMJrUCw-INni@_duxa|h!TUEdfc+7wST^n{4` zelvgCdc*1?L-MjDN0bjIGb7g{$gwpxMp!|@Ft+~-{e-fgwADc}w6$9w8F7SJ4S1sw zA9=|)%1FodtoIZC+(`RmZhT<#l5}lmd`jut0Oz3lI�i1k`1w zY^^YUyN@f56uS4hfX`@xNAGw@WI!4`Bc>dvs=xoM&}G8toOOcKy1hH+Jk%s|`;|`c zYx^R5@FDQ})MkG8Xxe1U6-4!MC$-w99MbgaMoMrS{E)59!upJ}q{7|+^EU1<3$d7; zDT=_y8(J$$YwwV}+#X=RRIbmQAf)bJW{n{cI( zBzKF?kZab^5wf|Z+*{lKBNAwnJoTOY<9ew@)3&idA)Y@1xZbi?da{^SB~?s20Z$oFKL- z)2GP6&Yl`9NwsC_;CfPfJPumT)N6fDA}DRVQSyI2E$PmeDzM3Yu&dz!XzYa;B^hr$ zfa>Fb(7`b%O)|PxeX>{uzZm_%=CR#93HVvPD-)duqT~NgD8tKviRgVZG-3s-p>ny|gv9@TSnEstCrbsQh^kDtaEc>O50YLV5yJ7I(4+N+RiQqe5fqs<* z41(j`Y1JLTvJN!zK&eUp4?qwHXyqm-%TOfn>DPcR>EPCM!4l9ckAtZ^%pYYL4J6Gq zqb&*Y0*Q24QdAcu@u>pv>7wO~5ifW{LRf)ve96CgA@w<1ywF6o_T(+Ao^8y0f+m15V zXFfI^FwFs5p9Ezfqk*CI_MPp0BZoj>g>V7Xq`|@Jpo7NG}_ZQtL4+b!EoIFYDL|{&O28<$$)MS;FWH`zmXurXOvCN?}2DpXP zWGc*SfdD{Vt-#H_ouOA0&RT*kgG%%@u6VtuY>NiyTJq3w90KJ--6nOyJ)B&3SNQ0zV+tft~aH$*XrYiYJBOb~C(F{6?PYI?GCq<&s$a_Sre`KmGq6kLC0&LAagM zE1yyBomtXg&BV{*a_y`QhvgbD6#5fVl4aPpzA+p*oD45CO<7?X#NL)?B8&m;+I1kL zJ#pzJU?DA;_;|DdGw3S64##^i4qTwKfRo>y`>le9rry!}0a27XeRB^u<*uA@4vgDa zdjcSybkd76AZNHdcdfgLKW>O9*6$JlW}oG<&zCYbJEhaA0P99~=!t{7UL+uqEGEk5 z2Y_jG5P+_%=SM=SuHU3Y10{F6+kxSXf2iQbV4xYmsjGWg)ml`e5Oz6dfQO+KWeG-i zzkoGp>;rhyr4KNJ=SleNS^z)>dafEUQ|pkBZw%OP6LbrXop8*!Wt>H9jZ76E<13>w{ zr31<7#L91BQgycz`2j-~7U}%6@m>M7uRxny&DfSsT{jD1vT;InKd}*F+kN`VJpc3Y&14J(4rC)c4!|6PoY*sQF7zeD#XdT~dZ!kVZ+oLSwI!}856n-lJ`yD9!|IaSa zhCcv!0pTLo21HEh$@hHBG94VjdH5aIKA9CU?n&JPGPh!@vd;kapnACc7qB(v%3p{Y zHYptmYUQQ$)7gyw01~~RDv%fj-wlT4_X2g_MBP4ZcRCn$k?s|3qX0l{A02x&)On)* z;!0ErU#JJvjSd@B?FtxN0{^aO?4|?H-%N`lZ|vSFMFLJQ_?RvOwRSy7&-nZoTn=d8 z;CU92+1k)i;HE5al;I1>PAx6mHK_8a4N`sO`>FjG;Q5!B=^l%VIqx$rXWkH4`UfcG+wWy7X7?zDV{DqqHi1EQTCFG!YZu@p_dO82%KTdm 
zr;{@UsrZ>>2cTOLg>cJVg?@>K8fL$vD_~oi60H-tzW{epG4Rl&?)U6&t!6!1HA1)# zUy4PS1AJiXb$sYZS(;7&unmSijBz-F=XZZ%`mxFO#N%?%--n9m;F$S(0_jqVnqVjU zn|Un10Q~E>O3l35{*Fl*d+er>GG~24fZ^yB`x^wPJe94*7-?r59l|wN89>0MlCzX1 z&m8V~1>PTW06yB$ZDaoXr({=I2F(ET;&{pSU}J|@JFO%*${N$WJa`sh_`rSer|$+G zpl$b65-8TxVduf(p~+2hB%)_WfzUWYEUFN(>OEymT}w1UOg*6h4^t=Wzn3*Su8)+U zBR)5IAK*%M>dob-k`J5#WUjseiV{$Do#)yo(^loyqogcw z-gVS~EDz?Kj@il+{NDIdg%P>k>yYcU-q>L>=c-|Q}R)d%4?{#|K z9lWi%I!!`<$m|&suL`3z-5{$#QpP{aNNuw+B<%ANj@BMwFPN^u4}E@o>b=>CbW^1U zmh*wzg|&uG8qB&idvq@M8-Grwaq(yh_PEp0?W50vjP}*NpaIacP=1&&r-(23ZH%&I zYhMC*o%p`BF}S&($#2@UadMiOYl{cn?H ze&^YSu~w))32NTY;)%wBoD97Nq%5D@cTL28LEW2?%!&4|&m!N|DCA zIaV|KxUS6wINRZ8CGbqc*MIs|uS?ExObvEFb!w`;tI0Mjy?y8ObHk#I&!{eN2;@o^ zo8XTym>|H6WDJlZJ59ai(t}gd))Pb2!Hy0`X<$~eGD{IUhblZ{xwU+d>%PI$^q_zg zKc_@Agcn$4BMQ++hoxCgkzq(G?C>Tf+kHME?o5$YbNY#gq5Vo4ic{9xGD+`V`iYtJ zQ?SH#Pog0ijcgHgbdPSo7Ra3k|5i(D7tyxAaLc!rk!uO|y_qrNmAB?4tzFzCy(dypm9cc4h04B$8E5cvuT{q zA(K08TW+$K9EzCB$-S0BvDUPXPtX9M%Twn+Nc4z@$|lL?mr zv!PYvJ&Ld>G7CE)ZB;SCWsI|faVo#E1-(~40V@csW})(2YSG9JvO1CVu(V^B4-eI{ zv+uZ5qT&#iP%(pCl z8TP-90FK5#9YX8T8TZD9yX!7uqljwz(#^RJF9)B)t#zd6IHePXnRI{htA0zx^YZ8^ z?0lLz3Gm8aOP$7eC;bTRR?L;Ic)s|oIDQr7BG@-_@maMOu3*u97;&!w$hAKKLr{*` zsje--lxk3@EzbCXUt)%7TW1>)n6618o6A2SqflyAl2kl%8*XR*(+k&jA1TV7k~19z zxvMQ;9Xv)fH+3HXeyL9YJ(5=_Ltc5X4>dGTSqBB7M$*fy<40@nLQ6a(l-l1VbQ@)t z@+MtQkPHO$vKwWuW71#Fzj!~@>-e!EV4k~p#^4ym{(-M)k*u+j)ZzHm@gI%@FE+<_ z%c6}Y%!+76n?A1}6rmM|ALd&Xu`$_46G|HZI`1uV_&aZg{@StRGpLi$lYqurotN~a z;VK-ubjD==#^_X|x|NcxsT1RE|mtW;~x!!9zEo$C`{3JMZ%y41G&XqUF-~Lj@ zVGZK%%-5m|L^{Xh?ij234f%KehAO>Jm#h-MTo{IS;kb;RPE`$CgOS z--_YWYsKK@eNAp2xaaVyo@`$8?bg^vQ=Sd??|qRCLm3cuI^;tgWqT(&lBv{b}5_GfHBH(Iz9g z-ud7EKdQbw9?HG_zmT0~m>6q}%D(SQ*1=E_sU*t2RFaS-`__%IOtg@t#waRV)QRkS zWr>th*&`}TNECk8t@Aw3_c#BXb6#i6%>7xe<$b-c_eF|23Z$nY452zkLLkxp;(3*l z8;j#NWz=5upURg1?ej!}6J(Q8Kk~}_qF!ee`67~b!GYFM_%*LA%{9uDBUS0qmHOp- z@PiAGrl8`*^J2HZ)hu>%?~3<%OX%WfI}|HJuu|}JjMNV$Mml|Sbk$EYm}qL|Ubs-v 
zrFH;z(yi_^q5!R!mj|JL(69Hh&YpyMt!Y~NBhk+wuW|^&bT13vsKR!?m_|lRi@r(< zHKo$2-_zjcxUOFgQA{yB_)4a-J(0LZ@4U?qAHI2WYGJOIozSGS*!kL;=#*kiG{w(m zDk}sgnfOa%y8}XGCPh10rak&9xJ0d+17VELHE=;*$ReYFJ57cC3q}U*rzjo4cVP7zQ((ah7;S5H}On)xz27y<59|LR4ubj za$aKIi?*Ddn(*DAy&~L?U2MF)zi7z^l|npKY0O$N0v_)9dZ(g7=j+`vwkMX1U3%M> zi4oi!&C!*5aN+bF2~Fe;7tv%>HJS zINsoyCJH8p6whP62TdX=PQ8{Byp&V@t)g%6E)>PYMtURW9-i^|v(6tEL!Ey$1R43C z2z2I*{sf)UIr~)>vOhcMcS*EyQq*;LLCE&VDgW`uiFcSN8HG}PU6dd6qu2aJXeDB2 z5~ui}`#hxP0OrI=WA6ln^t+)ZWtb`a^{-D&cU)DBTv@_;55hs{u)j&-3}0C-o(jAb zEL30hhf|riIc@iJIUV6i=FSQqst$(d%*`uG^5n}=U$7sHX-tcplh-rLHDT{ANie_6j2fGrgAlowgE)zAFOnX=Oem!*!BHGa%ky|DVbyFOD)Vdv;aTV9=C zBQdMWZ9FMrwCb>5qgMomorW4hH79F{e#-S8sfh%QG!5d zW^h`x$`FWgt{5B6XW=oVeI{s)p?$%RE;KTmmrO$TDK1}!Iq+*!OHXMyK}(3HDW%tp znD2J6Z!LRK#{+6LA2$P}N+ai7zPUq_kPh`U;o7Osj<9Mk`N@8Bv>9|xl6|x?%c=b> z?W>}152k*;JDHS&3GCMT`cP)}@GXg!92n54C{RY?wl?`b-?^AO=I|4st10B6-H7N# z(=+KPpEVBMi1Lh>fOt^WOrlzdV9gbvv8OK;s1W0V@Ya5 z^Chr#3#CuT68|_*S62^l{8C@^tZUAWMwYiR@jmg$ZJY~c>HVj3ud{_o{g0EDFbpX{ za(z0r*F=`8P|uB>#^J1p#CR5VUp#lFzH?1Q;m{`c0R@>Sb`Q%ie1&T}DYJ>j90UVt z+Y4h=@n4z(=d;+x)fnw(g;u!ABjb(*pSNckc+K4_HnL_L^mry_s6jW2&qI?jnv3l) zl}xx+op2C`+rUl6Jsxo_~#S=43&Xn54>IXUY9`a{m z#8{u9JfV+DiEw!+{+N-?-}A zs7N2f9lBAv-hI@BAj@lGw&MS|H*uQmWDj80)?u<&9^+go<_T07>mTFn(SJ}VL{`8* z2_ej5T8}2`1T~FqC#x$O?u;kq%I9j=R3^}k=L-9_U{^QBKA973Y3^eDJL&n`BwK|0 z=ze@uI@aNp+VMg|>PY1^=bb&eb1b=mLuQow#qYI^iLSRDhj8(JLtPr@$?gzf*Hno8 zh#Jn)x*DUYTFyT}*eR%4s>{mJdykPdQUJ~JIq6$!6oYT< zRMyljW-j&*#g0}Mu{(?Q&eyft?lwtPP{I@&Ozdc*BaaiS8I}Tqv5?qI_&3Cy_?7e5^QcBhS-&X%~Sk~G0`=gLCtJ8p_jaZ7Gt?UB1@*czFz zoV{22k&l+KnuJhH1ZD;k@pM>pn`pOl17Yw=dCDDJs#9Opf}HzDku|4>V#NY&;#4(k zs3IAgfL{*5>`wUObWll`uY&S6Sw$7JN>7CUV{EQI>p3(zRF5a60Hh&ZDOiAaZnL0tsd=^){ja z)aWSYI@+6divFOZBrFL9mHv2`evID7o~o+w&gJR74jD!qd5t)Ep%nSmP-dj~UUk4; zS>Pe*@|BLrdHoiap<-qnMR5&cCuOsAw3$edCE)!bB+7uUKF=+}zPUl=}#0a9U@o6~8iJ)Ua zmoUWVJ*lKb+eFVqCbc$$+*V92HZ$l{@OkX5pp@WTY!1d%5htbd{BX0zL|E6e_&{dr zZ_m4sclyZBac*>LBg3yFr>;%iT47)3o%^Cyz-#reX6_x+TtU#=)#AxOB_eD13Z^1P 
zDJ!OsG^7VyViSugd$gXs|3mwuJ7+Ou^~vh(;l>j&Romu1|Kb=6jLRspSB}=1 z`gG8im8!Q|>~X%Abts|6XId=0*<3NsZ_Mutnn<1j{LB|iu`s}b5xd#or`@+wwymi{ zG-jvp8pL!sz6Qxe|G3-nLR+vD+?U?eXxB$!p1)P%&r;|9<&)k>ENM$AJcfwO{D=pFa42qWM z50PlfAbk{@n0h?-j|gsq#`PAF6emG?%~Ox@A^6?I(~bx4F|sjUVt0J}#VE>+yiArQ zE7deV-9)zGFCoGPME%41_WGU;X)(-EB2gMxIt;<2U&-(b5<5nt$Ea4ag2p&2#MEs@gN(SXJuTeXFB4}kbb@GvA=*milL>ticFY!{ zT5(vs>$u<#2M@uV*ld|61lt8c=RNMH+A7TWg>zqjEqNxV zSAiS{bZLW>bR{SWi8J^OgM@l|w1hWwR)SU*Ig$RP8nK|`^Ft%+QHKwX*Eao*TVbap z)%XRi%ySyLPKLEymTZw9I?n8fT{Xi=%vB=7pB+ri_Y8%JA`7|`wH?1HZO^U-?whm7 zHud~2k>)%5Q9IW%SGBF%b02v_r&N@BVUzE7O=-|ieBj5^{NwS=hldWqr@IGlyk}ZW zxG?BkvyffBbBOJag<@UMsUY!%Q!cx8+R3Alb7DMYdbtBbT2a@i^gVjcK7k^*MT@Z< z^H{DiD|>;t!`*o+ECkiLBSRmukMotEFmV+@+$D#74|~8`&p99kIO&- zm|~w4SpS~d%eemXvPyVF<^u%rXZ;(EvIUT(4iYl|1RlQu_KKWkbyzr`B`JUs#TcU< zZ`j5cZ^eG8cE>nRygbhY{<8wjv5bB0m8Z-X#UMwCR*V_ZByhFWZN72a5!U^9jx*)r z)Ih`e(cEj%J53(BQ+F~JB9jRj=l+B{BCQevR{JTr#XJ#bwo4FiW1`I!nVSXMMlrDp zg4{cz6juS>eKGw}K}C5I%t$m{vwtS&Sh%EinVcsNP1LtX#xHQjaGO*jbWQUq1`0$` z_TP^cVn^AqqAj8=jImcR2u4D|KaLbT%Hb=<9*uW1!blqDdw#BPJ3Ca(ont;)gv&GE zem|4>%|V1!QHr6+E<-A=2y-us#YWWnn&DC8sHp8V>QPDiYhy>5Z3~cHhQ~7Z$y4(U z-?92i^AnCUJM`km3%=}XI$nusFl3O9D|2_Yt;DouvA7fyDrVXi@f@QMalM8l>66GA z(TB#(QG4wW?_Pz&M|#+T^sZVYl(*S7B6?X~Zc9^PFQ48^#q(0s(iKgH7yhKFQC1?D z<;VkHrY`P2*Ao#e9K^#nE|?0L50!j-M4j!<)t(@Wn+N{6zsuUBv`wu5vA%~t%UJL6 z$D%B(hAqhQ!YSr_-wl=S1Yd`H3sR$@!GZIgH?V5VS&*B{KF7i`agX z!{pThBzBbBS0r{10|R?#ry@p>Fjkm(6X$$|aJBfvgB=yKcCvR5-EVz2K1?N7#1!~V7A~a!!H08RtVXLgz__`cV+Xwbj zCe|>YNv6ksZl6k-H^Q9=6xmqZAYN$E6hSJ%8e1g>?yo;>Z>rLr_m$-itH-wR*MGxI ztcGh#ElnP<=zrk^V;14`g9JsYBkpQroUZ$$Zo(B~Ubfwo#@jO4iBa4=f1>DO)T~&^ z9(_I|VTnuu-o(b&T&bQj8jfkKE{Vtz8AHLOnH$+UFfoH{5BY@z6_ZQ|u$L4xRt=!+6H5w%^+6$`*K z1`>o69&zoB8qlCBVev#4qCYp?u3lk0If2z6!=y0LQ=I({HXvG*Z(NE_HF?DwIgU(- zI`$$VETT!8r-;vE3wL3yv3rf#q8^;b-r=SmfB%9x&zdZ;xxeUGlE|mWKUUln_y=Cr z?#PvIV~dztdwL_EFW0P1b$nZ6{vX+?;HhB8hsHQwms|b*ZbYfMJ;YOOGG0Gk*c`99 z6|{kr7>*R&HN}~OChLwGHuT-)yzO$OlC!1} 
zYc7>hP){;jqNfD?H*$!r%_x#>VveJv?$Fs$x=_fAole1|&5&sBy(BlHHQzNGlYQ5`zMA(I z9Xx&fUfKAkV@Zb}V|F>)NRqBmZ7S`uS)-)vByFxaA7wsP;Q7Y9M(QN<2{$&Y2iKk+ z{Ri_b>#^N7fzD&5p35}R7USl_LL9y+ZS{(8#rk`n4k<_|UoSkuU4I&J$`@+G1%%n{ z5}#TX`H|uK>D%H^!#j7msHkTapN}>z_SDcjZTHP5dyYT*s7-9Z;51euJ#s3Y_ha|d z<(`dwe=O{kfJV4-J6B2+@-mx6v7qiQ$*Lnj^u_goey~-C<^`fJ@DNxHpZquDQs_-_ z?!})xQQ0vPt;k?`WUWS>O`S3nO-aJ-C#&jk7jY}zbewMqO2D$x^W*}W=C+MlQhnrd|K${D!?|zZ>>&#q2xf zuJm-oEGbZ|$YehBv2Y^vnZ5a8e+6lJDj=mm8eLA-H*f<;oe0N`h1QkKpxns-I z^=_H%5&p`rc!g0%@WgHD+Z~RE_pg(tR9~rDh0uEik&=z7`EqY!`n9Q-M7-`5-XSQx8hK?hQ-!g5m;lEzb}ON|gAkN1iat6AfbPp4 z^*A#SR{I!|F3Qx5Q>E`PvNZ5ul`swW@#{`PNZJiPZw{${>c?0egAr&U+d|I)jA#!Yx>O`A~G@%agXF- z7f9mNts)CZ%8GWu>I8FDK}Ep5R1ls9^S&I)G*j^TvmV*LCbbC&g4l>qKf;VJ4zaJS z(hm>&5a>HWRr@A%Wu*TOEq?sg9Hga#Y7;slBN4-#x}dtEMlnL4Yc|Vf)sMC8_gsz&j}#A~Gg`80r3d^VGR} zH=Pq@3SpB-EAzjD0WgI_boAJX0ysokIAR8>0|gKAY5I1#zTNBTBIO4MDP6%Q1PD`C6t!l*a3q`-djJF+$N zO`6_qKK1LzA{^FOns%`f*EWGZRIJB??$V?f$bB2Yh5uZPkOiU4832#?FM?$c!`-V! zRMIx*hjSI5ZSYtQ13Sr$mz$=y+_Sv;V0dWmYmbhG zXspEAW||Ra(bo4Q!T>YX9B3<8{U<&6C8NMOINk&yb^=0Up zs-C!dpbyHr*HHFiwpPs2k>W4jr^O~P3x!|e<2 zoUVKxpLeKhk1%@~w{2H@*hm!9#nTy0W|d;8I!a0V&K>a1Fk=yaHBe`wFp`Uo^$EVC z0y-5R>+O~4disx8=1f9iX$hoc2TltqUg)vVS!glYe9a4oA)q!BE)!1SpN8JpVT(!j zPE8^s*JN|&%L~pfy3$5$XJe3lckllhIG7LRk;3mtSM|z}tpG-hJsNUtF>GOQrmHGB z>k@&(hE3%JR)=0;P65GGjV`!i@0H8Rk4A=NDve@;Ver{4lY{4gKH&u&D7lrR}SY$bL^yS5;L<-WaJ~LysLJ{g?$7Hn`}M@h6^J$Uz51l z@;mYC=cmL16>cYub{H)qmLwbC%CebiYq0%T2~%k`e2|vVZIb%+$roEO8XRoLr8 z96@=n%kE&Qt>-}owE)Ia>ug?98Xj3W;ws`aYhEnWhF{z;)7ESrb#AJk3L#k9Te4sFTfCC z8W2G1+$wuXBVcGOV4wY5&WXHzGj0wJpt{Nab0j@yM}1BwOQv`N`yTL$;0M(MoUb<0 zWA_S9BljT`{#%7fPy*da2K+r!Eu#NJDCux)-_fjVwG;oxYv=x@!*;+VxcEJ#ZzL3y zpQkEMFYOx6-^c8{BmQ#T70*cDTxRaIjCJl$F}-yv{Z|mfScWkU0)ydp#&8AT0-b|8u4s?&XyXxI87!Vs96F0pcG^e`!y z)4;%Gq|CTT=?1n<`fF?7E>ocZ8;f9@_*XE|C?~4Snw5ii8dJE-Pp0awd7|UPF6MBTB+cdcw()6di8&gW4od49**O2Ui*0x{4;0`D zn^U;w=S(C8!+>+V+|I3t2@8W2Q=__i5_h9Bqn71NRKV{#3QPAg37miG6at!;U%Svi 
zAhHVMcO3#X9EOi%j&%c#R13I%cDeV)By7~OuVi&a6-1OhX;?i7;f)r@UrrZLqF;DX znmV+K#>T|9vp*+_V;Un3s;-UJ-&J{0F+V+f8Ih+nC4Na(GCs!4nr%e9eL9M&QEmzu4tyb)1VivnWvs%$ecGCU|jhEMipj#PB80Dgcg<}HJwU5h=xN{1T+SdaGI@`zNT^PT7GFP?dH056H9+I{mq~6#@^wg}`ii35Z|=w~{50mJvH_D)Q%xWPwwjirJ^A z3s~%4w`8uqGsYbJb`@VZtwXV@(eIA%Y_Eq3;g6q?=N`xGH+0k&89DVxL}ZZBY6qe< zdYLvsJ%$JJT20j&u9Bkcx`-$`E0SyAdD0wRajY7&mP+e``d{FU` z%FsO(AI?Af9LJKB0TH`=EodC?my$ch|MRQ=iM{w`$;qykY#avlPtDJY$ostZ9wbUh zp9#eb?#^g8#Z-aPjQEc%f`|G*{0g*YM|}Kz`A^Qc>3(eA&GJYQH`@m`_Q*LaWv1SQ zuX180A?85&6P+Q6*UZ~$H|gE6KO_|To@mJeqr;EYUs{|DE{F7VQ@+9j~3Y}yy;4v zE*+(DkqWbmVRqlYajX13pUt*7WUb)2BQHJodRFYioj*Yu-yb~XR8`P}amH*hY!B$^ zhR;e{_YQ$G3@#CBFaFwpEm@T%J~aCdqSNSm-=B8I8*BVY#nJA@E(DM6P>Zg3F+R}?&S@!B(s2zzU^*vN;)0y z)Z2ddT}Byq$xG%46Tb3nX*ovjDX*Y5&uO=)6?#q$5&6zD)SFtWhKO|(M0%>H#SC{0 zopTGjZq`V}jK+K&4b{*sswix>UO8InKDDdq&7+rcA6k*!Q*ElB{9hjmrm)fX4iq34 zUl}k36v->|-=6F`^Pu>x8>Oz9Q(L7)6-=;~1z$qb$0-i_SGa z8Hi6zR7A(J7e@s(cdw~TmSA_Kq#u~pcq6p_{Vb92me4s>H3=tLR`zkfwL^(O`yRr8 zsgm0s*0bVgjwB-fbbku~&d+}>j6XZLc0y&eRoA-l?1P6l8mUpO?A+7UPQQl1dHW?w z@K`d-_G3r2gZ4R(c=WbJNOJZ3l+m}nP;t9S`=NbJ+3gcfal3w8Lfb)fZ}9V&&}N*S z0w5Th3e)fZN8i!O_d{p1ObZi_KK5Fa0ye-=V;065@c(`&`sp3Y+!He}JlFWZ8?q>G z+-cY0Z}(;&cyX5@0H-kH?}hsh^PKHeWwmzjI}i3!T5MOp9SE@l-7?Td~K-j_En)3Q23 zBIdMT3^SUzsBFqr!go(NmZAIB@7lL^XFuyY);$ki^yzDPiz#oZ$yz-Y)+Nu6g?0o*1ObE?ReD z1C4TM4qJ$Y2|6-L$2Y~e4R%6(j5mpyjhT@ch!Mil8N65M=028c8i5D}-ReCpbJuxH zD7lr5j)hIP<~eezh!;VQ$uzw)4aABf>@q_j;5=cX7}%X@%u6u-z8xR?tD)&W&TIbI zc-uyudr6dE0~@zx6|{=gS|R1<5N+l?%g!7mFAP>g zFi)`O&yDrN>2dzp$G7{3>-I^-wW_r{D!w~6s$)Gmm_TJsHRS1+DKqPNbug40GN#R# zs)L;;70Q$>`KA?8o6-bZyAv|>x9K~IC}lA_vM=JlWJc`JDEbB*Vvh0{O(wr?$WW9c zc8ZIf8B=VZP;S9zYf#F!q1z@3yv`ikJooHWeVKztFtB=kR5Y{#tw8wX?RQ) z(GXcDy^kjucKodGRC-A}TUz@B%O?~~?{_hqY>trKhhF-^Fv6*iV_8LEO}=Jcmxsev z0#?r-W;0E97Fo+G45Btv-5^d@Fg9)f3SC#T7avl9@%xo!jS`^H?E%nLv9H^Dw7?M$`kDW{Fj&y0cBs%mEwtIEsZSNi#f+2o%_+vy#Y3#aFw#RVS zT9THujKnPm-eEVZJ^J`I&pUA!n#xZc^B(e7&Xx^F)T7 zl09gYbopi=vD%7#wey;(*6z@G0}J(CevL4Trv5X#C-(s<#9NuQRK{exK0h 
zrI#KCl7h>`LII?1pWEQz+0supcVDHud6v%`1_mk`?uICaBn|*1L5Vza$8w>iZ^T?S z?uu|o;gF3K#ulSydm%vlTqS+B81=A_mEj)dG1h=%wjx(8B1t!njS){fl^aG`@QeiU zj`^DAr02dJo<4B8LUzEK%}I=pXT86TWv<$jXir(OLz9Yo2d7l+h&bXl_8Elkr1#^a z2bv=ed1Z+2I3GA{^asHmBi3N{V`FbejznoR3SR5*qFRT8jmr*6^g9Wo2`Y>Z9Zr;3 zaLA5BW=HH1+sWZj3mns;l|&py*GNzcxPpuJ_!uq9rzNH-gYPZY4;@1ld?qi`{%Ao! zBLGpMS%O4o>&w+&`bz;-J4-lviylg9@Q(~}zv1?CCE^Ib{@a$o;3KV5`XU;MolocV zRN>1rWeXs(Y#Q4eoYSX$Dbnvf38ROheL3}r$@7ke9{`)UQOHvotT%r6Sljm(DZ{h6 zIwK{?65oL-rJDm8*YZ9w&e&~zL~ND0cTKJ}|7SCSL7~p|ZjEtrjdv#D!5{776aGve z##tYMmGtL~I&Z3R09r z$8M~}Db~-J?N4|27l%XBl+Af(&txZf*nakpuIYz<41Z<5)l0`|s=?EGQ;`cE62=KY z??wEYNKnqTq#MwCQ|g5^#(G)KAFEmFJK*O4%nvU-Q6WX-W+IBxVI&25IW+d9UG<_F zIV4Mvy0h=xqclNm8*;fg+x3^Wl?YL{OqOsICoi9G=85bLVC^Hh8Gpx~xyJB$L38tB z{r2Buy4D`4Hl?3eNP)&#rO=J2kR;QYNeVdN$~uK}KKPv5Qit+&-^ES<9ktNe)nSPv zoY(@M_{cR`ji;148L9_kb?I-hZ2?NmHsfu{XUXc+1pCFrf8n*x5sK0M0tp*Zb4*VE zf&N?!<>wW_cQS|~VMO!5C%#-aG9y6h_O(?I^Q%OC*}=a2C!Q-Ol5&~m*UX`=Vtv!e ze`+nkCM6^^K+I0aWgTq-d<)d}Kea-E9~JM_O6}OO(e;E#PP>}46TtbA?pa)9WsxTuXs>g0?wa(OCudtoS+ zo2^+l`r86zNm$Ay6iB9V1OXe?&zpSnaG&i#hw#nwc%o8c;h_nJyv1Ryt9wM;B%NEKFXSCk_BlI2O8?8)qI<>|y%$2eBVF0lrc9+Rw@z8bad!xscp4p5N2I8(|T>oM(gWGEI zVG8p1?92aTla;Gzr&t4y&?=mIiMd`pWSJ~XH*HZgRo&R^godfF4bR&kwX0ImadmJD z;8gs^4Y|u5EG{3yot$&U2twcP^-8GIxYFw_8@xc-^b;z!MCS}=CTB*I%WGt2=1BtW zs^O9?9JWP|E-I03O#mBXI8!WQ7hie0zfkg@@j0SRf3yM182#8wtf^oxM%l1twECv{ zo8CPrxKx*`@RYm2=*&Z|l3VqPJM;B}Kb=mJc|@xF={|7Oq&b7i=Df2>sN1`XpS#2~F<$QvCCi1(&auV7^CsE})<{ zF0A*w2o2*Dtzl!5L2TiOa1n~tj~eMU&;BA!%#s0AIa$J)KvIJ#G}#*uG9#^D->93+ z!ZxDdkFb!-9)m&UzPeWa8=lJ4rT_2=)S~ADE-qc&{RC%y^W%|M3jiUP))R#2iryO% zMcY-2q-e^G>GJ?7cQn}sk=m$vJM@OqX+94YeIV2(^@zmr)))P$v!;DmdLnj1iyEuq z@bGeBy`if`pq1Qu)G8`jP2pIjvFF>?uyZ1}(WZnO%d$PH0RjI3FV*Ph0(iKrj*IfM zJ%=VBIS25ecbRAOnLq5rx(zr49wkm6Mns&;4{gjzY&-cRS$#-Do@*1t64o97aL{Ko zZzo>MU0?iXs07Mh8&4AbB_NgbD@o>FbR%>Y>H(s>1-w`C1(k9OqmFuN{%4>5%h+@~ zwxDXg<^Cgb`?Q%a+L~t3v0h1uClrY#z~3w1>mPqFwyy8ck}>sTBxdIpoCFY5{Y)wP 
z-0}^6IVxrgmtSVzM0Q}ObuI{>JX2J_zvw>G1o*cm3gLRA$O1_qTEo!SjA}tGb627A zA=a4y8~72M0o?T-yv^wdFku$~zjEP<(4`K;%u2nWZi-J+b zoLS)j&N}jTID^%@KSNh_)A^%lb-O6tqcHu;Y`i^n?$PzVZXX;AytuJ^GH6w05YNgv z1P1&)=_LR8vx;ueN#7Iw5l{GSQVB^MaLPxl7Z9*jrdhJt|s2Vy&j2%4= zZ*>z)z7wo3Q;bgLr$9c{V7O+-dZGP~SM~JW190QW*gP@JTX_viRSMq;qHG4(kSfK9 zT&M+ITtaK}XqeVF>6pEDkl{|OnTL0W@xjIh?Qyd5-KxnfX*M|-1?=fMXp{^IR62NA zTI`JHIVi#=N_aP+!3v5jqLr4rwLb@?iyys^^4eIP>S>h=9%{P4%|Xv;WsD*d7Sc^C zhaQMSIH+-0zzx7PpZ;sj_{;Dw0;`(QBf@!5c$!9$;j_X*M@%@0>E2*B9Nrc$b}Czj zKv%@fA!Fj8FdAQQZcE*g7}yVSndUB=>8>N{?8~n$w7q}kn%bOzS!l1b&X#1p#(swx zanohZoCn&40`1XKwiXnY{d5P}k{yWQj^({gNnv=rZW4!>|WJ-I`A=&r~B`W^o6FUPV9=; zF?o0wvTEBW=%b*tkHdkwB@9Cib1euTE~k*-!v zW`@w`pj!%qgXCLXEDhzFCYx z@W{WWNCpY?g4lQ|-gKX@FH8sPDo~GovR{idSEyvE}5NkL~InVJ`n zKXU04CUBZ&DLZ5Cz#Eg>nGeZ z&*4u4G64mi;tZ6LAb7NF@XUkHUj$?KBtCP|Z_oFI2{oaAR_fY$LEqwVK1+W+R$%KYNItv7;|>+AK53P^(s`dr|Ir6|MMsWe;$A?SFJHb zj6d1s;y0LUu9C+#)w1e~k^|K9v0zym zh9M#isgj>QvUUlAE6WEZ)n{e{*U)Nd&q~Z?BT=(6zc2J)mY-hvcwkm?<@)8$TDiF} za#F~*3>WD(p>c|@xl5-~IA1&2qwNyeuWf(N_8$7UGiS^9@UJ9)+|61Q;9l-GbW=y@KeBX-gdoN z#l|nih#yFdHd%cE_kTk-Ra&lF2*#J}JZwaL@;RQpg%eE=+q=u23qV z>XD_|Qs~YRb^oaJYr};(OaZ*5p21LABKe3`&T&KRges-@jyW6-< z@OexCCJ}i-XqtH`U*CxpX9XKUXx?e{(jzC}n3$>*herY&6%kXVRN9x%XrB(oNhXJ- zM>LZW$^q3icM*?kRchV#klB+_^UW0Fd&78duOxWXLl(YhusHF`i^`4&-oCBbONXVD zqZ|xoIsy}d7qN@^!a>kPctl>%$0+q~C(?~frv~Z?uj8jOF)qY*iBkeJ?psn)cf%)EO5lK1_i zf&A@vNU7#7y83%$i(CUjz8u6^nJ{XsD(3B5+n;MGc=u*0_nVY=l7znJDCkcXncomC z-X8BrIBI*&I)Wi}?NRG6<`k7ug0xMj^AG(}A$RsFXqo0uvNUl7NETi*tYx0D{s;OsYZGg@iG~7`9aX8=khav_c^3PZ`vnKXnpngq{EyP^IzzNAW$j9l zOIkWXyX8E3FiW=YASFD>#-27osKq&By{ZXa@Q<-xv|%xMikU>Ym>BPr(|<&|2n+WRZ~{X6mBZLfAWamPp0nZA&=qlt`XduE@` zj3=KRv3t6Yf1u;>#V*QqvdYPRhoaZ%y`<;6Bwvu;?DBd+vNEZ^N+nmGqWvPgIZoT> zf+dP%%0OXf9y=V`m@o4~aJ65tUr>gUh zkF*K+97HF$q9mgqzn%iy;D>GSmpfN}JPn2-)gqn{#>3`0^F57Mpt<8-UihHfx42&h zNxtSu{=Mn-{Gvi4)@iq20Z^C+l9QoLN1z08YI<4O9>kYB*`99g#M_JTnyz`8_P3_}k9$OQuuHHSS_rEsWR`-fs=}0XHi%DDnch 
zK6|UmLc_idjtt*_GjQ^sFL~9*o7alJowi%L{C+I=)r-r!R#J}*4LlvFe7f1|Z2t-` zuF`g}5^qYDXEn&1 zD?T~fIekK1=SPRw`|WHV}RpjH@CL#L&VumLeM2+6HdmdOjeTI7awj;u>ST zJ=GR_=!KRf#SmFKOjP5R?5bgF=d5#iQTnr)l<_krz+9tU)P!I^7P<`NE)U8@ND+Ii zmOuOP_KJTzVErutA*-M(w)LXrH@rF!VrhMq>zegjbIIiD{$FCdWTs8Ar;u*%U55lH zeF3iC6~|0JyzLzb^IEQAH<2!_72=&nLRW%<=+Q+83vS5)yGT%y7~(YJ^0w!=${8N(-?9EJw6D9cdRPJ&LseUQcGyPOm zsQC}j6x^)#&2(9v1?uHL;FkXIeJGP1zH>@wSpMs;YmfiTXoXw7+!^D4qCItqWc4!D z#G&@l-P8S3VurWRS+%FyMf9)cQ%+rPTYk-R!j%&GsWOhou5e*^qw?J_jo-?May|Jn zT=_-S?j)Wm{;BQ&qV!qZ+v11E@v5vZLt>ZqhMDI_cv;<2nBnd#Eb3TC?BW7y4bLc= z)HXeGcXB9AdjfdKlrQ!%8=tt9hD*01B#7L@7f3N-|&hOvX1?wLkQar@36{ ze3Y_hsw3Es!(L!r0Z^~0%O-Ymxv6WJUT@xALK!e&7a;I|lX@2bCXitJcPF*@2}cTr z0$)hpa~=N!b=7pXSLw449_BcoE&Zvtxv0mz-t{(|!-=am7AXbFo0jQL(GDmU2`%SK z5R?cgioW%tVCk1FQE?~#>4F=RO#{T;X(AUJGU)l~X-!Lws49uJPO*?$sxc69hgaote0rPkv&18-)8)xOrR z4J|Egv{V>W*FXhd+4OZbsA+1>Oh6D{yjvSf!J=uw-xt|aB5fTdH$hsVi|Qs-0hkjo zeg8p;`3;9lC(!t;58|;~DE#IR*5)3RDCL9#`mL(wSJ7Ytls8JV)e`*8XifwNZZ8UH zHI`&#CrNN5?SiPQ!p$;lhWJXs(A20SA>LH4Sl-#fTcfl!)&hr%mzFA+y;r^aYh7*z z$!fUy$41-!d)Hj<2#P75_e3ISMfp#9fG3jIZcDo@wu?`)igeHTzFglWMK!*B+6v** z0HhWcYj?YMG2D--CLmE_UFe4h>rVUX>#E*#4Q0c?k?cH5}zO>nub5_ zj_yV)MoGag4Z?cip}dMrV!r@lxPg)sz` z{`FeWuak22lw*c`@{*f0$7*qGUT&qe#7!HcSmr3EMnY_SNZzN%YoWIiux|M&Oa$I_ zMJU>MMA0prg;292!1jxFtl|vqFk4)fct1{*E5=T&U&erjDrYS2*ZgujrqAh$(Df7` zYQ^hE;RB%~$<=pu%c

9STcu7oA0dDt=4@?%LfX>=IqAyE|;jY7NdpAJ7-`*lIUn z;_Y%8D$oDFPlenJnbzkGNA2@#!&Q8 zhQ)^N?_d~ydeLPQGSPD(kb*5Lo(A8z^g_CH!jE7+1{_{9w1lAR(IQnc3cJ)j(DGQ+ z)wh@^=KHvEEEL}O z4Gc7zxW3$C2xkld%@oL$|GvDGx7w^XSU_zh678^Y=}WkdYqv?_s4i zZ@q($4$T7Mkw?%wEdAHi(=s-G4{zRu%ujstIJj-0Ez844hn`H7aVJB42x;v!RNRYE z-YnGh13tn;SuYgl1p2Etx~(&*VD#dJ{w~FBoYgUhfTMqZBj0ijFr2)Ajoj%s!RL{! zxPNsb-Gm01#E1Vpae2G&ze~Ski`o>r(7Gs?c?mKc8jxFShYkXiVEgZnB3WSwFQBM> z7hNp^AwL%q6d74)cHqE%25C?W6d6E?dmUvcvl(4a;QRsE5weWqTl-0|>Oqss>!(7d zx4I9S=Fh;YfmqH#5TtYaKdZL&Q3RuIpl4^G>|XfN z`9`LDKwGW(KM(u&{E1G%CAU0V{P>s=@jX25YltfCeBehbJ=_WalM(T;AEHuOZ6IVnM1^$m^l0Njyd{;Vp2uD?!k7%U)~q!Io5VZ*56)2gnOv?FJTZcAX;_!VKp(9j_K?oyh(10y!Q9t!6Wkh{SK5QA@^c^ zE@D%=xU7kF8bXpH^=b72M2V|q2Nihx?pcB-jJB_$$7JDzB((-0Glx2H`kj*fDF7E? zm7kys4lmbv)D%%p8R1@#mAh{yze4B`A=IJNo~SQ0&amL+RU7ooSAo@>`uZMp&t9XX zq+c?`43vAhAA)oAQ!Qnw{%h~J{;mV1amxyZbb+h6CHHhBx)KUA?nWwBlHxi3*6^{Q z$3)QBf?1#vcMmNe9@6}rQFTcBd*z8Vyytcg1>1=AWxojvS{gdAls_*HdDOynfCgw( z`O6j8k^co_xm$K6Yz$bQ-G&W79K2Bic6JHqfc>{r9G?MwRtDg*a4(QpBLRme;I*9; zdiDoVu_u&B3sB9IrP7FXX@P37u{fShOR-1v24momN&??pa(oC7GGMEol2nw`>rgi! 
zge%wtIQO9Gn*)Pp#8Kfoz~rU@vNeNv(8JCpS@lN**ss-)BmW)e`HGDlE`Wtx;-~wZ&wtO==W#p&?io=FR@D!?*eI2~KD@c1jvAh- zg*mS7%t6U)`Cc(*VT74kOogCqKE8=k&`VQ$&XmWt>7t5Pg275asD@>gccWiIN%Xzt z2SNsSufv#*a)|iUiK0JOV8PXNZ)G4&su0R3R!OX*xK>Y;-?+~74Y%tj?iNWLZCcKO z>sqDu5~iuzMd;kVa31yRx*mhDvO0Tu4arQg8YSuDXh3kN6=}p+g=lq0gJF;|^%KQg zD7NB~Jz~mxA_6~n|Fb6j@4ZxFM@>*AAFX^4nt=LbDmt%PXpF|5%PzoZ5{S&0%4*K% zX#A4=lz720Em;k)^%aTFN4+CwQ%7(4p?2S|x>zZTmLk%aCU{tE0^pQv&yxVe6<^N{ z{qcMWVn6rZSc#jnvD_eEw)A-TGAov`mzS9jHNxW+pz*TaypEi0Xs|d_0gP!0w(TFq z{~uj%9uIZfzm1o5Y?+A|TNovZ8j_IRU{qwZA=!yi*0Lo#gRzw&S!%3pDpV@j4VOZe zA{9lERISAIMutQUbf7uJg|ZU=58m zfa?DoBg!gIpT3Z#H%io;7^^(l99h@ z9bQv2;*pS1JHa8^cK!PmSgx11y%>*o0WVBZ#&th#3;o&;@7{Q-V|>+}-Ue{2_YKzz#erO&%oVSWb3K_j4k44p98}}&hFd7Buu#I3y zmBF6@Qpivt#}~+c8pb9nyLF9k`3miU(aI^+!6xnAIq*sC0twQk8azXt93Hb{!S zGVRLRBhMW_!0E5Lee1=(g+9rbzvlX(+PbT}OTf%je8Lj>nMVLZ^*^91mqB4#C%KY&J|?vEOUUeOIP+89}gM!`v0*Wq-X zFnUeoz}Y$S5)`DYQ(4X;$re+&;a}Em(=G5Go}yTU@S^k}yENOEDDPM5(nr!rx#>;` z&5~{W(y_~?d)^*7$+&1@*Ce7ZAU`xuyQBeHXVJN%D5$GM_)=b9y&kI6bpG5Ha2iA! zJn{{_wDqIcAk!`sBELv!x(NP(@JYW8k7gznA7%z0h^6W72_QB|h~s|f6gE7LlMdge z^+7;MKS+_jnf`?RgZ;y5(2`908z_A7#3-)k2qKseZOabPMyM(wx3^lv^L$f!p7&t6 zZ#Cs^_YiZ$Tb zu7($i8)H~WlTltq!NAb&lD*+^xskP!=YYQMmlv?Eo@jLPf_e-wXg43ommccBDitL4 zH9BnL+`gm(l81Bit%B}Zlj5e&KF%O#?p&xS)ET1r4nOhvl1&+bD^7+`t)qB~}g9Mu&yZ2{C+l z|A5mGfPgFWJ5!|!)@Kgc*T`}cek77?9V^F_HkR>qtp6!S@X53Cs|{V5%2jxx06CD) zd!2#m2mkbIw+qdD_sD}f`{n3dzrIeV|D$n)(~EUQR$eDTb+&3D01l>mr_g>X^AQ)#C8*cDXdMXFCWzqW01w# z^vyDV_u+%gW5_X<2$HZTO2V#YYVyXi!`IY?vLLf*C0VAg=>34vD5^iA1)=*`JD@K2 zd3d|<$4-b;9gtC0q&@GT7qStWA;tB4d!e8K4anOiErNORdJC~k>4uF@oOCF>+AD49 z8w1Vj)eqlgslBNpG-q^h)QnV1TtUJXC5^cvouIDqm+TYkD6(w{Au@ez)UDELh6a96 z>lr`)StJ5XMCbks(8XmG+iNc4cr1S0?e5XUV)y7TC?)B=++^|a{I-QOQsyIUz>kTh zd52V?kWvVZ1+dLuOsEg?$xC8)$Za0z8q%jJF2p)D^pXxr7qE3$k{&h8Ctwu5x2y4B zMFTl{IdHU)KU$5c~=XCi%Fi+EV849%uqr_by zj7(02JT22mY-}U7=lwX1g5teO?cy_ICtet|C*0flSzB|=J3e=IvVDeeP2BUwg zfX%iktO7BU)3*HvHjxJ0l8_4#p(K(J%kO~^PfF*U2kZ-JnF7_O6OCYbWI@kahP!^h8zC? 
z$1nHh6Rkp<)``ZB>Fr>D`|>LEE+wyKLq_?(yI0S#^G?I%N5c89rw(e!3&~K|V-)g< z)bDI;3Z%=&7=9%Q%a1}nHi0Zxc9-nv099GExHvP@!UGhV@;>Y5O%mDd6=;x~lrFUt zi)o7i90WI$=BCQ*V;r<9)*=LaFDkfZy**e43g`!o6zju9=0lZnCELGbVU4jysDF>E z`q1Njtp0*W^7?QUJ8x5}e7^jcNhkv9)+K3xIMyUbV0O!$XA(EtBz=ELD8bBc4z zU}OR%xXcR!34MgNyE@D3wSLW2>Hc_xpf$-zLpyx$sPS*srbH_D!cQ2ywvE&d;0)$z z!DTfpJO|TCKZE+Y<@~-uZJ+V^4J>SoRU~$OHOtCJ5F(JWc2JBIqHs}`AFm~{jDyO> z^BhbXFIKDBv2i^)^-+p;RsIn+TJCq-ipUL8aSb#NS$u3u+1{knyGZ;NK701<42fTN zxKJuj`YWX?Ur5W^@b9#LR_X>1=v~kAXi7S* zba5>~N^yrVlgH~;GUvh5Z-3iwNbKVKP_{GU2;Zhr9n|A-A15gHtsTaeBL**imL6F4 zj)bab@2{%G-+?CxB7gc$yS{Nrod5*Qb!Y}k+>d|0Hd!80(Z$GnAGBW5Q514`oT}b- z#qS`J1px)f+n%kd*!0V9*PLmpJR0z*3OXrx3dKmem#9g```ZBr<$ai02tREmze)pU z^`awh2UeI{wIqEAF4>&KUdela zb=T9%wpjOh9_hH#(_C;S9rq{+u}XT?S4!{n=Y%%>?Xvp`h~2m+^&Aq|YO98Bi=Bnl zfA_6|mbj3i!Y0Q!Bw`4MAG-;B40A2yezvP6(5JIxC`Lbvl9UIZ6WOwm#1;=9_qggt z*skPLkxo1vpVb5=1|TB)`CqQ62_g*pzW(KSsh@lJom%ZfE%^w}ptcm;NK z%#jM2xDXT_4X7LIj3KcZbxXS&o~>l`o(LM0xrl^iiOm~%8Tne<6P%syg#u78=?$!N zy@$}=4pXT_GlBKtO3!5DGd-bY%=HT@{0u74^C+AWIsFG1o6=P2JQ@i#s!1slJm1Yvj|WPEh`C+*!=7 zu;K4&aeD2Zj&zb;cM3=Nkaw#`{rQ-69lX3tg66vtSC{ydXj9c-jaE95dugcOYbF0v ze3gNe47VKHx}?Y^smGoOCuID+mUzbah|IEZ55r$=GtZJmbDrmqOoEMMhu+WhkhG>{ zz~4$gH~dZ0weQDqP=*DpLIK>^E=Z-)y5#@f|(L|+fkrmk~Z{WcYTlQGvD zbB55G4o|>O_4T9q>{C_$*{rX1x+xF+_eS9i&o9$;fn`GSBMtW}02!H_RkK9=(U~UY z8muBPy0wdTeuEmp60j|0N7Nj<8~phZ2p~}x^tr14Me}o&1_Zj&M)!>_-Rh34+F}Ry7(v&!6k7E%}k9Pc& zjGc=ynYVHf11B{ zuf5J6?`boDreot0baGiHVM%e863Wnie<-AKvf`r7%?H*qfn2%L)8 zW!d9+UIjHZiA(ceB=!O*!odOnOR6e+p4V;zpWVHMvy7&7W}-LwRC}e6PWSy3-%y}C z`FE0Xt6)NADK^No%=fD&Ngeww!cv!eTN_WkmhE4BuF02Zf1Q#3JAc$!Sn2Gk()g7) z-eSIqJ+NLTGr`?ma*a{0eD&%^lcl)(Se_B6Gan^*HB4Ab2+Y%b6EI1~4xZN0HBEwc z+?DRTQ;ZSZaCbpe|IMJUh4*=X$shxpf&8`*sZkRYt3$SEjKldFDfc;Sjh0S@aYa-Xy_-LNFS(PR6L*r8-kmIObEE4FDO8=%kn9pIW{U?l`G-!ut|wUplVlJ_g6msd!p(R&dw~!$Q(O(C)qs7adH_p9M-x zCNh-ugK>Q!%x3l)Ur(U&av&tKlj#)@eA~gTNIQN>6943YlRrs*n)I0~@2{6=lX^Ss zX~sfMR;V4jA}Xe9*=oN4h}n<5-dHjY=;N!$ynq+YEDV7nbx7R1m)? 
ziXk!MLx>Q@@u2^* zgjY588TLK)gWAry>@=@w2YEN%d}`+hZ;t5>=~(g0nf)Slvq)dv)WJ`(v9B}^`0Sam^SPVq{wa|? zt?LYagYD*(#6Ki2ELZKv06?i<&5RR&BFZ4(ME2iZR%}FXs&kCm##!61VuZDDY`wyR ztT7?O!-7^#H_b?Dd)UUI6Dq`QK|`9AeWMS_Pxm*l(K(D<2v$Ltg?4;D5IS8jz!`5% z(mN=1Rx#$nZ_b`Dhiq%wv|@*nH2sfOrx5T$Hj$RFwROw(p%_6{zL6yu`xbq=@Ild7 zY>xm(@ynj=jf0+MzZyu3Qb_5BcBX zsY$&}FnLRtsaIO?Prtx(2@aHaoNoTQm~*^uT6sTx>gZYdezuw)Yy~0$A{}y$*gLox z&r2;tRc+PS+BpFTRU%*HGEg4-l|uwa4zY3fw9ZIr@TyF=A_AddIQAm&rYkgEEn}sX z%i}W~crW)WlCZPkZjV8X_ElT&wIfYcK!|Fu??JGpjt%X@oGi-n*`IW&_Ii@H7dUUI zR@E=(<&Hfv98fM4*)4vBj4QeI-nXmAdlnJV#=Sg14dQ_zLGiX6mb?R(>WVsfnqw6V zE`DA$d>)3i$S7C7=?pA&`^mB&Fqhbj#S z3y3M!4cb9Ju=C|~=gt8`UA>>5t#4ng*OJN>rf*7BBo(F9(Y%ispO*?sQB5LCzq|C& zQ<>j9FLp+HgemeiP2Lo;8KC4S^G5ZUjAlzI(U`Mm7Bt$(*Nw|J%tbPmOQJ!*QcFSy zJgwd_EKc>W+x3S=PChLx4{TNT57gAhf8VTGWhn(|4am_HQG54*K>_bKum2?CMCx}j zpmNRE2UCdKH;I#uS&ey;tTT}E-frEP53sK|Z?Cv;q}m7E+c0TSCskkfCsuHKlNez? z>0EaQd)78aNT)we0{)#|gogCe{TJm2_ngnXYtoUs^4+wWU&TASgFugH4(imw&Aj2I zQ>ewCt#5Fd=J(QVIVnaME{tGnL&l1}WIKeGzm-Wa*?z=SK~R#yycg+@ho!7jUcn`!0e(OO+$9jJ3*vWVkSb6zjD)wJPx* z2k9C~kf(#-qGAG`79$OrR~K|X3MB0X11wh@?YU08JVCh^Z9j67%3m0^bxkx$4p8?y z=+!#5%cZT<4~BPhD9HXvtM9sdv~L|c;iFoBIMJHG9&o0M)=3q2WM_m{t^l9?uYwoR zv%WdO`zf2jP7dd)18gmNoD$vLVPcM4A5yQ$(^V+F@nYSZcAj*`o z;COIOH}$0SRH%JvnDtloe-JWfjZFiSLxhgdCypESy`j#_jf>H_=J1aq+=_C#e{wl` z;7l5OjdkOmE0XFSUfL2LeRlr@c42qhx`{D3BL7iTe~L-R{4!}R%pedVnT#K2?14YF z1ce*LT$&M<%_ghTAGLkvJ26d2$iVclAmu09+ZQzN^HsFqfq}Toj4U7dMKm>BI#UDN zeYFPr${(jl9EKy1^B!+!ygOT`6jJRhrx%TxK^)Eu4EsbZ4J8&ABND?)RLz7{ojXo;7{D!s^%HRMmOBAcKmdG3S-B+&7>Dt ztxXT#8B|JV#*50mlx7GGX)v}%2%Kr6HpFLYo-aQd%CM?g5KP^0D`D&-jC@L|dH=gO ze(RN@|GSNoW97WEWa+WSqu|}}DUNgNBZO^8U_KlTGahke6-GoGR9|c%Q zO@xpxgEqJ<0jr;gp*834seAJh(b&@r^{%gQCkGG5-PgT&(_VArJj{z#;#S$lCy3w2Qi?_PORP^_6vx8?rts7GN_n?V_{vKc2!@@FGGwF&<9 z7=hGg$T>|Y1HaowW>2ubN&;qc6es--;^S{N{;x;AyV+KkZ9kA&i-Xzt+QP}{3ok+> zPVGTi-E5!Hf*QeRFjGxc-{sK4VN!k(I%HH{<*$1|cn1VpIz^YsbmWL3b~*}f)1Yrc zm0rw+2Fz)V>FzLIm)Z$pjVCnV?=eZYGRZjlI%rSi4+pL{^5SlrLL-Xw#Q)Y)uwdLH 
zS3rym&unIA6aO*hpuo_5*AvRn4l3odiRU(^-%f4yeyqD=z%;w8&8UJ<6X{ zi?>(k-UAAng?J|ENXo#>E`AFmA`%Q{;paoQ({ibQZe(kU0#Di zIgWAE3dYyrb>~=Xnd?Lop_4)2RwoWWF^GFX|J$lur5mJv)8gqCpG6` zKUG{xGG*h;72s<0e$PJ?=V#rT^h(VzO#X0+w>aCzhJ;MzmRRy8g6T~dGtj4hUU8fx ziQv?a7>hi5fbDd&A*vd#wcEHvdJM6B^NTOv79RCD$=U#+?n78|X5dRM2dgTjdT0siHcS>r>C z64;&k7rBTjhYv>@C5jM)sP}Hp9Fqkxj4gQiANT#Lpj@j)Wf ztSrLOZ_a$@EV(-7dIR*vyLfo-pZ57P2ON(l2@|>1xU&s^K^r#pe*RE3j=kZp??y6s-nIoPf{81-WIarNE>%s>&(SWw+`!UZR^=*1+a><#3T=0iX(*#&8+Xj2 z*~zPuf8j=P-gP8LN|Y_hS!#`e=4{#gkW6LgGTgLjEfNU*NHphU%~MLP`4m2pSx!zh zyFJ}tyi_A4k-C-2ik~!z-DEGv7|Ic}$v3W(GL2d%f0ORo2(;5o8UE91sxd(gX`Pmb z{McihxnrEbC zR_{j$JHgo9M;TP0H+te45sDmLh&(e0$%R#3b*l9B%>3kwORYk8dk~bH zKgU5egsvxg)rfrTsMCDB_eos#e!u>+;lZ(;O79ettXFs}ON*HnS`d?J;~xsVLrUAuy(8Z1B7cZ_l$BPIe-fr1;37ezVbS#8*TC#&*`n$>*{CHg0=gLdhXv9K<0C+lihl*Oo6d27n8#k$!lwaMpaJU zhTntBma0*kt6bjQKOZn9pcT=h&0$0P^is)8#=<;cv(w-8oZ@fpj&tC?fn!=sa9aYI zh0a1vgT866=!}&$#i&K+&Em7rIx5_O*>+0N!AXrt) z6iQuAQN4bO)d3v47xOoZl_-JgKp1ch6sy=bB{W(JTJ)A(-Ij$Or{7LNe$>PGF5^zy zHLtQ#Vy90?b4X)DV1TW@^~mc4#78hnu}MkZ`FpHH3Ufo6M^xq5``{{W)7Dx+@(n3hd)9$R}ryyKPl zA3(MIJ$c=af3&?2hFY3-g&2*9NJL5exBIYx;P*!=OyN9%2wxh>5FVAzf}G>oIn==R z7?PhMvYIIkj-|Hnz_pw*#LGY&`1bVC<8;I)=tDPIqHi8O3zsil6OIBxT!lzuC91H!rWIi$?6gJ_PMmB}y9rw#EN}`n!za(O)(DRoB zlJ{qVEu3%ePMF1v0{+FM#)=57F>lv#n^|D5tXha4W`0hA&Qe0Y4M;QHqcA7ZU|gUGZUTyak!jyZRycYeTi_XmfJ5cfG)u32sch1D1zA( zxe$A>@KleFy@ZIIozPa;A(E-G0<+da+k-kC4qdj^OJHs@`T0n%2Q}3VMAx?Sl-2qS zLpL)tgb|`J32A`!bt5h^FVFvZ@8!<`Pnp_XoG>)|v^^wJGy;iHg(z~wRd6q0F4q73 z+sgNB0Ly2JUkNQ>k8usCosZUNo#-Z!e?Ku-QHhTnU#&>D#~_ew;Y~VI=Ju z-RlrWjl@2hp4#v)y8x&{Hn{}lum8R(2rJI6S^fq=m%{W5CXtM^{^z_* zRR)3|<9?`5E+9YY=9D#de6u*T7C_PlpWTC=l@n`T(;&4t1(>fXI`=tyA5>1Z75=i-#`GtYF05t*zE1iJ`R__O{0?>oV5UWw&w2-XY& zKKB29!q+$I|GGi!JhbcOw8j7P7s!s+{higR)`Z%l$4~A-N9un#**PF^DN*v2jadBO zUFx&8;3AseRo^>bDF;*NA~f?x_mcnbUwIEP3V>p+l^_Qwzdu5Wz|KzY;i~vi zh^lv(2Wai1F}Iy_?=xsRx{#lTVVZnVma^7Fg8=jM86=_Iw)Vrj59}AuX9B)kM%9PY zjJ&7+=VPwzFOdsK8-v_L_K?y*tp6q`0ye`>=4O}<2RMEACurD4AwWs~iT(t;C55RC 
zLIV!4=$02Q6h^NjGWroz4(=U>AYqmRv4H0S5L1~LLyRDENo*$pp()z)&QhioG&=4O z15I-;PMW+lsm=hJx3$Nk#!gT{r$2%55xAjB{!>-Z)X?@W2fAiK=<=AId&#rm{Y<}_ z|FiQt44hy-ONsu08RltBcuH9xyqSguKh$cvkGh2Gs;Y$T*!-ku$Lek%bXcd4JZ2-=` zN;(?$}E(&S&n$ z1twHQmXwa}Vc}7DLlp&5_hFxI6H|HGJqQ?zJdLX(I;$pU?maTf+uWl2&YHREBB#Mc z`~slsg^)c9`|lkh9&$Y=v4~R1iMBFG01J`m5RfJRLTKzlfO{9`L$4Enart)q6{R2g zQ|WXATH=yX)f$3ts-cj(0vn8jdB<*{V?%Dzf4MO$%JC9{r3;Z&7`y=@J-CCe>h)qH z7qT{fFK+^|_U786aiXc5OmvOl;e)B?phlX8-S7Y26)f63sdD$Dgw%kv=wnTMa^v)! z+nvas1X*maZK2epFUaBnaX1ZrRv>ym2vR7X?{;K(9_HCfsQ5bv>Gmj7jUlymC|Nr( zK!aB18`q$l$$44%)U4-J=?r{y7On^%oB$r`C7C-e;AZiJSctxipC+J+mg#9Aka&C? z5#xWf1~tOC=<1mb3~huNOvBM$6S1uAxaBZ1PAGz6pCc*{YL=k%v&de+Bwh}p*hO?s zMS1il4MRaWgd9iUM}mFo8#qnVzbKHt?!on}g1TVLH25{(d^t!@P+5)AYI^}Z6Am6` zD7fVF&$52kuE7=f?IIA3srDS;0ux=tuFO1b4`<|LXM`4>j&C-#8bNSi($Q`kYbFX~3K6vA6 z68P{#l$O5;Cl}e#pCx$;+)5mxaLHF9=}gQUWw3jg1^|7pwy~cTTwpJnI}Wh#@=gl~ zCCc(^8F8J3V9E`3p=jtM4$Bnu+=|`YhouNXnnTJ;)v7t9cR~E_)V{ECTcp%`bPBy| zYHBScu)bne@(akNQx_|u%Izl9*}vWn--yg6P7*3W1Mp+#V+`6NtjsO4KCJsBiruhn zk#K0`-acFb&HfgwexQKj7Uq^PV!<+b^y8;GBhKE{)G>!Q%UsOP4^eF7&_S@BA>|c8^qD!>D<+&qW=5L?@Jvl z&P&>ljQVp}1+09)-ntOD%QB%zb6tdbv(P61R~137-?f@8H?GY08wC|acRbw=$A>kU zS@Nz!@;ucjoaXm)(7P1YVd`4o8k0=aaaR69Mk(yVor387s5Iwdi3-U6d)QJHpMETr zzUBd%r$qHhQ2ca|qk~~IS%@aPs}f4zP^C7z9rmT#TFEMficw@4BgRvQG#bXYx{?0h z9mRDIe<*s^k_5BkbS9NTI4D5!q4sE;DVlAFTa`lCEkrfcszi~@kDSkG-pmNYmiEatc}ZpY;hhWge#v7I%*WwNaw-)UD_wW?K>;+U?iB4iSd*ol)-SJ6EkJx5S9*}k^XGEkd(AKd8Op}lG6(W09* zf8&KKaTN6~y6S$Wgkd&BGzZ|g#5Uiica#KDnodwg+QK)T9k$;SV8~nEr zALpmACgy8k2IjLNQr3No-W;ujZo5*rX=!eY3gEA+YH&n5L5$RqA(;z-VKhp>jEsa( zLEo+(M~&r&t=4OL(v^iXFU3$GprXBNq#db1k`T#VWdGWi4o8RKEL6_V{%qyZX4^!kD9EiDAofsT z(!ub*;EK9^HQUb)RWAni?_sp{0;2#6Z4kcJ5PHM@Q?(Eye z@d!w>On`6g<&=ZM_Nn#XKnVG3J-%6qb|rbg`mbNeMjCtINq^87k}ZFGF2|IOh_#UsC4TuRS|4Ty;+%5x-G68Z zR|t$NeW0&gpgRJbg0O99H>st8UdVc>dIt3N3j8&$KeIqa#o6Xnq8`XTu8PRhE zuN0jHrjSsYaH{qMOe8POJ0S1O=9GUet@)xeG|h(&o9;Qf zQ)Lz42WxX%=XFwBH*;*_eTS|DS)QP>PRsymWd;>|V4r;?-;ZRM40~~85;uw>E>fUp 
z0u`+b=_+3qLS@ia4rN3UpcIVn(zw*kUa%KN1Z_dH3EgP{mw@d2=1m~H{ih)Z3#wh6 zNEE0IP0Bx1g)CX~VijJlnPUuIxM(Gy0Y~8GD1f5E+R5U^AJ94$0){nQzWd{i(AC8O z^#$}6?GfCJYS0~atEdnNT48Bd5A1yAEX=%<>a`ge_Fg?x`r0g|SI)J$1#-*r1yHw? z)=oh`)hcf%7azAiLv9nV7Z_e1U4X<^)~mk=iLMI{_%*|H*mGNg)w_xdB1V>>=}a-a zjuHtI>w+ZjUJin%eJI|H!ro%y2?&eug%Y-Bk|k{xWlz*np|RU2G&HY&K}sN_0`(l~ z4pry1;LAS)q1@q7h`1gqE4f0sWoO^#Kydxod#JqEUnQx@Tb;ixc4|;u{Zw1_na@EL z+9g-d4EK$40@LPJ^7mZ}zo88KZ`q{U(W7uP-)Qpp(BF|X&5Ai-KLx8kRDh~$rpbV1 zC_yQ>0cV^Kr}W`UfG)^-X_J~`2DL!G&ghE8x!0D-uX2uOz#r`kHWfHq0pL;*y1NBg zy^o^HAXa7B`BDukcNC1PZ7?+Ae4UDJOW1k4lAPRp(F&4I1u9`hqU}Gf!ZR&%kTmdh z?>-f-Pcf!-;wP1kIaM@;B`S%*C3rlvPxvaOIw#&Ha#A>kbrGIL4x09Yj*P#6b8~>y z2(_u%4>>^~S9a9WzpwE_@_Z;Qw2}QG!Wdgd1B!(sQn5~7p_xxd*V_6)`TMPM5)jNF zTT(U%CP(4!t=IAw|EwT0i7uc7U*78WABh z5moo!x)t+J*M=pWQp@p0VrV960Oo6*ZYWCTDFTRbvM+*XXK#Vx!x|Pvv7FY{KB#

AO6^Z6MV%C+y*hkG3nvjboEnPk;$LxZw z;X&HU-iUCv#5({)gkI8y%XKdVTni|Hdt#%pkwhWF^NFneXY}Eg%-j)gwa*eQ!L+4c zv~B%YBjH{!A_KQQmgN#_0O!V&-km1f-byB)p1=PQWKmOEUqa5q4)^<;HDHzFsd@K) zLZ^;+EFq6`*q$^qajxLu5*T5-ZnuFwa}4s7Gbt=xJ;s>#GXA^Lb0^EDnk0uQWLwJ- z0NP@S3zlZ9KF}i!{~}(1F#Jq?^*6d^jxQI~eH7Ir9mF5TFQA2=Jmv6tBp5Zg{lbf1 z@!|>eQiVSU40K|SRc=#y?)Xy&wpbVB$&bYOaX7Z`)e<^F;WNIDsYRSVv1O&t=YY!Z z4w`9q{324n#_=vAuK}cdE=>)E#Mj25#oO5ZrN*lg^xA$(zxVF`xp(*Q-reu^?ygIZ znT8VmZbviMp&JkaeV7GOV%CI;MGol(WIUgG^Noor@+zIPq#J-4ik?MQK4+oUe8m^( zS`K(nVMigfT}Jh1)cOFPeeb65uEl6M+)d#-9RQ`((<-zImmM!hJbrbx1PjjX{C(=< zP{lYc8ZSvp#~aXY<4@6^;Uj7P;frWi4C2uBiB`)~Kby*Gd~p$nF~-sxqVa+(*gAeX zzazgsfBQat$#Ir`o=O@0k6%LSUa(VA=QfN>HIUdO`wuLwLd%xkNN$iInUlKBwvq&Y zzlGYC5~{f+t&U?ZwQf9;y-t)xrOEN06oZeQR5>B0uQRnMv&z}VzEm=q^ZELgKJy?> z`JHi^&MPrfhC9}tyW;DjjG~Fksgh0b%+27L=jTc9XK`?Xqy3usDW!n za^a~D`IIR#cZN08{p)z#t!mZzxrXmL@=l@tMwb#7ubi7cjDIsaLcEqf6s=S<&n=@i z#Fys|VI|18oE0ZeDwMK^(%eVgk^N90?-!~KsoWlGJZ}wBHs3FSY)TB}!CqhhWr;C- z*mi49wlE@;MHx;s|E0a6s{gYxHmu!FL`hKn?oQ|CuUUIkLiMdp=qd}`=Q+V=N zA?`2E(T+6z=1{rCbVBjwg*^>b*m?ceJnNH#Hxf>2KKS@FV8g3ZiaGZu)wk#72Auoh ztt_s{mWvGESaF;2c1mfE=J)uLpM<*?^j!LNU((U+*KIpd zKLg@+_WPX&bTnrU>{`ak*PW=I?gb&xSeluOXp!>O!*^r6tE}!m-7#k+Iu$JX=&L@};kP*XtEr@96dhhWfvh?kC>eTk}DpUYXI+CuONB^ev$DN+|~MlXv;Xm)&Qo#*XEl`t)P$=Y2|b73Y&%?$aT(SJZ=dWZnlGy4lyqM%^LD9p-n9G1r~X0A^Lm}cv6lX_c}2F_^)%SuN-@HrUl6% z3w%6$w`}6NEs~eo0u8#|z3TomF(*i0_F}iipkd3anN+i+>1%m$ll@a!smG24@t^M{ z`?dtfR(<`nKeHR05m9Z;D-1Yor;@on^bMYE^5U5yD{fRVJh#b#Dd-Z1m!9Rv%|r9! 
zTkE=FIy4|?I#S)yD{JjBhp-k=cPBT&&_ar4#w=9JBhgxkgO5o6nV$;N9}~3NrnRAt zztI(X+u5x49703B!!rN_e18bjh}}-N+PD)|OQ{+RHC}e)?^!YxA)WMmXZPV-73Zxq zvqiTfU+KJ<{elOZ+~OxxXm{}DulnR&G`3dm8}r+byN>G+mfnuD>QJE5OU_srA4!wH zO4xXdrt#gZxVVw}{B0d+IS}sXz1?_4`KU!%5FQstG454KD3u>`NW-a~KVm9kVvu`< z5zh#z(QVTD(*K4|49e#k(pEjNiSbhQ{xSS_oRmp*bgEWogyF(2#(}fO*Xq3}F=B!j}UD`;wlYKX4(+hhA|zvt~gGg!XoT?^1HN`g}*j^icrl$&l_(J%L$z?{B5Jo zkt-wVSH~s#4Lbf*CK)~Y;f6}tb9m7ck9%W;1`-i3FQ2`kV{7F$WJL5J_uU;cV|hAWq{BaBU_yKFW!pGc^`6Qu z<0&=q{lz9zF!la6pxnO;i40d1zs@BOXbhK6Je?1r4H#US8{AvCA|dws*hY`7} zC4`u3ztDW6xWmpxhR^jAtW50AUr1kB3?=g+fj#{@I_B@+qMW&XuJSIOvMXU0A=&QG za2n@%=ep-h(->vKi6e}X^=3N={u10_Rhy$;SJ5J>3 z?{L!hH;VdAOErjpIUawRHlODr7>{2DN)}D{ypP z_}ER|<0C~`7S(EfdD&ip28w0m=)-1eM~FqkR=rSh`W|s!I*W#NsPDN@{CRhlJU+Q{ zgeqGXPCqG{B;J8fP!ZqPqI%(H???R84t$wP&M)ePJ15klN&CP9L@d#pJQS!P)+PhC zjQj3)c*({w_N6k)^khVLMA_ub0jyGVU?{|}emxJTpy&pxO1nI8mN0x4?Zln9Xj&h~ zV|>-?SnZ>Pm@e+b^TO={iRIBM;#Mt;-6R9QLg}b$zo6dmP!YFts~n+E1PdoV!YBG0 zsEEIzPgEM!d&`Qp`>JHQx5P}b@HNGjPVC$+HvY-2(jf0kn((Tk*kI86fN_;G$(obo z^cYIj3k3Vl+*aCk>uqT6k-N!uPf&x9aOS~c;ZGgWko}VB%aU`^!3`k^!T-I4zF=I0 zhn9%nMa#vXP#VUEDs6Vk>a4R$eS|8+DTSymLOZrysC{$0V!QU#rmVUE;mMWyINsPj z^r}~I9^wX0H@I9nPIC4`$brOq0d?*p=^Y%8sEn~36QKl0S`pw4`-rxV)2_Fyj0><7 zJvIT3p$id)J1B;5SDMYz3!OKd)XUP&4<9y-Jydmd`s129aPZAG3qB_qo)DoPl7_Sg zp|U)p9XT0Rx^2s(rCr8<#XTUlT{UvWbK}?zR67!8W;(EaL?e4PhlBRI#funh;@=Qi z?qu)iH}T;q13q-0`F*JuNJrki*@o{_dT3Ank|X6)zL5jZlV$LdkFFRaNEptX*=pZA zg3qrX>N{>5!rdd8JH9#Hs7tHg_&tn`# z_}YRAE3X!{MUz500{pw?mrG8Nl8t44RC`n=w8TUj*@=!HvtFM2Z5{HHd{*MVre{fB z<<645CDlLEzdvwGZ4J}jdB((Ox!LtPU&V-vYGku>joS7yVbYU`hkU#)+u^p~zgl|c zfj~$TJWMiLkIIKx`>?tXiWYfkS7a)O{?JAEcYX}gT+g2ATw=RmEK%8H0+_BH%nge;%OpqI^LM-*l z2?%t|;7?_##~2CnD&%wU>L00*NvmpVuAivbmM_s@KEV5%pAo>Opji?g94{4?$|}P% zQu}iI;zkOK9CXfPc2Qq{D4TGcqt zJL>ojBSKrMQ`aec3{fc6?2&k|%L0)hSGS?VENd@^#6p_UbF*Qim+OZ#Y@gsui5b#n z(`*c)qcDe3TcozVo%HO4M<+k>$=dwXW?R!|%|HfrmvvZxwZ*!1l!qoHy(3E4A>!vx zjKzrHR(1QzZyJHF>YfaOZL5q3>16+jX}wWe_Z_7H#=5o_T&%8#EXpsO`h0KBQ>^b; 
z&&Ru60~gX+{N=1ecHRd=UiiIqsfsRFho5(BZGCLb9PCAD!#=c_Q!7sL79P zjmx+8N)b~Wxeau18W`#6^M|^QMN=b`SYkP=VpBJHvl$q4@2czFfIF&ZD!Y)S&Gm>~ zIx6JLx^)OlWP8iBhQMIE-3}QNe9x!!J%NP8Ob=!NgNKRO}y+LQU!7L zNAZ2N4KesA7TQ~qI&s6ZVT*%bvP^{Q0_vq&16;B?w^7M>f5P*v#Z8(cNfR+svU|+) zALwPAQSE;r^ILfFcdOd4y1h1I?vG&d;$fqM-q}m(i!Eg_#Ja%Cv(ahSYP5z{<5J`r z`&N<+Fpp^CL?f&{d8}?Kwj!1od5af!K3_juGK!pYH|Vv|kVMo|wec-a46&r8!JLT= zTQU=l9HUxcF6Y6~R267s5-0F@k*G0Q*)=ZC|A;on zN>@$|==RMk5gVOR+t$!s8Y4M}+cjVONjT?sg@F6kisjw(1Bx}>`tcDhfA%iuFz`-!f;$Ks z<3=uzTp39lFuT+hkd$Y%rTVejhCd$#&PH0c2jB2#9mmjr%->H8N`75XJb|B)FnQIu zSY&n9xc_MC<&-(gZr`t3O`b}*hIgu9H}WSn{=GVWWLN?77T8nq6nTOSV&S--Db}j} zT9hTV+eA8=BjGHsF^qg3+Uqhh;wUOYv6Zn_X{Bl&oMiq@GjhZ*2e&56WAEg?WKC>t z7?LosyWS>GSij#umB*v9Lnc{+;|%XQe3??jQ@S88af=yuQsSSw!Q80^YRzDFRbxWx zfHq+VQLX!SehY@5_Y;+$^u<0(;X!t2*&9zG_Kp+#hqN{?YQ>QWn*G01pb>XNj?QUA{_`}27sI62bsXYirKbuwS=YrbV%WP~(e#O0r09h#fkvXOU=XdPCAltJ~_kd06}Bl}p@t0uj2NhF3%zu)KXgUkFkv_EPcq*1Qb|bp1FZ zcJ$ONe>(g0Q)w|qT7eNbdC}n>cSk;lUcxEP@P|i9WIKj*yXa8#M9nkerIuzoe&0u( zoTgOF=%0+edj&Z0I;5i$=T0dc)(IPlogm`-yFcTHV?@gGaRBU@8)F7PZDhf=+hPpy zN0jbei{NJ*e<$5d!gRz|aes*=6G@Krje)Z!1;=@GNipF6A5L5`YAGORA1T})FKFUo zosXruX-c+QNKf3mItn+sPkfv>cUOi=w2cir5>IZW$F>yWJ62V1lc~xx07XZ=F)vgf z>Ng51BcBnHZ*5U=O|83k&`Rib3t_8OKGsE3SrxZk%J+}|k#Wd4C+pZ8dmnop`xup-WF<3{ zLQ*!F2gfEOdxSz(MPxgK>>|l5vy5y}zw7Sv{r!Ibd_IrI=h5eM-`9QJ*L_{@_v`h1 z-DKm`(zB7)NuB*faMf{Z*>qcPFr6x}pU}tz>08@P(a2ETEtn%_gb8UkWmGR1qrfB-wC3+j#Oi2O^yAwpjt~Eh0deK*Wj?g?slwh zC5zXVp~B)N5-z3bU~BDw8JaCB*fhkRH=&$mQ#z_J1-Fg!im`vwMod{lOZCBr({Y9X zC+yqXTrf%4Xdn47hYT3}*zaYc^LkrRXX)4R^O%6CT=?eMHC1=|Dc>FgEzrGbydI-6Nw^M%Iu6B+vGa0Hanb|L+=)Q5Z&NzBZ*KuYHUhBH^*M9J&^$#AaAI%us289gQ z?|y`)jH;H`jkB)6q`*Jo@>N?C7=SmI^57ppQIA^&PTKMATi$&SuLm(On_w;Ec@S@hi$rhAx@G13`yajsp(iW zSE*153QBCg-B+2261#o>Iye!|jnL6*PCLhS|BJK`f|SR`a*wU+JV(wC*7NsD8rCViL|aURREL65 z{^z^Y$l!P(_2ivoa zYSIgnHZ*cjP5ajJMD4VA(fG1E%DmVJ9$`W(n3nxsr!CRxNg@Of2nyiUad=5{o{QAs z%qzV9@Epwn97t~wp%J!7t zH=-b5mBS-8^z;snAltL=RW-VJvUcGlQuUW5>t}QVod5V|Lb({~1H6#l6sto6?EqiT 
z2gURnGpi0cMHt%(@PLjzE#X|ZdrQNT3f`XHqi5k6y$L#V_sa+sUKmMzrAvfBiy`kq z+r%M}Sja5ouAi{Me}gRQ01B|Q!*$Tn7xEWrqS3nngy4XyUx*ehkaLLWBZ-w5f!n^R zj;_5miC&p9Y-gp)OR{afzINV0rFK_!Q-TGbWGm|&* z*i4$xJ9s-vmZgf(vH!QhoNU}o`+Ve~LKsb6M~=5qcUmJS(okXel14x3px1(1meS;E zpjrf?nl4<`mhh^EoO^oYW{H^_zy3Zcf!ahSM++x7!-^RxzPYsJ{2@yz{<`|vMNHQ* z$Ki|5uhQ(=ySuVg1xoen117$B^{Mo9aUveW_%>-*=-0lWNLg*awW!_hc%ItfIk3yL z6#Iw>#h$l{i~zQHI|}j^ zu{!{{B(}!0UBhMw7f_%b5?a@kIqxVQhWQdHpkiB&e0gkr*Q|HmF!lmf6SCnwd%|dX z;P;X{t~x^QNyH_*RHw*-;%C^23kyd`FUc8sX%_in{ZuT^df0?WDWbX3<1th43s^RY zDt-CB*7g}p%eA*k_L}DyS03~&lB+EnM&N2iWycGC1 z_JQiZIh)kW;4zRHYJ5tQs99_bYj%4xzmP`Fe<dSc~@_NPO&+LX2A-bkQ&amq@cL^UMpdg$x&33GVhD;OGh*T{za698;ih$ zbRO#d2YSApspA~zP18f3*rOB&@^ERPWQD}HCO3H!;tHScUCL3O>&d*qWF_%3QL`Qy z|69v)1cS{#=csr*+w_ofG)nTVjGZ?P^2Jj`vL@1B5}!Jme@Dm6E(d`Kvs0&(DB{Gm zF1!_*Ve7p5S)l6nOKhPxiTnw_I-ciplzACKYu@hWx3ZOHJa?>T#_vR2+{x^lFwCz# zy2;SSPu7n`u=E6)WYY5#!!(gE5m$W46x4=N{iQHWnDZ^2YRqmuXCNDXHRmm<4|r72?GKH<4d5-v|n z7Bw$LZApK)>>+x;f?e|lWy`u_K+ETy?%o>12kH|5y>5Gz1uwbi_@fLi(qnV!YlM9U z@l5gnMHrV~(|lghIQJv;m#6 zX0w3iN-UC~qyn3kXt;@ZG$L9e*vXYputoc1g2lC2oapmMBNr=a!ZBVA?R&C<0TR zfg-ri-SBUn#cLg0`bL2=#Ekp!nuM-uo}~$%vLmkL!nT_9LUgrkbRIAwk_#W>402Ra zRZJ!|I_Lj4&7aI`$9W0yb0B;5!p8=`03RzE6TS)3Ep!R$ek0NB@C>(y)81LXiQQSP zvnL^N#hugW*#9L@!lOp$8AA_x(CAFn=@4JArTU1~vU+>spkr z$;KUI3}!Hy?`>-?%88ZXV=gNA0(`D_b-1(TnpkY8=G&6Aa_#Wei==N6xp2%XIm!oZ z<4rY2n(iZ^clZz}tX+~}_UnyH2^)277mK1ia|C~LZL*`z6I0Ij90R-8w*bbqnoz~_ zIwwHb;(|i(!fMG(Ldy%eP-d%lNCY-nd!6*Pi=U8@1#F56 z3QA}KW->XHvgbvi;v^gn^-^gmX_vb{_RnLS|6(&t_w=v2#$Qwdp`IQ^JFEtl9sLMO z0@t!DKE@myPj@h$D`DMYZC>D`k>=mDvTZLqGV(dwN) zUn&fhCth%Ww}Mr{EM_i%2C7x&VVc`J&(6^&++&}iu7I`X+Y1;K$FYtgpaa^flBWYP zP3O2Do0%cE`~>O+-7_W>a#(dVTShNssNWW6S&N3x?JRnjPh@Y|v+3BxY}woN7~4$h z;fj{9B{CxF@%B0X7fkATNZ|MpdRhTKi~4MVobx)iRDVleU& zc#MPH_Q}74G|EwS&?|W$QVensgu8iBHc0Z&}v}z zeoJS}Mc_2C_xX@#UK3LN6KJSBJ|}{mP>HivB~~=9Px>UYl|P%jkQ*h>&u92r87qnq zV+aaYkQs3Vay)+Ow$xL($g6^F8dy5kc+^%Y#5$;B?6_KM89772Mxev5d6ccAdAHNk? 
zxhw-Sk$D9lWm*+Dfe}5PmZGmN;~=AnGyLx*l&4p8`h_&Ek1tWRdn~&K!xL{xc*M zyac_ce_1e#9@4iO*yw1+tmcCNIlSw9oa*g6*g}2G zAbA}}m)^N91u{p>wl(tuWo9f*H*3q8WZj{TjO(NRa;|Q4k)+@~qi>XC{zZ@Z8P>+E zv$atO`>q{51YuV;<;1sObwdFg$UBy`xQFAL@Gq>Uk;TGe0#N#|0p>f{pDCQ((fp+g zxS+>#UOAE|mdFQb(=u2k$S&-51#O z2hcTh#QO_Q9C2#{Pb?MZgjw97)Q2=%RBjv)mp1VE2AiVOW5^Vbc3?msxIpURciU{| z9-%&ALI;IPxzADqsnW_y_`Xkj)&NncY!T^oE4R{|Y+w+JN(7#hS68yN#~xf{7DDAl z|NB9D*U~;=x_F@QSAoP|kk?GA+nC25D0|*--7+N@pR>X`HE8YluUClz{Z*6Ug)N=i zpOXyZWJ0uhK)dM++@xGgA*~mz$1%si9#VIM$JGCyqxe=P?g5k^Gxh`^b;S*U<;|~1 zqvIn`i?%{8%P1sf1)}|)-Xj=;*6s>q4#O)!W(vp|CX0y&RiR&Om)-$gSt;bygB)N@ zoBcp~rB)s}D>;yar$*dX{RaBvhtR zl1EP(O+KAu{t3I_s{;(4RBBG2acQ=|uM+L;l1u}Iw@jy4VWFJ)5)~wvo{rp&z|K_$ z99EU!Hk~FqnmvKB$kLTR{SzSnn=}2-8h*!NMvsd#M@9>MG0j2 zoW1)}VY~gjH$7FaWNrpdFJK!oV+7Ma%f1bcZ2$MKg`}*IM;YAYXD2!^!;aGpb&UX+@dkL!g=98ua zEvyV^VaGvURw}g;FTC0J24I1E1#U$N3O91Q0vXOBGu-kef)4B5G)R64IUSkF48aoy zVQSLf52cR}L0A1F^q-Iaa^IbVFock;O!SE;;K{pX0|A$wap8h`1Eib;<_^papfvp) zvkFv;L;L0aoj|+pF&hN##tA$Pb1Y)b6WUXWaS#82+=K$mP*8?Hc#6OzT9Oe6v|(#N zvUmx~AU;dhO9TQ(?=fHy$(tKyz1LWXdV|xWRfmB!@zH&Qv~XHg1!W4zwI@c(Fk8>+ zUZbJ3oas0Q!k{xj@=z({H}|p5%S5miXKB!{D9fo52r1VdlXH}Vv{dpXB5k98#z-I%{m41MrJjx9-}WtA z92Fj8f{aBuUHON*EO#oCAtfpFjMlhe?2su8AlP2~3!nW2=3@f@KFj39tiJ{Z!Dqlg zBM;@u`H5d+6nth8gcB^WcIR)v>20a+$;jsIWk9|tAoX-QKEq2dKyqX7yKdglK>f5U zioI?t$|zX}(dG7UjV&Y<-32OeeMm)q@I0*pq&yapL$CdbV={+Ety7-$>@ZN%z6GmO z-#LxEQ;1M-19G%L^205QB4fyA64RXqlsOXMtu=&FHuZqd<(Ay9O=$7V76fxV9@P(v z@EHR}*VMlvTIYvsF{uYld(mKO)N>a>xmRFaVu$?O4eY)X3o%S_-4O5qJo_N%3TJ>& z-VF%tKg>RXL?=+v5qFU&5TdSu1+H3+aSh>nMLNn?4T9FK0i08{Jc3yUt@O{gX2Ke# zM2CFsip0D59yl65lPoK+&v|wRDossi;14uGf(%7@#h(Du+xHX5FoxqBqDLXn-VECI zNqibx1F%Ltzw#UWdDfpm4%_z=2;}pffV*foTdu`CUApB*tx`?AC05O2 zWpmT->nLZ#YVoSF)#tL&;nA&zMy44imP5f!OHgBRnYp>?GjoU}$5Rl<;=MXE*RYNZ zLAyZmTLK6xB5{0!(Jnb&geZhK%d4vp&%mZ7kKUHM2x2wI3i80$lO!4SeK0cHn-+kM zwCmlwD>!x?-o12YuxBsuM?41q@W1E0qpuWZ!RFxF2Bk@TDCzP8Hi$8RO-%)P-p_JU 
zG$FV-eRYYNU3u~A1k~kWiY_&sujm8EQ|hH)XZ+ztdAXYYk|NI_n9+@X91-q*gkeW-v8g<=1T`&c{Oei#BwAP{yTf36p~44JU^ zz;LhVf%57^b0NfG`)sGX6s!j9v`xvkQ}s;P*f#-L_|!UH{mKLGtxU)laA6tTxAj{f zQ}=EL-+?ipRnLKJPBVg;A3WM4DCq7N>ySjg0m_`|2Vfq5BV-mMNFygzUNz{6JOaL` zXW*$ROs+KnSKK(Hy!`?^;`u8@kZU*>&??rzMGkNE0Je*dAwd1QrtLg|T6 zIK(|j0(=kjPFHzZwL7h4Q4a?mf)r`lyWi#ZRo9o&#fDNDS2*G~3cc=nReAl~J0GN3 z98%O)^mp~jNxBZ__^QWA9wDf}%cxO)2DI%C69V*>g74pK>Ycf>sd$+0rS7PYmP7L3 z83j3|4+0xkam-9<$WvZ8H=uoOBC*An%4;Is|2__KbfAZIX?_i%!P9hpqAdM=Qs=q`C)tRn&MCQ1qX>u&Fsb-9p- za=Db}HQI|~$pASgiGy5E zZa@S$@CxCHnSbOBrneCk%RLRaRdp*TG|E4cUIR;;yFTBmVRP%d=_(&9;;_l<@p99G zP!!QW)SM3za(U+5O8)&8^3q>3x8!3@GzVF9KaN{~=gzs8EI1$Jl+cfKGMsQ zAEx*xRhHO{90?f5e3#HPYnjyj|b?nB?;Qhf#^RDB^{n6o_s;1Mjnh|m}cb6&VeqboHRlz zjLXV5Y#bTx8_Tm92Xv-&p1T%+#Z&&ML)D^h?sNC^P>D)R(oiSBwZIjnIXX3sz?UkuR`ibS~-aJ zc*Y`LWu1c)Lhw&n{@#`Q;wgt>0Lj~o$i0|fi+_G8^Fyf_drh6`d(oy??lEJ;|NYRP zmvE$VsD*WF(Hhw?Tc)8XIi^bLlRHQy{9A0+ydy={L~#5tN<0r1%u!QJfi{VT5HI!x zbBL*PYRb6mLAJ9{mdPAZxp_;u0x{v75<|L@qWCL`^U$4 zSkzl?V!1nmh3f=qYk4+v{*Q3Br?SDOzMPSL$pK2w!DlMgSDNm%Mi{j#d!_YfBdT45Zrt5?lYWJU3-C%u;Qk~mJD;? 
zsy9k{&AXdExz+R&mU}&m{v~S6^50X}h-HkmC!j|3(vmL;6(to#nW;di@;-I-bs?%L zJP$sYyS(9cX{ag7w`S>n$0Uy@wgUL=(uH?2ucPhXHSa0eZxd?R33u@6AI8=vgnUrB zG-M1#R(b*2JpImK)7HOMx)Z!6C z#y9Xm)v0^fGw_L5ZAwlFY_o!T4V$Kxtvj5}dY|;!GW1 zUX1X$vd+Td%udaMFfcjK$MOnD(8&J%gZeR=W$jacHmk{_V&FvJVKKQTGxl2BAfaw& zHlBrjRBxZLs`B$_B+!7W9MFMci*URUP=~z^l}gISeRjGGag&eo}odP zPm&#>J2R?e`sAhiqwP7011;%U!vm2^lHC~~I2~VF$lj$ywqM63OZX+I8PCnuB_DMU zKaH#fPOD6sF6k%(s$5Y|$uI}{9`)FT*b^a>)vRgZ35&u{lucDagNA1e>ovwTx!I!h zo@J^Re5?f54L>l(wp?wCe1s&OKmJh6h1tL1i7GJ0#J`oGPb|Y>;{DMr0*D+T8KYuz zs-Be!R;@E6OXOOGz%DS;f1j4Z9-FvCp$;Z;3Qa=DmV{rqx=xgGSCLSMC*!D8Fd3NL ziQAebOF=O;ahWz3#<_F25>TSQ?kzUy{-7=--ANAuN(6cnnNAwjQ+}yKHICn&+JR=*q)NyQ-Yeg30=KI~>-jD>RfSz0HT5 zn9cSRjx1d4tsU&ULXJq@5YjRlWxzZcsq5wI;&^oTVbyuDqHeDpuHHL2Ax7)LdMn{} z8fLI19E=hhj&29wpDlHmHeP#5TkqB^>(*2q;;yb4+o;g#Cuo;tDI>-mD~`#Hxpx&j zcd&5n8Zz6^n>ZEBvvX{D7tsLFpb{-~tW@5f?m;u9QZeEoZ`Hf32MS+f1nIxyE{X9g#svI^;$>9p^g8My=L@br<~MM-kT2xrWh+IWsIelj~8?Wr}Tdbz|%BTZK9_RDH^wlsRpI$T=|Q*_>*GE zdDM|^2!6eBEq^vFDHV*~`@8h|_nJ(1QaZC=O!Z=lztzAN&_6Y}OOXzt^)*uIy5a?! 
z+lugtQDIGO|5qn7X*B;rHx67g=o2t`vPs(TY5v4_&HvcOl1BlV&Qj*>5zM~L&%U!-HG zA5VoqmhPdGO${wcb0PeO0%rn~n_z&{CvxHhy53UR=1(9SaKPcXRN1srN zlQmI*U4jWAt+FYECLR?i$j$Nfiu>@wsLRetj&k9ce*oe*QP8Dt>S9Ik zf&Amz+r>8-eh5w3UD$0NS?TqMWO!6xPJC`wbAX1cx`^3z=%t z!wFmI-d(r0t%p@Jw-BVN7HA1wixLjY2_2qkh83#Yeg>4E$lRGwu%&lp#8ev;W2-%0 zZZLhH*p|nPCjCpMVD)v8q;9YVAx&3OXNuw`XU|R|U zUPJ;(1P-T4v4qjo6AP@#wAauZ@?@08I?9A=jv{%dI7usZ2&}GfDY(XBj-o_2qMMwt zU?QQtK*QuGcthKER9=I^4urd6sxNt2Y#}>2fQl_ohrE;_pG2vdeQX5<2ljaixEi)^ zBwQ)p64Z$!ihL`o@p*;2sHS{*261CC%du^OPq^43)RnA$40`51@)>_rRCk0vYGJh{_q<vD zSE+^?3z=`ryQ4~(p-C>inyGnZ_nT$?!trNVDTJG3MWlfT#6Pb$;fw#Cm z*C$A@JvFXQqYY4w!9q=rUK=RweOCCe&)o@UFb&HPj&m%!jti{VEv_$cEF^?gW6D5k zgE}|$^^)(JgBm%VHR0T3lFuS2)JD|;^UZifZDJHOzJ0^!MP$oB7O-QN&0V1=u60Y4X1q<<;A33zc_T&PgjysB3v1{M1qC5NGp zJD6ME=;h4HR89Q+7ES=Uf!Kj;@EBdE%E*qq)g^PZ2X9z*kmRIjt``~Q^0yr6Q7JFB zp)5up9?%HfzOj79ty9BpXd%%qb_%WsQJmlBJc7q#K8e2jN>02?QJPHSLav>b44g!E?GnXC znM_L~WBNz!xPe9|<=1YcL+-U9%lL4j;mbMFf2l!GINKOoWVt6O@%S8bcwZ^Ht{JG| z$-SP_1RJ`R{4nkqA}~#VZw~b~!5Lukos`wf7pAb+K_&agvGj5;-{=akTJAEAs*JAA z-P&jJqtV)1@VAubx<};gTQopk-{yQ|f7iM-zk=xb|38p11^cd#=!tYk*y?bAV)vug zUKklxAwm~PhP%YT-v+9SW1uXS&vco7CGLdXciN;py$7Z_0IDRP9Fq&>lh)65gzl~= z&8*i6jfX5&=Qg>CR(Tnid?uoM@#tkyb1Xfoa?@0-R1xn zkrg&rLc?d^avCg6Yi}4{_~gM21tIixBwmrCW@}6@y3hvt^pl;{N#^rp7lHQ|y@?aN z6+m#UH?zcNScD^zXY_Ha5^VAjRNWVcnj&_w^!DO6b&HGz1WD-Gd_9$f7npG_iFz+D z9t_kjo**vww=Q~9eJPnL7Qb={e0|bc&myiOf*4Fhv5pxvbKnct=d2;b#6Vsg6_&*FMy9fmldCP{H4A;R7y42Sip(_4hYH zg56Y9g9}q|@6M~vv z4OTFUy?0fn)pF@{#~ggX?(X`~1%;Qf6g%XO4KoW{&OLgS#>st-i6I`e6 zK`u500w>D)2QZfzkWLlXUxQA|$~0_3Zq+S&_;J|Wt|?Zdj~QnrGG@#5uv;ae;rgJ) zrBbDe&ToNCUh!LKa#8kpy;=s%cEDHpDpgdfhMr>sfdr~#E9#fO+{32Fw~=irbHtCkB5bEc;!(KcTT7ClHEK zb@TE?U`h5j{C`VwcsZS^Zx8SJr5%6`G4|kRr}69Rl8)QEp)Zb5Y5WVLG_gq0CXb`W zhAUO(Z`pN=?IuK@Gts$DMKVFcE%l6^7T2k;S`f~%((iY%;&GWty$*{gW@^$nfx3hLJ9pr#!S`Vp#IAX+cxQlA)QAabmb^W2snyE`uyH4>_>LDCC4 zere-3lkbJj4C45pnw_CbPguO;_RaOkdB6UhD;feUAubw`Ozd-ojRM44^hYJ!`ozS|K|I?LPitt+PraB06n>dNS0fmH^CVk-PDHM 
z08+I+40-7R&EMgqx$F}jqR9X4VlQ?FqpzMVAn?uC1Bpw%@rpz ziqD(KP8tg9A+uGKXxY}ZUHu3;UT!#Q93^rOL6`ke*Pgh)J57d6l;qg<-OF=@5s|1x zVxbE~Ciqlif2#?unAIdbF~+#7EyEVR=@+<%9{<%%vy;3ok2$sIj$C0|iH3nxDKzMf z$~(VQV|4oQ0u%B_?RMgJ?l0+s-r%*easyM&j7Yl@S;v5)w}LvuD^({HN8(}h2(8+4 zE5Ai&!PfeB3%s1i<48V8TW$1_s8dyjWZgWo$ha7b5NP0Yp!0>}9{?uq8_-q@_xl31 zHMS&nO=GX_%16+TJxEW$m{jKYn7yNAi*T@Ijo(VC_9=)_yx+d#MI$`SAW2%*bSr*3 zWYFDlOij)GVRwki7 z*Fm3+zN4uS>@iv`VZRRofR$@$vD9s%?2c%HN~ffLaow<0Jm_-*hRgrefID~{K@IMn zzEtM+>tC4}LuA~fU4f4Xf&7_BqT61D5oa~+*=7ho&Go0+?rGGIBl~Dtrix59cugNB z(_GLf``ENccF^HRcSGubVEgtJ(Pf1nP<U^}Z8k=&9?B*YWO>&e19Koh=Fj9Wr7xX!uc0yfKGwJH6M_CjkxGztH8Ooq;c|Je| zNlXr5R3p+{12a}vew})WFPUNjS6hiy>TcSWi>al2z%y*gyMOwL z@E8@2ACm969^LcpjN2ps1+OpHr0+~1} z%RRzejp+~FA`^1*mPf7Zj0_TcZUNBp+_hL70(UKM5t5I&GKtBf&?osOjx z)4<+w(~d-iH9XWF>oylY=x%-es^r?M>sKy0B;wMZq`cmn$s1N#Im}a@R8CEHo?T(j zA6A^}($CBCd-8kc@Xsgf*mD#KA=oQo!V2X-3fLuLncF)WJiuq+JX*exG8}H4zOJA|B;~g+)(8ct$h>N$JWhJvM5EZPrx7lvy|cGvrC$~e){0yvpqW7X6x9ipGbKR=pW-W9NP}@s7yWzCca;!jh5ljVoDogDh z2I=aeQhn~R9~yuL!MmpSL#Cvo)vol@>FIl*!@r9GNhT^e4_NOuBx}%H-qVvQDk3cl zQTSbPu4Ccmb&ncVLG8GzDVLfX+JhPK4?oJ!$hY#(M;6~86Y$}S4FEM?0W$D2UL(RW zW6Ks!HXthC>^VIgk(_?zoj~kQMr~|1(QZBX<g0j{udwVX4v&^<6F!*#vn4{zLL zkVJ*&9`CLkX673y|C9+AMBTt?fRZAcdqWW55x-_+PD|QV0Wt^C-Q^EgpdRU+;MV!D z3lGL(a5AAy-YaiwTX1^k?fKM$iw%`C+$ zAvFXL?=y>6fQ)7!jP$}7fQ4J@%s04=-*2XYE)5c|Sb_73_3rNKXfZXYc*ul;V8e1< zIO*Rg4f^EGF0NRdOp4IeXC;MN(aYKduyb+byn#q2t8pDIg3?EpoBYzRnO_<&GkQr2 zuzxff7haZsn*|i!u{J>OJ%I=}cCgGejWnnZ@$mcq28kC^bXoyY*e+@b^!P4aU=pu{ z{&%8fpp?;rBrSPQ+fF{dd%sRG%SQg^vY@%_-JiYFjJNCwQ@v2*3qwgCNPrI7|5 zAOHX0@nzVgL{YOoVPfuOe`Mnip#B>mv)d`O@YN#dqW_v{0dkXPA}?F&{`*;=xtOFZ zV!PkR4Mq;jqIo1Y0=3>DuqwECH--PkGgnC#0{r=W1zw27qk_9Btn(5pJI)D^z*+NEZK9dygi)Ben0Tn+z?s~@20jYI3hkkSJXc~S;``n4+1 zFOr>mPV@$-Au*g+SrSHye`0 z+y9Ng+q+aOs)4?nKL^`@DWSdAcV|(?zB8f$yobxjnRfkP8)MB0^qxtO_R|vV4;O@=vHFyl2`}ha1WwDzD#9$LHRJd4H-rH$jsD<;C1yuiBql zRU0hU9396fb{Q-sgO5`)qX7eLau3j_C!=F=df}NEzx#K19J?nugZ50Ic~VINvl`Bn 
zEY@Y9gt$a^jl{=F|E&jF2Lzzond*Ys0Ot06k*u~q`!Dwwz@UUdpX+h(RY{f?OgPSH zIs(<`%g{}1bb&9c#>jmyXy@&51+aSFlh{{XD5r6J6e6DU=iA|2dLC7IN~8}D}y z2z761eaV!%;1A~6@W+`UaA6{eU=dOE;4)%;S(iT9=(RLMt6dT&3GGXw^9tL3f881b zEWyt_NnzewImj@+*EWR!bs%Vcx6iZ0?E92Ccgjpeki~^m{E!hG+Ca@dAz9p7RDGOPJCQesH zbZX+D>z@y*fIU4xwz~q3=NSY%bLy8zAJ3j1p9rWO$9xPyp>B``SU?y`mFJX?zyWa& z5W1d0!4m(5oT>%r!`u#jd|rDxcX~_2)8G)F8K4`_l0FOg@z#DZ4@0l%-^ z4>XjSQ8et+QY6QJ0e}h(O1SolnZLl>KcNQL)C^9Yqpvqou86-+7Z@y-eDmZdIr^ND z(pt&P&AA_nh~&*Bi0*6cUaOs`-rtwZGcFNwJaDRLcMWJ6>on}X$+sOFoa`iqoFb9lUz^{ayJ{HVL168q{cFteWt~`4CCs_F__&}bSpVjZrX?PiyDonyxhJ3nx z3LDL9hman4Wh*U7GUm9y zrz>i^QzeU!Yx^^ufBZ5WuS?t+Z2Xj5e}AC~{SZn9p1ExYmeN5T4pu_OuAA#rjlG|@ zU*5YYFv}06f=>2;1XX$8^{4hz=9DChxtc#q(!f`LvJ4i}fdl&2^F|>j;~cDgbPCL1 z>752=bn7`siR(Yu5o`CL)qe7EDnqnr9HZ6n{5O%@U}OXY(S!iPV>krQ7?exWx?kl{ z3K7-Vfn3**GabwT@m$}<7$~md>|$@qjj-UoV$M>)w&UIa^S}rK$DJ(DhGd3FuuAUA zCxF_B3;;Q~X9jNt%w2(?DyoxP!H3OFDN&40?s9KHdH);LHc&H)99BXFFmDR<-GP6M z`GdHQE38pa?4t|yJwRe!187bEPoUiXRZDdKve(%w&+Mqa%az)m&nE!L{6Mi*(7g?%;hpU(5c;`qP4fPUEX1kmCV_aDFq=fBX{tIN@7lTQH)lRw>)F+X=fgJK{mVzswD7F$T@~ z4g1bPxYpCn9h!?6g>Ok(zXIO>ejnfu;Abn3o;*4P+~xkQ9q=LDRsPWYAz-MbLwT63 z-KKp;J(BEhHEhO#D;KDa*TD0;0y#HmRQ0oEgbcujJs0ql*MJ(w5D3W)v!$FbpL`w; zeiHm=H-9A*&H5Lzd9DEML!>#ZS3D^Esk! 
zsvFfHB_(G{!<6(Af>%NGLgmL}%bX~ZTm2E7DMEVB?Zh}K)Oc^qr}0woTnyG5nco#i z2}bQiJ0;Afgg`R(99UQ*Sqj!Ti^PndIN9!Ej5=>lQlREVO5xPU#=MnDEt(fEJWmbx zzXG?iN-=K67V0rlrpGAy{4&g)AGZ#JQ``z#11kKRLeKvi-h4jXWFC~e|87+zM(ASJ z<({A|ktQMfVX2F`{Z3_izLe>gjFOM&-y8W5oAGLBqs(}QrWLr-F^Ip1ekWPh1j34b5rT*q+f z@gRfG7{mMUWZ!H2H>t6myek7i3t8xDivdULn!JQpGU;EyX~X}u&s1~z)=Qvc{!HF+ z#)jLaPfm{mhrcx|3*%a}V__^E-HOLg60E2lta4;LKa%=)ITxeS!1E*aS4U^u!jABS ze)x(pC68RUVpLH&&El!n{wpEbscQ)#X05E{fgZ-khJ`1&B6GdQEiVdxsfe%OP4{!u z=U>wkt_tDHAEID0hDhftSN#G{TYq^Fnmi3`ltF&wF0A7v$XJkOT|+jQlgHqwI6S*k z=+!le^J({8&c)DyfS^NfV+htZP}L4+MHNHYYa*?QkT?7Zq&1cSAI#Hw=-f31qOH=a z8$oA)LK^J(%%#q~C+4L|!m4W$8D2`EK04*5cCN#MU+Xc38mzqk}LGBZf6vj2{3Dq8Srkyw^8FNbzeup zef(B@Wi35%C&|Cc^rf?T+IrjgN&#VKT4V4yx87r;ytjP zaK~1ChQ&W3*bX^U@|O(8q-H(rObsG?Sp7ORn>xW}TN2vv=q( zVUyL(_-VBl;g__omKlEVCjCj4C7-F5sap+B379hUn*xZIJuD0DW%3^N`<3V9U+y`* zCcON5q|&n9D)u%S#jONa6^x6{`eoP$Ph zSekagUF35H0k)}y5*pnH3K?B?l;;Jha?rQXc4(`jb#5wpdHwyMNb|XS-5oEf8t-WnL3Dl8G z1!m3b<(|TxwqAKAd_6u~)iHPCHmIVtX2Lx&c24O-}_7Z2W%%)>rU2#rASAMY-{1HoiA$PN^E24=*IF zOLa;fb*hRq9-r|P5Eabg#gQf{acj} zQ~P^vH`)EHGM$ezqu9H~K63q9du)I3i)${OAD>GvO$$kpWjs@O{pWbrEdNTyiPG(l zhXrk)V}h>TdiA1@@?+Fx6|(+z-HK_L@Of0}x}@a`jxGa+$9aBq>e;#+f_nifNWXl6GTX{pOu6|DdUR@l*GzrY7Hd zxoz~O7oK=bu1p#3@9(=%f132Kv`ziEQ8W9=@#iP|eg8@hfy>m4JBD^f2!%q^_*e2D zr5mj3Q>>k~?ZL*GOstkj`Rpo1^~e)`Ijfi2iJweIn`PykXthJG=foGT{$)9{D-Mwk zav0zka^_rF{jOgSZQHO{UYZg`%Ji7i|J#^a#-F!)^QA5lLBVSb`dN}1g}^_>eReY0 zW;ki%ZLn$oAuRsaifeLcJ$;#5gHAzCCf7yFXfsJWQ96AT4tp*}n=|1$TeX>&@m=*& zq7Kw^RnJd#3MJ3Eh_%h&aKWOYi04GYWaW(QYjHdrJhWQWj-pi0{dX$N%6g`{{n~wd z;0{z*iNZCykpiwAnJz!B-q4y)qZFK_70!p%nbvBN(r#VcVqnm0@gNa)y`kPrY?V2Z zG7keVxR+TgtMc)% zG%V-s6GJ+g+7F>P-1g_L=@ePs^g6R9(~nl>%bynKv`i?h&3`^gZ#H?y%Fx37snl84 zqoRB{K6ci)xqr5<%DBo^+AMB(?V-a@wx77^iqkj0rdora{W&&1d$ZyFZD#3o>F>tI z(p!_}%PZv%H(#tbRR@Y6Mv7ijeRDs*uRMw--I1H1tMU+e^;MDo+sdM9?O)av+aaIiW%_EesoZodRp`9ghzPgPV&s8(8rZJ%=4EF9}k5#orBs;&T{%) zEY&t#EzO^^Deq4`@3m#?-C7P*2oc^x%q`_|DN-o2J!QWce&=>_2lP=)rs>kWkX)rX 
zLf&&L2ge0>(0X^@duXbFZULrUYw6-X#Y{8qnO#>CM~yLI#y$_%ac(bvscz!YQKV!? z&l+0i#%<^zr{FZMC8x>j8*x4>o}MSP+g4s7qk=b<>muvrIt{+Zy#5not?|K)3Z)eM zqY@c|C#y{c`v{`asZiyUd%gTKB$=H_)Lk-EBWQG25|yJQRTuh?h0~$=`)dGeA9EY``+fOlGuaLDN+- zlctg*fMi=hE~rM2hyT8Lubbm(O~%ugbolwKi=y8Y>C)yuFYVc5(Bd$||FaD@H6F7y zw{1M4Hm^s&>Y(wn8yUt;qP4kmBH4A73O>4;oz2-EnYK6ZnM3{rP2iYc{3BznYipEF zd-W{bQ0w(uJ!e5cCi~QF0XLi%Zs@*s(`{*Q$ZfB!XG3oJrXKQKaffH`wv}F2PgbQK zikrD2e0A)jxRqs|HK!MU`mXEyb*E1MsnX(|b$zBKYZjm0TC!KLHpfjq`;EYP^LeEj z*X}NqFn?OnWf+zhbN+gv&i>h-AI7zuJ^g(Szw5pj;IIO4Om?~XTJNPvktG$8f2v_Ol1qK?^(2Lm06wFR^z6uQqH*iaY+iHd*~R7wU5pp>YHh=7PnlM5|uL(SN zR&D*-t!ufsxYnzmK6##tiyH>_6RTH(E9uU~1THR%v+5_0U-U4YE?rY%sp~chwInO_ zZc&h_5g(Vmes7Did+c#@o9-PtDz=9r!hhHLT}8#>_v8Eb-dgwU9`_#7UW4tVbH~{j`WTR>F?ac=v;^C!i&JbcayWU-hyH+=W#ba8zlv9!3M(~9Wbj`3B-YrpHG_fXo6 za5=uoBa>YJ1OP7jNs!JVrKx_elTP3J+;}(A&_NRtU68w1<4}QH}JQ!8I z*O>jVUECRIT#aRa7u+^rvreAs=XCMNeeeDg@H+Nqz%6`l&w;fF_Cs}G4-aC_>gz4Z zO7gx=0s8^;`+v|7v=Gt?qE1Tefp0!J>i_h|*!jmw{&>*ylU$Zwe3RL=W$}nR5o~#8 z8~E40Z-7@nacdNPzJhuD4EXcm5?kgb%a)$lFVluOb9+QNN?Yd@u)xP6U_(t$2OAjcNO7yX%C-zI5rCEQX@dR~x4ej;=@Fy^rtzAVc zmY&!z4QiKE;FmsZ0;<-y=jU!N>BBF5NC$d%=a+8Iag^iOFMVJu;V<2cT2l63`XB)` z^65|AtT{Q-(CvWp#jRDUe~&%X!0=3X_a=p5(Mj5MB?52&|^A9u0~>F^63M2C zmw6)0oM90v4qf$mHS5EQY8u>(CwRoGg+^-UMW7~s+qJ4b=6mbhvZ0MJ^0nWa6?ake z=NY^ZGWttf*$>MtE9VOuXS*7l7{d&y#44GDSzYiY?5EkGhI`tF6NvZ&ppV_cnWSo` zxf)rLmF=y2Y7c)NeP9c=o=3%cSZ8?assB|DgTE?AU@qG&M(P!$m5|@TzXkJ6I^dWK z@yC3QO!sJ<@w-IMmmYIa90?9M3{BpGE6vM13`jq{GGpuQ*;*g3yb%YJM!d%-1fLDu z&dm&|@|k`8^Q($WY@=1*XKb8F91d8F2v{SeBc{vx?cais^qFp12K{ABV^s z+Z$az>fqammLJPh9z$-M*B6~f@X}JlmU?@xpnA>7*}Od(QS%+?Mja57+@Wxjn(ebV zSB`r0NBUywpvkIvIT2mZh0IWlFzDCCiuaFkNBi84L4!J*Ko=)GI^MCpMO@T=B_oo4 zRWUCZ<*R`!b!cJY(I>ds7eQdaZY7g9;L61PBJGT}*^JxY+AfIHJS*?J)16!H5mjG3 zoyutqv65!9a{i~Q4X{-g5l2{dIgMvI^YG8trC$O|5Mkmw)pK}kU>-9utsQ2EnOc6b1n-d>5v+eyW9O%GTR~Ui#_4N~+mjPM7N=aZ| zpO4cHM}z8l{;AC+N4E$0Yghi(db_{gHi`H z{80bQv|V^Px{B0*!sB0GQAs6jr%CK`qQ2wvJ9pjTwg7yz=sAkB^Z}h!6v-^3U4NOb 
zex6D%=RxkqX1eyM07;zgCk7+S?W6T=C_}^n605abfl$Wx*`5v%Ml`gl1nq!->K(0ZhC6-n^miy@FDgF4FEL zABS4f&^W&tO5e4_kf6e2BX@R%DQ34l%a(!;XMdUSK@HwJc!rsE3~DjS*E#9s^%aJP;8L&SSK>YC%Vl>-GS3cZN%9rHj|idyOYWN6~!RfeXO9 zb(rw@)_VFj9$!W7NHe^!)hId!ruBG^#@{B^hRF35#AN1uHVcOR5vHFfRBsrJX4>=Z zxvD5RNo-ORofh@@a&vjz5oVwu80RSLOX=0$D#}w!Bi$zwRrr+N#PAK_l_)(qtaJz8 z)I+jjc8im{T%Iaqx}t3*C$;VCon5*jwUrGgI^x{BtZ!!>a2P8~zB^9bTl8XS)?mrX za@OnVmRP3uHCfHsk*fbEpHkFIGtg;{YsVjKP`q$ddZisTb^ICZYP$Ih!9Tc-v6xjQA~Ek(vnToi zO<1na!+hLl!7%r_?s&}64|5$>Wj=OT-vw*CSycOCYPU+@Ccavl_t?{bJJRyHJMb@Q z1)B}puwjx#G&#v@p(vQzER%KZJ-QC26Hzl4;FU#4*(mbpXk8cz_IQa`lip}swm5P- zi$0x?eu>*>MpFoqp3r7TehH}HtQnLI)Dz1zQk1PoN%m+Ez+RX}P4ccjoa!Ik0A@!i zMn12{xfXV+64tL#MVMUV491%_o`OJcb`Gcp8YgS;Igi%b=su1;Kkk}$cpE?BPns4uy5b)ujtkYeXk@#>SK&X|O_L2$9bjV5@Xsk+nr2DcS@ zXXq2@d4}*hiu)>5c75_Jo}-va;~oYbSli{JNLnqw(|MeFeV*%h^A5X@3+Xg!a#k{K z|Iogx6bx!8TxF$pL^c0CLZHC$gp7`=iYMzK;R6T}mrpYOVJ4S6+NPg=xEt`vhBiQY z;SUvZNiDunoE@rkx9rE&(po&P>@6H^Fw>G`;I0Ir;|E4v)mHEry0U8a`I*Y;eg4=t zkMGaj*Ox)diy7x1x^+w{N3Y=Gj^IlNoZ`GDUM7|01x{y9ts38`rfy*Bw>bP1mdGf0 z7IWGGOm*|D7Qf9vD`-A6;IcYI+JQJSrWNSA%M1Wb3QbP!ux_R%-8gwEQjtW8;lajlzu}-hvdC_jPmOeR!RDKTZy3&Rk zvU{WY1e;~9_md~lEH9anj}n>NCcJ0ftqLv^LrxqXChqO&^}j{^LaFh|ZPPQ_^XRLi zy!H{VLnS}*!A$rSzUZfQzYtFwa}da`Cc=He2&BkF4bPDbqejl$D?x&(IWw%$3&wb%K3+9>^B~kr zWlcFf{z6);8I;+Z&uWe(Ao4EwipC5C_08WZ@qt}R$FW)WX33uj`Q69wg+j~BOInsGQ7%9B(%CS?b=(4O4WxHhvHs$5R%r)e>R{=_;7FGXnmRw#cyn5zMH)Hu zkcZ2@$A^a{I$0MgOvcD3$tfg98SXPawL`zZ_e1|ix6Xm&km8WkyVIFQ)h(FqZ*s+7 zinV`u30$BspAAi@f(Kc0uBXXuDyu4rW`4y4s*Oe>=kwjCyPhhuLZQwDHBq74pJCeR zWs+qz%Wew51Y)DVzdl$Zr#z}vuX^tGYt_3-C_g4dr2lS4pKP&Yjj$ZI7Y*YC#^=dKZdt)@F2x)9EAF zU_u~STB>B}%RSTqR&Q^ump>5fqvgz5g7wW`Wt%<2x=`9#Otieo#3WNO7fLLH?5w){ zlL$BVb$_&BUC8Y1B$tkt1(f84eR{J-UwbxVpWjP4=8Cs=rdXDzeLsGO_SUJx6=+ZM zSGM_mQ!=<`v^d`~IfW6ci+F>pFCAZ=5aX5e{Afae2cI9%`IuO0SNi!pn1SwmItj~S zeA%Pp+!0(nc@BP9Wok)0c@^HMY`<}D#1 zp#=mbKyC&j1X#9-WyrGpVdx?(bp^9_W=g}=6<}m^p?vu^sshj|W zVORP}5I#)cZjCIP$^5oJFXk9jm*Z>)2Se{i1{eIeIbt|DIzqpxWjwVmvnib0DK 
zwl<^*N{1Rbl;*}Ls_m%p>oy7-TS#^cW;myuXj8Th&C81qJnRIt!mR53WBAn!YbC$2M>0wC> zGk^A(=hO9&)EH--H!U!z!;Ttxl=j~d3W8rr?ePx~^()xi(pu|izBoC9_CO*pwK1JZ zcP67#Y}}xh4@V*P8ZdB07joHQWJx<5@6#|Qqh+oO-5T0WaPVc!Rz=fG`mk|N}nniJk?p{on!8&PY$uv<3R+l3Q`m^#OW{8 z&+kL_y6^{W58hxWsr+FgqCl)|3t93R=X>OUKyLkb-D3AQ&vC8C_)8A(&yp`*Ti^N& zUuyamY&=Fa&?sH+Lbr56D7lo5Tu_QGkxt%A4?nEy;Uh@(u7g1*G# zKrmx4`}!4bzeOf1G4$y5)7}WQkCV0Wy0i)H@D%ADJpI$b21&9)Po4YRgHnRc;}|&f zSuiIEikZFK4?X@7x%o>n6zGNf;3BzFKAi2Oi?4$_&-UX;Ms4j46a1Edu*!O85%y`UW;$_MmT3n`y0VgO54LJ1%Q!AA}ub*bn z1j54J;F!}TMUMR~K|dm7&oX~~sP?`NJ*8GJ(*D!)9w#Zp#L&w16mOonQbV_XZ&QvS#OdVlvlc`XUxrk_TFNN1Qsq3M?+a22JNCetQJ?DrF{&;P|~%Z zl~dZU3h$o>#{P6L^vKm@t%VlMV1BGMe|L)wdi7kwZ!-BaM4ZIs9SaqGQ&u5eBGnKd zmp4Q00~Dt%c#Ul+3msbU`4<;^O{Bd26d+3$)Qdl~v?-hN_@ve`pVA37nZdVI36EQt zSy>q{WW?TBhkBcPirOjv(-h)TcL(lF=uBK8)A2C?${SVl@Wo6(NXNa#K zGIQAN%y&fmDw|IC59F|}>lOF_-}4Iwh@q7s6~PDuu2{u+GZe;37K|L-`D5?Ezgxc> z1n>d#;&N+p7yTsPy@FS;R6i!>(Pr_>--@v6%ncz)hIWhU(t1d;$_fXoylSbl6*KUH z&-EY*B^kR+-XmEX$p{wijwQHsGpo&T^fWoILFLih+A)fGHyA6&5nH--u{kPU zIp)_NwUt|L>)GU8xLk*F7({aUAhf@UG_o(WeA$pyA!xL&vN~!-QRa6OHCS!ZjBNm0 z)bqQg?Gy#k{+7%de?d&jz|B=61``-_<9imFZ28JCA98bz)}9E*g}^{gX{gTY#|0O- z3%$=$c(Nbf2@aUTrqg4^l5tV<-@?_A29i_>jh=$`==V?ZdxzdF>ZZR?@L?{0ZQ(u@ z)88fhPD}3!_OWSC+vjsB-`-?ACV6RbH>-G`4G%5LX8l|_n5-E zJS=CYx6G0dubgg%BxRLWr`pvnZo|)++2}DX9M4<40}3c7G=CLJF#hl&8?5f|rXq7= zV5O^(IL6#%hm*$%zk@&oH?!UQ%Oso}IG*`BT-}*eCntK(n$&c1DQ{#(OEoNVweF8q zbWf*uzs-g%CPgR|?4J^yC9_4HPL+~Q5g2aD zx=K3ThqCI?Ao@!40Q=P;+w9y2r1X%KGb81%;BAEXlXJf5^c9{^RB)RKrdM}@?J)GR z!Vmi!{LNM^`q6~B%u8tq`pdp4cV!^JxdWFAFK=hoUWGTwl7Iv=y?qa4e6;w@6gd|l z&8Q`k4IF3&hF?0mnN*|@{AL0yn}X7}B=;Anrh~ZTA=TQ|3k4;C0RAn3lFD9{Pi0I?0{w;-a9ahAbr_Qux`sHp; zB%Y3n73twt>Lh533Z0RJgzcX?fDDf`xyEP2C#bMsfPa#~-bn@p=rR|PQX+0yC0c0{ z4LguJ?z7>a$6z2jw?=XNw4Led)@hmXfv=&B0Tr7Z{C$@Rm$UR!sGql0*Lqg;2(!wK zu6BL!GY{%6Q8ql5c_3e&7|z4Za=P~6jc<;e!qp3z%=H(h2;}JrvcEJVxJDzW*Gl@) zfR$Ajlq2Vzb;7R$9cRra3o-=nwYHXnO^xFGuq0i%Xy_c_R_3M6Heeou_&^rRP)6sY 
zNpuD=P&{F?B-a@%RVr-X#9-`C5xq;ba$!C3VmD(oh;cAF#q!m9`w84F&klD!J}LdN zz|TanYI`2er77ua5MNMkAU_)Ca%Tip_{TJU4K92jSvrduA1pooBhEo&&+H$~6xwB^ zsk}H8dE33v#xo_}eLwBg^gEFgK)!RWN%T@2bJ_thJy$y37v0j1N&5YVjMYQMa5+6> zBwXijyXJYA!&YGLR5>Jw+b67Ru)Qs)@)sDcL%^M-j{S$SJ`wJ_n6GE&yyI=J6f*6C zuk-P@`S^r!mu*LSOkzr?9nj6!#-|IO$o zOKq>zP*4MVqw_0@d2aF+Qb&n9B;uSu?&JIbzHn?Q_;pvb2u7lhJvCPUA^b{V`V`i(TSZP+jvmq*Y$>vatDz&sv_8F zX%xqlpjjM;iC!&qn6AxTlBzGus=0x`;9FKRz_KizYlz@kR;)D}nWM65VHm{RFH+Wz z99&e)_Ya3XjT(4Utw4|{TBuNkGpbTEb?vT<=MR--Y#OrE^Lp2?M@rX-t70cdWY)Vx z1j(cL^}7%#SIFoIZag}T7FIz&{oAfJ$+j9vVeI5v>fb4ke8LP3+IE|0ySt(1F6 zS?s${Y{f#D#S@#!_P52dUZLuA^-Xk(3{QnmYj&qObj@|B2}V@t*R(CAIi)?0#uuhM z8Y!1zRQc%ZX+mmE-&X6W)va|h1vlO-3#ycu`keS)(d@>Y|+ z@v#M!9nCu2VSKP)dt<8x0&&I&9yuZTZNEJ?>c0bNR63p4O&g8J^j!|#R`@xs>d>%yqDL@5mb703|V2P%haZwB*<9eBmA=fXj$6q}LU3r50<@%b9``K<>i@>zCYKfV6 z-8=1bKQ*x)Rp@o?KY?nO(}coa0ic?V>vHhZHy6h{|BC@@@V~3lcj3_4(1K>3~X7KQCX&7Yisa9#SF! zBKn8>u<~U20KsCFFmuJh)W*Th4b=Vf1$mWWXSR_G8XG#m3y)QJoF({LzJlrK{lOuy zJ9$=h#MN|CGt9=OWWi+{bOic^*s^%=Ul3auhJ2fkI9C2>{k7Adl^+aGeQOr=XmFqL zX6$vvU)xF-kUp4P?J{~x=`ig*j>@^StwiGrFBxHH!)mN+zjG@=ha!Tq>$s7It5r2H+wgN|kXa^;Mo zw$#x{pRZUkP=09Qg}-`BKf`YB)xN*8UO17=e?kvp6Kg8An|K*t6BtO0m6uwSkZJ$# zE@5)_$09RQ|6Mc@HVrho&+*#WT8}riJ0N? 
z;Q$ol3e;_vKkblHC(C2j^h`CW;Tp(?6Tqf~$wup#8pzT&t)sgfB-jKLx{%$Qrpt`0 zv3BjKu_w1_>||a@x(0{!1GbyW5sF}=!J*N(&d6bGOUdm_c9yHM>X{VOk%>mhW8*4U z%rdKYh1t3_aWS>bk3-opQ}YW$k=jwT!ytpub$N=Dh_qQdFQ;Wsx@qt5SbckkdWHHF+3Jv%BqZuRl~o?5Azk>ql~^!aDWu^?PrH=+%2iW%3HLLYo!KD# zF**-6q8t0nRxI@r=$Bg!N>tsnj8aN`7#jQ=EhiDTNkUHmR)nd8Nq(Y*r(Z9Q#PU@V z0I~-XV#grQ`c(j6ws1nz$8s8!>2LC=q8i^4WX|xE2Z!!*S$Bs^rZR^3FOp|*fM1f8j38XQz+XBJ7#SiwtdLz8Ms56*b4v5xgAbq#M7VLDatK&ob<~Tg9gMP>UT3hohK) zzmO3TEM4@%)p%XkOveuVo~mswnoo;!Jg^8 z+pAmNv*}13_~m?x=NNUv-Uf50UZQtr&Xd#)tD37SLHbVFB!R_YJ^^5rpNHiTGXQdt zM|wBB^V5@)eCy{NgzX5~w17?VNRy*eX0G((REIQ-#5Q%h9}^6Bg<1{`XDaq3HO` z?Qh7oti40CXlm=Q74lYlh9gWkfXv!1;>q+&R0@LbYe01n6VY9guOmi$&0PEhB@kz1 zm)rIvGkvF6)Heu*%;;qK$%6^kU}pc`Pt#T+x1MK%LHfJo-i;A?1Ys+XTyoYakn*il z6M_>!@=ZnV+L10={mgMFZ(cl@an|80*QLDo^4?1ige1tbTIMFuT>Rv~Q=6?d;=QVm z^G5D`hSv&?eB)T?S@p9TQVPoSO?RXIQ*z5b(@8Dp@8p(V5vDT*_E`un%|`MH!bT?u z#?Fx+If?hM;}_tM!o^-@u51y15LcENi$*&1idPG+Xhue6m~LOUXDMU0UnFrRzwpH+ zgNNd>yvo!s?;`hMdYsf=vY8Fub2Z`!SWn5%E zMKM5j3ZqUY{HZkc`u$pzAr%Xlc?Ctok8Lvck#n_#0MIyb)<)Un6192y>6;CF8nj5v zCizlbrFVO@WeZ^0Np66Nh-m=PEr74njeb}D-8Y^)+seV#~O`R7wG4LQ`5$z7HR(l;F=2 z1o#pnsCa!C;8LE4Bp=UHit$YQcU`$Q!E3VFmTY2-Q&di(`?G^X^C71Ly`m>LD^xSs zCphvL(Rc&%7ChPktk@BLt(iz7&OEeJ1|+v*P;VP{cM-g8%(PwNq7L1jtC{XW9v`emIG9gC7-huFZ|w|K}*2?F8I`^)b%OsOyE zD_DWqvgEYx*h>7s=@AKQ8}E#bOV~eb=nchD{KbfazGg&cn0)PpQx!0ow z%Us#ayDRDttjcTX*v{mY8v*f%B>6DF+Cv5wN3le#Tf3pEE;L}O;=bet*U*16#z$Z68hXi9xO%&tjaLhC!;IF#6uvQ* zfa?O;PCkSsx{P*ct8CBllTBokDnb%w(gqIXE^%R30ST+y#JnsFlvsM|8qd+@3<;a&C%g+$q9~8rT z+`VkqbuH+R{UR>tY7pN-GO>h})t}L)O-Ux!M>j+kPx~rhl1&l>sZN=`0gv4K?GD<( z72KVAFFx`T>q4D)7ywF(z1nqRNmS+_do7~5J9~5cLKGs45%18?YSutX(T4pki_kMf z`x%~uqu&*=F*rTD+G8+8j@J9wy&R~JYUr=x{N%silPY< zuI}^4&$_4UVFRzi)axf+DQo^F%Xnsw0Mr}X7l6(q7naEtbB8}cz{8erGl?crY8U)K__31GO(z7#FAgSabwJI^}%@&{E9+#p-FMVI4P zrx#u@>6Q||%iU3cW8blEMcGLMdX*K{WFFJ6vsjD7mCUF`T=9IAq^Q30aip4x3AX3( z&M*l8FRo;EHUX-~Apb#;6HAMY;ob+_%UC7+xKwGMYK49580cH5>Cf;K>|ASB5wuA~ 
zn^+BHfEZZ|+T<)Ts%aqBj7gT)ft-y*NC>++7s?TS-uvc46x74@`I>LD2`#N@a z9!~csRUE~wa?D`T3kRX7nAsO6oJZMohZ&_35Y;1QBZ#T$xS2yi()Is}gu>FEzZwQe zDAW2^Vf1JFWG!3mOy3>V-YY-!0C{BGIOlESus|}ThE*qc6AT3;oiSmAFc7bg8d0OG zvQ1bo()}0|uGdx2!Ga3aMY9aAqLT}6-65Aw$OJ&#woq@d$E&qg9O?;9OG)m;VZ z2~B9&WM#cv;of}+abIJs-lctG_zT{`DFuE|1?{heiVN5)iTVaUtn_#T zq(lGSUoYmOcFY<3mw>b3k28t;9xDP89_yKDVGjIZpYq`fx_68AZ`%}GQV?)O=(HB4 zaN5rz3-8-5RO*B!by?zBIP9uUv(xwdSu3x(?3~4)##fpz=zf^Pl=%aCISiPiuUnDS z;Ejn1Yd1%8$o+ft6t{u>W$zavh<=TtXxofzvnnp%Q#8wz zPLb*CHloO_2iqEyAY3JWRna*Ch-{sO&X;-x;x?bmGlU(gq{oMC)X6V>z^fu zyi3328@{V)b0v@Bp^-% zd0Yc$PeyBjXE2%I2lHNN2FFgmypjAGuwJ}$0qezcE1()=KYSh>P=Y$cp)#m7RV;pE z_XCa?{&TDd64^mUOvV?MGExQM;ei`;Pa*Qv{6#|o-{WjR8s*7;jkd%!#`+=N620ss z8FI&B`D>b!G=kO7=jApr?S8MtFABm;kYy@wN>(9ZrBjXNCvN>!f05aR>RK*Kqf#BBWVph355~5QU;c+ZR(QdZ zqpw*9-pMAMe-sP^#h*DB|HoR2oFhJE&VJ+1;sQQE(IfiL?b?!)e(y4EhT>J(3|nxx z!GFjRoN39MA6*C+O*-s}jwI!WAhsH{V@uS^nd$l&#)cD!p{pA)I%PI;_3}UVf^5=L zVJ+@@mg!UqVCF2cOSeE1d(bNfj+@*U>7+dv-!Lnl(3cG!6_bzQw(mdpc#=%IUOBY>ntop!0(-(kc z!gS-#7O2oQvQ`3K-5+OXKmC+*+DlCnGW^LlFBFt}F0n%L_7W6ilSh2M>lcWZk)`v^ z1B`9C)J8GDhTy^6j8Fm&YYU%Yt20FUL3e3PkvucOA*1Xq;us_NNH8rj`i%l)LFix> z6N)CEXv5NxK7+S7T$bHv*R1gLU2eG7fMIR+3B(_0K+@qb5=q}k_ap?2J6cgSa?G?| zVuV0N^c`psTM)X?GeS4c&+2ijy&fSLiA8%Q3={+{h`b0F)&+clh#0n#on$ArkdH-7(?d~mEb>i|g}Js+nU zU7rpL;K8mF`~Vobe-6o<@%3SE!U#tlk4C8R_3~s55w{mb1lwnz(LO=!jKo*d{nL1K zd2gS9a|f0*+Kq?Zo231UDZ+1tQ-A-qEO68?ZA_Oyi)J`UTWD_=)f%qE3fi0L>4s~RLd$vv~eeXT5RmWPusgmNx zopCPpM^tE$L}Qlv8~;{Cw?Ov!mG4e32GECpw?M<`y0ONqJwDM{Zm+bZS5p_d5Bi4g z3Ml4@M6hVc-=lce=+9rp$~$6_xJVG73Oz{=v}k*!Td|_EQsdmI4Yhb+_&nlqjwfW! 
z&WH{QKK6TOwUh{x5K(qciDSt{`5WYy2AP9(59Dt^$XJgXCAT$PIXE zEX$Ko+S2A!&y5M0ge=}@`E``}qkD=_$Zn=wfYEWCCUleQC3cHWgkkHE1V za84*dFn@*T<1_i`rlm!$w(EY8k`4r43fxXgz-Fo|kJ^8^3j&Rrs8&D86@`YUCLI7v za*4FE|0g45Ge!D2Op)S*|2k8oGE0sEXm!-K7(=A-Hn{Ej;(-pP-G*AU;g~aAmwy-Y zNv7&SndXYlLELz;;5VIXtgek9FN@i(uG=Ok$v$f3x$izM{Xy97@a$s}VNDZ*W;fvE ztS6|#FXIen?Spmx8VTlvkU)5j9q~q1FBrm+>y%RxyslgOX#TLjBVWNKef6vX5Gqz}c>`hGfnWp60qOw``+( zMrdnj-leR!%lX#1CY$$<{3&E#@YGdP5=74}-&cZ;T=b-t!#mP!-+Th2%i=iNv}6*0 zpZIB-h(vG+nmLSXvC<$wKBQ7c1B%6KB@xm`B7$*q=XZKuX#&Ui6M$bd_% z7?Il%v#8)Mvh@lmeDQAs2}WhLkPOl0)IK`?-%OFQC6SW%^*vEY+v5N4JLLQSbMywB ziQ_f!`TXSrhd%s#aw`D8PD}oO0KcLi<}1s~7e+}Mo?X8ywgYflG<^R^RvE zJYc?aTLIPPHoxZCL%wrv+)(qIrStiJ#{)SmowtkK>&B4l<~x45F5#vp`Uf%|7uA4s z?;8S<&Tg%X;`)65uMhsOcD_{@nI2rNdma=Spmhs?zJW7goDv2q;Jge@>n2>@$S!+u zVF~LzHVH^4KVAYb)D^Hhetge=siMI70(pxQPDsu1WdpUw{CEEmH)aERZ$711{p&&t zKWZ<01G2d{T zjk9A8vyl53D<16A$_%us&Xg`{der_rr|S)7zjmG7X;KCITD6O^ME7Ws#9gD zoJ;GpI2@qbnyEs(Yy!$N)ZD0I7W+E;wX#}m7!AMJhvPL-Wh~TDqARIWh4?EKpASlx zTz%oNgFj_E9b4!G(J9@VrnAKBDa@u-s|GD2S|by8hZ0Cblt$3d7}9}Rv&nnkyvMaGrZ|4$aUmiO0Oty@214a0k+IjQTs}aowDnASq2lR!DlwX4GL3KPfXZ@lCphpL=RsP#`6Sxo4{;D#`l*y?&&CN!3^uqJFQxQlx(~P4uU!Hh*Zn?mk zU4|J_BOg4wZ#QfjOcgtOJY$&#ZYnyNF814ebSAvQ^J$pEDV6?+l@heb6BQx&;b8PW zGEecei=)iQgpr6Od6bt{s}DI_#R)-uNpcTq!`z-3&;w-`4sF+Vl(7e@=a-=I17!dc z!`5c@wMzV}N?Z`{MwA(AZkFq1_MT_SOS%p`NFB8Axhl%K*d*dIpg8_XBWQN+?gIh> zz8Y*7Bva(D@ffrSj_+W2cwR65hp?x@o5FMjiPKafmmQnbOgy@P&DV4C`==|jFvh;>lT$# zzFod&z23Yy=Fij4druEsYD3baiW(lUI?SC{feFRwR3~4j_$_dMsMAuzPol*{zq%*?mVwgz{14TpyoVa^h5Sw z_9GKw!NqvH1i<25qnF9I)!8T6~>TT@LXgjs#C zI&ja+9ic_4B@bIkDvf5t1iDBwD>uI+omzsN@;ztG_*bG#rR@T2a`rU=`?ZoF;{~e; zNA-}oGFGNTRek#+(4|$x+GR&%jDlY)kLxxo-^n7gDH)9hB-zHo*>$kPfjW8~w>=Ch zl$p}$;?%BZZ!0(7tw&@#Mz%;bn%j$ z2m_=|$%)LhbE5}9ofY;GfW@ZE#O%ILE1rv*Z1q>v8xFO09(aHyhO(wIyrXHpLrMGX zr?wzWj3zoHkBr1tP;J_MQd*I=I1-^^bFbW*@AH~A?vQGS(%&ZTeL}=%NKR}{)k2s3 zj}-DjX@clXEKt=fwMIVA*4olcioiN?)-v7`hXf_)GikkG8n~TzBs?PXE zf+H~c5XTfEU`BY6Em71D5lW6X)IppG 
zfCx>H&$FK2hUePwyv05*sXec58N$3mzBBJ{`z_k`qSkOir0H+VnJ%zYPzL6kJO1v7 zTa3#l`kvOD6X`6SOQr{@eIAk`&)vP>t@0^lJdjjyL3*#Uir8!ZGS8;VtOMi4vjlOv zTD^7h>ggm&EI{pgj``uMrav*n1zsa!rqqoeM?Y&|En!7{Q+ za9%vjugKg2d3BLFVz!Y-i0!Bu{>M9SUsSFo*HB)wR+mn_dliOVk;dLew;0Gn+)Hck zdQg$hpb-eSjD7tsT+66T5ocrT^8$U;V4N`xf}Cx<1_XvecQC!Fkx42dcFk4w{ytmi z^x`$JP#?EvoVKXkK6ZeU^3<^=AY+n9y?~SyMj;Xxoa(o*JM*Wu&-L!7gTuTlWcR1&6w2Q4vB<)wbymK(0T-?r~73=1W4nA zGwn(XBe>6}`YZ^Q3EnlYnn%NKYbMw?oD0#kKhQL3E_h2bD0|gQSo2W~-x1n_X6o0F zOo-r9Lj6=Iq7x^;URUU!o+)Wy40E+&N1U^O0I&==F z(gTE{$}c)rv*dPi!-Gn#t(<7;dXXQzcBo;|>^Nkyf_a#aW&}v$o@QfV49_cHZG`(j ztpfUAAiw`lPX;WBDr>1??`!|1x?W$)0*f^+Rn7WdGI;65w{pUbD>yak*ugp|a?37o z%jNbztfuhgW@-n*VH%o#tIFK7YU;q3Ht3S>)lv79UB(D&NSk$0qx|5R`|ntG_dsiNHS{cZhu{5%>eoX0p$<#+HV znR9W8`7hOQjyY6~t@RmciRJUl7-}uz*&>TCb@5BJfj;Jz31?g4ZS^36|p3JuNecsp6l}YB?Y~q{vIIT)e5|RBPme! zyxiq}QLdt6U-v&?Y%@Fv^)E@=omfbtFJrSVo1`-peN`a39bEKT9KQook8*ARxd7qp zFjx(qV%{D&9zL6r@S6CA2pz^x*;9@t)h0!Ax8P|hh3B@ zv`fvJ3YG&LExJEMRJTCUkNY|6 zwB|SG8}M2jZcdc80D%WR(=h|eLC~;uJfk1%?1@P zriTg!KjzjF6MfsNiPM#keE?1hROg9N!oAjd$;Y)Fvs8loa`WprRMi=E|>|idV5%UxX2Y zSm@&{J*tw&$f3~srQ>2PiWPYroO*s<#dF@ZutA9`p}>hkt?j2?L}#rbU^2a2h5jOk zTmBH&*v79>_is1U%`f)v#0W4uSK*E3YfRLdO?3-S%j~p+SQt?$-?I0cb~uYaVBmV} zhLpXTcC}{){RjE*i5CkBe9sM|JMnMUQVP$X8*i9jgd-(_vs|X}4Z&)b2v}a6zwFoh6v}jg|S4q7J%BT?_R2AFSfpR5ftBN z8^ZU}@-c@-_8b2R-0k^!jVvAAf`a0^?!DuYNrS==jALDUv6(s&h~B4+0sibkB@bTS z9eW%nVa!ma;+aI;5sx)p#%X0Et<6@~jhpr$En0lbYlSk9ej~=(ppO0!C>Y8Eir;H) zZ6}TH|698f#xS|UXqDw&sC@nI=vr z#y1}~wGd;%i6066SI=KOy(2Mf1EHAAc$e6gjebdJ8M}2RhcT782{$S%mtgo;Q`F_S z!<|3e;V1oq)%m@Sr(`Po5z=THqg}OFu`eww`b0Td^bOH!m0g39yIYeU`SdIww)Ev* zcSd=|rcLFgHuVz1;=!a0)~$oQQe>O@OJ2uh;1UNB;|W%(7RrFZd;Z3_AJ>f@P|y=B z{2bS4{I?lCu@-S$M8$n@1OsE*yE|wB#k*!Iyb*w#g|Bp&tFe_KIX|RjMebF=EEA^OxY;g$)~%( z#P}xmf&aO{FOD$vuhwO7t$*iWk013Gx?pv{6s7Q)_UADs)W|{;>7`HeB18LZ#$rZA zaaYF<4&zYs;>3M~i?52z)n^bxmiM-i$|xl+V^$_^vrc-FiAE(_;%l48z3r4p}ajGYPS=Sh8bB z(NDvr9}n5vmZYvcAo%ZHzTKVu|2|D-`@fc3+;Q8BkK)f|i;iDfdb&@icYc5OdD}go 
zKBxGHR<4QaXJ?op0xQ%M>h(7NHLJJ1l!Q*!R^u~E$7g$)L6y}zPw@rvsM1tqhH>0 mg4BUHM-6xm6F_TDfBVn8=)X#XfKNU6+6zxtKbLh*2~7Z9Uk301 literal 0 HcmV?d00001 diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md index 1b092885c1..c5d733dab8 100644 --- a/docs/source/community/versioning_policy.md +++ b/docs/source/community/versioning_policy.md @@ -18,7 +18,7 @@ For example: ## Release compatibility matrix -The table below is the release compatibility matrix for vLLM Ascend Plugin. +The table below is the release compatibility matrix for vLLM Ascend release. | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu | MindIE Turbo | |-------------|--------------|------------------|-------------|--------------------|--------------| @@ -38,6 +38,11 @@ The table below is the release compatibility matrix for vLLM Ascend Plugin. | v0.7.3.post1| v0.7.3 | >= 3.9, < 3.12 | 8.1.RC1 | 2.5.1 / 2.5.1 | 2.0rc1 | | v0.7.3 | v0.7.3 | >= 3.9, < 3.12 | 8.1.RC1 | 2.5.1 / 2.5.1 | 2.0rc1 | +For main branch of vLLM Ascend, we usually make it compatible with the latest vLLM release and a newer commit hash of vLLM. Please note that this table is usually updated. Please check it regularly. 
+| vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu | +|-------------|--------------|------------------|-------------|--------------------| +| main | v0.11.0/83f478bb19489b41e9d208b47b4bb5a95ac171ac | >= 3.10, < 3.12 | 8.3.RC1 | 2.7.1 / 2.7.1 | + ## Release cadence ### Release window diff --git a/docs/source/developer_guide/contribution/multi_node_test.md b/docs/source/developer_guide/contribution/multi_node_test.md index 1d78c8e353..1fdcc3c590 100644 --- a/docs/source/developer_guide/contribution/multi_node_test.md +++ b/docs/source/developer_guide/contribution/multi_node_test.md @@ -51,7 +51,7 @@ From the workflow perspective, we can see how the final test script is executed, # - no headless(have api server) decoder_host_index: [1] - # Add each node's vllm serve cli command just like you runs locally + # Add each node's vllm serve cli command just like you run locally deployment: - server_cmd: > diff --git a/docs/source/developer_guide/feature_guide/KV_Cache_Pool_Guide.md b/docs/source/developer_guide/feature_guide/KV_Cache_Pool_Guide.md new file mode 100644 index 0000000000..f29595f5c1 --- /dev/null +++ b/docs/source/developer_guide/feature_guide/KV_Cache_Pool_Guide.md @@ -0,0 +1,83 @@ +# KV Cache Pool + +## Why KV Cache Pool? + +Prefix caching is an important feature in LLM inference that can reduce prefill computation time drastically. + +However, the performance gain from prefix caching is highly dependent on cache hit rate, while cache hit rate can be limited if one only uses HBM for kv cache storage. + +Hence, KV Cache Pool is proposed to utilize various types of storages including HBM,DRAM and SSD, making a pool for KV Cache storage, while making the prefix of requests visible across all nodes, increasing the cache hit rate for all requests. 
+ +vLLM Ascend currently supports [MooncakeStore](https://github.com/kvcache-ai/Mooncake): one of the most recognized KV Cache storage engine; + +While one can utilize mooncake store in vLLM V1 engine by setting it as a remote backend of LMCache with GPU (see [Tutorial](https://github.com/LMCache/LMCache/blob/dev/examples/kv_cache_reuse/remote_backends/mooncakestore/README.md)), we find it would be better to integrate a connector that directly supports mooncake store and can utilize the data transfer strategy to one that is best fit to Huawei NPU hardware. + +Hence, we propose to integrate Mooncake Store with a brand new **MooncakeStoreConnectorV1**, which is indeed largly inspired by **LMCacheConnectorV1** (see the `How is MooncakestoreConnectorV1 Implemented?` section). + +## Usage + +vLLM Ascend Currently supports Mooncake Store for KV Cache Pool. To enable Mooncake Store, one needs to config `kv-transfer-config` and choose `MooncakeStoreConnector` as KV Connector. + +For step-by-step deployment and configuration, please refer to the KV Pool User Guide at `vllm-ascend/docs/source/user_guide/feature_guide/kv_pool_mooncake.md` + +## How it works? +The KV Cache Pool integrates multiple memory tiers (HBM, DRAM, SSD, etc.) through a connector-based architecture. + +Each connector implements a unified interface for storing, retrieving, and transferring KV blocks between tiers, depending on access frequency and hardware bandwidth. + +When combined with vLLM’s Prefix Caching mechanism, the pool enables efficient caching both locally (in HBM) and globally (via Mooncake), ensuring that frequently used prefixes remain hot while less frequently accessed KV data can spill over to lower-cost memory. + +### 1. Combining KV Cache Pool with HBM Prefix Caching +Prefix Caching with HBM is already supported by the vLLM V1 Engine. +By introducing KV Connector V1, users can seamlessly combine HBM-based Prefix Caching with Mooncake-backed KV Pool. 
+ + The user can enable both features simply by enabling Prefix Caching, which is enabled by default in vLLM V1 unless the --no_enable_prefix_caching flag is set, and setting up the KV Connector for KV Pool(e.g. the MooncakeStoreConnector) + +**Workflow**: + +1. The engine first checks for prefix hits in the HBM cache. + +2. After getting the number of hit tokens on HBM, it queries the KV Pool via the connector, if there is additional hits in KV Pool, we get the **additional blocks only** from KV Pool, and get the rest of the blocks directly from HBM to minimize the data transfer latency. + +3. After the KV Caches in KV Pool is load into HBM, the remaining process is the same as Prefix Caching in HBM. + +### 2. Combining KV Cache Pool with Mooncake PD Disaggregation + +When used together with Mooncake PD (Prefill-Decode) Disaggregation, the KV Cache Pool can further decouple prefill and decode stages across devices or nodes. + +Currently, we only perform put and get operation of KV Pool for **Prefiil Nodes**, and Decode Nodes get their KV Cache from Mooncake P2P KV Connector, i.e. MooncakeConnector. + + The key benefit of doing this is that we can keep the gain in performance by computing less with Prefix Caching from HBM and KV Pool for Prefill Nodes while not sacrificing the data transfer efficiency between Prefill and Decode nodes with P2P KV Connector that transfer KV Caches between NPU devices directly. + +To Enable this feature, we need to setup both Mooncake Connector and Mooncake Store connector with a Multi Connector, which is a KV Connector class provided by vLLM that can call multiple KV Connectors in specific order; + +For details, please also refer to the Mooncake Connector Store Deployment Guide. + +## How is MooncakestoreConnectorV1 Implemented? 
+**MooncakestoreConnectorV1** inherits the KV Connector V1 class in vLLM V1: through implementing the required methods defined in the KV connector V1 base class, one can integrate a third-party KV cache transfer/storage backend into the vLLM framework.
+
+MooncakeStoreConnectorV1 is also largely inspired by LMCacheConnectorV1 in terms of the `Lookup Engine`/`Lookup Client` design for looking up KV cache keys, and the `ChunkedTokenDatabase` class for processing tokens into prefix-aware hashes as well as other hashing related designs. On top of this, we have also added our own design including `KVTransferThread` that allows async `get` and `put` of KV caches with multi-threading, and NPU-related data transfer optimization such as removing the `LocalBuffer` in LMCache to remove redundant data transfer.
+
+The KV Connector methods that need to be implemented can be categorized into scheduler-side methods that are called in the V1 scheduler and worker-side methods that are called in the V1 worker, namely:
+### KV Connector Scheduler-Side Methods:
+`get_num_new_matched_tokens`: Get the prefix cache hit in number of tokens by looking up the KV pool.
+`update_states_after_alloc`: Update KVConnector state after temporary buffer alloc.
+`build_connector_meta`: Attach the connector metadata to the request object.
+`request_finished`: Once a request is finished, determine whether request blocks should be freed now or will be sent asynchronously and freed later.
+### Connector Worker-Side Methods:
+`register_kv_caches`: Register KV cache buffers needed for KV cache transfer.
+`start_load_kv`: Perform KV cache load operation that transfers KV cache from storage to device.
+`wait_for_layer_load`: Optional; Wait for layer load in layerwise + async KV load scenario.
+`save_kv_layer`: Optional; Do layerwise KV cache put into KV Pool.
+`wait_for_save`: Wait for KV Save to finish if async KV cache save/put.
+`get_finished`: Get requests that finished KV transfer, `done_sending` if `put` finished, `done_reciving` if `get` finished.
+
+## DFX
+1. When looking up a key in KV Pool, if we cannot find the key, there is no Cache Hit for this specific block; we return no hit for this block and do not look up further blocks for the current request.
+2. Similarly, when we are trying to put a block into KV Pool and fail, we do not put further blocks (subject to change).
+
+## Limitation
+
+1. Currently, Mooncake Store for vLLM-Ascend only supports DRAM as the storage for the KV Cache pool.
+
+2. For now, if we successfully looked up a key and found it exists, but failed to get it when calling KV Pool's get function, we just output a log indicating the get operation failed and keep going; hence, the accuracy of that specific request may be affected. We will handle this situation by falling back the request and re-computing everything assuming there's no prefix cache hit (or even better, reverting only one block and keeping the Prefix Caches before that).
diff --git a/docs/source/developer_guide/feature_guide/Multi_Token_Prediction.md b/docs/source/developer_guide/feature_guide/Multi_Token_Prediction.md
new file mode 100644
index 0000000000..27986aabbb
--- /dev/null
+++ b/docs/source/developer_guide/feature_guide/Multi_Token_Prediction.md
@@ -0,0 +1,112 @@
+# Multi Token Prediction (MTP)
+
+## Why We Need MTP
+MTP boosts inference performance by parallelizing the prediction of multiple tokens, shifting from single-token to multi-token generation. This approach significantly increases generation throughput and achieves multiplicative acceleration in inference speed—all without compromising output quality.
+ +## How to Use MTP +To enable MTP for DeepSeek-V3 models, add the following parameter when starting the service: + +`--speculative_config={"method": "deepseek_mtp", "num_speculative_tokens": 1, "disable_padded_drafter_batch": False}` + +- `num_speculative_tokens`: The number of speculative tokens which enable model to predict multiple tokens at once, if provided. It will default to the number in the draft model config if present, otherwise, it is required. +- `disable_padded_drafter_batch`: Disable input padding for speculative decoding. If set to True, speculative input batches can contain sequences of different lengths, which may only be supported by certain attention backends. This currently only affects the MTP method of speculation, default is False. + +## How It Works + +### Module Architecture + +``` +vllm_ascend +├── sample +│ ├── rejection_sample.py +├── spec_decode +│ ├── mtp_proposer.py +└─────────── +``` + +**1. sample** + +- *rejection_sample.py*: During decoding, the main model processes the previous round’s output token and the predicted token together (computing 1+k tokens simultaneously). The first token is always correct, while the second token—referred to as the **bonus token**—is uncertain since it is derived from speculative prediction, thus We employ **Greedy Strategy** and **Rejection Sampling Strategy** to determine whether the bonus token should be accepted. The module structure consists of an `AscendRejectionSampler` class with a forward method that implements the specific sampling logic. + +``` +rejection_sample.py +├── AscendRejectionSampler +│ ├── forward +``` + +**2. spec_decode** + +This section encompasses the model preprocessing for spec-decode, primarily structured as follows: it includes loading the model, executing a dummy run, and generating token ids. These steps collectively form the model data construction and forward invocation for a single spec-decode operation. 
+- *mtp_proposer.py*: Configure vLLM-Ascend to use speculative decoding where proposals are generated by the deepseek mtp layer.
+
+```
+mtp_proposer.py
+├── Proposer
+│ ├── load_model
+│ ├── dummy_run
+│ ├── generate_token_ids
+│ ├── _prepare_inputs
+│ ├── _propose
+```
+
+### Algorithm
+
+**1. Reject_Sample**
+- *Greedy Strategy*
+
+Verify whether the token generated by the main model matches the speculative token predicted by MTP in the previous round. If they match exactly, accept the bonus token; otherwise, reject it and any subsequent tokens derived from that speculation.
+
+- *Rejection Sampling Strategy*
+
+This method introduces stochasticity in rejection sampling.
+
+For each draft token, acceptance is determined by verifying whether the inequality `P_target / P_draft ≥ U` holds, where `P_target` represents the probability assigned to the current draft token by the target model, `P_draft` denotes the probability assigned by the draft model, and `U` is a random number sampled uniformly from the interval [0, 1).
+
+The decision logic for each draft token is as follows: if the inequality `P_target / P_draft ≥ U` holds, the draft token is accepted as output; conversely, if `P_target / P_draft < U`, the draft token is rejected.
+
+When a draft token is rejected, a recovery sampling process is triggered where a "recovered token" is resampled from the adjusted probability distribution defined as `Q = max(P_target - P_draft, 0)`. In the current MTP implementation, since `P_draft` is not provided and defaults to 1, the formulas simplify such that token acceptance occurs when `P_target ≥ U`, and the recovery distribution becomes `Q = max(P_target - 1, 0)`.
+
+**2. Performance**
+
+If the bonus token is accepted, the MTP model performs inference for (num_speculative + 1) tokens, including the original main model output token and the bonus token. If rejected, inference is performed for fewer tokens, depending on how many tokens were accepted.
+ +## DFX + +### Method Validation + +- Currently, the spec_decode scenario only supports methods such as ngram, eagle, eagle3, and deepseek_mtp. If an incorrect parameter is passed for the method, the code will raise an error to alert the user that an incorrect method was provided. + +``` +def get_spec_decode_method(method, + vllm_config, + device, + runner, + is_torchair_graph=False): + if method == "ngram": + return NgramProposer(vllm_config, device, runner) + elif method in ["eagle", "eagle3"]: + return EagleProposer(vllm_config, device, runner) + elif method == 'deepseek_mtp': + if is_torchair_graph: + return TorchairMtpProposer(vllm_config, device, runner) + return MtpProposer(vllm_config, device, runner) + else: + raise ValueError("Unknown speculative decoding method: " + f"{method}") +``` + +### Integer Validation +- The current npu_fused_infer_attention_score operator only supports integers less than 16 per decode round. Therefore, the maximum supported value for MTP is 15. If a value greater than 15 is provided, the code will raise an error and alert the user. + +``` +if self.speculative_config: + spec_token_num = self.speculative_config.num_speculative_tokens + self.decode_threshold += spec_token_num + assert self.decode_threshold <= 16, f"decode_threshold exceeded \ + npu_fused_infer_attention_score TND layout's limit of 16, \ + got {self.decode_threshold}" +``` + +## Limitation +- Due to the fact that only a single layer of weights is exposed in DeepSeek's MTP, the accuracy and performance are not effectively guaranteed in scenarios where MTP > 1 (especially MTP ≥ 3). Moreover, due to current operator limitations, MTP supports a maximum of 15. +- In the fullgraph mode with MTP > 1, the capture size of each aclgraph must be an integer multiple of (num_speculative_tokens + 1). 
diff --git a/docs/source/developer_guide/feature_guide/disaggregated_prefill.md b/docs/source/developer_guide/feature_guide/disaggregated_prefill.md new file mode 100644 index 0000000000..46d3dbe9aa --- /dev/null +++ b/docs/source/developer_guide/feature_guide/disaggregated_prefill.md @@ -0,0 +1,103 @@ +# Disaggregated-prefill + +## Why disaggregated-prefill? + +This feature addresses the need to optimize the **Time Per Output Token (TPOT)** and **Time To First Token (TTFT)** in large-scale inference tasks. The motivation is two-fold: + +1. **Adjusting Parallel Strategy and Instance Count for P and D Nodes** + Using the disaggregated-prefill strategy, this feature allows the system to flexibly adjust the parallelization strategy (e.g., data parallelism (dp), tensor parallelism (tp), and expert parallelism (ep)) and the instance count for both P (Prefiller) and D (Decoder) nodes. This leads to better system performance tuning, particularly for **TTFT** and **TPOT**. + +2. **Optimizing TPOT** + Without disaggregated-prefill strategy, prefill tasks are inserted during decoding, which results in inefficiencies and delays. disaggregated-prefill solves this by allowing for better control over the system’s **TPOT**. By managing chunked prefill tasks effectively, the system avoids the challenge of determining the optimal chunk size and provides more reliable control over the time taken for generating output tokens. + +--- + +## Usage + +vLLM Ascend currently supports two types of connectors for handling KV cache management: +- **MooncakeConnector**: D nodes pull KV cache from P nodes. +- **MooncakeLayerwiseConnector**: P nodes push KV cache to D nodes in a layered manner. 
+ +For step-by-step deployment and configuration, refer to the following guide: +[https://vllm-ascend.readthedocs.io/en/latest/tutorials/multi_node_pd_disaggregation_mooncake.html](https://vllm-ascend.readthedocs.io/en/latest/tutorials/multi_node_pd_disaggregation_mooncake.html) + +--- + +## How It Works + +### 1. Design Approach + +Under the disaggregated-prefill, a global proxy receives external requests, forwarding prefill to P nodes and decode to D nodes; the KV cache (key–value cache) is exchanged between P and D nodes via peer-to-peer (P2P) communication. + +### 2. Implementation Design + +Our design diagram is shown below, illustrating the pull and push schemes respectively. +![alt text](../../assets/disaggregated_prefill_pull.png) +![alt text](../../assets/disaggregated_prefill_push.png) + +#### Mooncake Connector: + +1. The request is sent to the Proxy’s `_handle_completions` endpoint. +2. The Proxy calls `select_prefiller` to choose a P node and forwards the request, configuring `kv_transfer_params` with `do_remote_decode=True`, `max_tokens=1`, and `min_tokens=1`. +3. After the P node’s scheduler finishes prefill, `update_from_output` invokes the schedule connector’s `request_finished` to defer KV cache release, constructs `kv_transfer_params` with `do_remote_prefill=True`, and returns to the Proxy. +4. The Proxy calls `select_decoder` to choose a D node and forwards the request. +5. On the D node, the scheduler marks the request as `RequestStatus.WAITING_FOR_REMOTE_KVS`, pre-allocates KV cache, calls `kv_connector_no_forward` to pull the remote KV cache, then notifies the P node to release KV cache and proceeds with decoding to return the result. + +#### Mooncake Layerwise Connector: + +1. The request is sent to the Proxy’s `_handle_completions` endpoint. +2. The Proxy calls `select_decoder` to choose a D node and forwards the request, configuring `kv_transfer_params` with `do_remote_prefill=True` and setting the `metaserver` endpoint. +3. 
On the D node, the scheduler uses `kv_transfer_params` to mark the request as `RequestStatus.WAITING_FOR_REMOTE_KVS`, pre-allocates KV cache, then calls `kv_connector_no_forward` to send a request to the metaserver and waits for the KV cache transfer to complete. +4. The Proxy’s `metaserver` endpoint receives the request, calls `select_prefiller` to choose a P node, and forwards it with `kv_transfer_params` set to `do_remote_decode=True`, `max_tokens=1`, and `min_tokens=1`. +5. During processing, the P node’s scheduler pushes KV cache layer-wise; once all layers pushing is complete, it releases the request and notifies the D node to begin decoding. +6. The D node performs decoding and returns the result. + +### 3. Interface Design + +Taking MooncakeConnector as an example, the system is organized into three primary classes: +- **MooncakeConnector**: Base class that provides core interfaces. +- **MooncakeConnectorScheduler**: Interface for scheduling the connectors within the engine core, responsible for managing KV cache transfer requirements and completion. +- **MooncakeConnectorWorker**: Interface for managing KV cache registration and transfer in worker processes. + +### 4. Specifications Design + +This feature is flexible and supports various configurations, including setups with MLA and GQA models. It is compatible with A2 and A3 hardware configurations and facilitates scenarios involving both equal and unequal TP setups across multiple P and D nodes. + +| Feature | Status | +|-------------------------------|----------------| +| A2 | 🟢 Functional | +| A3 | 🟢 Functional | +| equal TP configuration | 🟢 Functional | +| unequal TP configuration | 🟢 Functional | +| MLA | 🟢 Functional | +| GQA | 🟢 Functional | + +- 🟢 Functional: Fully operational, with ongoing optimizations. +- 🔵 Experimental: Experimental support, interfaces and functions may change. +- 🚧 WIP: Under active development, will be supported soon. 
+- 🟡 Planned: Scheduled for future implementation (some may have open PRs/RFCs). +- 🔴 NO plan/Deprecated: No plan or deprecated by vLLM. + +--- + +## DFX Analysis + +### 1. Config Parameter Validation + +Validate KV transfer config by checking whether the kv_connector type is supported and whether kv_connector_module_path exists and is loadable. On transfer failures, emit clear error logs for diagnostics. + +### 2. Port Conflict Detection + +Before startup, perform a port-usage check on configured ports (e.g., rpc_port, metrics_port, http_port/metaserver) by attempting to bind. If a port is already in use, fail fast and log an error. + +### 3. PD Ratio Validation + +Under non-symmetric PD scenarios, validate the P-to-D tp ratio against expected and scheduling constraints to ensure correct and reliable operation. + +--- + +## Limitations + +- Heterogeneous P and D nodes are not supported—for example, running P nodes on A2 and D nodes on A3. + +- In non-symmetric TP configurations, only cases where the P nodes have a higher TP degree than the D nodes and the P TP count is an integer multiple of the D TP count are supported (i.e., P_tp > D_tp and P_tp % D_tp = 0). diff --git a/docs/source/developer_guide/feature_guide/eplb_swift_balancer.md b/docs/source/developer_guide/feature_guide/eplb_swift_balancer.md new file mode 100644 index 0000000000..af6e90db17 --- /dev/null +++ b/docs/source/developer_guide/feature_guide/eplb_swift_balancer.md @@ -0,0 +1,222 @@ +# Expert Parallelism Load Balancer (EPLB) + +## Why We Need EPLB? +When using Expert Parallelism (EP), different experts are assigned to different NPUs. Given that the load of various experts may vary depending on the current workload, it is crucial to maintain balanced loads across different NPUs. We adopt a redundant experts strategy by duplicating heavily-loaded experts. Then, we heuristically pack these duplicated experts onto NPUs to ensure load balancing across them. 
Moreover, thanks to the group-limited expert routing used in MoE models, we also attempt to place experts of the same group on the same node to reduce inter-node data traffic, whenever possible. + +To facilitate reproduction and deployment, Vllm Ascend supported deployed EP load balancing algorithm in `vllm_ascend/eplb/core/policy`. The algorithm computes a balanced expert replication and placement plan based on the estimated expert loads. Note that the exact method for predicting expert loads is outside the scope of this repository. A common method is to use a moving average of historical statistics. + +![eplb](../../assets/eplb.png) +## How to Use EPLB? +Please refer to the EPLB section of the user guide for detailed information: [How to Use EPLB](../../user_guide/feature_guide/eplb_swift_balancer.md) + +## How It Works? +**EPLB Module Architecture** + +``` +vllm_ascend +├── eplb +│ ├── adaptor +│ │ ├── abstract_adaptor.py +│ │ ├── vllm_adaptor.py +│ ├── core +│ │ ├── policy +│ │ │ ├── policy_abstract.py +│ │ │ ├── policy_dynamic_ep.py +│ │ │ ├── policy_dynamic_ep_v2.py +│ │ │ ├── policy_factory.py +│ │ │ ├── policy_flashlb.py +│ │ ├── eplb_device_transfer_loader.py +│ │ ├── eplb_utils.py +│ │ ├── eplb_worker.py +│ ├── eplb_updator.py +│ ├── utils.py +└─────────── +``` + +**1. Adaptor Module** +*Handles registration and adaptation for different MoE model types* +- `abstract_adaptor.py` + Abstract base class defining unified registration interfaces for EPLB adapters +- `vllm_adaptor.py` + Implementation supporting Qwen3-MoE and DeepSeek models, standardizing parameter handling for policy algorithms + +**2. 
Core Module** +*Implements core algorithms, updates, and asynchronous processing* +- **Policy Submodule** + *Load balancing algorithms with factory pattern instantiation* + - `policy_abstract.py` + Abstract class for load balancing strategy interfaces + - `policy_dynamic_ep.py` + Default implementation of open-source EPLB paper algorithm + - `policy_dynamic_ep_v2.py` + Enhanced version optimizing expert swaps for low-bandwidth devices (e.g., A2) + - `policy_flashlb.py` + Threshold-based adjustment reducing operational costs through layer-wise fluctuation detection + - `policy_factory.py` + Strategy factory for automatic algorithm instantiation + +- `eplb_device_transfer_loader.py` + Manages expert table/weight transmission and updates +- `eplb_utils.py` + Utilities for expert table initialization and mapping +- `eplb_worker.py` + Asynchronous algorithm orchestration and result processing + +**3. System Components** +- `eplb_updator.py` + Central coordinator for load balancing during inference workflows +- `utils.py` + General utilities for EPLB interface registration + +*Key Optimizations:* +1. Maintained original structure while improving technical clarity +2. Standardized terminology +3. Enhanced algorithm differentiation through concise descriptors +4. Improved scoping through hierarchical presentation +5. Preserved file/class relationships while optimizing readability + +### Default Algorithm +#### Hierarchical Load Balancing +When the number of server nodes evenly divides the number of expert groups, we use the hierarchical load balancing policy to leverage group-limited expert routing. We first pack the expert groups onto nodes evenly, ensuring balanced loads across different nodes. Then, we replicate the experts within each node. Finally, we pack the replicated experts onto individual NPUs to ensure load balancing across them. The hierarchical load balancing policy can be used in the prefilling stage with a smaller expert-parallel size. 
+ +#### Global Load Balancing +In other cases, we use the global load balancing policy, which replicates experts globally regardless of expert groups, and packs the replicated experts onto individual NPUs. This policy can be adopted in the decoding stage with a larger expert-parallel size. + +### Add a New EPLB Policy +If you want to add a new eplb policy to vllm_ascend, you must follow these steps: +1. Inherit the `EplbPolicy` abstract class of `policy_abstract.py` and override the `rebalance_experts` interface, ensuring consistent input parameters `current_expert_table`, `expert_workload` and return types `newplacement`. +For example: + +```python +class RandomLoadBalance(EplbPolicy): + + def __init__(self, config: DynamicConfig): + super().__init__(config) + + def rebalance_experts(self, current_expert_table, expert_workload): + new_table = copy.deepcopy(current_expert_table) + num_layers = len(current_expert_table) + + for i in range(num_layers): + # randomly choose two card + # indices = random.sample(range(num_card), 2) + indices = [3, 1] + + # swap redundant experts + expert_id_to_exchange = new_table[i][indices[0]][-1].clone() + new_table[i][indices[0]][-1] = new_table[i][indices[1]][-1] + new_table[i][indices[1]][-1] = expert_id_to_exchange + + return 1, [-i for i in range(num_layers)], new_table +``` + +2. To add a new EPLB algorithm, include the policy type and its corresponding implementation class in the `PolicyFactory` of `policy_factory.py`. + +### Add a New MoE Model +**Implementation Guide for Model Integration** + +1. **Adapter File Modification** + - Inherit or modify `vllm_ascend/eplb/adaptor/vllm_adaptor.py` + - Add processing logic for key parameters: + - `num_dense_layers` + - `global_expert_num` + - `num_roe_layers` + - Ensure parameter synchronization in the `model_register` function. 
+ + For example: + + Modify `__init__` of `vllm_adaptor.py` to add a new moe model eplb params: + + ```python + if self.model.config.model_type == "qwen3_moe": + self.num_dense_layers = 0 + self.global_expert_num = self.model.config.num_experts + ``` + + Modify `model_register` of `vllm_adaptor.py` to register eplb params for new moe model: + + ```python + if config.model_type == "qwen3_moe": + model.num_moe_layers = config.num_hidden_layers + ``` + +2. **MoE Feature Integration** + - Extend `vllm_ascend/eplb/utils.py` with MoE-specific methods + - Implement required functionality for expert routing or weight management + +3. **Registration Logic Update** + - Add patch logic within the `model_register` function + - Maintain backward compatibility with existing model types + +4. **Validation & Testing** + - Verify parameter consistency across layers + - Test cross-device communication for expert tables + - Benchmark against baseline implementations (e.g., Qwen3-MoE) + +*Key Implementation Notes:* +- Preserve existing interface contracts in abstract classes +- Use decorators for non-intrusive patch integration +- Leverage `eplb_utils.py` for shared expert mapping operations +## DFX +### Parameter Validation +#### Integer Parameters +All integer input parameters must explicitly specify their maximum and minimum values and be subject to valid value validation. For example, `num_iterations_eplb_update` must be greater than 0: + +```python + @staticmethod + def check_iterations(iterations): + if not isinstance(iterations, int): + raise TypeError(f"The {iterations} is not int.") + if iterations <= 0: + raise ValueError( + f"The {iterations} can not less than or equal to 0.") + if iterations > sys.maxsize: + raise ValueError( + f"The {iterations} can not large than {sys.maxsize}") +``` + +#### File Path +The file path for EPLB must be checked for legality, such as whether the file path is valid and whether it has appropriate read and write permissions. 
For example:
+
+```python
+    @staticmethod
+    def check_expert_map_path(expert_map):
+        if expert_map is None:
+            return
+        if not isinstance(expert_map, str):
+            raise TypeError("The expert_map is not str.")
+        if not expert_map.strip():
+            raise ValueError("The expert_map is empty.")
+        _, ext = os.path.splitext(expert_map)
+        if ext.lower() != ".json":
+            raise TypeError("The expert_map is not json.")
+        if not os.path.exists(expert_map):
+            raise ValueError("The expert_map does not exist.")
+        try:
+            with open(expert_map, "r", encoding='utf-8') as f:
+                f.read()
+        except Exception as e:
+            raise IOError(
+                f"Failed to read expert info from {expert_map}, please check the reading permission of {expert_map} : {e}"
+            )
+
+```
+
+### Function Specifications
+#### Initialization Function
+All EPLB parameters must be initialized by default during initialization, with specified parameter types and default values for proper handling.
+
+#### General Functions
+All method arguments must specify parameter types and default values, and functions must include default return value handling for default arguments. It is recommended to use `try-except` blocks to handle the function body, specifying the type of exception captured and the failure handling (e.g., logging exceptions or returning a failure status).
+
+### Consistency
+#### Expert Map
+The expert map must be globally unique during initialization and update. In a multi-node scenario during initialization, distributed communication should be used to verify the consistency of expert maps across each rank. If they are inconsistent, the user should be notified which ranks have inconsistent maps.
+During the update process, if only a few layers or the expert table of a certain rank has been changed, the updated expert table must be synchronized with the EPLB's context to ensure global consistency.
+ +#### Expert Weight +When updating expert weights, ensure that the memory allocated for the expert weights has been released, or that the expert (referring to the old version) is no longer in use. + +## Limitation +Before using EPLB, start the script and add `export DYNAMIC_EPLB="true"`. +Before performing load data collection (or performance data collection), start the script and add `export EXPERT_MAP_RECORD="true"`. diff --git a/docs/source/developer_guide/feature_guide/index.md b/docs/source/developer_guide/feature_guide/index.md index 6f7a5d31c6..91f6badb4b 100644 --- a/docs/source/developer_guide/feature_guide/index.md +++ b/docs/source/developer_guide/feature_guide/index.md @@ -7,5 +7,9 @@ This section provides an overview of the features implemented in vLLM Ascend. De :maxdepth: 1 patch ModelRunner_prepare_inputs +disaggregated_prefill +eplb_swift_balancer.md +Multi_Token_Prediction ACL_Graph +KV_Cache_Pool_Guide ::: diff --git a/docs/source/developer_guide/performance/optimization_and_tuning.md b/docs/source/developer_guide/performance/optimization_and_tuning.md index fd5947031b..953ec389a2 100644 --- a/docs/source/developer_guide/performance/optimization_and_tuning.md +++ b/docs/source/developer_guide/performance/optimization_and_tuning.md @@ -70,7 +70,7 @@ Make sure your vLLM and vllm-ascend are installed after your python configuratio #### 1.1. Install optimized `python` -Python supports **LTO** and **PGO** optimization starting from version `3.6` and above, which can be enabled at compile time. And we have offered optimized `python` packages directly to users for the sake of convenience. You can also reproduce the `python` built following this [tutorial](https://www.hiascend.com/document/detail/zh/Pytorch/600/ptmoddevg/trainingmigrguide/performance_tuning_0063.html) according to your specific scenarios. +Python supports **LTO** and **PGO** optimization starting from version `3.6` and above, which can be enabled at compile time. 
And we have offered optimized `python` packages directly to users for the sake of convenience. You can also reproduce the `python` build following this [tutorial](https://www.hiascend.com/document/detail/zh/Pytorch/600/ptmoddevg/trainingmigrguide/performance_tuning_0063.html) according to your specific scenarios. ```{code-block} bash :substitutions: @@ -116,7 +116,7 @@ export LD_PRELOAD=/usr/lib/"$(uname -i)"-linux-gnu/libjemalloc.so.2 $LD_PRELOAD #### 2.2. Tcmalloc -**Tcmalloc (Thread Counting Malloc)** is a universal memory allocator that improves overall performance while ensuring low latency by introducing a multi-level cache structure, reducing mutex competition and optimizing large object processing flow. Find more details [here](https://www.hiascend.com/document/detail/zh/Pytorch/700/ptmoddevg/trainingmigrguide/performance_tuning_0068.html). +**Tcmalloc (Thread Caching Malloc)** is a universal memory allocator that improves overall performance while ensuring low latency by introducing a multi-level cache structure, reducing mutex competition and optimizing large object processing flow. Find more details [here](https://www.hiascend.com/document/detail/zh/Pytorch/700/ptmoddevg/trainingmigrguide/performance_tuning_0068.html). ```{code-block} bash :substitutions: diff --git a/docs/source/faqs.md b/docs/source/faqs.md index f997eb9fcf..3145466e77 100644 --- a/docs/source/faqs.md +++ b/docs/source/faqs.md @@ -21,7 +21,7 @@ Below series are NOT supported yet: - Atlas 200I A2 (Ascend-cann-kernels-310b) unplanned yet - Ascend 910, Ascend 910 Pro B (Ascend-cann-kernels-910) unplanned yet -From a technical view, vllm-ascend support would be possible if the torch-npu is supported. Otherwise, we have to implement it by using custom ops. We are also welcome to join us to improve together. +From a technical view, vllm-ascend support would be possible if the torch-npu is supported. Otherwise, we have to implement it by using custom ops. 
We also welcome you to join us to improve together. ### 2. How to get our docker containers? @@ -38,7 +38,7 @@ docker pull quay.nju.edu.cn/ascend/vllm-ascend:$TAG ``` #### Load Docker Images for offline environment -If you want to use container image for offline environments (no internet connection), you need to download container image in a environment with internet access: +If you want to use container image for offline environments (no internet connection), you need to download container image in an environment with internet access: **Exporting Docker images:** @@ -74,7 +74,7 @@ There are many channels that you can communicate with our community developers / - Submit a GitHub [issue](https://github.com/vllm-project/vllm-ascend/issues?page=1). - Join our [weekly meeting](https://docs.google.com/document/d/1hCSzRTMZhIB8vRq1_qOOjx4c9uYUxvdQvDsMV2JcSrw/edit?tab=t.0#heading=h.911qu8j8h35z) and share your ideas. -- Join our [WeChat](https://github.com/vllm-project/vllm-ascend/issues/227) group and ask your quenstions. +- Join our [WeChat](https://github.com/vllm-project/vllm-ascend/issues/227) group and ask your questions. - Join our ascend channel in [vLLM forums](https://discuss.vllm.ai/c/hardware-support/vllm-ascend-support/6) and publish your topics. ### 5. What features does vllm-ascend V1 supports? @@ -142,7 +142,7 @@ In scenarios where NPUs have limited high bandwidth memory (HBM) capacity, dynam - **Configure `PYTORCH_NPU_ALLOC_CONF`**: Set this environment variable to optimize NPU memory management. For example, you can use `export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True` to enable virtual memory feature to mitigate memory fragmentation caused by frequent dynamic memory size adjustments during runtime. See details in: [PYTORCH_NPU_ALLOC_CONF](https://www.hiascend.com/document/detail/zh/Pytorch/700/comref/Envvariables/Envir_012.html). ### 14. Failed to enable NPU graph mode when running DeepSeek. 
-You may encounter the following error if running DeepSeek with NPU graph mode is enabled. The allowed number of queries per KV when enabling both MLA and Graph mode is {32, 64, 128}. **Thus this is not supported for DeepSeek-V2-Lite**, as it only has 16 attention heads. The NPU graph mode support on DeepSeek-V2-Lite will be implemented in the future. +Enabling NPU graph mode for DeepSeek may trigger an error. This is because when both MLA and NPU graph mode are active, the number of queries per KV head must be 32, 64, or 128. However, DeepSeek-V2-Lite has only 16 attention heads, which results in 16 queries per KV—a value outside the supported range. Support for NPU graph mode on DeepSeek-V2-Lite will be added in a future update. And if you're using DeepSeek-V3 or DeepSeek-R1, please make sure after the tensor parallel split, num_heads/num_kv_heads is {32, 64, 128}. diff --git a/docs/source/index.md b/docs/source/index.md index 940a619b4b..8c087447a8 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -25,7 +25,7 @@ vLLM Ascend plugin (vllm-ascend) is a community maintained hardware plugin for r This plugin is the recommended approach for supporting the Ascend backend within the vLLM community. It adheres to the principles outlined in the [[RFC]: Hardware pluggable](https://github.com/vllm-project/vllm/issues/11162), providing a hardware-pluggable interface that decouples the integration of the Ascend NPU with vLLM. -By using vLLM Ascend plugin, popular open-source models, including Transformer-like, Mixture-of-Expert, Embedding, Multi-modal LLMs can run seamlessly on the Ascend NPU. +By using vLLM Ascend plugin, popular open-source models, including Transformer-like, Mixture-of-Experts, Embedding, Multi-modal LLMs can run seamlessly on the Ascend NPU. 
## Documentation diff --git a/docs/source/tutorials/DeepSeek-V3.2-Exp.md b/docs/source/tutorials/DeepSeek-V3.2-Exp.md index c3e7cbf65b..415134f4ed 100644 --- a/docs/source/tutorials/DeepSeek-V3.2-Exp.md +++ b/docs/source/tutorials/DeepSeek-V3.2-Exp.md @@ -32,13 +32,13 @@ If you want to deploy multi-node environment, you need to verify multi-node comm :::::{tab-set} ::::{tab-item} Use deepseek-v3.2 docker image -Currently, we provide the all-in-one images `quay.io/ascend/vllm-ascend:v0.11.0rc0-deepseek-v3.2-exp`(for Atlas 800 A2) and `quay.io/ascend/vllm-ascend:v0.11.0rc0-a3-deepseek-v3.2-exp`(for Atlas 800 A3). +In `vllm-ascend:v0.11.0rc0` release, we provide the all-in-one images `quay.io/ascend/vllm-ascend:v0.11.0rc0-deepseek-v3.2-exp`(for Atlas 800 A2) and `quay.io/ascend/vllm-ascend:v0.11.0rc0-a3-deepseek-v3.2-exp`(for Atlas 800 A3). Refer to [using docker](../installation.md#set-up-using-docker) to set up environment using Docker, remember to replace the image with deepseek-v3.2 docker image. :::{note} -The image is based on a specific version and will not continue to release new version. -Only AArch64 architecture are supported currently due to extra operator's installation limitations. +- The image is based on a specific version `vllm-ascend:v0.11.0rc0` and will not continue to release new version. Move to another tab `Use vllm-ascend docker image` for latest support of deepseek-v3.2 on vllm-ascend. +- Only AArch64 architecture is supported currently due to extra operator's installation limitations. ::: :::: @@ -66,23 +66,7 @@ wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/custom_ pip install custom_ops-1.0-cp311-cp311-linux_aarch64.whl ``` -3. Download and install `MLAPO`. - -```shell -wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/CANN-custom_ops-mlapo-linux.aarch64.run -# please set a custom install-path, here take `/`vllm-workspace/CANN` as example. 
-chmod +x ./CANN-custom_ops-mlapo-linux.aarch64.run -./CANN-custom_ops-mlapo-linux.aarch64.run --quiet --install-path=/vllm-workspace/CANN -wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/torch_npu-2.7.1%2Bgitb7c90d0-cp311-cp311-linux_aarch64.whl -pip install torch_npu-2.7.1+gitb7c90d0-cp311-cp311-linux_aarch64.whl -wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/libopsproto_rt2.0.so -cp libopsproto_rt2.0.so /usr/local/Ascend/ascend-toolkit/8.2.RC1/opp/built-in/op_proto/lib/linux/aarch64/libopsproto_rt2.0.so -# Don't forget to replace `/vllm-workspace/CANN/` to the custom path you set before. -source /vllm-workspace/CANN/vendors/customize/bin/set_env.bash -export LD_PRELOAD=/vllm-workspace/CANN/vendors/customize/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so:${LD_PRELOAD} -``` - -For `A2` image, you should change all `wget` commands as above, and replace `A3` with `A2` release file. +For `A2` image: 1. Start the docker image on your node, refer to [using docker](../installation.md#set-up-using-docker). @@ -98,22 +82,6 @@ wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a2/custom_ pip install custom_ops-1.0-cp311-cp311-linux_aarch64.whl ``` -3. Download and install `MLAPO`. - -```shell -wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a2/CANN-custom_ops-mlapo-linux.aarch64.run -# please set a custom install-path, here take `/`vllm-workspace/CANN` as example. 
-chmod +x ./CANN-custom_ops-mlapo-linux.aarch64.run -./CANN-custom_ops-mlapo-linux.aarch64.run --quiet --install-path=/vllm-workspace/CANN -wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a2/torch_npu-2.7.1%2Bgitb7c90d0-cp311-cp311-linux_aarch64.whl -pip install torch_npu-2.7.1+gitb7c90d0-cp311-cp311-linux_aarch64.whl -wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a2/libopsproto_rt2.0.so -cp libopsproto_rt2.0.so /usr/local/Ascend/ascend-toolkit/8.2.RC1/opp/built-in/op_proto/lib/linux/aarch64/libopsproto_rt2.0.so -# Don't forget to replace `/vllm-workspace/CANN/` to the custom path you set before. -source /vllm-workspace/CANN/vendors/customize/bin/set_env.bash -export LD_PRELOAD=/vllm-workspace/CANN/vendors/customize/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so:${LD_PRELOAD} -``` - :::: ::::{tab-item} Build from source diff --git a/docs/source/tutorials/multi_node_pd_disaggregation_mooncake.md b/docs/source/tutorials/multi_node_pd_disaggregation_mooncake.md index fefb86f2f7..1db83e071f 100644 --- a/docs/source/tutorials/multi_node_pd_disaggregation_mooncake.md +++ b/docs/source/tutorials/multi_node_pd_disaggregation_mooncake.md @@ -57,23 +57,14 @@ for i in {0..15}; do hccn_tool -i $i -ping -g address x.x.x.x;done Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI. First, we need to obtain the Mooncake project. Refer to the following command: ```shell -git clone https://github.com/kvcache-ai/Mooncake.git +git clone -b v0.3.7.post2 --depth 1 https://github.com/kvcache-ai/Mooncake.git ``` -Update and install Python - -```shell -apt-get update -apt-get install python3 -``` - -Modify Mooncake compilation option +(Optional) Replace go install url if the network is poor ```shell cd Mooncake -vi mooncake-common/common.cmake -# find this row and set USE_ASCEND_DIRECT ON. 
-option(USE_ASCEND_DIRECT "option for using ascend npu with adxl engine" ON) +sed -i 's|https://go.dev/dl/|https://golang.google.cn/dl/|g' dependencies.sh ``` Install mpi @@ -93,7 +84,7 @@ Compile and install ```shell mkdir build cd build -cmake .. +cmake .. -DUSE_ASCEND_DIRECT=ON make -j make install ``` diff --git a/docs/source/tutorials/multi_npu.md b/docs/source/tutorials/multi_npu.md index 80a0929e4b..3dedc9723e 100644 --- a/docs/source/tutorials/multi_npu.md +++ b/docs/source/tutorials/multi_npu.md @@ -1,4 +1,4 @@ -# Multi-NPU (QwQ 32B) +# Multi-NPU (QwQ-32B) ## Run vllm-ascend on Multi-NPU diff --git a/docs/source/tutorials/multi_npu_moge.md b/docs/source/tutorials/multi_npu_moge.md index 57ff41e26b..e426c0f337 100644 --- a/docs/source/tutorials/multi_npu_moge.md +++ b/docs/source/tutorials/multi_npu_moge.md @@ -1,4 +1,4 @@ -# Multi-NPU (Pangu Pro MoE) +# Multi-NPU (Pangu-Pro-MoE) ## Run vllm-ascend on Multi-NPU diff --git a/docs/source/tutorials/multi_npu_quantization.md b/docs/source/tutorials/multi_npu_quantization.md index 7e664b2b75..23b183dbd1 100644 --- a/docs/source/tutorials/multi_npu_quantization.md +++ b/docs/source/tutorials/multi_npu_quantization.md @@ -1,4 +1,4 @@ -# Multi-NPU (QwQ 32B W8A8) +# Multi-NPU (QwQ-32B-W8A8) ## Run Docker Container :::{note} diff --git a/docs/source/tutorials/single_npu.md b/docs/source/tutorials/single_npu.md index 0759e3ede8..4b10d009a9 100644 --- a/docs/source/tutorials/single_npu.md +++ b/docs/source/tutorials/single_npu.md @@ -1,4 +1,4 @@ -# Single NPU (Qwen3 8B) +# Single NPU (Qwen3-8B) ## Run vllm-ascend on Single NPU diff --git a/docs/source/tutorials/single_npu_qwen2.5_vl.md b/docs/source/tutorials/single_npu_qwen2.5_vl.md index 45aeeaa764..2454e0c710 100644 --- a/docs/source/tutorials/single_npu_qwen2.5_vl.md +++ b/docs/source/tutorials/single_npu_qwen2.5_vl.md @@ -1,4 +1,4 @@ -# Single NPU (Qwen2.5-VL 7B) +# Single NPU (Qwen2.5-VL-7B) ## Run vllm-ascend on Single NPU diff --git 
a/docs/source/tutorials/single_npu_qwen2_audio.md b/docs/source/tutorials/single_npu_qwen2_audio.md index 94d86c5a9e..e093e84511 100644 --- a/docs/source/tutorials/single_npu_qwen2_audio.md +++ b/docs/source/tutorials/single_npu_qwen2_audio.md @@ -1,4 +1,4 @@ -# Single NPU (Qwen2-Audio 7B) +# Single NPU (Qwen2-Audio-7B) ## Run vllm-ascend on Single NPU diff --git a/docs/source/tutorials/single_npu_qwen3_quantization.md b/docs/source/tutorials/single_npu_qwen3_quantization.md index bd735d79a5..40acff3468 100644 --- a/docs/source/tutorials/single_npu_qwen3_quantization.md +++ b/docs/source/tutorials/single_npu_qwen3_quantization.md @@ -1,4 +1,4 @@ -# Single-NPU (Qwen3 8B W4A8) +# Single-NPU (Qwen3-8B-W4A8) ## Run Docker Container :::{note} diff --git a/docs/source/user_guide/configuration/additional_config.md b/docs/source/user_guide/configuration/additional_config.md index 78e6d33a68..ec1e1a429b 100644 --- a/docs/source/user_guide/configuration/additional_config.md +++ b/docs/source/user_guide/configuration/additional_config.md @@ -1,6 +1,6 @@ # Additional Configuration -Additional configuration is a mechanism provided by vLLM to allow plugins to control inner behavior by their own. vLLM Ascend uses this mechanism to make the project more flexible. +Additional configuration is a mechanism provided by vLLM to allow plugins to control inner behavior by themselves. VLLM Ascend uses this mechanism to make the project more flexible. ## How to use @@ -35,7 +35,7 @@ The following table lists additional configuration options available in vLLM Asc | `enable_shared_expert_dp` | bool | `False` | When the expert is shared in DP, it delivers better performance but consumes more memory. Currently only DeepSeek series models are supported. | | `lmhead_tensor_parallel_size` | int | `None` | The custom tensor parallel size of lmhead. | | `oproj_tensor_parallel_size` | int | `None` | The custom tensor parallel size of oproj. 
| -| `multistream_overlap_shared_expert` | bool | `False` | Whether to enable multistream shared expert. This option only takes effects on MoE models with shared experts. | +| `multistream_overlap_shared_expert` | bool | `False` | Whether to enable multistream shared expert. This option only takes effect on MoE models with shared experts. | | `dynamic_eplb` | bool | `False` | Whether to enable dynamic EPLB. | | `num_iterations_eplb_update` | int | `400` | Forward iterations when EPLB begins. | | `gate_eplb` | bool | `False` | Whether to enable EPLB only once. | @@ -70,14 +70,14 @@ The details of each configuration option are as follows: | `max_long_partial_prefills` | Union[int, float] | `float('inf')` | The maximum number of prompts longer than long_prefill_token_threshold that will be prefilled concurrently. | | `long_prefill_token_threshold` | Union[int, float] | `float('inf')` | a request is considered long if the prompt is longer than this number of tokens. | -ascend_scheduler_config also support the options from [vllm scheduler config](https://docs.vllm.ai/en/stable/api/vllm/config.html#vllm.config.SchedulerConfig). For example, you can add `enable_chunked_prefill: True` to ascend_scheduler_config as well. +ascend_scheduler_config also supports the options from [vllm scheduler config](https://docs.vllm.ai/en/stable/api/vllm/config.html#vllm.config.SchedulerConfig). For example, you can add `enable_chunked_prefill: True` to ascend_scheduler_config as well. **weight_prefetch_config** | Name | Type | Default | Description | |------------------|------|-------------------------------------------------------------|------------------------------------| | `enabled` | bool | `False` | Whether to enable weight prefetch. | -| `prefetch_ratio` | dict | `{"attn": {"qkv": 1.0, "o": 1.0}, "moe": {"gate_up": 0.8}}` | Prefetch ratio of each weights. | +| `prefetch_ratio` | dict | `{"attn": {"qkv": 1.0, "o": 1.0}, "moe": {"gate_up": 0.8}}` | Prefetch ratio of each weight. 
| ### Example diff --git a/docs/source/user_guide/feature_guide/dynamic_batch.md b/docs/source/user_guide/feature_guide/dynamic_batch.md index c1e76354e9..7c68b2a930 100644 --- a/docs/source/user_guide/feature_guide/dynamic_batch.md +++ b/docs/source/user_guide/feature_guide/dynamic_batch.md @@ -11,9 +11,9 @@ We are working on further improvements and this feature will support more XPUs i ### Prerequisites -1. Dynamic batch now depends on a offline cost model saved in a look-up table to refine the token budget. The lookup-table is saved in '.csv' file, which should be first downloaded from [here](https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/dynamic_batch_scheduler/A2-B3-BLK128.csv), renamed, and saved to the path `vllm_ascend/core/profile_table.csv` +1. Dynamic batch now depends on an offline cost model saved in a lookup table to refine the token budget. The lookup table is saved in '.csv' file, which should be first downloaded from [here](https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/dynamic_batch_scheduler/A2-B3-BLK128.csv), renamed, and saved to the path `vllm_ascend/core/profile_table.csv` -2. `Pandas` is needed to load the look-up table, in case `pandas` is not installed. +2. `Pandas` is needed to load the lookup table, in case `pandas` is not installed. ```bash pip install pandas diff --git a/docs/source/user_guide/feature_guide/graph_mode.md b/docs/source/user_guide/feature_guide/graph_mode.md index 3af9a41809..90aba6a3ee 100644 --- a/docs/source/user_guide/feature_guide/graph_mode.md +++ b/docs/source/user_guide/feature_guide/graph_mode.md @@ -8,7 +8,7 @@ This guide provides instructions for using Ascend Graph Mode with vLLM Ascend. P ## Getting Started -From v0.9.1rc1 with V1 Engine, vLLM Ascend will run models in graph mode by default to keep the same behavior with vLLM. 
If you hit any issues, please feel free to open an issue on GitHub and fallback to the eager mode temporarily by set `enforce_eager=True` when initializing the model. +From v0.9.1rc1 with V1 Engine, vLLM Ascend will run models in graph mode by default to keep the same behavior with vLLM. If you hit any issues, please feel free to open an issue on GitHub and fallback to the eager mode temporarily by setting `enforce_eager=True` when initializing the model. There are two kinds for graph mode supported by vLLM Ascend: - **ACLGraph**: This is the default graph mode supported by vLLM Ascend. In v0.9.1rc1, only Qwen series models are well tested. @@ -45,14 +45,14 @@ import os from vllm import LLM # TorchAirGraph is only work without chunked-prefill now -model = LLM(model="deepseek-ai/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True},"ascend_scheduler_config": {"enabled": True,}}) +model = LLM(model="deepseek-ai/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True},"ascend_scheduler_config": {"enabled": True}}) outputs = model.generate("Hello, how are you?") ``` Online example: ```shell -vllm serve Qwen/Qwen2-7B-Instruct --additional-config='{"torchair_graph_config": {"enabled": true},"ascend_scheduler_config": {"enabled": true,}}' +vllm serve deepseek-ai/DeepSeek-R1-0528 --additional-config='{"torchair_graph_config": {"enabled": true},"ascend_scheduler_config": {"enabled": true}}' ``` You can find more details about additional configuration [here](../configuration/additional_config.md). 
@@ -74,5 +74,5 @@ outputs = model.generate("Hello, how are you?") Online example: ```shell -vllm serve Qwen/Qwen2-7B-Instruct --enforce-eager +vllm serve someother_model_weight --enforce-eager ``` diff --git a/docs/source/user_guide/feature_guide/index.md b/docs/source/user_guide/feature_guide/index.md index 61c333b0f6..b0c0fd7d46 100644 --- a/docs/source/user_guide/feature_guide/index.md +++ b/docs/source/user_guide/feature_guide/index.md @@ -13,4 +13,5 @@ lora eplb_swift_balancer netloader dynamic_batch +kv_pool_mooncake ::: diff --git a/examples/disaggregated_prefill_v1/mooncake_connector_store_deployment_guide.md b/docs/source/user_guide/feature_guide/kv_pool_mooncake.md similarity index 63% rename from examples/disaggregated_prefill_v1/mooncake_connector_store_deployment_guide.md rename to docs/source/user_guide/feature_guide/kv_pool_mooncake.md index 28dd83b7cd..34ab047907 100644 --- a/examples/disaggregated_prefill_v1/mooncake_connector_store_deployment_guide.md +++ b/docs/source/user_guide/feature_guide/kv_pool_mooncake.md @@ -5,17 +5,26 @@ * Software: * Python >= 3.9, < 3.12 * CANN >= 8.3.rc1 - * PyTorch == 2.7.1, torch-npu == 2.7.1 + * PyTorch >= 2.7.1, torch-npu >= 2.7.1.dev20250724 * vLLM:main branch * vLLM-Ascend:main branch - * Mooncake:[AscendTransport/Mooncake at pooling-async-memcpy](https://github.com/AscendTransport/Mooncake/tree/pooling-async-memcpy)(Currently available branch code, continuously updated.) - Installation and Compilation Guide:https://github.com/AscendTransport/Mooncake/tree/pooling-async-memcpy?tab=readme-ov-file#build-and-use-binaries + * Mooncake:main branch + + Installation and Compilation Guide:https://github.com/kvcache-ai/Mooncake?tab=readme-ov-file#build-and-use-binaries + + Make sure to build with `-DUSE_ASCEND_DIRECT` to enable ADXL engine. + + An example command for compiling ADXL: + + `rm -rf build && mkdir -p build && cd build \ && cmake .. 
-DCMAKE_INSTALL_PREFIX=/opt/transfer-engine/ -DCMAKE_POLICY_VERSION_MINIMUM=3.5 -DUSE_ASCEND_DIRECT=ON -DBUILD_SHARED_LIBS=ON -DBUILD_UNIT_TESTS=OFF \ && make -j \ && make install` + + Also, you need to set environment variables to point to them `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib64/python3.11/site-packages/mooncake`, or copy the .so files to the `/usr/local/lib64` directory after compilation ### KV Pooling Parameter Description -**kv_connector_extra_config**:Additional Configurable Parameters for Pooling - **mooncake_rpc_port**:Port for RPC Communication Between Pooling Scheduler Process and Worker Process: Each Instance Requires a Unique Port Configuration. - **load_async**:Whether to Enable Asynchronous Loading. The default value is false. - **register_buffer**:Whether to Register Video Memory with the Backend. Registration is Not Required When Used with MooncakeConnectorV1; It is Required in All Other Cases. The Default Value is false. +**kv_connector_extra_config**:Additional Configurable Parameters for Pooling. +**mooncake_rpc_port**:Port for RPC Communication Between Pooling Scheduler Process and Worker Process: Each Instance Requires a Unique Port Configuration. +**load_async**:Whether to Enable Asynchronous Loading. The default value is false. +**register_buffer**:Whether to Register Video Memory with the Backend. Registration is Not Required When Used with MooncakeConnectorV1; It is Required in All Other Cases. The Default Value is false. 
## run mooncake master @@ -29,26 +38,32 @@ The environment variable **MOONCAKE_CONFIG_PATH** is configured to the full path "metadata_server": "P2PHANDSHAKE", "protocol": "ascend", "device_name": "", + "use_ascend_direct": true, + "alloc_in_same_node": true, "master_server_address": "xx.xx.xx.xx:50088", - "global_segment_size": 30000000000 + "global_segment_size": "1GB" (1024MB/1048576KB/1073741824B/1073741824) } ``` -**local_hostname**: Configured as the IP address of the current master node, -**metadata_server**: Configured as **P2PHANDSHAKE**, -**protocol:** Configured for Ascend to use Mooncake's HCCL communication, -**device_name**: "" -**master_server_address**: Configured with the IP and port of the master service -**global_segment_size**: Expands the kvcache size registered by the PD node to the master +**local_hostname**: Configured as the IP address of the current master node. +**metadata_server**: Configured as **P2PHANDSHAKE**. +**protocol:** Configured for Ascend to use Mooncake's HCCL communication. +**device_name**: "" +**use_ascend_direct**: Indicator for using ADXL engine. +**alloc_in_same_node**: Indicator for preferring local buffer allocation strategy. +**master_server_address**: Configured with the IP and port of the master service. +**global_segment_size**: Expands the kvcache size registered by the PD node to the master. ### 2. Start mooncake_master Under the mooncake folder: ``` -mooncake_master --port 50088 +mooncake_master --port 50088 --eviction_high_watermark_ratio 0.95 --eviction_ratio 0.05 ``` +`eviction_high_watermark_ratio` determines the watermark where Mooncake Store will perform eviction, and `eviction_ratio` determines the portion of stored objects that would be evicted. 
+ ## Pooling and Prefill Decode Disaggregate Scenario ### 1.Run `prefill` Node and `decode` Node @@ -69,11 +84,11 @@ export PYTHONPATH=$PYTHONPATH:/xxxxx/vllm export MOONCAKE_CONFIG_PATH="/xxxxxx/mooncake.json" export VLLM_USE_V1=1 export ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 -export ASCEND_TRANSPORT_PRINT=1 export ACL_OP_INIT_MODE=1 -# The upper boundary environment variable for memory swap logging is set to mooncake, where 1 indicates enabled and 0 indicates disabled. -export ASCEND_AGGREGATE_ENABLE=1 -# The upper-level environment variable is the switch for enabling the mooncake aggregation function, where 1 means on and 0 means off. +export ASCEND_BUFFER_POOL=4:8 +# ASCEND_BUFFER_POOL is the environment variable for configuring the number and size of buffer on NPU Device for aggregation and KV transfer,the value 4:8 means we allocate 4 buffers of size 8MB. +export ASCEND_CONNECT_TIMEOUT=10000 +export ASCEND_TRANSFER_TIMEOUT=10000 python3 -m vllm.entrypoints.openai.api_server \ --model /xxxxx/Qwen2.5-7B-Instruct \ @@ -88,34 +103,34 @@ python3 -m vllm.entrypoints.openai.api_server \ --max-num-batched-tokens 4096 \ --kv-transfer-config \ '{ - "kv_connector": "MultiConnector", - "kv_role": "kv_producer", - "kv_connector_extra_config": { - "use_layerwise": false, - "connectors": [ - { - "kv_connector": "MooncakeConnectorV1", - "kv_role": "kv_producer", - "kv_port": "20001", - "kv_connector_extra_config": { - "prefill": { - "dp_size": 1, - "tp_size": 1 - }, - "decode": { - "dp_size": 1, - "tp_size": 1 - } - } - }, - { - "kv_connector": "MooncakeConnectorStoreV1", - "kv_role": "kv_producer", + "kv_connector": "MultiConnector", + "kv_role": "kv_producer", + "kv_connector_extra_config": { + "use_layerwise": false, + "connectors": [ + { + "kv_connector": "MooncakeConnectorV1", + "kv_role": "kv_producer", + "kv_port": "20001", + "kv_connector_extra_config": { + "prefill": { + "dp_size": 1, + "tp_size": 1 + }, + "decode": { + "dp_size": 1, + "tp_size": 1 + } + } + }, + { + 
"kv_connector": "MooncakeConnectorStoreV1", + "kv_role": "kv_producer", "mooncake_rpc_port":"0" - } - ] - } -}' > p.log 2>&1 + } + ] + } + }' > p.log 2>&1 ``` `decode` Node: @@ -133,10 +148,9 @@ export MOONCAKE_CONFIG_PATH="/xxxxx/mooncake.json" export VLLM_USE_V1=1 export ASCEND_RT_VISIBLE_DEVICES=4,5,6,7 export ACL_OP_INIT_MODE=1 -export ASCEND_TRANSPORT_PRINT=1 -# The upper boundary environment variable for memory swap logging is set to mooncake, where 1 indicates enabled and 0 indicates disabled. -export ASCEND_AGGREGATE_ENABLE=1 -# The upper-level environment variable is the switch for enabling the mooncake aggregation function, where 1 means on and 0 means off. +export ASCEND_BUFFER_POOL=4:8 +export ASCEND_CONNECT_TIMEOUT=10000 +export ASCEND_TRANSFER_TIMEOUT=10000 python3 -m vllm.entrypoints.openai.api_server \ --model /xxxxx/Qwen2.5-7B-Instruct \ @@ -151,33 +165,34 @@ python3 -m vllm.entrypoints.openai.api_server \ --max-num-batched-tokens 4096 \ --kv-transfer-config \ '{ - "kv_connector": "MultiConnector", - "kv_role": "kv_consumer", - "kv_connector_extra_config": { - "use_layerwise": false, - "connectors": [ - { - "kv_connector": "MooncakeConnectorV1", - "kv_role": "kv_consumer", - "kv_port": "20002", - "kv_connector_extra_config": { - "prefill": { - "dp_size": 1, - "tp_size": 1 - }, - "decode": { - "dp_size": 1, - "tp_size": 1 - } - } - }, - { - "kv_connector": "MooncakeConnectorStoreV1", - "kv_role": "kv_consumer", + "kv_connector": "MultiConnector", + "kv_role": "kv_consumer", + "kv_connector_extra_config": { + "use_layerwise": false, + "connectors": [ + { + "kv_connector": "MooncakeConnectorV1", + "kv_role": "kv_consumer", + "kv_port": "20002", + "kv_connector_extra_config": { + "use_ascend_direct": true, + "prefill": { + "dp_size": 1, + "tp_size": 1 + }, + "decode": { + "dp_size": 1, + "tp_size": 1 + } + } + }, + { + "kv_connector": "MooncakeConnectorStoreV1", + "kv_role": "kv_consumer", "mooncake_rpc_port":"1" - } - ] - } + } + ] + } }' > d.log 2>&1 
``` @@ -234,10 +249,9 @@ export MOONCAKE_CONFIG_PATH="/xxxxxx/mooncake.json" export VLLM_USE_V1=1 export ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 export ACL_OP_INIT_MODE=1 -export ASCEND_TRANSPORT_PRINT=1 -# The upper boundary environment variable for memory swap logging is set to mooncake, where 1 indicates enabled and 0 indicates disabled. -export ASCEND_AGGREGATE_ENABLE=1 -# The upper-level environment variable is the switch for enabling the mooncake aggregation function, where 1 means on and 0 means off. +export ASCEND_BUFFER_POOL=4:8 +export ASCEND_CONNECT_TIMEOUT=10000 +export ASCEND_TRANSFER_TIMEOUT=10000 python3 -m vllm.entrypoints.openai.api_server \ --model /xxxxx/Qwen2.5-7B-Instruct \ @@ -252,12 +266,12 @@ python3 -m vllm.entrypoints.openai.api_server \ --max-num-batched-tokens 4096 \ --kv-transfer-config \ '{ - "kv_connector": "MooncakeConnectorStoreV1", - "kv_role": "kv_both", - "kv_connector_extra_config": { - "use_layerwise": false, + "kv_connector": "MooncakeConnectorStoreV1", + "kv_role": "kv_both", + "kv_connector_extra_config": { + "use_layerwise": false, "mooncake_rpc_port":"0" - } + } }' > mix.log 2>&1 ``` @@ -275,4 +289,4 @@ Long question: ``` curl -s http://localhost:8100/v1/completions -H "Content-Type: application/json" -d '{ "model": "/xxxxx/Qwen2.5-7B-Instruct", "prompt": "Given the accelerating impacts of climate change—including rising sea levels, increasing frequency of extreme weather events, loss of biodiversity, and adverse effects on agriculture and human health—there is an urgent need for a robust, globally coordinated response. However, international efforts are complicated by a range of factors: economic disparities between high-income and low-income countries, differing levels of industrialization, varying access to clean energy technologies, and divergent political systems that influence climate policy implementation. 
In this context, how can global agreements like the Paris Accord be redesigned or strengthened to not only encourage but effectively enforce emission reduction targets? Furthermore, what mechanisms can be introduced to promote fair and transparent technology transfer, provide adequate financial support for climate adaptation in vulnerable regions, and hold nations accountable without exacerbating existing geopolitical tensions or disproportionately burdening those with historically lower emissions?", "max_tokens": 256, "temperature":0.0 }' -``` \ No newline at end of file +``` diff --git a/docs/source/user_guide/feature_guide/lora.md b/docs/source/user_guide/feature_guide/lora.md index ad4bc2d320..4678c0241e 100644 --- a/docs/source/user_guide/feature_guide/lora.md +++ b/docs/source/user_guide/feature_guide/lora.md @@ -20,4 +20,4 @@ vllm serve meta-llama/Llama-2-7b \ We have implemented LoRA-related AscendC operators, such as bgmv_shrink, bgmv_expand, sgmv_shrink and sgmv_expand. You can find them under the "csrc/kernels" directory of [vllm-ascend repo](https://github.com/vllm-project/vllm-ascend.git). -When you install vllm and vllm-ascend, those operators mentioned above will be compiled and installed automatically. If you do not want to use AscendC operators when you run vllm-ascend, you should set `COMPILE_CUSTOM_KERNELS=0` and reinstall vllm-ascend. To require more instructions about installation and compilation, you can refer to [installation guide](../../installation.md). +When you install vllm and vllm-ascend, those operators mentioned above will be compiled and installed automatically. If you do not want to use AscendC operators when you run vllm-ascend, you should set `COMPILE_CUSTOM_KERNELS=0` and reinstall vllm-ascend. For more instructions about installation and compilation, you can refer to [installation guide](../../installation.md). 
diff --git a/docs/source/user_guide/feature_guide/quantization.md b/docs/source/user_guide/feature_guide/quantization.md index e2a48ff35a..8a6e36765d 100644 --- a/docs/source/user_guide/feature_guide/quantization.md +++ b/docs/source/user_guide/feature_guide/quantization.md @@ -28,7 +28,7 @@ See https://www.modelscope.cn/models/vllm-ascend/Kimi-K2-Instruct-W8A8. This conversion process requires a larger CPU memory, ensure that the RAM size is greater than 2 TB. ::: -### Adapt to changes +### Adapts and changes 1. Ascend does not support the `flash_attn` library. To run the model, you need to follow the [guide](https://gitee.com/ascend/msit/blob/master/msmodelslim/example/DeepSeek/README.md#deepseek-v3r1) and comment out certain parts of the code in `modeling_deepseek.py` located in the weights folder. 2. The current version of transformers does not support loading weights in FP8 quantization format. you need to follow the [guide](https://gitee.com/ascend/msit/blob/master/msmodelslim/example/DeepSeek/README.md#deepseek-v3r1) and delete the quantization related fields from `config.json` in the weights folder. diff --git a/docs/source/user_guide/feature_guide/sleep_mode.md b/docs/source/user_guide/feature_guide/sleep_mode.md index c616f7e815..6fc3652115 100644 --- a/docs/source/user_guide/feature_guide/sleep_mode.md +++ b/docs/source/user_guide/feature_guide/sleep_mode.md @@ -80,7 +80,7 @@ The following is a simple example of how to use sleep mode. vllm serve Qwen/Qwen2.5-0.5B-Instruct --enable-sleep-mode - # after serveing is up, post these endpoints + # after serving is up, post to these endpoints # sleep level 1 curl -X POST http://127.0.0.1:8000/sleep \ diff --git a/docs/source/user_guide/release_notes.md b/docs/source/user_guide/release_notes.md index 56d101dabf..307d15357e 100644 --- a/docs/source/user_guide/release_notes.md +++ b/docs/source/user_guide/release_notes.md @@ -39,7 +39,7 @@ This is the 1st release candidate of v0.10.2 for vLLM Ascend. 
Please follow the - MTP now works with the token > 1. [#2708](https://github.com/vllm-project/vllm-ascend/pull/2708) - Qwen2.5 VL now works with quantization. [#2778](https://github.com/vllm-project/vllm-ascend/pull/2778) - Improved the performance with async scheduler enabled. [#2783](https://github.com/vllm-project/vllm-ascend/pull/2783) -- Fixed the performance regression with non MLA model when use default scheduler. [#2894](https://github.com/vllm-project/vllm-ascend/pull/2894) +- Fixed the performance regression with non MLA model when using default scheduler. [#2894](https://github.com/vllm-project/vllm-ascend/pull/2894) ### Others - The performance of W8A8 quantization is improved. [#2275](https://github.com/vllm-project/vllm-ascend/pull/2275) @@ -106,7 +106,7 @@ This is the 1st release candidate of v0.10.1 for vLLM Ascend. Please follow the * Environment variable `VLLM_LLMDD_RPC_PORT` is renamed to `VLLM_ASCEND_LLMDD_RPC_PORT` now. [#2450](https://github.com/vllm-project/vllm-ascend/pull/2450) * Added `VLLM_ASCEND_ENABLE_MLP_OPTIMIZE` in environment variables, whether to enable mlp optimize when tensor parallel is enabled. This feature provides better performance in eager mode. [#2120](https://github.com/vllm-project/vllm-ascend/pull/2120) * Removed `MOE_ALL2ALL_BUFFER` and `VLLM_ASCEND_ENABLE_MOE_ALL2ALL_SEQ` in environment variables. [#2612](https://github.com/vllm-project/vllm-ascend/pull/2612) - * Added `enable_prefetch` in `additional_config`, whether to enable weight prefetch. [#2465](https://github.com/vllm-project/vllm-ascend/pull/2465) + * Added `enable_prefetch` in `additional_config`, whether to enable weight prefetch. [#2465](https://github.com/vllm-project/vllm-ascend/pull/2465) * Added `mode` in `additional_config.torchair_graph_config`, When using reduce-overhead mode for torchair, mode needs to be set. 
[#2461](https://github.com/vllm-project/vllm-ascend/pull/2461) * `enable_shared_expert_dp` in `additional_config` is disabled by default now, and it is recommended to be enabled when inferencing with deepseek. [#2457](https://github.com/vllm-project/vllm-ascend/pull/2457) @@ -461,7 +461,7 @@ This is the 1st release candidate of v0.9.0 for vllm-ascend. Please follow the [ ### Highlights - DeepSeek works with graph mode now. Follow the [official doc](https://vllm-ascend.readthedocs.io/en/latest/user_guide/feature_guide/graph_mode.html) to take a try. [#789](https://github.com/vllm-project/vllm-ascend/pull/789) -- Qwen series models works with graph mode now. It works by default with V1 Engine. Please note that in this release, only Qwen series models are well tested with graph mode. We'll make it stable and generalize in the next release. If you hit any issues, please feel free to open an issue on GitHub and fallback to eager mode temporarily by set `enforce_eager=True` when initializing the model. +- Qwen series models work with graph mode now. It works by default with V1 Engine. Please note that in this release, only Qwen series models are well tested with graph mode. We'll make it stable and generalize in the next release. If you hit any issues, please feel free to open an issue on GitHub and fallback to eager mode temporarily by set `enforce_eager=True` when initializing the model. ### Core @@ -590,13 +590,13 @@ This is the first release candidate of v0.8.4 for vllm-ascend. Please follow the - vLLM V1 engine experimental support is included in this version. You can visit [official guide](https://docs.vllm.ai/en/latest/getting_started/v1_user_guide.html) to get more detail. By default, vLLM will fallback to V0 if V1 doesn't work, please set `VLLM_USE_V1=1` environment if you want to use V1 forcibly. - LoRA、Multi-LoRA And Dynamic Serving is supported now. The performance will be improved in the next release. 
Please follow the [official doc](https://docs.vllm.ai/en/latest/features/lora.html) for more usage information. Thanks for the contribution from China Merchants Bank. [#521](https://github.com/vllm-project/vllm-ascend/pull/521). -- Sleep Mode feature is supported. Currently it's only work on V0 engine. V1 engine support will come soon. [#513](https://github.com/vllm-project/vllm-ascend/pull/513) +- Sleep Mode feature is supported. Currently it only works on V0 engine. V1 engine support will come soon. [#513](https://github.com/vllm-project/vllm-ascend/pull/513) ### Core - The Ascend scheduler is added for V1 engine. This scheduler is more affinity with Ascend hardware. More scheduler policy will be added in the future. [#543](https://github.com/vllm-project/vllm-ascend/pull/543) - Disaggregated Prefill feature is supported. Currently only 1P1D works. NPND is under design by vllm team. vllm-ascend will support it once it's ready from vLLM. Follow the [official guide](https://docs.vllm.ai/en/latest/features/disagg_prefill.html) to use. [#432](https://github.com/vllm-project/vllm-ascend/pull/432) -- Spec decode feature works now. Currently it's only work on V0 engine. V1 engine support will come soon. [#500](https://github.com/vllm-project/vllm-ascend/pull/500) +- Spec decode feature works now. Currently it only works on V0 engine. V1 engine support will come soon. [#500](https://github.com/vllm-project/vllm-ascend/pull/500) - Structured output feature works now on V1 Engine. Currently it only supports xgrammar backend while using guidance backend may get some errors. 
[#555](https://github.com/vllm-project/vllm-ascend/pull/555) ### Others diff --git a/docs/source/user_guide/support_matrix/supported_features.md b/docs/source/user_guide/support_matrix/supported_features.md index 10816a4092..72d8811e86 100644 --- a/docs/source/user_guide/support_matrix/supported_features.md +++ b/docs/source/user_guide/support_matrix/supported_features.md @@ -10,7 +10,7 @@ You can check the [support status of vLLM V1 Engine][v1_user_guide]. Below is th | Automatic Prefix Caching | 🟢 Functional | Functional, see detailed note: [vllm-ascend#732][apc] | | LoRA | 🟢 Functional | [vllm-ascend#396][multilora], [vllm-ascend#893][v1 multilora] | | Speculative decoding | 🟢 Functional | Basic support | -| Pooling | 🟢 Functional | CI needed to adapt to more models; V1 support rely on vLLM support. | +| Pooling | 🟢 Functional | CI needed to adapt to more models; V1 support relies on vLLM support. | | Enc-dec | 🟡 Planned | vLLM should support this feature first. | | Multi Modality | 🟢 Functional | [Tutorial][multimodal], optimizing and adapting more models | | LogProbs | 🟢 Functional | CI needed | diff --git a/docs/source/user_guide/support_matrix/supported_models.md b/docs/source/user_guide/support_matrix/supported_models.md index 256f0333f7..c5a718b0cc 100644 --- a/docs/source/user_guide/support_matrix/supported_models.md +++ b/docs/source/user_guide/support_matrix/supported_models.md @@ -6,78 +6,78 @@ Get the latest info here: https://github.com/vllm-project/vllm-ascend/issues/160 ### Generative Models -| Model | Support | Note | -|-------------------------------|-----------|----------------------------------------------------------------------| -| DeepSeek V3/3.1 | ✅ | | -| DeepSeek V3.2 EXP | ✅ | | -| DeepSeek R1 | ✅ | | -| DeepSeek Distill (Qwen/LLama) | ✅ | | -| Qwen3 | ✅ | | -| Qwen3-based | ✅ | | -| Qwen3-Coder | ✅ | | -| Qwen3-Moe | ✅ | | -| Qwen3-Next | ✅ | | -| Qwen2.5 | ✅ | | -| Qwen2 | ✅ | | -| Qwen2-based | ✅ | | -| QwQ-32B | ✅ | | -| LLama2/3/3.1 
| ✅ | | -| Internlm | ✅ | [#1962](https://github.com/vllm-project/vllm-ascend/issues/1962) | -| Baichuan | ✅ | | -| Baichuan2 | ✅ | | -| Phi-4-mini | ✅ | | -| MiniCPM | ✅ | | -| MiniCPM3 | ✅ | | -| Ernie4.5 | ✅ | | -| Ernie4.5-Moe | ✅ | | -| Gemma-2 | ✅ | | -| Gemma-3 | ✅ | | -| Phi-3/4 | ✅ | | -| Mistral/Mistral-Instruct | ✅ | | -| GLM-4.5 | ✅ | | -| GLM-4 | ❌ | [#2255](https://github.com/vllm-project/vllm-ascend/issues/2255) | -| GLM-4-0414 | ❌ | [#2258](https://github.com/vllm-project/vllm-ascend/issues/2258) | -| ChatGLM | ❌ | [#554](https://github.com/vllm-project/vllm-ascend/issues/554) | -| DeepSeek V2.5 | 🟡 | Need test | -| Mllama | 🟡 | Need test | -| MiniMax-Text | 🟡 | Need test | +| Model | Support | Note | BF16 | Supported Hardware | W8A8 | Chunked Prefill | Automatic Prefix Cache | LoRA | Speculative Decoding | Async Scheduling | Tensor Parallel | Pipeline Parallel | Expert Parallel | Data Parallel | Prefill-decode Disaggregation | Piecewise AclGraph | Fullgraph AclGraph | max-model-len | MLP Weight Prefetch | Doc | +|-------------------------------|-----------|----------------------------------------------------------------------|------|--------------------|------|-----------------|------------------------|------|----------------------|------------------|-----------------|-------------------|-----------------|---------------|-------------------------------|--------------------|--------------------|---------------|---------------------|-----| +| DeepSeek V3/3.1 | ✅ | ||||||||||||||||||| +| DeepSeek V3.2 EXP | ✅ | | ✅ | A2/A3 | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ❌ | | | 163840 | | [DeepSeek-V3.2-Exp tutorial](../../tutorials/DeepSeek-V3.2-Exp.md) | +| DeepSeek R1 | ✅ | ||||||||||||||||||| +| DeepSeek Distill (Qwen/LLama) | ✅ | ||||||||||||||||||| +| Qwen3 | ✅ | ||||||||||||||||||| +| Qwen3-based | ✅ | ||||||||||||||||||| +| Qwen3-Coder | ✅ | ||||||||||||||||||| +| Qwen3-Moe | ✅ | ||||||||||||||||||| +| Qwen3-Next | ✅ | ||||||||||||||||||| +| Qwen2.5 | 
✅ | ||||||||||||||||||| +| Qwen2 | ✅ | ||||||||||||||||||| +| Qwen2-based | ✅ | ||||||||||||||||||| +| QwQ-32B | ✅ | ||||||||||||||||||| +| LLama2/3/3.1 | ✅ | ||||||||||||||||||| +| Internlm | ✅ | [#1962](https://github.com/vllm-project/vllm-ascend/issues/1962) ||||||||||||||||||| +| Baichuan | ✅ | ||||||||||||||||||| +| Baichuan2 | ✅ | ||||||||||||||||||| +| Phi-4-mini | ✅ | ||||||||||||||||||| +| MiniCPM | ✅ | ||||||||||||||||||| +| MiniCPM3 | ✅ | ||||||||||||||||||| +| Ernie4.5 | ✅ | ||||||||||||||||||| +| Ernie4.5-Moe | ✅ | ||||||||||||||||||| +| Gemma-2 | ✅ | ||||||||||||||||||| +| Gemma-3 | ✅ | ||||||||||||||||||| +| Phi-3/4 | ✅ | ||||||||||||||||||| +| Mistral/Mistral-Instruct | ✅ | ||||||||||||||||||| +| GLM-4.5 | ✅ | ||||||||||||||||||| +| GLM-4 | ❌ | [#2255](https://github.com/vllm-project/vllm-ascend/issues/2255) ||||||||||||||||||| +| GLM-4-0414 | ❌ | [#2258](https://github.com/vllm-project/vllm-ascend/issues/2258) ||||||||||||||||||| +| ChatGLM | ❌ | [#554](https://github.com/vllm-project/vllm-ascend/issues/554) ||||||||||||||||||| +| DeepSeek V2.5 | 🟡 | Need test ||||||||||||||||||| +| Mllama | 🟡 | Need test ||||||||||||||||||| +| MiniMax-Text | 🟡 | Need test ||||||||||||||||||| ### Pooling Models -| Model | Support | Note | -|-------------------------------|-----------|----------------------------------------------------------------------| -| Qwen3-Embedding | ✅ | | -| Molmo | ✅ | [1942](https://github.com/vllm-project/vllm-ascend/issues/1942) | -| XLM-RoBERTa-based | ❌ | [1960](https://github.com/vllm-project/vllm-ascend/issues/1960) | +| Model | Support | Note | BF16 | Supported Hardware | W8A8 | Chunked Prefill | Automatic Prefix Cache | LoRA | Speculative Decoding | Async Scheduling | Tensor Parallel | Pipeline Parallel | Expert Parallel | Data Parallel | Prefill-decode Disaggregation | Piecewise AclGraph | Fullgraph AclGraph | max-model-len | MLP Weight Prefetch | Doc | 
+|-------------------------------|-----------|----------------------------------------------------------------------|------|--------------------|------|-----------------|------------------------|------|----------------------|------------------|-----------------|-------------------|-----------------|---------------|-------------------------------|--------------------|--------------------|---------------|---------------------|-----| +| Qwen3-Embedding | ✅ | ||||||||||||||||||| +| Molmo | ✅ | [1942](https://github.com/vllm-project/vllm-ascend/issues/1942) ||||||||||||||||||| +| XLM-RoBERTa-based | ❌ | [1960](https://github.com/vllm-project/vllm-ascend/issues/1960) ||||||||||||||||||| ## Multimodal Language Models ### Generative Models -| Model | Support | Note | -|--------------------------------|---------------|----------------------------------------------------------------------| -| Qwen2-VL | ✅ | | -| Qwen2.5-VL | ✅ | | -| Qwen3-VL | ✅ | | -| Qwen3-VL-MOE | ✅ | | -| Qwen2.5-Omni | ✅ | [1760](https://github.com/vllm-project/vllm-ascend/issues/1760) | -| QVQ | ✅ | | -| LLaVA 1.5/1.6 | ✅ | [1962](https://github.com/vllm-project/vllm-ascend/issues/1962) | -| InternVL2 | ✅ | | -| InternVL2.5 | ✅ | | -| Qwen2-Audio | ✅ | | -| Aria | ✅ | | -| LLaVA-Next | ✅ | | -| LLaVA-Next-Video | ✅ | | -| MiniCPM-V | ✅ | | -| Mistral3 | ✅ | | -| Phi-3-Vison/Phi-3.5-Vison | ✅ | | -| Gemma3 | ✅ | | -| LLama4 | ❌ | [1972](https://github.com/vllm-project/vllm-ascend/issues/1972) | -| LLama3.2 | ❌ | [1972](https://github.com/vllm-project/vllm-ascend/issues/1972) | -| Keye-VL-8B-Preview | ❌ | [1963](https://github.com/vllm-project/vllm-ascend/issues/1963) | -| Florence-2 | ❌ | [2259](https://github.com/vllm-project/vllm-ascend/issues/2259) | -| GLM-4V | ❌ | [2260](https://github.com/vllm-project/vllm-ascend/issues/2260) | -| InternVL2.0/2.5/3.0
InternVideo2.5/Mono-InternVL | ❌ | [2064](https://github.com/vllm-project/vllm-ascend/issues/2064) | -| Whisper | ❌ | [2262](https://github.com/vllm-project/vllm-ascend/issues/2262) | -| Ultravox | 🟡 | Need test | +| Model | Support | Note | BF16 | Supported Hardware | W8A8 | Chunked Prefill | Automatic Prefix Cache | LoRA | Speculative Decoding | Async Scheduling | Tensor Parallel | Pipeline Parallel | Expert Parallel | Data Parallel | Prefill-decode Disaggregation | Piecewise AclGraph | Fullgraph AclGraph | max-model-len | MLP Weight Prefetch | Doc | +|--------------------------------|---------------|----------------------------------------------------------------------|------|--------------------|------|-----------------|------------------------|------|----------------------|------------------|-----------------|-------------------|-----------------|---------------|-------------------------------|--------------------|--------------------|---------------|---------------------|-----| +| Qwen2-VL | ✅ | ||||||||||||||||||| +| Qwen2.5-VL | ✅ | ||||||||||||||||||| +| Qwen3-VL | ✅ | ||||||||||||||||||| +| Qwen3-VL-MOE | ✅ | ||||||||||||||||||| +| Qwen2.5-Omni | ✅ | [1760](https://github.com/vllm-project/vllm-ascend/issues/1760) ||||||||||||||||||| +| QVQ | ✅ | ||||||||||||||||||| +| LLaVA 1.5/1.6 | ✅ | [1962](https://github.com/vllm-project/vllm-ascend/issues/1962) ||||||||||||||||||| +| InternVL2 | ✅ | ||||||||||||||||||| +| InternVL2.5 | ✅ | ||||||||||||||||||| +| Qwen2-Audio | ✅ | ||||||||||||||||||| +| Aria | ✅ | ||||||||||||||||||| +| LLaVA-Next | ✅ | ||||||||||||||||||| +| LLaVA-Next-Video | ✅ | ||||||||||||||||||| +| MiniCPM-V | ✅ | ||||||||||||||||||| +| Mistral3 | ✅ | ||||||||||||||||||| +| Phi-3-Vison/Phi-3.5-Vison | ✅ | ||||||||||||||||||| +| Gemma3 | ✅ | ||||||||||||||||||| +| LLama4 | ❌ | [1972](https://github.com/vllm-project/vllm-ascend/issues/1972) ||||||||||||||||||| +| LLama3.2 | ❌ | [1972](https://github.com/vllm-project/vllm-ascend/issues/1972) 
||||||||||||||||||| +| Keye-VL-8B-Preview | ❌ | [1963](https://github.com/vllm-project/vllm-ascend/issues/1963) ||||||||||||||||||| +| Florence-2 | ❌ | [2259](https://github.com/vllm-project/vllm-ascend/issues/2259) ||||||||||||||||||| +| GLM-4V | ❌ | [2260](https://github.com/vllm-project/vllm-ascend/issues/2260) ||||||||||||||||||| +| InternVL2.0/2.5/3.0
InternVideo2.5/Mono-InternVL | ❌ | [2064](https://github.com/vllm-project/vllm-ascend/issues/2064) ||||||||||||||||||| +| Whisper | ❌ | [2262](https://github.com/vllm-project/vllm-ascend/issues/2262) ||||||||||||||||||| +| Ultravox | 🟡 | Need test ||||||||||||||||||| diff --git a/examples/disaggregated_prefill_v1/load_balance_proxy_layerwise_server_example.py b/examples/disaggregated_prefill_v1/load_balance_proxy_layerwise_server_example.py index 67c34ee899..8bbc3595ee 100644 --- a/examples/disaggregated_prefill_v1/load_balance_proxy_layerwise_server_example.py +++ b/examples/disaggregated_prefill_v1/load_balance_proxy_layerwise_server_example.py @@ -447,7 +447,7 @@ def get_api_request_id(api, req_id): def get_origin_request_id(api, req_id): if api == "/completions": - return req_id.replace("cmpl-", "").replace("-0", "") + return req_id.replace("cmpl-", "")[:-2] elif api == "/chat/completions": return req_id.replace("chatcmpl-", "") @@ -561,9 +561,12 @@ async def metaserver(request: Request): max_retries=global_args.max_retries, base_delay=global_args.retry_delay) proxy_state.release_prefiller(prefiller_idx, prefiller_score) + proxy_state.release_prefiller_kv(prefiller_idx,prefiller_score) except Exception as e: logger.error(f"Post metaserver failed with: {str(e)}") + proxy_state.release_prefiller(prefiller_idx, prefiller_score) + proxy_state.release_prefiller_kv(prefiller_idx, prefiller_score) if __name__ == '__main__': diff --git a/tests/e2e/multicard/test_external_launcher.py b/tests/e2e/multicard/test_external_launcher.py index 9bf855e30a..d5441691c3 100644 --- a/tests/e2e/multicard/test_external_launcher.py +++ b/tests/e2e/multicard/test_external_launcher.py @@ -108,6 +108,7 @@ def test_moe_external_launcher(model): assert proc.returncode == 0 +@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"}) def test_external_launcher_and_sleepmode(): script = Path( __file__ @@ -154,6 +155,7 @@ def test_external_launcher_and_sleepmode(): assert proc.returncode == 0 
+@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"}) def test_external_launcher_and_sleepmode_level2(): script = Path( __file__ diff --git a/tests/e2e/multicard/test_qwen3_next.py b/tests/e2e/multicard/test_qwen3_next.py index 9fda522021..cf3382318d 100644 --- a/tests/e2e/multicard/test_qwen3_next.py +++ b/tests/e2e/multicard/test_qwen3_next.py @@ -20,10 +20,17 @@ Run `pytest tests/e2e/multicard/test_qwen3_next.py`. """ +import os +from unittest.mock import patch from tests.e2e.conftest import VllmRunner +# NZ will cause precision error in Qwen3-Next +# When it is fixed, this set-up can be removed +_IS_ENABLE_NZ = "VLLM_ASCEND_ENABLE_NZ" + +@patch.dict(os.environ, {_IS_ENABLE_NZ: "0"}) def test_models_distributed_Qwen3_NEXT_TP4(): example_prompts = [ "Hello, my name is", @@ -36,8 +43,10 @@ def test_models_distributed_Qwen3_NEXT_TP4(): distributed_executor_backend="mp", enforce_eager=True) as vllm_model: vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model +@patch.dict(os.environ, {_IS_ENABLE_NZ: "0"}) def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY(): example_prompts = [ "Hello, my name is", @@ -54,3 +63,50 @@ def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY(): "cudagraph_capture_sizes": [1, 8, 24, 48, 60] }) as vllm_model: vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + +@patch.dict(os.environ, {_IS_ENABLE_NZ: "0"}) +def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY(): + example_prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + max_tokens = 20 + + with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct", + tensor_parallel_size=4, + max_model_len=4096, + gpu_memory_utilization=0.8, + distributed_executor_backend="mp") as vllm_model: + ref_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) + del vllm_model + + with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct", + tensor_parallel_size=4, 
+ max_model_len=4096, + gpu_memory_utilization=0.8, + distributed_executor_backend="mp", + speculative_config={ + "method": "qwen3_next_mtp", + "num_speculative_tokens": 1 + }) as spec_vllm_model: + spec_outputs = spec_vllm_model.generate_greedy(example_prompts, + max_tokens) + del spec_vllm_model + + matches = 0 + misses = 0 + for ref_output, spec_output in zip(ref_outputs, spec_outputs): + ref_token_ids = ref_output[0] + spec_token_ids = spec_output[0] + if ref_token_ids == spec_token_ids[:len(ref_token_ids)]: + matches += 1 + else: + misses += 1 + print(f"ref_output: {ref_output[1]}") + print(f"spec_output: {spec_output[1]}") + + assert matches > int(0.66 * len(ref_outputs)) diff --git a/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py b/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py index 26bcfa9104..89449ac4c3 100644 --- a/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py +++ b/tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py @@ -14,6 +14,7 @@ # limitations under the License. # This file is a part of the vllm-ascend project. 
# +import json from typing import Any import openai @@ -27,8 +28,7 @@ "vllm-ascend/DeepSeek-R1-W8A8", ] -TENSOR_PARALLELS = [8] -DATA_PARALLELS = [2] +MODES = ["eplb"] prompts = [ "San Francisco is a", @@ -38,55 +38,52 @@ "max_tokens": 10, } -aisbench_cases = [{ +aisbench_gsm8k = [{ "case_type": "accuracy", "dataset_path": "vllm-ascend/gsm8k-lite", "request_conf": "vllm_api_general_chat", "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt", "max_out_len": 32768, "batch_size": 32, - "baseline": 93, + "top_k": 20, + "baseline": 95, "threshold": 5 -}, { - "case_type": "performance", - "dataset_path": "vllm-ascend/GSM8K-in3500-bs400", - "request_conf": "vllm_api_stream_chat", - "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf", - "num_prompts": 80, - "max_out_len": 1500, - "batch_size": 20, - "request_rate": 0, - "baseline": 1, - "threshold": 0.97 }] +mode_aisbench = {"eplb": aisbench_gsm8k} + @pytest.mark.asyncio @pytest.mark.parametrize("model", MODELS) -@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS) -@pytest.mark.parametrize("dp_size", DATA_PARALLELS) -async def test_models(model: str, tp_size: int, dp_size: int) -> None: +@pytest.mark.parametrize("mode", MODES) +async def test_models(model: str, mode: str) -> None: port = get_open_port() env_dict = { - "TASK_QUEUE_ENABLE": "1", + "OMP_NUM_THREADS": "10", "OMP_PROC_BIND": "false", - "HCCL_OP_EXPANSION_MODE": "AIV", - "PAGED_ATTENTION_MASK_LEN": "5500", - "DYNAMIC_EPLB": "true", - "HCCL_BUFFSIZE": "1024" + "HCCL_BUFFSIZE": "1024", + "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True", + "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1" + } + additional_config: dict[str, Any] = { + "ascend_scheduler_config": { + "enabled": False + }, } server_args = [ - "--no-enable-prefix-caching", "--enable-expert-parallel", - "--tensor-parallel-size", - str(tp_size), "--data-parallel-size", - str(dp_size), "--port", - str(port), "--max-model-len", "36864", "--max-num-batched-tokens", - "36864", "--block-size", "128", 
"--trust-remote-code", - "--quantization", "ascend", "--gpu-memory-utilization", "0.9", - "--additional-config", '{"enable_weight_nz_layout":true, ' - '"torch_air_graph_config":{"enabled": true, "enable_multistream_mla": true, "graph_batch_size": [16], "use_cached_graph": true},' - '"dynamic_eplb": true, "num_iterations_eplb_update": 1000, "num_wait_worker_iterations": 200' + "--quantization", "ascend", "--async-scheduling", + "--data-parallel-size", "4", "--tensor-parallel-size", "4", + "--enable-expert-parallel", "--port", + str(port), "--max-model-len", "40960", "--max-num-batched-tokens", + "8192", "--max-num-seqs", "12", "--trust-remote-code", + "--gpu-memory-utilization", "0.9" ] + if mode == "eplb": + env_dict["DYNAMIC_EPLB"] = "true" + additional_config["dynamic_eplb"] = True + additional_config["num_iterations_eplb_update"] = 2048 + additional_config["num_wait_worker_iterations"] = 200 + server_args.extend(["--additional-config", json.dumps(additional_config)]) request_keyword_args: dict[str, Any] = { **api_keyword_args, } @@ -103,5 +100,10 @@ async def test_models(model: str, tp_size: int, dp_size: int) -> None: ) choices: list[openai.types.CompletionChoice] = batch.choices assert choices[0].text, "empty response" + print(choices) # aisbench test - run_aisbench_cases(model, port, aisbench_cases) + aisbench_cases = mode_aisbench[mode] + run_aisbench_cases(model, + port, + aisbench_cases, + server_args=server_args) diff --git a/tests/e2e/nightly/models/test_qwen2_5_vl_32b.py b/tests/e2e/nightly/models/test_qwen2_5_vl_32b.py index 760f8deeff..fe6bbedf2e 100644 --- a/tests/e2e/nightly/models/test_qwen2_5_vl_32b.py +++ b/tests/e2e/nightly/models/test_qwen2_5_vl_32b.py @@ -45,7 +45,7 @@ "dataset_conf": "textvqa/textvqa_gen_base64", "max_out_len": 2048, "batch_size": 128, - "baseline": 76, + "baseline": 76.22, "temperature": 0, "top_k": -1, "top_p": 1, diff --git a/tests/e2e/nightly/models/test_qwen2_5_vl_7b.py b/tests/e2e/nightly/models/test_qwen2_5_vl_7b.py 
index bc35ff88c7..d3a726bf07 100644 --- a/tests/e2e/nightly/models/test_qwen2_5_vl_7b.py +++ b/tests/e2e/nightly/models/test_qwen2_5_vl_7b.py @@ -45,7 +45,7 @@ "dataset_conf": "textvqa/textvqa_gen_base64", "max_out_len": 2048, "batch_size": 128, - "baseline": 81, + "baseline": 82.05, "threshold": 5 }, { "case_type": "performance", diff --git a/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py b/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py index 52aafa156f..8debeecb2a 100644 --- a/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py +++ b/tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py @@ -14,6 +14,7 @@ # limitations under the License. # This file is a part of the vllm-ascend project. # +import json from typing import Any import openai @@ -27,7 +28,7 @@ "vllm-ascend/Qwen3-235B-A22B-W8A8", ] -TENSOR_PARALLELS = [16] +MODES = ["eplb"] prompts = [ "San Francisco is a", @@ -37,53 +38,53 @@ "max_tokens": 10, } -aisbench_cases = [{ +aisbench_gsm8k = [{ "case_type": "accuracy", "dataset_path": "vllm-ascend/gsm8k-lite", "request_conf": "vllm_api_general_chat", "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt", "max_out_len": 32768, "batch_size": 32, - "baseline": 93, - "threshold": 5 -}, { - "case_type": "performance", - "dataset_path": "vllm-ascend/GSM8K-in3500-bs400", - "request_conf": "vllm_api_stream_chat", - "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf", - "num_prompts": 80, - "max_out_len": 1500, - "batch_size": 20, - "request_rate": 0, - "baseline": 1, - "threshold": 0.97 + "top_k": 20, + "baseline": 95, + "threshold": 5, + "topk": 20 }] +mode_aisbench = {"eplb": aisbench_gsm8k} + @pytest.mark.asyncio @pytest.mark.parametrize("model", MODELS) -@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS) -async def test_models(model: str, tp_size: int) -> None: +@pytest.mark.parametrize("mode", MODES) +async def test_models(model: str, mode: str) -> None: port = get_open_port() env_dict = { - "TASK_QUEUE_ENABLE": "1", + 
"OMP_NUM_THREADS": "10", "OMP_PROC_BIND": "false", - "HCCL_OP_EXPANSION_MODE": "AIV", - "PAGED_ATTENTION_MASK_LEN": "5500", - "DYNAMIC_EPLB": "true", - "HCCL_BUFFSIZE": "1024" + "HCCL_BUFFSIZE": "1024", + "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True", + "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1" + } + additional_config: dict[str, Any] = { + "ascend_scheduler_config": { + "enabled": False + }, } server_args = [ - "--no-enable-prefix-caching", "--enable-expert-parallel", - "--tensor-parallel-size", - str(tp_size), "--port", - str(port), "--max-model-len", "36864", "--max-num-batched-tokens", - "36864", "--block-size", "128", "--trust-remote-code", - "--quantization", "ascend", "--gpu-memory-utilization", "0.9", - "--additional-config", - '{"enable_weight_nz_layout":true, "dynamic_eplb": true, ' - '"num_iterations_eplb_update": 1000, "num_wait_worker_iterations": 200}' + "--quantization", "ascend", "--async-scheduling", + "--data-parallel-size", "4", "--tensor-parallel-size", "4", + "--enable-expert-parallel", "--port", + str(port), "--max-model-len", "40960", "--max-num-batched-tokens", + "8192", "--max-num-seqs", "12", "--trust-remote-code", + "--gpu-memory-utilization", "0.9" ] + if mode == "eplb": + env_dict["DYNAMIC_EPLB"] = "true" + additional_config["dynamic_eplb"] = True + additional_config["num_iterations_eplb_update"] = 2048 + additional_config["num_wait_worker_iterations"] = 200 + server_args.extend(["--additional-config", json.dumps(additional_config)]) request_keyword_args: dict[str, Any] = { **api_keyword_args, } @@ -100,5 +101,10 @@ async def test_models(model: str, tp_size: int) -> None: ) choices: list[openai.types.CompletionChoice] = batch.choices assert choices[0].text, "empty response" + print(choices) # aisbench test - run_aisbench_cases(model, port, aisbench_cases) + aisbench_cases = mode_aisbench[mode] + run_aisbench_cases(model, + port, + aisbench_cases, + server_args=server_args) diff --git a/tests/e2e/nightly/models/test_qwen3_235b_w8a8.py 
b/tests/e2e/nightly/models/test_qwen3_235b_w8a8.py new file mode 100644 index 0000000000..8220e4d59a --- /dev/null +++ b/tests/e2e/nightly/models/test_qwen3_235b_w8a8.py @@ -0,0 +1,107 @@ +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# Copyright 2023 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# +import json +from typing import Any + +import openai +import pytest +from vllm.utils import get_open_port + +from tests.e2e.conftest import RemoteOpenAIServer +from tools.aisbench import run_aisbench_cases + +MODELS = [ + "vllm-ascend/Qwen3-235B-A22B-W8A8", +] + +MODES = ["full_graph", "piecewise"] + +prompts = [ + "San Francisco is a", +] + +api_keyword_args = { + "max_tokens": 10, +} + +aisbench_cases = [{ + "case_type": "accuracy", + "dataset_path": "vllm-ascend/gsm8k-lite", + "request_conf": "vllm_api_general_chat", + "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt", + "max_out_len": 32768, + "batch_size": 32, + "top_k": 20, + "baseline": 95, + "threshold": 5 +}] + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("mode", MODES) +async def test_models(model: str, mode: str) -> None: + port = get_open_port() + env_dict = { + "OMP_NUM_THREADS": "10", + "OMP_PROC_BIND": "false", + "HCCL_BUFFSIZE": "1024", + "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True", + "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1" + } + additional_config: 
dict[str, Any] = { + "ascend_scheduler_config": { + "enabled": False + }, + } + compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"} + server_args = [ + "--quantization", "ascend", "--async-scheduling", + "--data-parallel-size", "4", "--tensor-parallel-size", "4", + "--enable-expert-parallel", "--port", + str(port), "--max-model-len", "40960", "--max-num-batched-tokens", + "8192", "--max-num-seqs", "12", "--trust-remote-code", + "--gpu-memory-utilization", "0.9" + ] + if mode == "piecewise": + compilation_config["cudagraph_mode"] = "PIECEWISE" + server_args.extend( + ["--compilation-config", + json.dumps(compilation_config)]) + server_args.extend(["--additional-config", json.dumps(additional_config)]) + request_keyword_args: dict[str, Any] = { + **api_keyword_args, + } + with RemoteOpenAIServer(model, + server_args, + server_port=port, + env_dict=env_dict, + auto_port=False) as server: + client = server.get_async_client() + batch = await client.completions.create( + model=model, + prompt=prompts, + **request_keyword_args, + ) + choices: list[openai.types.CompletionChoice] = batch.choices + assert choices[0].text, "empty response" + print(choices) + # aisbench test + run_aisbench_cases(model, + port, + aisbench_cases, + server_args=server_args) diff --git a/tests/e2e/nightly/models/test_qwen3_30b_w8a8.py b/tests/e2e/nightly/models/test_qwen3_30b_w8a8.py new file mode 100644 index 0000000000..307a1575cc --- /dev/null +++ b/tests/e2e/nightly/models/test_qwen3_30b_w8a8.py @@ -0,0 +1,92 @@ +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# Copyright 2023 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# +from typing import Any + +import openai +import pytest +from vllm.utils import get_open_port + +from tests.e2e.conftest import RemoteOpenAIServer +from tools.aisbench import run_aisbench_cases + +MODELS = [ + "vllm-ascend/Qwen3-30B-A3B-W8A8", +] + +TENSOR_PARALLELS = [1] + +prompts = [ + "San Francisco is a", +] + +api_keyword_args = { + "max_tokens": 10, +} + +aisbench_cases = [{ + "case_type": "performance", + "dataset_path": "vllm-ascend/GSM8K-in3500-bs400", + "request_conf": "vllm_api_stream_chat", + "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf", + "num_prompts": 180, + "max_out_len": 1500, + "batch_size": 45, + "request_rate": 0, + "baseline": 1, + "threshold": 0.97 +}] + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS) +async def test_models(model: str, tp_size: int) -> None: + port = get_open_port() + env_dict = { + "OMP_PROC_BIND": "false", + "OMP_NUM_THREADS": "10", + "HCCL_BUFFSIZE": "1024", + "HCCL_OP_EXPANSION_MODE": "AIV", + "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True" + } + server_args = [ + "--quantization", "ascend", "--async-scheduling", + "--no-enable-prefix-caching", "--tensor-parallel-size", + str(tp_size), "--port", + str(port), "--max-model-len", "5600", "--max-num-batched-tokens", + "16384", "--max-num-seqs", "100", "--trust-remote-code", + "--gpu-memory-utilization", "0.9", "--compilation-config", + '{"cudagraph_mode": "FULL_DECODE_ONLY"}' + ] + request_keyword_args: dict[str, Any] = { + 
**api_keyword_args, + } + with RemoteOpenAIServer(model, + server_args, + server_port=port, + env_dict=env_dict, + auto_port=False) as server: + client = server.get_async_client() + batch = await client.completions.create( + model=model, + prompt=prompts, + **request_keyword_args, + ) + choices: list[openai.types.CompletionChoice] = batch.choices + assert choices[0].text, "empty response" + # aisbench test + run_aisbench_cases(model, port, aisbench_cases) diff --git a/tests/e2e/nightly/models/test_qwen3_32b_int8.py b/tests/e2e/nightly/models/test_qwen3_32b_int8.py index e245f3d74f..bbaf863aa9 100644 --- a/tests/e2e/nightly/models/test_qwen3_32b_int8.py +++ b/tests/e2e/nightly/models/test_qwen3_32b_int8.py @@ -58,7 +58,7 @@ "max_out_len": 32768, "batch_size": 32, "baseline": 83.33, - "threshold": 17 + "threshold": 7 }, { "case_type": "performance", "dataset_path": "vllm-ascend/GSM8K-in3500-bs400", diff --git a/tests/e2e/nightly/multi_node/scripts/build_mooncake.sh b/tests/e2e/nightly/multi_node/scripts/build_mooncake.sh index 8fb4610bce..7627cf0c95 100644 --- a/tests/e2e/nightly/multi_node/scripts/build_mooncake.sh +++ b/tests/e2e/nightly/multi_node/scripts/build_mooncake.sh @@ -9,15 +9,13 @@ YELLOW="\033[0;33m" RED="\033[0;31m" NC="\033[0m" # No Color -branch=${1:-pooling_async_memecpy_v1} -point=${2:-8fce1ffab3930fec2a8b8d3be282564dfa1bb186} +branch=${1:-v0.3.7.post2} -repo_url="https://github.com/AscendTransport/Mooncake" +repo_url="https://github.com/kvcache-ai/Mooncake" repo_name="Mooncake" state_file=".build_state" echo "[INFO] Branch: $branch" -echo "[INFO] Commit: $point" echo "-------------------------------------------" @@ -29,22 +27,36 @@ if ! is_done "clone"; then if [ -d "$repo_name" ]; then echo "[WARN] Directory $repo_name already exists, skipping clone." 
     else
-        git clone -b "$branch" "$repo_url" "$repo_name"
+        git clone --branch "$branch" --depth 1 "$repo_url" "$repo_name"
     fi
-    cd "$repo_name"
-    git fetch --all
-    git checkout "$point" || { echo "[ERROR] Checkout failed."; exit 1; }
-    cd ..
    mark_done "clone"
else
    echo "[SKIP] Clone step already done."
fi

+init_ascend_env() {
+    cann_in_sys_path=/usr/local/Ascend/ascend-toolkit; \
+    cann_in_user_path=$HOME/Ascend/ascend-toolkit; \
+    uname_m=$(uname -m) && \
+    if [ -f "${cann_in_sys_path}/set_env.sh" ]; then \
+        source ${cann_in_sys_path}/set_env.sh; \
+        export LD_LIBRARY_PATH=${cann_in_sys_path}/latest/lib64:${cann_in_sys_path}/latest/${uname_m}-linux/devlib:${LD_LIBRARY_PATH} ; \
+    elif [ -f "${cann_in_user_path}/set_env.sh" ]; then \
+        source "$HOME/Ascend/ascend-toolkit/set_env.sh"; \
+        export LD_LIBRARY_PATH=${cann_in_user_path}/latest/lib64:${cann_in_user_path}/latest/${uname_m}-linux/devlib:${LD_LIBRARY_PATH}; \
+    else \
+        echo "No Ascend Toolkit found"; \
+        exit 1; \
+    fi
+}
+
+init_ascend_env
 if ! is_done "deps"; then
     cd "$repo_name"
-    echo "[STEP]Installing dependencies (ignore Go failure)..."
-    yes | bash dependencies.sh || echo "⚠️ dependencies.sh failed (Go install likely failed), continuing..."
+    echo "[STEP]Installing dependencies..."
+    sed -i 's|https://go.dev/dl/|https://golang.google.cn/dl/|g' dependencies.sh
+    bash dependencies.sh -y
     cd ..
     mark_done "deps"
 else
@@ -74,7 +86,7 @@ if ! is_done "build"; then
     fi

     mkdir build && cd build
-    cmake .. || { echo "[ERROR] cmake failed."; exit 1; }
+    cmake .. -DUSE_ASCEND_DIRECT=ON || { echo "[ERROR] cmake failed."; exit 1; }
     make -j || { echo "[ERROR] make failed."; exit 1; }
     make install || { echo "[ERROR] make install failed."; exit 1; }
     mark_done "build"
@@ -83,19 +95,6 @@ else
 fi


-if ! is_done "copy_lib"; then
-    echo "[STEP] Copy library files..."
- cp mooncake-transfer-engine/src/transport/ascend_transport/hccl_transport/ascend_transport_c/libascend_transport_mem.so \ - /usr/local/Ascend/ascend-toolkit/latest/python/site-packages/ - cp mooncake-transfer-engine/src/libtransfer_engine.so \ - /usr/local/Ascend/ascend-toolkit/latest/python/site-packages/ - cd .. - mark_done "copy_lib" -else - echo "[SKIP] Library copy already done." -fi - - if ! grep -q "export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:$LD_LIBRARY_PATH" ~/.bashrc; then echo -e "${YELLOW}Adding LD_LIBRARY_PATH to your PATH in ~/.bashrc${NC}" echo 'export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:$LD_LIBRARY_PATH' >> ~/.bashrc diff --git a/tests/e2e/nightly/multi_node/scripts/run.sh b/tests/e2e/nightly/multi_node/scripts/run.sh index 60a7ce0883..e4c2555534 100644 --- a/tests/e2e/nightly/multi_node/scripts/run.sh +++ b/tests/e2e/nightly/multi_node/scripts/run.sh @@ -9,7 +9,6 @@ RED="\033[0;31m" NC="\033[0m" # No Color # Configuration -GOVER=1.23.8 LOG_DIR="/root/.cache/tests/logs" OVERWRITE_LOGS=true SRC_DIR="$WORKSPACE/source_code" @@ -97,34 +96,6 @@ install_vllm() { pip install -r "$SRC_DIR/vllm-ascend/requirements-dev.txt" } -download_go() { - ARCH=$(uname -m) - GOVER=1.23.8 - if [ "$ARCH" = "aarch64" ]; then - ARCH="arm64" - elif [ "$ARCH" = "x86_64" ]; then - ARCH="amd64" - else - echo "Unsupported architecture: $ARCH" - exit 1 - fi - # Download Go - echo "Downloading Go $GOVER..." - wget -q --show-progress https://golang.google.cn/dl/go$GOVER.linux-$ARCH.tar.gz - check_success "Failed to download Go $GOVER" - - # Install Go - echo "Installing Go $GOVER..." 
- tar -C /usr/local -xzf go$GOVER.linux-$ARCH.tar.gz - check_success "Failed to install Go $GOVER" - - # Clean up downloaded file - rm -f go$GOVER.linux-$ARCH.tar.gz - check_success "Failed to clean up Go installation file" - - print_success "Go $GOVER installed successfully" -} - install_ais_bench() { local AIS_BENCH="$SRC_DIR/vllm-ascend/benchmark" git clone https://gitee.com/aisbench/benchmark.git $AIS_BENCH @@ -136,29 +107,6 @@ install_ais_bench() { cd - } -install_go() { - # Check if Go is already installed - if command -v go &> /dev/null; then - GO_VERSION=$(go version | awk '{print $3}') - if [[ "$GO_VERSION" == "go$GOVER" ]]; then - echo -e "${YELLOW}Go $GOVER is already installed. Skipping...${NC}" - else - echo -e "${YELLOW}Found Go $GO_VERSION. Will install Go $GOVER...${NC}" - download_go - fi - else - download_go - fi - - # Add Go to PATH if not already there - if ! grep -q "export PATH=\$PATH:/usr/local/go/bin" ~/.bashrc; then - echo -e "${YELLOW}Adding Go to your PATH in ~/.bashrc${NC}" - echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc - echo -e "${YELLOW}Please run 'source ~/.bashrc' or start a new terminal to use Go${NC}" - fi - export PATH=$PATH:/usr/local/go/bin -} - kill_npu_processes() { pgrep python3 | xargs -r kill -9 pgrep VLLM | xargs -r kill -9 @@ -193,11 +141,8 @@ main() { install_sys_dependencies install_vllm install_ais_bench - # to speed up mooncake build process, install Go here - install_go cd "$WORKSPACE/source_code" - . $SRC_DIR/vllm-ascend/tests/e2e/nightly/multi_node/scripts/build_mooncake.sh \ - "pooling_async_memecpy_v1" "8fce1ffab3930fec2a8b8d3be282564dfa1bb186" + . 
$SRC_DIR/vllm-ascend/tests/e2e/nightly/multi_node/scripts/build_mooncake.sh cd "$WORKSPACE/source_code/vllm-ascend" run_tests_with_log } diff --git a/tests/e2e/singlecard/test_camem.py b/tests/e2e/singlecard/test_camem.py index 04643c8082..2fe4a8553a 100644 --- a/tests/e2e/singlecard/test_camem.py +++ b/tests/e2e/singlecard/test_camem.py @@ -18,6 +18,8 @@ # import gc +import os +from unittest.mock import patch import torch from vllm import SamplingParams @@ -71,6 +73,7 @@ def test_basic_camem(): @fork_new_process_for_each_test +@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"}) def test_end_to_end(): free, total = torch.npu.mem_get_info() used_bytes_baseline = total - free # in case other process is running diff --git a/tests/ut/attention/test_attention_v1.py b/tests/ut/attention/test_attention_v1.py index dfb9a2a07f..9c732f61de 100644 --- a/tests/ut/attention/test_attention_v1.py +++ b/tests/ut/attention/test_attention_v1.py @@ -63,10 +63,26 @@ def test_copy_blocks(self): class TestAscendAttentionMetadataBuilder(TestBase): - def setUp(self): + @patch('vllm.distributed.parallel_state.get_dcp_group') + @patch('vllm.distributed.parallel_state._DCP', + new_callable=lambda: MagicMock(spec=GroupCoordinator)) + @patch("vllm.distributed.get_decode_context_model_parallel_world_size", + return_value=1) + def setUp(self, mock_get_dcp_size, mock_dcp, mock_get_dcp_group): + mock_dcp.world_size = 1 + dcp_group = MagicMock(spec=GroupCoordinator) + dcp_group.rank_in_group = 0 + dcp_group.world_size = 1 + dcp_group.device_group = MagicMock() + mock_get_dcp_group.return_value = dcp_group + self.mock_vllm_config = MagicMock() + self.mock_vllm_config.speculative_config = None self.mock_vllm_config.model_config.max_model_len = 640 self.mock_vllm_config.cache_config.block_size = 64 + self.mock_vllm_config.compilation_config.cudagraph_mode = None + self.mock_vllm_config.scheduler_config.max_num_seqs = 10 + self.mock_vllm_config.scheduler_config.decode_max_num_seqs = 10 
self.mock_device = 'cpu:0' self.builder = AscendAttentionMetadataBuilder(None, None, self.mock_vllm_config, diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index d8ddc6a6d6..8d15bcaab1 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -82,7 +82,8 @@ def test_ascend_mla_prefill_metadata_with_chunked_context(self): seq_tot=seq_tot, max_seq_lens=max_seq_lens, workspace=workspace, - chunk_seq_lens=chunk_seq_lens) + chunk_seq_lens=chunk_seq_lens, + chunk_seq_lens_npu=chunk_seq_lens) metadata = AscendMLAPrefillMetadata( attn_mask=torch.tensor([[1, 0], [1, 1]], dtype=torch.bool), @@ -103,6 +104,8 @@ def test_ascend_mla_prefill_metadata_with_chunked_context(self): self.assertEqual(metadata.chunked_context.max_seq_lens, max_seq_lens) self.assertIs(metadata.chunked_context.workspace, workspace) self.assertIs(metadata.chunked_context.chunk_seq_lens, chunk_seq_lens) + self.assertIs(metadata.chunked_context.chunk_seq_lens_npu, + chunk_seq_lens) class TestAscendMLADecodeMetadata(TestBase): @@ -478,6 +481,7 @@ def test_compute_prefill_context(self, mock_ring, mock_load): chunk_ctx = MagicMock() chunk_ctx.seq_tot = [8] chunk_ctx.chunk_seq_lens = [torch.tensor([8])] + chunk_ctx.chunk_seq_lens_npu = [torch.tensor([8])] chunk_ctx.starts = [torch.tensor([0])] prefill_meta = MagicMock() diff --git a/tests/ut/distributed/mooncake/test_config_data.py b/tests/ut/distributed/mooncake/test_config_data.py new file mode 100644 index 0000000000..4408b41a82 --- /dev/null +++ b/tests/ut/distributed/mooncake/test_config_data.py @@ -0,0 +1,68 @@ +import unittest + +from vllm_ascend.distributed.mooncake.config_data import ( + _convert_to_bytes, _parse_global_segment_size) + + +class TestParseGlobalSegmentSize(unittest.TestCase): + + def test_int_input(self): + self.assertEqual(_parse_global_segment_size(1024), 1024) + self.assertEqual(_parse_global_segment_size(0), 0) + + def test_gb_unit(self): + 
self.assertEqual(_parse_global_segment_size("2GB"), 2 * 1024**3) + self.assertEqual(_parse_global_segment_size("1.5GB"), + int(1.5 * 1024**3)) + self.assertEqual(_parse_global_segment_size(" 2 GB "), 2 * 1024**3) + + def test_gb_unit_edge_cases(self): + with self.assertRaises(ValueError): + _parse_global_segment_size("GB") + with self.assertRaises(ValueError): + _parse_global_segment_size("abcGB") + + def test_mb_unit(self): + self.assertEqual(_parse_global_segment_size("512MB"), 512 * 1024**2) + self.assertEqual(_parse_global_segment_size("0.5MB"), + int(0.5 * 1024**2)) + self.assertEqual(_parse_global_segment_size("1024MB"), 1024 * 1024**2) + + def test_kb_unit(self): + self.assertEqual(_parse_global_segment_size("256KB"), 256 * 1024) + self.assertEqual(_parse_global_segment_size("1.25KB"), + int(1.25 * 1024)) + + def test_b_unit(self): + self.assertEqual(_parse_global_segment_size("4096B"), 4096) + self.assertEqual(_parse_global_segment_size("1024b"), 1024) + + def test_no_unit(self): + self.assertEqual(_parse_global_segment_size("2048"), 2048) + self.assertEqual(_parse_global_segment_size("0"), 0) + + def test_non_string_non_int_input(self): + self.assertEqual(_parse_global_segment_size(2048.0), 2048) + self.assertEqual(_parse_global_segment_size(True), 1) + + with self.assertRaises(TypeError): + _parse_global_segment_size(None) + + with self.assertRaises(TypeError): + _parse_global_segment_size({"size": 1024}) + + +class TestConvertToBytes(unittest.TestCase): + + def test_valid_conversion(self): + self.assertEqual(_convert_to_bytes("10", 1, "10"), 10) + self.assertEqual(_convert_to_bytes("1.5", 1024, "1.5KB"), + int(1.5 * 1024)) + self.assertEqual(_convert_to_bytes("0", 1024**3, "0GB"), 0) + + def test_invalid_numbers(self): + with self.assertRaises(ValueError): + _convert_to_bytes("abc", 1, "abc") + + with self.assertRaises(ValueError): + _convert_to_bytes("1.2.3", 1024, "1.2.3KB") diff --git a/tests/ut/kv_connector/test_mooncake_connector.py 
b/tests/ut/kv_connector/test_mooncake_connector.py index fa78a46f3e..a5bc066f1a 100644 --- a/tests/ut/kv_connector/test_mooncake_connector.py +++ b/tests/ut/kv_connector/test_mooncake_connector.py @@ -978,9 +978,6 @@ def __init__(self, *args, **kwargs): self.data_ptr = MagicMock(return_value=0x1000) -mock_envs_ascend = MagicMock() -mock_envs_ascend.MOONCAKE_CONNECTOR_PROTOCOL = "mock_protocol" - mock_logger = MagicMock() @@ -1017,14 +1014,15 @@ def mock_string_to_int64_hash(s): class TestMooncakeConnectorWorker(unittest.TestCase): def setUp(self): - self.envs_ascend_mock = MockEnvsAscend() self.mock_transfer_engine = MagicMock() self.mock_transfer_engine.get_rpc_port.return_value = 9090 self.mock_transfer_engine.initialize.return_value = 0 self.mock_transfer_engine.register_memory.return_value = 0 self.patches = [ - patch('os.getenv', return_value="10,11"), + patch( + 'vllm_ascend.distributed.mooncake_layerwise_connector.envs_ascend.PHYSICAL_DEVICES', + '10,11'), patch('torch.Tensor.size', return_value=(10, 16, 8, 16)), patch('torch.Tensor.element_size', return_value=4), patch('torch.Tensor.data_ptr', return_value=0x1000), @@ -1053,8 +1051,6 @@ def setUp(self): MagicMock()), patch('vllm_ascend.distributed.mooncake_connector.threading.Event', MagicMock()), - patch.dict('sys.modules', - {'vllm_ascend.envs': self.envs_ascend_mock}), ] for p in self.patches: diff --git a/tests/ut/kv_connector/test_mooncake_layerwise_connector.py b/tests/ut/kv_connector/test_mooncake_layerwise_connector.py index bc9ba253a4..28504c9b79 100644 --- a/tests/ut/kv_connector/test_mooncake_layerwise_connector.py +++ b/tests/ut/kv_connector/test_mooncake_layerwise_connector.py @@ -32,6 +32,14 @@ def setUp(self): self.engine = MagicMock() self.engine.register_memory.return_value = 0 self.engine.batch_transfer_sync_write.return_value = 1 + self._patcher_cs = patch( + 'vllm_ascend.distributed.mooncake_layerwise_connector.torch_npu.npu.current_stream' + ) + self.mock_current_stream = 
self._patcher_cs.start() + self.addCleanup(self._patcher_cs.stop) + fake_stream = MagicMock(name="FakeStream") + fake_stream.synchronize = MagicMock() + self.mock_current_stream.return_value = fake_stream self.first_kv_cache = torch.zeros((2, 2, 2, 8), dtype=torch.float32, @@ -792,15 +800,15 @@ def test_request_finished(self, mock_method): class TestMooncakeLayerwiseConnectorWorker(unittest.TestCase): def setUp(self): - self.envs_ascend_mock = type("MockEnvsAscend", (), - {"PHYSICAL_DEVICES": "10,11"})() self.mock_transfer_engine = MagicMock() self.mock_transfer_engine.get_rpc_port.return_value = 9090 self.mock_transfer_engine.initialize.return_value = 0 self.mock_transfer_engine.register_memory.return_value = 0 self.patches = [ - patch('os.getenv', return_value="10,11"), + patch( + 'vllm_ascend.distributed.mooncake_layerwise_connector.envs_ascend.PHYSICAL_DEVICES', + '10,11'), patch('torch.Tensor.size', return_value=(10, 16, 8, 16)), patch('torch.Tensor.element_size', return_value=4), patch('torch.Tensor.data_ptr', return_value=0x1000), @@ -833,8 +841,6 @@ def setUp(self): patch( 'vllm_ascend.distributed.mooncake_layerwise_connector.threading.Event', MagicMock()), - patch.dict('sys.modules', - {'vllm_ascend.envs': self.envs_ascend_mock}), patch( 'vllm_ascend.distributed.mooncake_layerwise_connector.get_ascend_config', return_value=SimpleNamespace(pd_tp_ratio=1, diff --git a/tests/ut/test_platform.py b/tests/ut/test_platform.py index d5d4309878..9e0c0b3295 100644 --- a/tests/ut/test_platform.py +++ b/tests/ut/test_platform.py @@ -4,6 +4,7 @@ import pytest import torch from vllm.config.compilation import CUDAGraphMode +from vllm.engine.arg_utils import EngineArgs from vllm.platforms import PlatformEnum from tests.ut.base import TestBase @@ -722,3 +723,32 @@ def test_get_static_graph_wrapper_cls_returns_correct_value(self): self.platform.get_static_graph_wrapper_cls(), "vllm_ascend.compilation.acl_graph.ACLGraphWrapper", ) + + def test_aclgraph_enable(self): + config 
= EngineArgs() + VllmConfig = config.create_engine_config() + self.assertEqual(VllmConfig.compilation_config.cudagraph_mode, + CUDAGraphMode.PIECEWISE) + + with self.assertLogs(logger="vllm", level="INFO") as cm: + from vllm_ascend import platform + + importlib.reload(platform) + self.platform.check_and_update_config(VllmConfig) + self.assertTrue( + "PIECEWISE compilation enabled on NPU. use_inductor not supported - " + "using only ACL Graph mode" in cm.output[1]) + if vllm_version_is("0.11.0"): + self.assertEqual( + VllmConfig.compilation_config.level, + CompilationLevel.PIECEWISE, + ) + else: + self.assertEqual( + VllmConfig.compilation_config.mode, + CompilationMode.VLLM_COMPILE, + ) + self.assertEqual( + VllmConfig.compilation_config.cudagraph_mode, + CUDAGraphMode.PIECEWISE, + ) diff --git a/tests/ut/torchair/test_torchair_mla.py b/tests/ut/torchair/test_torchair_mla.py index 3dd1d2f7f6..1f108b3eb0 100644 --- a/tests/ut/torchair/test_torchair_mla.py +++ b/tests/ut/torchair/test_torchair_mla.py @@ -86,7 +86,8 @@ def test_ascend_mla_prefill_metadata_with_chunked_context(self): seq_tot=seq_tot, max_seq_lens=max_seq_lens, workspace=workspace, - chunk_seq_lens=chunk_seq_lens) + chunk_seq_lens=chunk_seq_lens, + chunk_seq_lens_npu=chunk_seq_lens) metadata = AscendMLATorchairPrefillMetadata( attn_mask=torch.tensor([[1, 0], [1, 1]], dtype=torch.bool), @@ -107,6 +108,8 @@ def test_ascend_mla_prefill_metadata_with_chunked_context(self): self.assertEqual(metadata.chunked_context.max_seq_lens, max_seq_lens) self.assertIs(metadata.chunked_context.workspace, workspace) self.assertIs(metadata.chunked_context.chunk_seq_lens, chunk_seq_lens) + self.assertIs(metadata.chunked_context.chunk_seq_lens_npu, + chunk_seq_lens) class TestAscendMLATorchairDecodeMetadata(TestBase): @@ -661,6 +664,7 @@ def test_compute_prefill_context(self, mock_ring, mock_load): chunk_ctx = MagicMock() chunk_ctx.seq_tot = [8] chunk_ctx.chunk_seq_lens = [torch.tensor([8])] + chunk_ctx.chunk_seq_lens_npu = 
[torch.tensor([8])] chunk_ctx.starts = [torch.tensor([0])] prefill_meta = MagicMock() diff --git a/tests/ut/worker/test_worker_v1.py b/tests/ut/worker/test_worker_v1.py index 1ead0c5750..2fbad2f810 100644 --- a/tests/ut/worker/test_worker_v1.py +++ b/tests/ut/worker/test_worker_v1.py @@ -1,3 +1,4 @@ +import os import unittest from unittest.mock import MagicMock, patch @@ -273,6 +274,7 @@ def test_sleep_mode_disabled_raises_error(self, mock_sleep_mode_enabled): @patch("vllm_ascend.worker.worker_v1.sleep_mode_enabled") @patch("vllm_ascend.worker.worker_v1.CaMemAllocator") + @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"}) def test_wake_up_mode_enabled(self, mock_allocator_class, mock_sleep_mode_enabled): """Test wake_up method when sleep mode is enabled""" @@ -295,6 +297,7 @@ def test_wake_up_mode_enabled(self, mock_allocator_class, mock_allocator.wake_up.assert_called_once_with(tags=["test_tag"]) @patch("vllm_ascend.worker.worker_v1.sleep_mode_enabled") + @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"}) def test_wake_up_mode_disabled_raises_error(self, mock_sleep_mode_enabled): """Test wake_up method raises exception when sleep mode is disabled""" from vllm_ascend.worker.worker_v1 import NPUWorker diff --git a/vllm_ascend/ascend_forward_context.py b/vllm_ascend/ascend_forward_context.py index 80e6541e5f..a0f1edd59f 100644 --- a/vllm_ascend/ascend_forward_context.py +++ b/vllm_ascend/ascend_forward_context.py @@ -115,12 +115,10 @@ def set_ascend_forward_context( # the performance may degrade due to the switching of communication methods. 
mmrs_fusion = True if is_moe_model(vllm_config): - sp_enabled = enable_sp(vllm_config) and \ - tp_world_size > 1 and num_tokens is not None + sp_enabled = enable_sp(vllm_config) and num_tokens is not None mmrs_fusion = False else: sp_enabled = enable_sp(vllm_config) and \ - tp_world_size > 1 and \ num_tokens is not None and num_tokens > 1000 forward_context.mmrs_fusion = mmrs_fusion diff --git a/vllm_ascend/attention/attention_v1.py b/vllm_ascend/attention/attention_v1.py index 258d5e3aac..098e77c543 100644 --- a/vllm_ascend/attention/attention_v1.py +++ b/vllm_ascend/attention/attention_v1.py @@ -127,7 +127,7 @@ def copy_blocks( @staticmethod def get_supported_block_size() -> list[int]: - return [64] + return [128] class AscendAttentionState(Enum): @@ -163,8 +163,8 @@ class AscendMetadataForPrefill: @dataclass class AscendMetadataForDecode: """ Decode Specific Metadata for Ascend""" - num_computed_tokens_of_pcp_dcp: Optional[list[Optional[list[Optional[ - list[int]]]]]] = None + num_computed_tokens_of_pcp_dcp: Optional[list[list[list[int]]]] = None + batch_seq_mask: torch.Tensor = None @dataclass @@ -232,10 +232,36 @@ def __init__( ): self.vllm_config = vllm_config self.model_config = vllm_config.model_config + self.compilation_config = vllm_config.compilation_config self.device = device self.max_num_blocks_per_req = cdiv( self.model_config.max_model_len, AscendAttentionBackend.get_supported_block_size()[0]) + decode_max_num_seqs = getattr(vllm_config.scheduler_config, + 'decode_max_num_seqs', 0) + max_num_seqs = max(vllm_config.scheduler_config.max_num_seqs, + decode_max_num_seqs) + self.batch_seq_mask_buf = torch.empty(max_num_seqs, + dtype=torch.uint8, + device=device) + self.pcp_size = get_prefill_context_model_parallel_world_size( + ) if prefill_context_parallel_enable() else 1 + self.pcp_rank = get_prefill_context_model_parallel_rank( + ) if self.pcp_size > 1 else 0 + self.dcp_size = get_decode_context_model_parallel_world_size() + self.dcp_rank = 
get_decode_context_model_parallel_rank( + ) if self.dcp_size > 1 else 0 + + self.speculative_config = vllm_config.speculative_config + self.decode_threshold = 1 + if self.speculative_config: + spec_token_num = self.speculative_config.num_speculative_tokens + self.decode_threshold += spec_token_num + assert self.decode_threshold <= 16, f"decode_threshold exceeded \ + npu_fused_infer_attention_score TND layout's limit of 16, \ + got {self.decode_threshold}" + + AscendAttentionMetadataBuilder.reorder_batch_threshold = self.decode_threshold def reorder_batch(self, input_batch, scheduler_output: "SchedulerOutput") -> bool: @@ -356,11 +382,22 @@ def build( common_long_seq_metadata = common_attn_metadata.prefill_context_parallel_metadata if common_long_seq_metadata is not None: num_computed_tokens_of_pcp_dcp = common_long_seq_metadata.num_computed_tokens_of_pcp_dcp - num_computed_tokens_of_pcp_dcp = np.array( + assert num_computed_tokens_of_pcp_dcp is not None + num_computed_tokens_array = np.array( num_computed_tokens_of_pcp_dcp) + num_computed_tokens_array = num_computed_tokens_array[: + num_decodes] + batch_seq_mask = ( + num_computed_tokens_array[:, self.pcp_rank, + self.dcp_rank] == 0) + # TODO: numpy array mode of the shared memory is used to improve performance + self.batch_seq_mask_buf[:batch_seq_mask.shape[0]].copy_( + torch.from_numpy(batch_seq_mask), non_blocking=True) decode_metadata = AscendMetadataForDecode( - num_computed_tokens_of_pcp_dcp= - num_computed_tokens_of_pcp_dcp) + num_computed_tokens_of_pcp_dcp=num_computed_tokens_array, + batch_seq_mask=self.batch_seq_mask_buf[:batch_seq_mask. 
+ shape[0]], + ) attn_metadata = AscendMetadata( num_actual_tokens=num_actual_tokens, @@ -869,7 +906,6 @@ def _forward_decode_pcp_dcp(self, query: torch.Tensor, else: num_heads = self.num_heads - q_nope = query.view(query.shape[0], 1, query.shape[1], query.shape[2]) k_nope = self.key_cache.view(self.key_cache.shape[0], self.key_cache.shape[1], -1) value = self.value_cache.view(self.key_cache.shape[0], @@ -880,7 +916,7 @@ def _forward_decode_pcp_dcp(self, query: torch.Tensor, 'num_key_value_heads': self.num_kv_heads, 'input_layout': - "BSND", + 'TND', 'atten_mask': None, 'scale': @@ -898,10 +934,12 @@ def _forward_decode_pcp_dcp(self, query: torch.Tensor, 'actual_seq_lengths_kv': attn_metadata.decode_meta. num_computed_tokens_of_pcp_dcp[:, self.pcp_rank, self.dcp_rank], + 'actual_seq_lengths': + attn_metadata.actual_seq_lengths_q[:attn_metadata.num_decodes], } graph_params = get_graph_params() forward_context: ForwardContext = get_forward_context() - num_tokens = q_nope.shape[0] + num_tokens = query.shape[0] if forward_context.capturing: stream = torch_npu.npu.current_stream() @@ -913,26 +951,27 @@ def _forward_decode_pcp_dcp(self, query: torch.Tensor, workspace = graph_params.workspaces.get(num_tokens) if workspace is None: workspace = torch_npu._npu_fused_infer_attention_score_get_max_workspace( - q_nope, k_nope, value, **common_kwargs) + query, k_nope, value, **common_kwargs) update_graph_params_workspaces(num_tokens, weak_ref_tensors(workspace)) - attn_out = torch.empty_like(q_nope) - attn_lse = torch.empty((num_tokens, num_heads, 1, 1), + attn_out = torch.empty_like(query) + attn_lse = torch.empty((num_tokens, num_heads, 1), dtype=torch.float, - device=q_nope.device) - - graph_params.attn_params[num_tokens].append( - (weak_ref_tensors(q_nope), weak_ref_tensors(k_nope), - weak_ref_tensors(value), self.num_heads, self.num_kv_heads, - self.scale, attn_metadata.block_tables, - self.key_cache.shape[1], attn_metadata.decode_meta. 
- num_computed_tokens_of_pcp_dcp[:, self.pcp_rank, - self.dcp_rank], - weak_ref_tensors(attn_out), weak_ref_tensors(attn_lse), - self.pcp_rank, self.dcp_rank, self.dcp_size)) + device=query.device) + + graph_params.attn_params[num_tokens].append(( + weak_ref_tensors(query), weak_ref_tensors(k_nope), + weak_ref_tensors(value), self.num_heads, self.num_kv_heads, + self.scale, attn_metadata.block_tables, + self.key_cache.shape[1], attn_metadata.decode_meta. + num_computed_tokens_of_pcp_dcp[:, self.pcp_rank, + self.dcp_rank], + attn_metadata.actual_seq_lengths_q[:attn_metadata.num_decodes], + weak_ref_tensors(attn_out), weak_ref_tensors(attn_lse), + self.dcp_size, self.pcp_rank, self.dcp_rank)) torch.npu.graph_task_group_begin(stream) torch_npu.npu_fused_infer_attention_score.out( - q_nope, + query, k_nope, value, **common_kwargs, @@ -942,11 +981,17 @@ def _forward_decode_pcp_dcp(self, query: torch.Tensor, graph_params.handles[num_tokens].append(handle) else: attn_out, attn_lse = torch_npu.npu_fused_infer_attention_score( - q_nope, k_nope, value, **common_kwargs) + query, k_nope, value, **common_kwargs) + + out_mask = attn_metadata.decode_meta.batch_seq_mask[:, None, + None].expand_as( + attn_out) + attn_out = torch.where(out_mask, 0, attn_out) - attn_out = attn_out.view(attn_out.shape[0], attn_out.shape[2], - attn_out.shape[3]) - attn_lse = attn_lse.view(attn_lse.shape[0], attn_lse.shape[1], 1) + lse_mask = attn_metadata.decode_meta.batch_seq_mask[:, None, + None].expand_as( + attn_lse) + attn_lse = torch.where(lse_mask, -torch.inf, attn_lse) attn_out_lse_list = [] # Concat out&lse: [bs,num_heads,v_head_dim] + [bs,num_heads,1] -> [bs,num_heads,v_head_dim+1] diff --git a/vllm_ascend/attention/mla_v1.py b/vllm_ascend/attention/mla_v1.py index faf032536b..6d5c13971a 100644 --- a/vllm_ascend/attention/mla_v1.py +++ b/vllm_ascend/attention/mla_v1.py @@ -1,11 +1,10 @@ from dataclasses import dataclass -from typing import (TYPE_CHECKING, ClassVar, NamedTuple, Optional, 
Tuple, Type, - TypeVar) +from typing import (TYPE_CHECKING, ClassVar, List, NamedTuple, Optional, Tuple, + Type, TypeVar) import numpy as np import torch import torch.distributed as dist -import torch.nn.functional as F import torch_npu from torch import nn from vllm.attention.backends.abstract import (AttentionBackend, @@ -111,6 +110,7 @@ class ChunkedContextMetadata: max_seq_lens: list[int] workspace: torch.Tensor chunk_seq_lens: torch.Tensor + chunk_seq_lens_npu: torch.Tensor attn_mask: torch.Tensor query_lens: torch.Tensor @@ -140,11 +140,8 @@ class AscendMLADecodeMetadata: attn_mask: Optional[torch.Tensor] = None sin: torch.Tensor = None cos: torch.Tensor = None - num_computed_tokens_of_pcp_dcp: Optional[list[Optional[list[Optional[ - list[int]]]]]] = None - seq_mask_pcp: torch.Tensor = None - seq_mask_dcp: torch.Tensor = None cp_seq_len: torch.Tensor = None + batch_seq_mask: torch.Tensor = None @dataclass @@ -264,9 +261,10 @@ def __init__(self, self.rope_dim = self.model_config.hf_text_config.qk_rope_head_dim self.cos_cache = None self.sin_cache = None + self.pcp_size = get_prefill_context_model_parallel_world_size( ) if prefill_context_parallel_enable() else 1 - self.cp_rank = get_prefill_context_model_parallel_rank( + self.pcp_rank = get_prefill_context_model_parallel_rank( ) if self.pcp_size > 1 else 0 self.dcp_size = get_decode_context_model_parallel_world_size() self.dcp_rank = get_decode_context_model_parallel_rank( @@ -274,6 +272,9 @@ def __init__(self, decode_max_num_seqs = getattr(scheduler_config, 'decode_max_num_seqs', 0) max_num_seqs = max(scheduler_config.max_num_seqs, decode_max_num_seqs) + self.batch_seq_mask_buf = torch.empty(max_num_seqs, + dtype=torch.uint8, + device=device) self.seq_mask_pcp_buf = torch.empty(max_num_seqs, self.pcp_size, dtype=torch.uint8, @@ -449,6 +450,7 @@ def build( seq_tot=chunk_seq_lens.sum(dim=1).tolist(), max_seq_lens=chunk_seq_lens.max(dim=1).values.tolist(), chunk_seq_lens=chunk_seq_lens, + 
chunk_seq_lens_npu=chunk_seq_lens.npu(), workspace=self.chunked_prefill_workspace, ) prefill_input_positions = input_positions[tokens_start:] @@ -490,36 +492,19 @@ def build( num_computed_tokens_of_cp_dcp_array = np.array( num_computed_tokens_of_pcp_dcp )[:num_decodes] # [bs, pcp_size, dcp_size] - seq_mask_pcp = torch.where( - torch.tensor( - num_computed_tokens_of_cp_dcp_array.sum(2)) == 0, 0, - 1).to(torch.uint8) - self.seq_mask_pcp_buf[:seq_mask_pcp.shape[0], :seq_mask_pcp. - shape[1]].copy_(seq_mask_pcp, - non_blocking=True) - seq_mask_pcp_shape = (seq_mask_pcp.shape[0], - seq_mask_pcp.shape[1]) - - seq_mask_dcp = torch.where( - torch.tensor( - num_computed_tokens_of_cp_dcp_array[:, - self.cp_rank, :]) - == 0, 0, 1).to(torch.uint8) - self.seq_mask_dcp_buf[:seq_mask_dcp.shape[0], :seq_mask_dcp. - shape[1]].copy_(seq_mask_dcp, - non_blocking=True) - seq_mask_dcp_shape = (seq_mask_dcp.shape[0], - seq_mask_dcp.shape[1]) cp_seq_len = num_computed_tokens_of_cp_dcp_array[:, - self.cp_rank, + self.pcp_rank, self.dcp_rank] cp_seq_len = torch.tensor(cp_seq_len, dtype=torch.int32) + batch_seq_mask = (cp_seq_len == 0) + self.batch_seq_mask_buf[:batch_seq_mask.shape[0]].copy_( + batch_seq_mask, non_blocking=True) + batch_seq_mask = self.batch_seq_mask_buf[:batch_seq_mask. + shape[0]] cp_seq_len = torch.where(cp_seq_len == 0, 1, cp_seq_len) else: - seq_mask_pcp_shape = (0, 0) - seq_mask_dcp_shape = (0, 0) - cp_seq_len = None + cp_seq_len, batch_seq_mask = None, None # TODO: After the fullgraph supports MTP, the if branch needs to deleted assert self.cos_cache is not None @@ -542,15 +527,8 @@ def build( actual_seq_lengths_q=actual_seq_lengths_q, sin=sin, cos=cos, - num_computed_tokens_of_pcp_dcp= - num_computed_tokens_of_pcp_dcp, - seq_mask_pcp=self. - seq_mask_pcp_buf[:seq_mask_pcp_shape[0], : - seq_mask_pcp_shape[1]], - seq_mask_dcp=self. 
- seq_mask_dcp_buf[:seq_mask_dcp_shape[0], : - seq_mask_dcp_shape[1]], - cp_seq_len=cp_seq_len) + cp_seq_len=cp_seq_len, + batch_seq_mask=batch_seq_mask) else: cos[:num_decode_tokens, ...] = self.cos_cache[input_positions].unsqueeze( @@ -569,15 +547,8 @@ def build( actual_seq_lengths_q=actual_seq_lengths_q, sin=sin[:num_decode_tokens, ...], cos=cos[:num_decode_tokens, ...], - num_computed_tokens_of_pcp_dcp= - num_computed_tokens_of_pcp_dcp, - seq_mask_pcp=self. - seq_mask_pcp_buf[:seq_mask_pcp_shape[0], : - seq_mask_pcp_shape[1]], - seq_mask_dcp=self. - seq_mask_dcp_buf[:seq_mask_dcp_shape[0], : - seq_mask_dcp_shape[1]], - cp_seq_len=cp_seq_len) + cp_seq_len=cp_seq_len, + batch_seq_mask=batch_seq_mask) return self.metadata_cls( # type: ignore num_actual_tokens_pcp_padded=num_actual_tokens_pcp_padded, @@ -919,7 +890,8 @@ def _compute_prefill_context( iters = len(prefill_metadata.chunked_context.seq_tot) - seq_len1 = torch.tensor(prefill_metadata.query_lens, dtype=torch.int32) + current_seq_len = torch.tensor(prefill_metadata.query_lens, + dtype=torch.int32) cache_kv_c = kv_c_and_k_pe_cache[0] cache_k_pe = kv_c_and_k_pe_cache[1] num_heads = cache_k_pe.size(2) @@ -927,8 +899,11 @@ def _compute_prefill_context( for i in range(iters): toks = prefill_metadata.chunked_context.seq_tot[i] - seq_len2 = prefill_metadata.chunked_context.chunk_seq_lens[i] - seq_len = torch.stack([seq_len1, seq_len2]) + context_seq_len = prefill_metadata.chunked_context.chunk_seq_lens[ + i] + context_seq_len_npu = prefill_metadata.chunked_context.chunk_seq_lens_npu[ + i] + seq_len = torch.stack([current_seq_len, context_seq_len]) kv_c_normed = torch.empty(toks, num_heads, latent_kv_dim, @@ -944,7 +919,7 @@ def _compute_prefill_context( cache_kv_c, cache_k_pe, prefill_metadata.block_table, - seq_len2.to(q_nope.device), + context_seq_len_npu, seq_starts=prefill_metadata.chunked_context.starts[i], key=kv_c_normed, value=k_pe, @@ -1664,8 +1639,6 @@ def _forward_decode_pcp_dcp( q_nope = 
q_nope.view(num_tokens, num_heads, -1) q_pe = q_pe.view(num_tokens, num_heads, -1) # use pcp & dcp split computed token nums from scheduler to compute actual seq_len and seq_mask - seq_mask_pcp = decode_meta.seq_mask_pcp - seq_mask_dcp = decode_meta.seq_mask_dcp seq_len = decode_meta.cp_seq_len common_kwargs = { @@ -1735,9 +1708,56 @@ def _forward_decode_pcp_dcp( output=attn_output, lse=softmax_lse) + # Update out&lse + attn_out_lse_list = self._process_attn_out_lse(attn_output, + softmax_lse, + decode_meta) + attn_output = self._npu_attention_update(attn_out_lse_list) + return self._v_up_proj(attn_output) + + def _npu_attention_update( + self, attn_out_lse_list: List[torch.Tensor]) -> torch.Tensor: + attn_out_split_cp = [] + attn_lse_split_cp = [] + + for attn_out_lse in attn_out_lse_list: + attn_out_allgather, attn_lse_allgather = self._out_lse_reshape( + *torch.split(attn_out_lse, [self.kv_lora_rank, 1], dim=-1)) + attn_out_split_cp.append(attn_out_allgather) + attn_lse_split_cp.append(attn_lse_allgather) + attn_out, _ = torch_npu.npu_attention_update(attn_lse_split_cp, + attn_out_split_cp, 0) + attn_out = attn_out.view(-1, attn_out_lse_list[0].shape[1], + self.kv_lora_rank) + return attn_out + + def _out_lse_reshape(self, attn_out: torch.Tensor, + attn_lse: torch.Tensor) -> torch.Tensor: + attn_out = attn_out.contiguous().view( + attn_out.shape[0] * attn_out.shape[1], attn_out.shape[2]) + attn_lse = attn_lse.contiguous().view( + attn_lse.shape[0] * attn_lse.shape[1] * attn_lse.shape[2]) + return attn_out, attn_lse + + def _process_attn_out_lse( + self, + attn_output: torch.Tensor, + softmax_lse: torch.Tensor, + decode_meta: AscendMLADecodeMetadata, + ) -> List[torch.Tensor]: + attn_out_lse_list = [] + out_mask = decode_meta.batch_seq_mask[:, None, + None].expand_as(attn_output) + attn_output = torch.where(out_mask, 0, attn_output) + lse_mask = decode_meta.batch_seq_mask[:, None, + None].expand_as(softmax_lse) + softmax_lse = torch.where(lse_mask, -torch.inf, 
softmax_lse) + + softmax_lse = softmax_lse.to(torch.float32) + attn_output = attn_output.to(torch.float32) + # Concat out&lse: [bs,num_heads,v_head_dim] + [bs,num_heads,1] -> [bs,num_heads,v_head_dim+1] + attn_out_lse = torch.cat([attn_output, softmax_lse], dim=-1) if self.dcp_size > 1: - # Concat out&lse: [bs,num_heads,v_head_dim] + [bs,num_heads,1] -> [bs,num_heads,v_head_dim+1] - attn_out_lse = torch.cat([attn_output, softmax_lse], dim=-1) # permute: [bs, num_heads, v_head_dim+1] -> [num_heads, v_head_dim+1, bs] attn_out_lse = attn_out_lse.permute([1, 2, 0]).contiguous() attn_out_lse_all2all = torch.empty_like(attn_out_lse) @@ -1746,24 +1766,12 @@ def _forward_decode_pcp_dcp( group=self.dcp_group) # permute: [num_heads, v_head_dim+1, bs] -> [bs, num_heads, v_head_dim+1] attn_out_lse_all2all = attn_out_lse_all2all.permute([2, 0, 1]) - attn_out_lse_split_on_seq = list( + if self.pcp_size > 1: + attn_out_lse = attn_out_lse_all2all.contiguous() + attn_out_lse_list = list( torch.chunk(attn_out_lse_all2all, self.dcp_size, dim=1)) - # Update out&lse - attn_out_g = None - attn_lse_g = None - for i, attn_out_lse_l in enumerate(attn_out_lse_split_on_seq): - attn_out_l, attn_lse_l = torch.split(attn_out_lse_l, - [self.kv_lora_rank, 1], - dim=-1) - attn_out_g, attn_lse_g = self._update_out_and_lse( - attn_out_g, attn_lse_g, attn_out_l, attn_lse_l, - seq_mask_dcp[:, i]) - attn_output = attn_out_g - softmax_lse = attn_lse_g if self.pcp_size > 1: - # Concat out&lse: [bs,num_heads,v_head_dim] + [bs,num_heads,1] -> [bs,num_heads,v_head_dim+1] - attn_out_lse = torch.cat([attn_output, softmax_lse], dim=-1) # AllGather out&lse within PCP group attn_out_lse_list = [ torch.empty_like(attn_out_lse) for _ in range(self.pcp_size) @@ -1771,45 +1779,12 @@ def _forward_decode_pcp_dcp( dist.all_gather(attn_out_lse_list, attn_out_lse, group=self.pcp_group) - # Update out&lse - attn_out_g = None - attn_lse_g = None - for i, attn_out_lse_l in enumerate(attn_out_lse_list): - attn_out_l, 
attn_lse_l = torch.split(attn_out_lse_l, - [self.kv_lora_rank, 1], - dim=-1) - attn_out_g, attn_lse_g = self._update_out_and_lse( - attn_out_g, attn_lse_g, attn_out_l, attn_lse_l, - seq_mask_pcp[:, i]) - attn_output = attn_out_g - return self._v_up_proj(attn_output) - - # TODO use update op to replace this - def _update_out_and_lse( - self, - out: torch.Tensor, - lse: torch.Tensor, - block_out: torch.Tensor, - block_lse: torch.Tensor, - mask: torch.Tensor = None, - ): - if out is None: - out = block_out.to(torch.float32) - lse = block_lse - else: - if mask is None: - mask = torch.ones([block_out.size(0)], - dtype=torch.uint8, - device=block_out.device) - out_mask = mask[:, None, None].expand_as(block_out) - lse_mask = mask[:, None, None].expand_as(block_lse) - block_out = block_out.to(torch.float32) - out_without_update = out.clone() - lse_without_update = lse.clone() - - out = out - F.sigmoid(block_lse - lse) * (out - block_out) - lse = lse - F.logsigmoid(lse - block_lse) - # mask - out = torch.where(out_mask, out, out_without_update) - lse = torch.where(lse_mask, lse, lse_without_update) - return out, lse + if self.dcp_size > 1 and self.pcp_size > 1: + attn_out_lse_list_pcp_dcp = [] + for s in attn_out_lse_list: + attn_out_lse_list_split = list( + torch.chunk(s, self.dcp_size, dim=1)) + attn_out_lse_list_pcp_dcp += attn_out_lse_list_split + attn_out_lse_list = attn_out_lse_list_pcp_dcp + + return attn_out_lse_list diff --git a/vllm_ascend/attention/utils.py b/vllm_ascend/attention/utils.py index 82eca81dc9..ccc845c655 100644 --- a/vllm_ascend/attention/utils.py +++ b/vllm_ascend/attention/utils.py @@ -16,9 +16,7 @@ class AscendPrefillContextParallelMetadata: num_actual_tokens_pcp_padded: Optional[int] = None - num_computed_tokens_of_pcp_dcp: Optional[ - list[Optional[list[Optional[list[int]]]]] - ] = None + num_computed_tokens_of_pcp_dcp: Optional[list[list[list[int]]]] = None q_head_idx_tensor: torch.Tensor = None diff --git 
a/vllm_ascend/compilation/acl_graph.py b/vllm_ascend/compilation/acl_graph.py index b929326790..5c65936e4c 100644 --- a/vllm_ascend/compilation/acl_graph.py +++ b/vllm_ascend/compilation/acl_graph.py @@ -215,8 +215,16 @@ def update_attn_params(update_stream, forward_context, runtime_shape): output, ) = param seq_lens = forward_context.attn_metadata[key].seq_lens - torch.npu.graph_task_update_begin(update_stream, handle) - torch_npu._npu_paged_attention( + + # When using FULL_DECODE_ONLY, there are some rare bugs for FULL_DECODE_ONLY + # mode with GQA. This is triggered by getting workspace for _npu_paged_attention + # in torch_npu. On some rare cases, _npu_paged_attention with smaller seq_lens + # might encounter a bigger workspace, while currently we use max_model_len to + # calculate max workspace in capturing. So additional get_workspace is added + # here to avoid such bugs. + # TODO(Angazenn): we will remove this once _npu_paged_attention is fully + # replaced by npu_fused_infer_attention_score which does not contain such bugs. 
+ workspace = torch_npu._npu_paged_attention_get_workspace( query=query, key_cache=key_cache, value_cache=value_cache, @@ -225,8 +233,18 @@ def update_attn_params(update_stream, forward_context, runtime_shape): scale_value=scale, block_table=block_table, context_lens=seq_lens, - out=output, - workspace=graph_params.workspaces.get(runtime_shape)) + out=output) + torch.npu.graph_task_update_begin(update_stream, handle) + torch_npu._npu_paged_attention(query=query, + key_cache=key_cache, + value_cache=value_cache, + num_kv_heads=num_kv_heads, + num_heads=num_heads, + scale_value=scale, + block_table=block_table, + context_lens=seq_lens, + out=output, + workspace=workspace) torch.npu.graph_task_update_end(update_stream) event.record(update_stream) @@ -302,16 +320,28 @@ def update_attn_dcp_pcp_params(update_stream, forward_context, runtime_shape): graph_params.events[runtime_shape], ): (q_nope, k_nope, value, num_heads, num_kv_heads, scale, - block_table, block_size, actual_seq_lengths_kv, attn_output, - softmax_lse, pcp_rank, dcp_rank, dcp_size) = param - actual_seq_lengths_kv = forward_context.attn_metadata[ - key].decode_meta.num_computed_tokens_of_pcp_dcp[:, pcp_rank, - dcp_rank] + block_table, block_size, actual_seq_lengths_kv, + actual_seq_lengths_q, attn_output, softmax_lse, dcp_size, + pcp_rank, dcp_rank) = param + attn_metadata = forward_context.attn_metadata[key] + actual_seq_lengths_kv = attn_metadata.decode_meta.num_computed_tokens_of_pcp_dcp[:, + pcp_rank, + dcp_rank] pad_length = runtime_shape - len(actual_seq_lengths_kv) - pad_tensor = np.zeros(pad_length, - dtype=actual_seq_lengths_kv.dtype) - actual_seq_lengths_kv = np.concatenate( - [actual_seq_lengths_kv, pad_tensor]) + if pad_length > 0: + pad_tensor = np.zeros(pad_length, + dtype=actual_seq_lengths_kv.dtype) + actual_seq_lengths_kv = np.concatenate( + [actual_seq_lengths_kv, pad_tensor]) + + actual_seq_lengths_q = attn_metadata.actual_seq_lengths_q[: + attn_metadata + . 
+ num_decode_tokens] + if (runtime_shape - len(actual_seq_lengths_q)): + actual_seq_lengths_q = actual_seq_lengths_q + [ + actual_seq_lengths_q[-1] + ] * (runtime_shape - len(actual_seq_lengths_q)) if dcp_size > 1: num_heads = num_heads * dcp_size @@ -323,7 +353,7 @@ def update_attn_dcp_pcp_params(update_stream, forward_context, runtime_shape): value, num_heads=num_heads, num_key_value_heads=num_kv_heads, - input_layout="BSND", + input_layout="TND", atten_mask=None, scale=scale, antiquant_mode=0, @@ -332,6 +362,7 @@ def update_attn_dcp_pcp_params(update_stream, forward_context, runtime_shape): block_table=block_table, block_size=block_size, actual_seq_lengths_kv=actual_seq_lengths_kv, + actual_seq_lengths=actual_seq_lengths_q, workspace=graph_params.workspaces.get(runtime_shape), out=[attn_output, softmax_lse]) torch.npu.graph_task_update_end(update_stream) diff --git a/vllm_ascend/distributed/llmdatadist_c_mgr_connector.py b/vllm_ascend/distributed/llmdatadist_c_mgr_connector.py index e72f4eba26..d92b724f1a 100644 --- a/vllm_ascend/distributed/llmdatadist_c_mgr_connector.py +++ b/vllm_ascend/distributed/llmdatadist_c_mgr_connector.py @@ -31,6 +31,7 @@ from vllm.v1.request import Request, RequestStatus import vllm_ascend.envs as envs_ascend +from vllm_ascend.distributed.utils import get_transfer_timeout_value from vllm_ascend.utils import (AscendSocVersion, get_ascend_soc_version, prefill_context_parallel_enable, vllm_version_is) @@ -438,7 +439,7 @@ def init_llm_datadist(self): assert self.local_agent_metadata is not None llm_config = LLMConfig() llm_config.device_id = self.local_rank - llm_config.sync_kv_timeout = 20000 + llm_config.sync_kv_timeout = get_transfer_timeout_value() llm_config.enable_switch_role = True llm_config.enable_cache_manager = True llm_config.enable_remote_cache_accessible = True diff --git a/vllm_ascend/distributed/mooncake/config_data.py b/vllm_ascend/distributed/mooncake/config_data.py index 745d91131f..36c820b089 100644 --- 
a/vllm_ascend/distributed/mooncake/config_data.py +++ b/vllm_ascend/distributed/mooncake/config_data.py @@ -2,6 +2,7 @@ import hashlib import json import os +import re from dataclasses import dataclass from typing import Iterable, List, Optional, Tuple, Union @@ -11,6 +12,9 @@ from vllm.utils import cdiv, logger from vllm.v1.core.sched.output import NewRequestData +DEFAULT_GLOBAL_SEGMENT_SIZE = 3355443200 # 3.125 GiB +DEFAULT_LOCAL_BUFFER_SIZE = 1073741824 # 1.0 GiB + @dataclass class MooncakeEngineMetadata: @@ -419,7 +423,7 @@ class LasyerMultiBlockReqMeta: class MooncakeStoreConfig: local_hostname: str metadata_server: str - global_segment_size: int + global_segment_size: Union[int, str] local_buffer_size: int protocol: str device_name: str @@ -433,8 +437,11 @@ def from_file(file_path: str) -> "MooncakeStoreConfig": return MooncakeStoreConfig( local_hostname=config.get("local_hostname"), metadata_server=config.get("metadata_server"), - global_segment_size=config.get("global_segment_size", 3355443200), - local_buffer_size=config.get("local_buffer_size", 1073741824), + global_segment_size=_parse_global_segment_size( + config.get("global_segment_size", + DEFAULT_GLOBAL_SEGMENT_SIZE)), + local_buffer_size=(config.get("local_buffer_size", + DEFAULT_LOCAL_BUFFER_SIZE)), protocol=config.get("protocol", "tcp"), device_name=config.get("device_name", ""), master_server_address=config.get("master_server_address"), @@ -446,4 +453,81 @@ def load_from_env() -> "MooncakeStoreConfig": if not config_path: raise ValueError( "The environment variable 'MOONCAKE_CONFIG_PATH' is not set.") - return MooncakeStoreConfig.from_file(config_path) \ No newline at end of file + return MooncakeStoreConfig.from_file(config_path) + + +def _parse_global_segment_size(value) -> int: + """ + Parse storage size strings with support for units: GB, MB, KB, B + + Args: + value: Input value (int, str, or other convertible types) + + Returns: + int: Size in bytes + + Raises: + ValueError: For invalid 
format, missing number, or negative values + TypeError: For unsupported input types + """ + + if isinstance(value, int): + return value + elif not isinstance(value, str): + try: + return int(value) + except (TypeError, ValueError) as e: + raise TypeError( + f"Unsupported type for global_segment_size: {type(value)}" + ) from e + + cleaned_input = value.strip().lower() + if not cleaned_input: + raise ValueError("global segment size cannot be empty.") + + UNIT_MULTIPLIERS = { + 'gb': 1024**3, # 1 GB = 1024^3 bytes + 'mb': 1024**2, # 1 MB = 1024^2 bytes + 'kb': 1024, # 1 KB = 1024 bytes + 'b': 1 # 1 B = 1 byte + } + pattern = r'^\s*([\d.]+)\s*(gb|mb|kb|b)?\s*$' + match = re.match(pattern, cleaned_input) + + if not match: + raise ValueError(f"Invalid format: '{value}'") + + number_str = match.group(1) + unit = match.group(2) or 'b' + + multiplier = UNIT_MULTIPLIERS[unit] + return _convert_to_bytes(number_str, multiplier, value) + + +def _convert_to_bytes(number_str: str, multiplier: int, + original_input: str) -> int: + """ + Convert numeric string to byte count + + Args: + number_str: Numeric portion of input + multiplier: Unit conversion factor + original_input: Original input string (for error messages) + + Returns: + int: Byte count + + Raises: + ValueError: For invalid numbers or negative results + """ + try: + numeric_value = float(number_str) + except ValueError: + raise ValueError( + f"Invalid numeric value '{number_str}' in: '{original_input}'") + # Calculate byte count + try: + byte_count = int(numeric_value * multiplier) + except OverflowError: + raise ValueError(f"Storage size too large: '{original_input}'") + return byte_count diff --git a/vllm_ascend/distributed/mooncake_connector.py b/vllm_ascend/distributed/mooncake_connector.py index 5dfb125ef4..7951760d1d 100644 --- a/vllm_ascend/distributed/mooncake_connector.py +++ b/vllm_ascend/distributed/mooncake_connector.py @@ -2,6 +2,7 @@ import contextlib import hashlib import math +import os import queue 
import random import struct @@ -33,6 +34,7 @@ import vllm_ascend.envs as envs_ascend from vllm_ascend.ascend_config import get_ascend_config, init_ascend_config from vllm_ascend.distributed.mooncake.transfer_engine import get_global_te +from vllm_ascend.distributed.utils import get_transfer_timeout_value from vllm_ascend.utils import vllm_version_is if vllm_version_is("0.11.0"): @@ -855,6 +857,8 @@ class MooncakeConnectorWorker: def __init__(self, vllm_config: VllmConfig, engine_id: str): self._get_prefill_decode_size(vllm_config) + os.environ["ASCEND_TRANSFER_TIMEOUT"] = str( + get_transfer_timeout_value()) if self._prefill_tp_size < self._decode_tp_size: raise ValueError( f"prefill_tp_size: {self._prefill_tp_size} must be greater than" diff --git a/vllm_ascend/distributed/mooncake_layerwise_connector.py b/vllm_ascend/distributed/mooncake_layerwise_connector.py index 874adb3edd..1c5c0a9260 100644 --- a/vllm_ascend/distributed/mooncake_layerwise_connector.py +++ b/vllm_ascend/distributed/mooncake_layerwise_connector.py @@ -3,6 +3,7 @@ import copy import hashlib import math +import os import queue import struct import threading @@ -18,6 +19,7 @@ import numpy as np import numpy.typing as npt import torch +import torch_npu import zmq from mooncake.engine import TransferEngine # type: ignore from vllm.config import VllmConfig @@ -31,6 +33,7 @@ import vllm_ascend.envs as envs_ascend from vllm_ascend.ascend_config import get_ascend_config from vllm_ascend.distributed.utils import (align_memory, + get_transfer_timeout_value, kv_alltoall_and_rearrange) from vllm_ascend.utils import vllm_version_is @@ -91,6 +94,8 @@ def __init__(self, self.total_layers = total_layers self.use_mla = use_mla self.block_len = block_len + self.model_stream = torch_npu.npu.current_stream() + self.current_layer = -1 if self.pd_head_ratio > 1: # regesit kv buffer for tp inequal @@ -190,7 +195,9 @@ def _transfer_kv_cache(self, req_id, req_meta, layer_index, key, value): src_list.append(src) 
dst_list.append(dst) length_list.append(length) - torch.npu.synchronize() + if self.current_layer != layer_index: + self.current_layer = layer_index + self.model_stream.synchronize() ret = self.engine.batch_transfer_sync_write( session_id, src_list, dst_list, length_list) if ret < 0: @@ -241,7 +248,7 @@ def _transfer_kv_cache(self, req_id, req_meta, layer_index, key, value): ((self.tp_rank // self.num_head_replica) % self.pd_head_ratio)) src_layer_addr += length - torch.npu.synchronize() + self.model_stream.synchronize() ret = self.engine.batch_transfer_sync_write( session_id, src_list, dst_list, length_list) if ret < 0: @@ -602,6 +609,8 @@ class MooncakeLayerwiseConnectorWorker: def __init__(self, vllm_config: VllmConfig, engine_id: str): self._get_prefill_decode_size(vllm_config) + os.environ["ASCEND_TRANSFER_TIMEOUT"] = str( + get_transfer_timeout_value()) if self._prefill_tp_size < self._decode_tp_size: raise ValueError( f"prefill_tp_size: {self._prefill_tp_size} must be greater than" diff --git a/vllm_ascend/distributed/utils.py b/vllm_ascend/distributed/utils.py index 4b1344a16e..c25c1f15f2 100644 --- a/vllm_ascend/distributed/utils.py +++ b/vllm_ascend/distributed/utils.py @@ -1,3 +1,5 @@ +import os + import torch import torch.distributed as dist @@ -45,3 +47,15 @@ def align_memory(tensor: torch.Tensor, alignment: int) -> torch.Tensor: aligned_addr = (data_ptr + alignment - 1) // alignment * alignment offset = (aligned_addr - data_ptr) // tensor.element_size() return tensor[int(offset):] + + +def get_transfer_timeout_value(): + ascend_transfer_timeout = os.getenv("ASCEND_TRANSFER_TIMEOUT", "") + if len(ascend_transfer_timeout) > 0: + return int(ascend_transfer_timeout) + hccl_rdma_timeout = int(os.getenv('HCCL_RDMA_TIMEOUT', + '20')) # type: ignore + hccl_rdma_retry_cnt = int(os.getenv('HCCL_RDMA_RETRY_CNT', + '7')) # type: ignore + return int((4.096 * (2**hccl_rdma_timeout)) * hccl_rdma_retry_cnt // 1000 + + 3000) diff --git a/vllm_ascend/envs.py 
b/vllm_ascend/envs.py index 8f9e1d9899..a6b4081a79 100644 --- a/vllm_ascend/envs.py +++ b/vllm_ascend/envs.py @@ -63,7 +63,7 @@ "ASCEND_HOME_PATH": lambda: os.getenv("ASCEND_HOME_PATH", None), # The path for HCCL library, it's used by pyhccl communicator backend. If - # not set, the default value is libhccl.so。 + # not set, the default value is libhccl.so. "HCCL_SO_PATH": lambda: os.environ.get("HCCL_SO_PATH", None), # The version of vllm is installed. This value is used for developers who diff --git a/vllm_ascend/models/__init__.py b/vllm_ascend/models/__init__.py index 21ea48e3ab..956df2eb31 100644 --- a/vllm_ascend/models/__init__.py +++ b/vllm_ascend/models/__init__.py @@ -35,6 +35,10 @@ def register_model(): "PanguProMoEForCausalLM", "vllm_ascend.torchair.models.torchair_pangu_moe:PanguProMoEForCausalLM" ) + ModelRegistry.register_model( "Qwen3NextForCausalLM", "vllm_ascend.models.qwen3_next:CustomQwen3NextForCausalLM") + + ModelRegistry.register_model( + "Qwen3NextMTP", "vllm_ascend.models.qwen3_next_mtp:CustomQwen3NextMTP") diff --git a/vllm_ascend/models/qwen3_next.py b/vllm_ascend/models/qwen3_next.py index f5b4b8a142..b0bfde0eb6 100644 --- a/vllm_ascend/models/qwen3_next.py +++ b/vllm_ascend/models/qwen3_next.py @@ -260,6 +260,24 @@ def _forward( mixed_qkv_spec = None mixed_qkv_non_spec = mixed_qkv + # 2.1: process the mutli-query part + if spec_sequence_masks is not None: + mixed_qkv_spec = mixed_qkv_spec.view( + attn_metadata.num_spec_decodes, -1, mixed_qkv_spec.size(-1)) + mixed_qkv_spec = rearrange(mixed_qkv_spec, 'b l d -> b d l') + mixed_qkv_spec = causal_conv1d_update( + mixed_qkv_spec, + conv_state, + conv_weights, + self.conv1d.bias, + self.activation, + conv_state_indices=spec_state_indices_tensor[:, 0] + [:attn_metadata.num_spec_decodes], + num_accepted_tokens=num_accepted_tokens, + validate_data=False, + ) + mixed_qkv_spec = rearrange(mixed_qkv_spec, 'b d l -> (b l) d') + # 2.2: process the remaining part if attn_metadata.num_prefills > 0: # - 
"cache_indices" updates the conv_state cache in positions diff --git a/vllm_ascend/models/qwen3_next_mtp.py b/vllm_ascend/models/qwen3_next_mtp.py new file mode 100644 index 0000000000..c17d969cb2 --- /dev/null +++ b/vllm_ascend/models/qwen3_next_mtp.py @@ -0,0 +1,109 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +"""Inference-only Qwen3Next MTP model.""" +import torch +from vllm.compilation.decorators import support_torch_compile +from vllm.config import VllmConfig +from vllm.model_executor.layers.linear import ColumnParallelLinear +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.vocab_parallel_embedding import ( + DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.models.interfaces import SupportsPP +from vllm.model_executor.models.qwen3_next_mtp import ( + Qwen3NextMTP, Qwen3NextMultiTokenPredictor) +from vllm.model_executor.models.utils import ( + make_empty_intermediate_tensors_factory, maybe_prefix) +from vllm.transformers_utils.configs import Qwen3NextConfig + +from vllm_ascend.models.qwen3_next import (CustomQwen3NextDecoderLayer, + Qwen3NextRMSNorm) + + +@support_torch_compile +class CustomQwen3NextMultiTokenPredictor(Qwen3NextMultiTokenPredictor): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super(Qwen3NextMultiTokenPredictor, self).__init__() + + model_config = vllm_config.model_config + quant_config = vllm_config.quant_config + lora_config = vllm_config.lora_config + config: Qwen3NextConfig = model_config.hf_config + + self.config = config + lora_vocab = ((lora_config.lora_extra_vocab_size * + (lora_config.max_loras or 1)) if lora_config else 0) + self.vocab_size = config.vocab_size + lora_vocab + self.org_vocab_size = config.vocab_size + + self.mtp_start_layer_idx = config.num_hidden_layers + self.num_mtp_layers = getattr(config, "num_nextn_predict_layers", 1) + 
+ self.embed_tokens = VocabParallelEmbedding( + self.vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + ) + + self.fc = ColumnParallelLinear(self.config.hidden_size * 2, + self.config.hidden_size, + gather_output=True, + bias=False, + return_bias=False, + quant_config=quant_config, + prefix=f'{prefix}.fc') + + # use old version mtp layer name to avoid a exception in vllm + self.layers = torch.nn.ModuleList( + CustomQwen3NextDecoderLayer( + vllm_config, + layer_type="full_attention", + prefix=f'{prefix}.layers.{self.mtp_start_layer_idx + idx}', + ) for idx in range(self.num_mtp_layers)) + + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) + + self.norm = Qwen3NextRMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.pre_fc_norm_hidden = Qwen3NextRMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.pre_fc_norm_embedding = Qwen3NextRMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + +@support_torch_compile +class CustomQwen3NextMTP(Qwen3NextMTP, SupportsPP): + packed_modules_mapping = { + "qkv_proj": [ + "q_proj", + "k_proj", + "v_proj", + ], + "gate_up_proj": ["up_proj", "down_proj"] + } + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + config = vllm_config.model_config.hf_config + self.vllm_config = vllm_config + cache_config = vllm_config.cache_config + assert not cache_config.enable_prefix_caching, \ + "Qwen3NextMTP currently does not support prefix caching" + + self.quant_config = vllm_config.quant_config + + super(Qwen3NextMTP, self).__init__() + self.config = config + self.model = CustomQwen3NextMultiTokenPredictor( + vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) + self.unpadded_vocab_size = config.vocab_size + self.lm_head = ParallelLMHead(self.unpadded_vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + padding_size=DEFAULT_VOCAB_PADDING_SIZE, + 
prefix=maybe_prefix(prefix, "lm_head")) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + config.vocab_size) + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) diff --git a/vllm_ascend/ops/casual_conv1d.py b/vllm_ascend/ops/casual_conv1d.py index 2d008899ad..7ddc9cecca 100644 --- a/vllm_ascend/ops/casual_conv1d.py +++ b/vllm_ascend/ops/casual_conv1d.py @@ -55,7 +55,7 @@ def causal_conv1d_ref( final_states = F.pad(x, (width - 1 - x.shape[-1], 0)).to( dtype_in) # (batch, dim, width - 1) if final_states_out is not None: - final_states_out.copy_(final_states) + final_states_out[..., :(width - 1)].copy_(final_states) else: final_states_out = final_states out = (out if activation is None else F.silu(out)).to(dtype=dtype_in) diff --git a/vllm_ascend/ops/fused_moe/prepare_finalize.py b/vllm_ascend/ops/fused_moe/prepare_finalize.py index f158a4bfca..f54d4579ca 100644 --- a/vllm_ascend/ops/fused_moe/prepare_finalize.py +++ b/vllm_ascend/ops/fused_moe/prepare_finalize.py @@ -29,7 +29,10 @@ from vllm.forward_context import get_forward_context from vllm.model_executor.layers.fused_moe import FusedMoEConfig -from vllm_ascend.utils import enable_sp +from vllm_ascend.utils import enable_sp, prefill_context_parallel_enable + +if prefill_context_parallel_enable(): + from vllm.distributed import get_pcp_group class QuantType(Enum): @@ -382,6 +385,17 @@ def _prepare_with_dp_group( hidden_states, 0) router_logits = self.moe_config.dp_group.all_gather( router_logits, 0) + + if prefill_context_parallel_enable() and self.moe_config.pcp_size > 1: + hidden_states = get_pcp_group().all_gather( + hidden_states, + dim=0, + ) + router_logits = get_pcp_group().all_gather( + router_logits, + dim=0, + ) + return hidden_states, router_logits, None, None def finalize(self, @@ -431,6 +445,9 @@ def _finalize_with_dp_group(self, hidden_states: torch.Tensor, hidden_states = get_dp_group().reduce_scatter(hidden_states, 0) hidden_states = 
hidden_states[:self.num_tokens] + if prefill_context_parallel_enable() and self.moe_config.pcp_size > 1: + hidden_states = get_pcp_group().reduce_scatter(hidden_states, + dim=0) if reduce_results and (self.moe_config.tp_size > 1 or self.moe_config.ep_size > 1): hidden_states = tensor_model_parallel_all_reduce(hidden_states) @@ -504,6 +521,16 @@ def prepare( router_logits = self._naive_multicast(router_logits, self.cu_tokens_across_dp_cpu) + if prefill_context_parallel_enable() and self.moe_config.pcp_size > 1: + hidden_states = get_pcp_group().all_gather( + hidden_states, + dim=0, + ) + router_logits = get_pcp_group().all_gather( + router_logits, + dim=0, + ) + return hidden_states, router_logits, None, None def finalize(self, @@ -528,6 +555,10 @@ def finalize(self, hidden_states) # Sum across DP hidden_states = hidden_states[start:end, :] + if prefill_context_parallel_enable() and self.moe_config.pcp_size > 1: + hidden_states = get_pcp_group().reduce_scatter(hidden_states, + dim=0) + if reduce_results and (self.moe_config.tp_size > 1 or self.moe_config.ep_size > 1): hidden_states = tensor_model_parallel_all_reduce(hidden_states) diff --git a/vllm_ascend/ops/fused_moe/token_dispatcher.py b/vllm_ascend/ops/fused_moe/token_dispatcher.py index 077163c54a..c6bdfe4d1c 100644 --- a/vllm_ascend/ops/fused_moe/token_dispatcher.py +++ b/vllm_ascend/ops/fused_moe/token_dispatcher.py @@ -225,7 +225,7 @@ def token_dispatch(self, "expand_scales": expand_scales } - group_list_type = 1 if dynamic_eplb else 0 + group_list_type = 0 return { "group_list_type": group_list_type, diff --git a/vllm_ascend/patch/platform/patch_mamba_config.py b/vllm_ascend/patch/platform/patch_mamba_config.py index ad083f51c9..1b077b4135 100644 --- a/vllm_ascend/patch/platform/patch_mamba_config.py +++ b/vllm_ascend/patch/platform/patch_mamba_config.py @@ -58,7 +58,7 @@ def verify_and_update_config(cls, vllm_config) -> None: block_size=model_config.max_model_len, ).page_size_bytes - block_alignment_bytes 
= 64 + block_alignment_bytes = 128 # some attention backends (e.g. FA) only support setting # block size to multiple of 16, so let's suggest a value diff --git a/vllm_ascend/spec_decode/__init__.py b/vllm_ascend/spec_decode/__init__.py index 886957491d..b0b75f396c 100644 --- a/vllm_ascend/spec_decode/__init__.py +++ b/vllm_ascend/spec_decode/__init__.py @@ -28,9 +28,9 @@ def get_spec_decode_method( ): if method == "ngram": return NgramProposer(vllm_config, device, runner) - elif method in ["eagle", "eagle3"]: + elif method in ("eagle", "eagle3"): return EagleProposer(vllm_config, device, runner) - elif method == "deepseek_mtp": + elif method in ('deepseek_mtp', 'qwen3_next_mtp'): if is_torchair_graph: return TorchairMtpProposer(vllm_config, device, runner) return MtpProposer(vllm_config, device, runner) diff --git a/vllm_ascend/spec_decode/mtp_proposer.py b/vllm_ascend/spec_decode/mtp_proposer.py index 9f6d787471..2d4e239e27 100644 --- a/vllm_ascend/spec_decode/mtp_proposer.py +++ b/vllm_ascend/spec_decode/mtp_proposer.py @@ -1,3 +1,4 @@ +import importlib from typing import Optional import numpy as np @@ -12,7 +13,6 @@ from vllm.model_executor.model_loader import get_model_loader from vllm.model_executor.model_loader.utils import \ process_weights_after_loading -from vllm.model_executor.models.deepseek_mtp import DeepSeekMTP from vllm.model_executor.models.deepseek_v2 import DeepseekV32IndexerCache from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM from vllm.utils import cdiv @@ -42,6 +42,26 @@ PADDING_SLOT_ID = -1 +_MTP_MODELS = { + "DeepseekV3ForCausalLM": + ("vllm.model_executor.models.deepseek_mtp", "DeepSeekMTP"), + "Qwen3NextForCausalLM": + ("vllm_ascend.models.qwen3_next_mtp", "CustomQwen3NextMTP") +} + +_DEFAULT_FIRST_LAYER = 'model.layers.0.self_attn.attn' + +_FIRST_LAYERS = {"Qwen3NextForCausalLM": 'model.layers.3.self_attn.attn'} + + +def _load_model(architecture): + if architecture not in _MTP_MODELS: + raise ValueError("Invalid 
architecture for mtp.") + module_name, model_name = _MTP_MODELS[architecture] + module = importlib.import_module(module_name) + model = getattr(module, model_name) + return model + class MtpProposer(Proposer): @@ -150,9 +170,7 @@ def load_model(self, model) -> None: with set_default_torch_dtype( draft_model_config.dtype), set_current_vllm_config( self.vllm_config): - self.model = DeepSeekMTP( - vllm_config=self.vllm_config).to(target_device) - + self._init_mtp_model() draft_attn_layer_names = (get_layers_from_vllm_config( self.vllm_config, AttentionLayerBase).keys() - target_attn_layer_names) @@ -228,8 +246,7 @@ def generate_token_ids(self, attn_metadata=None, aux_hidden_states: torch.Tensor = None): common_attn_metadata = self.runner.spec_decode_common_attn_metadata - if attn_metadata is not None and isinstance(attn_metadata, dict): - attn_metadata = attn_metadata['model.layers.0.self_attn.attn'] + attn_metadata = self._get_attn_metadata(attn_metadata) if self.speculative_config.disable_padded_drafter_batch: # When padded-batch is disabled, the sampled_token_ids should be @@ -311,6 +328,20 @@ def generate_token_ids(self, return draft_token_ids + def _init_mtp_model(self): + architecture = self.vllm_config.model_config.architecture + target_device = self.vllm_config.device_config.device + model = _load_model(architecture) + self.model = model(vllm_config=self.vllm_config).to(target_device) + + def _get_attn_metadata(self, attn_metadata): + if attn_metadata is not None and isinstance(attn_metadata, dict): + architecture = self.vllm_config.model_config.architecture + layer_name = _FIRST_LAYERS.get(architecture, _DEFAULT_FIRST_LAYER) + attn_metadata = attn_metadata[layer_name] + + return attn_metadata + def _prepare_inputs( self, common_attn_metadata: CommonAttentionMetadata, diff --git a/vllm_ascend/torchair/torchair_mla.py b/vllm_ascend/torchair/torchair_mla.py index ce539b7d68..51becad900 100644 --- a/vllm_ascend/torchair/torchair_mla.py +++ 
b/vllm_ascend/torchair/torchair_mla.py @@ -69,6 +69,7 @@ class TorchairChunkedContextMetadata: max_seq_lens: list[int] workspace: torch.Tensor chunk_seq_lens: torch.Tensor + chunk_seq_lens_npu: torch.Tensor attn_mask: torch.Tensor query_lens: torch.Tensor @@ -447,6 +448,7 @@ def build( seq_tot=chunk_seq_lens.sum(dim=1).tolist(), max_seq_lens=chunk_seq_lens.max(dim=1).values.tolist(), chunk_seq_lens=chunk_seq_lens, + chunk_seq_lens_npu=chunk_seq_lens.npu(), workspace=self.chunked_prefill_workspace, ) prefill_input_positions = input_positions[tokens_start:] @@ -760,7 +762,8 @@ def _compute_prefill_context( q_pe = query[..., self.qk_nope_head_dim:] q_nope = query[..., :self.qk_nope_head_dim] - seq_len1 = torch.tensor(prefill_metadata.query_lens, dtype=torch.int32) + current_seq_len = torch.tensor(prefill_metadata.query_lens, + dtype=torch.int32) cache_kv_c = kv_c_and_k_pe_cache[0] cache_k_pe = kv_c_and_k_pe_cache[1] num_heads = cache_k_pe.size(2) @@ -768,8 +771,11 @@ def _compute_prefill_context( for i in range(iters): toks = prefill_metadata.chunked_context.seq_tot[i] - seq_len2 = prefill_metadata.chunked_context.chunk_seq_lens[i] - seq_len = torch.stack([seq_len1, seq_len2]) + context_seq_len = prefill_metadata.chunked_context.chunk_seq_lens[ + i] + context_seq_len_npu = prefill_metadata.chunked_context.chunk_seq_lens_npu[ + i] + seq_len = torch.stack([current_seq_len, context_seq_len]) kv_c_normed = torch.empty(toks, num_heads, latent_kv_dim, @@ -785,7 +791,7 @@ def _compute_prefill_context( cache_kv_c, cache_k_pe, prefill_metadata.block_table, - seq_len2.to(query.device), + context_seq_len_npu, seq_starts=prefill_metadata.chunked_context.starts[i], key=kv_c_normed, value=k_pe, diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py index e1afd24a08..46e80606fd 100644 --- a/vllm_ascend/utils.py +++ b/vllm_ascend/utils.py @@ -659,6 +659,17 @@ def enable_sp(vllm_config=None) -> bool: # We retain the env VLLM_ASCEND_ENABLE_FLASHCOMM here for backward compatibility. 
or bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM", '0')))) + if not _ENABLE_SP: + return _ENABLE_SP + + assert vllm_config.parallel_config.tensor_parallel_size > 1, \ + "Flash Comm v1 (Sequence Parallelism) is only supported when tp_size > 1." + + assert ( + not is_moe_model(vllm_config) + or vllm_config.parallel_config.enable_expert_parallel + ), "Flash Comm v1 (Sequence Parallelism) requires enable_expert_parallel=True for MoE models." + return _ENABLE_SP diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py index 0e12aa1049..41f63de598 100644 --- a/vllm_ascend/worker/model_runner_v1.py +++ b/vllm_ascend/worker/model_runner_v1.py @@ -1471,12 +1471,27 @@ def _prepare_inputs( arange, ) +<<<<<<< HEAD self.input_batch.block_table.compute_slot_mapping(req_indices, positions_np) self.input_batch.block_table.commit_slot_mapping(total_num_scheduled_tokens) tokens, position_pcp, pcp_unpad_mask = self._update_tokens_for_pcp(tokens) num_scheduled_tokens = np.array(tokens, dtype=np.int32) # update total_num_scheduled_tokens total_num_scheduled_tokens = sum(num_scheduled_tokens[:num_reqs]) +======= + self.input_batch.block_table.compute_slot_mapping( + req_indices, positions_np) + self.input_batch.block_table.commit_slot_mapping( + total_num_scheduled_tokens) + if self.pcp_size > 1: + tokens, position_pcp, pcp_unpad_mask = self._update_tokens_for_pcp( + tokens) + num_scheduled_tokens = np.array(tokens, dtype=np.int32) + total_num_scheduled_tokens = sum(num_scheduled_tokens[:num_reqs]) + else: + position_pcp, pcp_unpad_mask = None, None + self.num_pcp_pads = self.num_pcp_pads[:num_reqs] +>>>>>>> main total_num_pcp_pads = sum(self.num_pcp_pads) max_num_scheduled_tokens = max(tokens) @@ -1789,12 +1804,24 @@ def _prepare_inputs( # We will ignore the sampled tokens from the partial requests. # TODO: Support prompt logprobs. 
spec_decode_metadata = None +<<<<<<< HEAD logits_indices = ( torch.from_numpy(cu_num_tokens) * self.pcp_size - self.num_pcp_pads[:num_reqs] - 1 ) logits_indices = logits_indices.to(self.device, non_blocking=True) +======= + if self.pcp_size * self.dcp_size > 1: + logits_indices = torch.from_numpy( + cu_num_tokens + ) * self.pcp_size - self.num_pcp_pads[:num_reqs] - 1 + logits_indices = logits_indices.to(self.device, + non_blocking=True) + else: + logits_indices = torch.from_numpy(cu_num_tokens - 1).to( + self.device, non_blocking=True) +>>>>>>> main else: # pcp not supported now assert self.pcp_size == 1 @@ -1931,8 +1958,15 @@ def _prepare_inputs( ): if use_spec_decode: extra_attn_metadata_args = dict( +<<<<<<< HEAD num_accepted_tokens=self.num_accepted_tokens.gpu[:num_reqs], num_draft_tokens=self.num_draft_tokens.gpu[:num_reqs], +======= + num_accepted_tokens=self.num_accepted_tokens. + gpu[:num_reqs], + num_decode_draft_tokens_cpu=self.num_draft_tokens. + gpu[:num_reqs], +>>>>>>> main ) attn_metadata_i = builder.build( common_prefix_len=common_prefix_len, @@ -2057,6 +2091,7 @@ def _build_attn_state(self, num_reqs, num_scheduled_tokens, num_valid_tokens): attn_state = AscendAttentionState.SpecDecoding # Speculative decoding. 
elif np.all(num_valid_tokens == 1): +<<<<<<< HEAD if self.drafter and ( self.drafter.name == SpecDcodeType.EAGLE or self.drafter.name == SpecDcodeType.EAGLE3 @@ -2064,7 +2099,12 @@ def _build_attn_state(self, num_reqs, num_scheduled_tokens, num_valid_tokens): ): attn_state = AscendAttentionState.ChunkedPrefill else: +======= + if self.speculative_config and self.speculative_config.method == 'deepseek_mtp': +>>>>>>> main attn_state = AscendAttentionState.SpecDecoding + else: + attn_state = AscendAttentionState.ChunkedPrefill # splitfuse elif ( not ascend_config.ascend_scheduler_config.enabled @@ -2715,11 +2755,17 @@ def propose_draft_token_ids(sampled_token_ids): with ProfileExecuteDuration().capture_async("Draft"): if self.speculative_config: +<<<<<<< HEAD use_padded_batch_for_eagle = ( self.speculative_config and self.speculative_config.method == "deepseek_mtp" and not self.speculative_config.disable_padded_drafter_batch ) +======= + use_padded_batch_for_eagle = self.speculative_config and \ + self.speculative_config.method in ("deepseek_mtp", "qwen3_next_mtp") and \ + not self.speculative_config.disable_padded_drafter_batch +>>>>>>> main if use_padded_batch_for_eagle: # EAGLE speculative decoding can use the GPU sampled tokens # as inputs, and does not need to wait for bookkeeping to finish. 
@@ -4549,8 +4595,6 @@ def _build_drafter_prepare_inputs_torchair_param(self): def _update_tokens_for_pcp(self, tokens): num_reqs = self.input_batch.num_reqs self.num_pcp_pads = self.num_pcp_pads[:num_reqs] - if not self.pcp_size > 1: - return tokens, None, None tokens = np.array(tokens, dtype=np.int32) num_decode_reqs = sum( self.input_batch.num_computed_tokens_cpu[:num_reqs] diff --git a/vllm_ascend/worker/worker_v1.py b/vllm_ascend/worker/worker_v1.py index e8729925fa..a90883cdcb 100644 --- a/vllm_ascend/worker/worker_v1.py +++ b/vllm_ascend/worker/worker_v1.py @@ -47,7 +47,7 @@ from vllm_ascend.device_allocator.camem import CaMemAllocator from vllm_ascend.distributed.parallel_state import init_ascend_model_parallel from vllm_ascend.platform import NPUPlatform -from vllm_ascend.utils import (init_ascend_soc_version, +from vllm_ascend.utils import (init_ascend_soc_version, is_enable_nz, prefill_context_parallel_enable, register_ascend_customop, sleep_mode_enabled, try_register_lib, vllm_version_is) @@ -184,6 +184,11 @@ def wake_up(self, tags: Optional[list[str]] = None) -> None: raise ValueError( "Sleep mode is not enabled. Please compile vllm-ascend with COMPILE_CUSTOM_KERNELS=1." ) + + if is_enable_nz(): + raise ValueError( + "FRACTAL_NZ mode is enabled. This may cause model parameter precision issues " + "in the RL scenarios. Please set VLLM_ASCEND_ENABLE_NZ=0.") allocator = CaMemAllocator.get_instance() allocator.wake_up(tags=tags)