From d539c5778b072d1998e0e85fd23b8e3c4183450c Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Wed, 9 Jul 2025 10:47:17 -0700 Subject: [PATCH 01/10] refactor hf engine --- arealite/engine/hf_engine.py | 459 ++++++++++++++++++------------- arealite/tests/test_hf_engine.py | 161 +++++++++++ 2 files changed, 429 insertions(+), 191 deletions(-) create mode 100644 arealite/tests/test_hf_engine.py diff --git a/arealite/engine/hf_engine.py b/arealite/engine/hf_engine.py index d2d5f469a..ed20cf395 100644 --- a/arealite/engine/hf_engine.py +++ b/arealite/engine/hf_engine.py @@ -1,91 +1,69 @@ -import asyncio -import functools -import math +import gc import os +import time from typing import Any, Callable, Dict, List, Optional import torch import torch.distributed as dist import transformers -from transformers import AutoConfig, AutoModelForCausalLM - -from arealite.api.cli_args import ( - EngineConfig, +from tensordict import TensorDict +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + get_constant_schedule_with_warmup, + get_linear_schedule_with_warmup, +) +from arealite.utils.fsdp import get_cosine_schedule_with_warmup +from arealite.api.cli_args import TrainEngineConfig +from arealite.api.engine_api import ( + FinetuneSpec, MicroBatchSpec, - ParallelismConfig, - TrainingArgs, + SaveLoadMeta, + TrainEngine, + WeightUpdateMeta, ) -from arealite.api.engine_api import TrainEngine -from arealite.api.io_struct import FinetuneSpec -from arealite.api.llm_client_api import LLMClient -from arealite.utils import ( - get_state_dict_from_repo_id_or_path, - recorder_list, - split_dict_tensor_with_cu_seqlens, +from arealite.utils.data import ( + MicroBatchList, + amend_position_ids, + pack_tensor_dict, + pad_and_stack_tensors_along_first_dim, + pad_mb_list, + reorder_list, + split_packed_tensor_dict_into_mb_list, unpack_sequence, + unsqueeze_mb_list, ) -from realhf.base import constants - - -def get_cosine_schedule_with_warmup( - optimizer: torch.optim.Optimizer, - num_warmup_steps: int, - num_training_steps: int, - min_lr_ratio: float = 0.0, - num_cycles: float = 0.5, - last_epoch: int = -1, -): - """ - Create a schedule with a learning rate that decreases following the values of the cosine function between the - initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the - initial lr set in the optimizer. - Args: - optimizer (:class:`~torch.optim.Optimizer`): - The optimizer for which to schedule the learning rate. - num_warmup_steps (:obj:`int`): - The number of steps for the warmup phase. - num_training_steps (:obj:`int`): - The total number of training steps. - min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0): - The minimum lr ratio w.r.t the maximum. - num_cycles (:obj:`float`, `optional`, defaults to 0.5): - The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 - following a half-cosine). - last_epoch (:obj:`int`, `optional`, defaults to -1): - The index of the last epoch when resuming training. - Return: - :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
- """ - assert min_lr_ratio >= 0 and min_lr_ratio <= 1.0 - coef = (1 - min_lr_ratio) * 0.5 - intercept = (1 + min_lr_ratio) * 0.5 - - def lr_lambda(current_step): - if current_step < num_warmup_steps: - return float(current_step) / float(max(1, num_warmup_steps)) - progress = float(current_step - num_warmup_steps) / float( - max(1, num_training_steps - num_warmup_steps) - ) - x = math.cos(math.pi * float(num_cycles) * 2.0 * progress) - return max(0.0, x * coef + intercept) - - return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch) +from arealite.utils.save_load import get_state_dict_from_repo_id_or_path +from realhf.api.core.data_api import load_hf_tokenizer +from realhf.base import logging, name_resolve, names +logger = logging.getLogger("HFEngine") class HFEngine(TrainEngine): - """Simplified HF engine for transformer models.""" - - def __init__(self, args: TrainingArgs, engine_config: EngineConfig): - super().__init__(args, engine_config) + def __init__(self, config: TrainEngineConfig): + self.config = config + self.optimizer_config = config.optimizer self.model = None self.optimizer = None + self.tokenizer = None + # huggingface model config self.model_config = None - + # initialization + self.initialized = False self.weight_update_group_initialized = False + self.world_size = int(os.environ.get("WORLD_SIZE", 1)) - def init_distributed(self, config: ParallelismConfig, ft_spec: FinetuneSpec): - """Initialize model in single node.""" + def train(self, mode: bool = True): + assert self.model is not None + self.model.train(mode=mode) + return self + + def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): + # Initialize distributed enviroments and load model. + assert addr is None, "HFEngine does not support remote initialization." + + """Initialize distributed communication and model.""" if not dist.is_initialized(): dist.init_process_group(backend="nccl") if dist.get_world_size() > 1: @@ -93,36 +71,48 @@ def init_distributed(self, config: ParallelismConfig, ft_spec: FinetuneSpec): "Distributed training is not supported in this engine. " "Please use FSDP for distributed training." ) - torch.cuda.set_device("cuda:0") - dtype = torch.bfloat16 if self.engine_config.bf16 else torch.float16 + torch.cuda.set_device(int(os.environ.get("LOCAL_RANK", 0))) + self.device = torch.device(int(os.environ.get("LOCAL_RANK", 0))) + + dtype = torch.bfloat16 if self.config.bf16 else torch.float16 self.model_config = AutoConfig.from_pretrained( - pretrained_model_name_or_path=self.engine_config.path, + pretrained_model_name_or_path=self.config.path, trust_remote_code=True, ) + self.tokenizer = load_hf_tokenizer(self.config.path) with torch.device("cuda"): # initialize scratch model from config model = AutoModelForCausalLM.from_config( self.model_config, torch_dtype=dtype, - attn_implementation="flash_attention_2", + attn_implementation=self.config.attn_impl, ) - model = model.cuda() - - self.model = model + self.model = model.to("cuda") + + if not self.config.init_from_scratch: + # Load model from a initial checkpoint path, + # which should only be a huggingface checkpoint. 
+ load_meta = SaveLoadMeta( + path=self.config.path, + weight_format="hf", + with_optim=False, + tokenizer=None, + base_model_path=self.config.path, + ) + self.load(load_meta) # Set up optimizer - optimizer_config = self.engine_config.optimizer - if optimizer_config is not None: + if self.optimizer_config is not None: assert ( - optimizer_config.type == "adam" + self.optimizer_config.type == "adam" ), "Only AdamW optimizer is supported in this engine." - lr = optimizer_config.lr - weight_decay = optimizer_config.weight_decay - beta1 = optimizer_config.beta1 - beta2 = optimizer_config.beta2 - eps = optimizer_config.eps + lr = self.optimizer_config.lr + weight_decay = self.optimizer_config.weight_decay + beta1 = self.optimizer_config.beta1 + beta2 = self.optimizer_config.beta2 + eps = self.optimizer_config.eps self.optimizer = torch.optim.AdamW( self.model.parameters(), @@ -133,82 +123,230 @@ def init_distributed(self, config: ParallelismConfig, ft_spec: FinetuneSpec): ) total_train_steps = ft_spec.total_train_steps num_warmup_steps = int( - optimizer_config.warmup_steps_proportion * total_train_steps + self.optimizer_config.warmup_steps_proportion * total_train_steps ) - self.lr_scheduler = get_cosine_schedule_with_warmup( - self.optimizer, - num_warmup_steps, - total_train_steps, - min_lr_ratio=optimizer_config.min_lr_ratio, + if self.optimizer_config.lr_scheduler_type == "cosine": + self.lr_scheduler = get_cosine_schedule_with_warmup( + self.optimizer, + num_warmup_steps, + total_train_steps, + min_lr_ratio=self.optimizer_config.min_lr_ratio, + ) + elif self.optimizer_config.lr_scheduler_type == "linear": + self.lr_scheduler = get_linear_schedule_with_warmup( + self.optimizer, + num_warmup_steps, + total_train_steps, + ) + elif self.optimizer_config.lr_scheduler_type == "constant": + self.lr_scheduler = get_constant_schedule_with_warmup( + self.optimizer, + num_warmup_steps, + ) + else: + raise ValueError( + f"Unknown lr scheduler type {self.optimizer_config.lr_scheduler_type}" + ) + + self.initialized = True + + def destroy(self): + """Destroy the engine and release GPU memory.""" + self.model = None + self.optimizer = None + gc.collect() + torch.cuda.empty_cache() + gc.collect() + self.initialized = False + + def save(self, meta: SaveLoadMeta): + if meta.weight_format == "hf": + self._save_model_to_hf(meta.path, meta.tokenizer) + elif meta.weight_format == "dcp": + # TODO: implement DCP save/load for HF + raise NotImplementedError("DCP format saving is not implemented yet. ") + else: + raise ValueError(f"Unknown weight format {meta.weight_format}. ") + + if meta.with_optim: + self._save_optimizer_state(meta.path) + + def load(self, meta: SaveLoadMeta): + if meta.weight_format == "hf": + self._load_model_from_hf(meta.path) + elif meta.weight_format == "dcp": + # TODO: implement DCP save/load for HF + raise NotImplementedError("DCP format loading is not implemented yet. ") + else: + raise ValueError(f"Unknown weight format {meta.weight_format}. 
") + + if meta.with_optim: + self._load_optimizer_state(meta.path) + + def _save_optimizer_state(self, path: str): + assert self.optimizer is not None + os.makedirs(path, exist_ok=True) + torch.save(self.optimizer.state_dict(), os.path.join(path, "optim.pt")) + + def _load_optimizer_state(self, path: str): + assert self.optimizer is not None + path = os.path.join(path, "optim.pt") + optimizer_state_dict = torch.load(path, weights_only=False) + self.optimizer.load_state_dict(optimizer_state_dict) + + def _save_model_to_hf( + self, path: str, tokenizer: Optional[transformers.PreTrainedTokenizerFast] + ): + """Save model in HuggingFace format.""" + if self.model is None: + raise RuntimeError("Model not initialized") + os.makedirs(path, exist_ok=True) + + state_dict = {k: v.cpu() for k, v in self.model.state_dict().items()} + self.model.save_pretrained(path, state_dict=state_dict) + self.model_config.save_pretrained(path) + if tokenizer is not None: + tokenizer.save_pretrained(path) + + def _load_model_from_hf(self, path: str): + """Load model from HuggingFace format.""" + full_state = get_state_dict_from_repo_id_or_path(path) + self.model.load_state_dict( + full_state, strict=not self.model_config.tie_word_embeddings + ) + if self.model_config.tie_word_embeddings: + self.model.tie_weights() + + def upload_weights(self, meta: WeightUpdateMeta): + if meta.type == "nccl": + if not self.weight_update_group_initialized: + self._init_distributed_weight_update(meta) + self._update_weights_from_distributed() + elif meta.type == "disk": + self._save_model_to_hf(meta.path, self.tokenizer) + update_name = names.update_weights_from_disk( + self.config.experiment_name, + self.config.trial_name, + meta.model_version, ) + name_resolve.add(update_name, str(time.time_ns()), keepalive_ttl=120) + else: + raise ValueError(f"Unknown weight update type {meta.type}") + + def _init_distributed_weight_update(self, meta: WeightUpdateMeta): + raise NotImplementedError( + "Distributed weight update is not implemented for HFEngine yet. " + ) - def train(self, mode: bool = True): - """Set the module in training mode.""" - return self.model.train(mode) + def _update_weights_from_distributed(self): + raise NotImplementedError( + "Distributed weight update is not implemented for HFEngine yet. " + ) + + def step_lr_scheduler(self): + assert self.lr_scheduler is not None + return self.lr_scheduler.step() + + def _prepare_mb_list( + self, input_: TensorDict, mb_spec: MicroBatchSpec + ) -> MicroBatchList: + assert "attention_mask" in input_ and "input_ids" in input_ + input_ = amend_position_ids(input_) + packed_input = pack_tensor_dict(input_) + mb_list = split_packed_tensor_dict_into_mb_list( + packed_input, + mb_spec, + ) + mb_list = pad_mb_list(mb_list, pad_value=0.0) + # NOTE: We unsqueeze here because huggingface transformer models requires + # packed input to be of shape [1, total_seqlen]. 
+ mb_list = unsqueeze_mb_list(mb_list) + return mb_list def train_batch( self, - input_: Dict, + input_: TensorDict, mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], - ) -> Dict: + ) -> Dict[str, float]: """Train on a batch using gradient accumulation.""" + input_ = input_.to(self.device) assert self.optimizer is not None + assert self.optimizer_config is not None assert self.lr_scheduler is not None self.optimizer.zero_grad() - mb_splits = split_dict_tensor_with_cu_seqlens(input_, mb_spec) + mb_list = self._prepare_mb_list(input_, mb_spec) + total_loss_weight = torch.tensor( - sum([loss_weight_fn(mb) for mb in mb_splits.mbs]), dtype=torch.float32 + sum([loss_weight_fn(mb) for mb in mb_list.mbs]), dtype=torch.float32 ) assert total_loss_weight != 0 - for mb_input in mb_splits.mbs: - outputs = self.model(**mb_input) - loss = loss_fn(outputs.logits, mb_input) + # Process microbatches with gradient accumulation + for pad_length, padded_mb_input, mb_input in zip( + mb_list.padding_lengths, mb_list.padded_mbs, mb_list.mbs + ): + outputs = self.model(**padded_mb_input) + + logits = outputs.logits.squeeze(0) + logits = logits[:-pad_length] if pad_length > 0 else logits + loss = loss_fn(logits, mb_input) loss_scale = loss_weight_fn(mb_input) / total_loss_weight + loss *= loss_scale loss.backward() grad_norm = torch.nn.utils.clip_grad_norm_( self.model.parameters(), - self.engine_config.optimizer.gradient_clipping, + self.optimizer_config.gradient_clipping, norm_type=2.0, error_if_nonfinite=False, foreach=None, ) + if not torch.isfinite(grad_norm): + self.optimizer.zero_grad() + update_successful = False + else: + self.optimizer.step() + update_successful = True + current_lr = self.lr_scheduler.get_last_lr()[0] # Optimizer step self.optimizer.step() - - return { - "grad_norm": grad_norm, - "lr": current_lr, - } + return dict( + update_successful=float(update_successful), + grad_norm=float(grad_norm) if grad_norm is not None else float("nan"), + lr=current_lr, + ) @torch.no_grad() def eval_batch( self, - input_: Dict, + input_: TensorDict, mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> torch.Tensor | None: """Evaluate on a batch.""" - mb_splits = split_dict_tensor_with_cu_seqlens(input_, mb_spec) + mb_list = self._prepare_mb_list(input_, mb_spec) total_loss_weight = torch.tensor( - sum([loss_weight_fn(mb) for mb in mb_splits.mbs]), dtype=torch.float32 + sum([loss_weight_fn(mb) for mb in mb_list.mbs]), dtype=torch.float32 ) assert total_loss_weight != 0 total_loss = 0.0 total_weight = 0.0 - for mb_input in mb_splits.mbs: - outputs = self.model(**mb_input) - loss = loss_fn(outputs.logits, mb_input) + for pad_length, padded_mb_input, mb_input in zip( + mb_list.padding_lengths, mb_list.padded_mbs, mb_list.mbs + ): + outputs = self.model(**padded_mb_input) + logits = outputs.logits.squeeze(0) + logits = logits[:-pad_length] if pad_length > 0 else logits + loss = loss_fn(logits, mb_input) # Simple weight calculation (could be improved) loss_scale = loss_weight_fn(mb_input) / total_loss_weight @@ -220,96 +358,35 @@ def eval_batch( @torch.no_grad() def forward( self, - input_: Dict, + input_: TensorDict, mb_spec: MicroBatchSpec, output_seqlens: List[int] | None = None, post_hook: Callable[[torch.Tensor, Dict], Any] | None = None, - aggregate_fn: Callable[[List[Any]], Any] = functools.partial(torch.cat, dim=1), + aggregate_fn: Callable[[List[Any]], Any] = 
torch.cat, ) -> Any | None: """Forward pass with optional post-processing.""" - mb_splits = split_dict_tensor_with_cu_seqlens(input_, mb_spec) + cu_seqlens = pack_tensor_dict(input_)["cu_seqlens"] + mb_list = self._prepare_mb_list(input_, mb_spec) + if output_seqlens is None: - cu_seqlens = input_["cu_seqlens"] output_seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).cpu().numpy().tolist() results = [] - for mb_input in mb_splits.mbs: - outputs = self.model(**mb_input) + for pad_length, padded_mb_input, mb_input in zip( + mb_list.padding_lengths, mb_list.padded_mbs, mb_list.mbs + ): + outputs = self.model(**padded_mb_input) + logits = outputs.logits.squeeze(0) + logits = logits[:-pad_length] if pad_length > 0 else logits + if post_hook: - result = post_hook(outputs.logits, mb_input) + result = post_hook(logits, mb_input) results.append(result) else: - results.append(outputs.logits) + results.append(logits) res = aggregate_fn(results) - output_seqlens = [output_seqlens[i] for i in mb_splits.forward_indices] - unpacked = unpack_sequence(res, lens=output_seqlens, dim=1) - return aggregate_fn(recorder_list(unpacked, mb_splits.backward_indices)) - - def step_lr_scheduler(self): - """Step the learning rate scheduler.""" - return self.lr_scheduler.step() - - def save_model_to_hf( - self, - path: str, - tokenizer: Optional[transformers.PreTrainedTokenizerFast] = None, - base_model_path: Optional[str] = None, - ): - """Save model in HuggingFace format.""" - if self.model is None: - raise RuntimeError("Model not initialized") - - os.makedirs(path, exist_ok=True) - - state_dict = {k: v.cpu() for k, v in self.model.state_dict().items()} - self.model.save_pretrained(path, state_dict=state_dict) - self.model_config.save_pretrained(path) - if tokenizer is not None: - tokenizer.save_pretrained(path) - - def load_model_from_hf(self, path: str): - """Load model from HuggingFace format.""" - full_state = get_state_dict_from_repo_id_or_path(path) - self.model.load_state_dict( - full_state, strict=not self.model_config.tie_word_embeddings - ) - if self.model_config.tie_word_embeddings: - self.model.tie_weights() - - def save_optimizer_state(self, path: str): - """Save optimizer state.""" - if self.optimizer is None: - raise RuntimeError("Optimizer not initialized") - - os.makedirs(path, exist_ok=True) - torch.save(self.optimizer.state_dict(), os.path.join(path, "optimizer.pt")) - - def load_optimizer_state(self, path: str): - """Load optimizer state.""" - if self.optimizer is None: - raise RuntimeError("Optimizer not initialized") - - optimizer_path = os.path.join(path, "optimizer.pt") - if os.path.exists(optimizer_path): - self.optimizer.load_state_dict( - torch.load(optimizer_path, map_location="cpu") - ) - else: - raise RuntimeError(f"Optimizer state file not found: {optimizer_path}") - - async def aupdate_weights_to(self, llm_client: LLMClient): - path = constants.get_param_realloc_path(self.args) - self.save_model_to_hf(path) - tasks = [ - llm_client.aupdate_weights_from_disk(server_info=server_info, path=path) - for server_info in llm_client.get_healthy_servers() - ] - await asyncio.gather(*tasks) - - def update_weights_to(self, llm_client: LLMClient): - loop = asyncio.new_event_loop() - try: - loop.run_until_complete(self.aupdate_weights_to(llm_client)) - finally: - loop.close() + output_seqlens = [output_seqlens[i] for i in mb_list.forward_indices] + unpacked = unpack_sequence(res, lens=output_seqlens, dim=0) + reordered = reorder_list(unpacked, mb_list.backward_indices) + return 
pad_and_stack_tensors_along_first_dim(reordered) diff --git a/arealite/tests/test_hf_engine.py b/arealite/tests/test_hf_engine.py new file mode 100644 index 000000000..927b3e985 --- /dev/null +++ b/arealite/tests/test_hf_engine.py @@ -0,0 +1,161 @@ +# Copyright 2025 Ant Group Inc. +# Licensed under the Apache License, Version 2.0 + +"""Test script for HF Engine implementation.""" + +import os +from typing import Dict + +import pytest +import torch +from tensordict import TensorDict +from transformers import AutoTokenizer + +from arealite.api.cli_args import MicroBatchSpec, OptimizerConfig, TrainEngineConfig +from arealite.api.io_struct import FinetuneSpec, SaveLoadMeta +from arealite.engine.hf_engine import HFEngine + +VOCAB_SIZE = 100 +MODEL_PATH = "/storage/testing/models/Qwen__Qwen3-1.7B/" +if not os.path.exists(MODEL_PATH): + MODEL_PATH = "Qwen/Qwen2-0.5B" + + +@pytest.fixture(scope="module") +def mock_input( + batch_size=5, + min_seqlen=10, + max_seqlen=20, + device="cuda:0", +) -> Dict: + """Create mock padded input data (same format for huggingface) for testing. + Returns a dict with input_ids, attention_mask, and position_ids. + """ + pad_token_id = 0 + seqlens = torch.randint( + min_seqlen, max_seqlen, (batch_size,), dtype=torch.int, device=device + ) + max_seqlen = int(max(seqlens)) + input_ids = torch.randint( + 0, VOCAB_SIZE, (batch_size, max_seqlen), dtype=torch.long, device=device + ) + attn_mask = torch.zeros((batch_size, max_seqlen), dtype=torch.bool, device=device) + + attn_mask[ + torch.arange(0, max_seqlen, device=device).unsqueeze(0) < seqlens.unsqueeze(1) + ] = 1 + input_ids.masked_fill_(~attn_mask, pad_token_id) + + return TensorDict( + input_ids=input_ids, + attention_mask=attn_mask, + ) + + +def mock_loss_fn(logits: torch.Tensor, input_data: Dict) -> torch.Tensor: + """Mock loss function for testing.""" + return torch.mean(logits) + + +@pytest.fixture(scope="module") +def engine(): + os.environ["WORLD_SIZE"] = "1" + os.environ["RANK"] = "0" + os.environ["LOCAL_RANK"] = "0" + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = "7777" + + engine_config = TrainEngineConfig( + experiment_name="test-hf-engine", + trial_name="test0", + path=MODEL_PATH, + optimizer=OptimizerConfig(), + ) + engine = HFEngine(engine_config) + ft_spec = FinetuneSpec(total_train_epochs=1, dataset_size=100, train_batch_size=2) + engine.initialize(None, ft_spec) + print("✓ Engine created successfully") + yield engine + + +@torch.no_grad() +def test_forward_microbatch(engine, mock_input): + engine.eval() + x2 = ( + engine.forward( + input_=mock_input, + mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), + ) + .squeeze(0) + .mean(-1) + ) + x1 = ( + engine.forward( + input_=mock_input, + mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), + ) + .squeeze(0) + .mean(-1) + ) + input_ids = mock_input["input_ids"] + assert x1.shape[:1] == input_ids.shape[:1] + assert x2.shape[:1] == input_ids.shape[:1] + assert torch.allclose(x1, x2, atol=1e-1, rtol=1e-2), (x1 - x2).abs().max().item() + + +@torch.no_grad() +def test_eval_batch(engine, mock_input): + engine.eval() + eval_result = engine.eval_batch( + input_=mock_input, + mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), + loss_fn=mock_loss_fn, + loss_weight_fn=lambda x: x["cu_seqlens"][-1], + ) + assert isinstance(eval_result, torch.Tensor), "Evaluation should return a tensor" + assert eval_result.is_cuda, "Evaluation tensor should be on CUDA device" + assert eval_result is not None, "Evaluation should return a loss 
value" + print(f"✓ Evaluation successful, loss: {eval_result.item()}") + + +def test_train_batch(engine, mock_input): + engine.train() + train_result = engine.train_batch( + input_=mock_input, + mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), + loss_fn=mock_loss_fn, + loss_weight_fn=lambda x: x["cu_seqlens"][-1], + ) + assert isinstance(train_result, dict), "Training should return a dictionary" + assert train_result["grad_norm"] is not None + assert train_result["lr"] is not None + print("✓ Training successful") + + +@torch.no_grad() +def test_hf_save_load_weights(tmp_path_factory, engine, mock_input): + tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH) + path = tmp_path_factory.mktemp("hf_engine_test") + save_load_meta = SaveLoadMeta( + path=path, + weight_format="hf", + tokenizer=tokenizer, + with_optim=True, + base_model_path=None, + ) + + old = engine.forward( + input_=mock_input, + mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), + ) + engine.save(save_load_meta) + + for name, param in engine.model.named_parameters(): + param.zero_() + + engine.load(save_load_meta) + new = engine.forward( + input_=mock_input, + mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), + ) + assert torch.allclose(old, new) From 43b5b362f5dee6e050066fcc2e5a5640d4d89382 Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Wed, 9 Jul 2025 10:52:19 -0700 Subject: [PATCH 02/10] format file --- arealite/engine/hf_engine.py | 6 ++++-- examples/env/validate_installation.py | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arealite/engine/hf_engine.py b/arealite/engine/hf_engine.py index ed20cf395..ea25709db 100644 --- a/arealite/engine/hf_engine.py +++ b/arealite/engine/hf_engine.py @@ -13,7 +13,7 @@ get_constant_schedule_with_warmup, get_linear_schedule_with_warmup, ) -from arealite.utils.fsdp import get_cosine_schedule_with_warmup + from arealite.api.cli_args import TrainEngineConfig from arealite.api.engine_api import ( FinetuneSpec, @@ -33,12 +33,14 @@ unpack_sequence, unsqueeze_mb_list, ) +from arealite.utils.fsdp import get_cosine_schedule_with_warmup from arealite.utils.save_load import get_state_dict_from_repo_id_or_path from realhf.api.core.data_api import load_hf_tokenizer from realhf.base import logging, name_resolve, names logger = logging.getLogger("HFEngine") + class HFEngine(TrainEngine): def __init__(self, config: TrainEngineConfig): self.config = config @@ -233,7 +235,7 @@ def upload_weights(self, meta: WeightUpdateMeta): name_resolve.add(update_name, str(time.time_ns()), keepalive_ttl=120) else: raise ValueError(f"Unknown weight update type {meta.type}") - + def _init_distributed_weight_update(self, meta: WeightUpdateMeta): raise NotImplementedError( "Distributed weight update is not implemented for HFEngine yet. 
" diff --git a/examples/env/validate_installation.py b/examples/env/validate_installation.py index 61ef6f7c5..6c69cd7e5 100644 --- a/examples/env/validate_installation.py +++ b/examples/env/validate_installation.py @@ -79,6 +79,7 @@ def test_sglang_functionality(self, sglang_module): """Test SGLang basic functionality.""" # Basic import test is sufficient for CI import sgl_kernel + from sglang import launch_server assert Version(get_version("sglang")) == Version("0.4.6.post4") print(" - SGLang imported successfully") From 9e31934e5b3a63db933168266bea6867ea4e1ad6 Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Wed, 9 Jul 2025 23:34:41 -0700 Subject: [PATCH 03/10] revert file format --- examples/env/validate_installation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/env/validate_installation.py b/examples/env/validate_installation.py index 6c69cd7e5..61ef6f7c5 100644 --- a/examples/env/validate_installation.py +++ b/examples/env/validate_installation.py @@ -79,7 +79,6 @@ def test_sglang_functionality(self, sglang_module): """Test SGLang basic functionality.""" # Basic import test is sufficient for CI import sgl_kernel - from sglang import launch_server assert Version(get_version("sglang")) == Version("0.4.6.post4") print(" - SGLang imported successfully") From b14f45fad92d19e414ba0adc01368cc2b21036b1 Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Wed, 9 Jul 2025 23:40:29 -0700 Subject: [PATCH 04/10] Squashed commit of the following: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8d4b8dc90fceb144bf00f8125f938b57f696e166 Author: Wei Fu <36355462+garrett4wade@users.noreply.github.com> Date: Thu Jul 10 13:14:10 2025 +0800 [Doc] Add an instruction about how to run the SFT example. (#164) commit 3bf9c85e400a2fe1c6a1e488d27d458ede8cea45 Author: Wei Fu <36355462+garrett4wade@users.noreply.github.com> Date: Thu Jul 10 12:56:24 2025 +0800 [Fix] Merge previous contributions from fw/refactor to lite (#163) * initial proposal * add arealite * . * change api * . * remove LOG_ROOT * remove MODEL_SAVE_PATH * remove PARAM_REALLOC_PATH, DATASET_CACHE * prepare for testing * prepare for testing * ready for run * local run * tests mainly pass * format * . * amend cluster.py * . * . * client test pass * pass rollout test * remove unused imports * add arealite readme * change api * . * . * . * . * . * . * . * . * format * . * implement iteraptable generation (#112) Co-authored-by: zhaochenyang * . * fix * . * . * . * pass controller generate batch test * . * refactor rollout controller into worker and controller * . * . * . * change to async rollout * pass rollout controller test * pass test * . * update readme * . * sft debug * . * add lisence * remove unused files * remove unsed args in ppo * add hf engine wrapper (#116) * add hf engine * fix issues * fix ppo bugs and add test * add hf client interface and modify cli args * fix bugs * fix issues * Merge fw/refactor * Finish hf wrapper test * add test --------- Co-authored-by: Wei Fu <36355462+garrett4wade@users.noreply.github.com> * format * format * . * refine hf engine * . * fix * add fsdp engine and sft tests * . * . * . * pass ppo unittest * pass ppo and rollout controller tests * clear unused imports * rename ppo to grpo * change reward function organization * reorganize code * add dataset api * . * . * . * format * chmod fix * . * rename workflow to collector * refactor llm_client location * . * . 
* fix llm server api * refactor config structure * . * fix tests * . * . * . * Fix unresolved issue in SFTTrainer PR (#139) * . * . * efficient loading * format * . * . * . * . * . * . * Add CI for testing AReaLite (#150) * ci: add test-arealite * ci: add checkout before running test-arealite * ci: add USERNAME * ci: add test script * ci: add GitHub mirror * ci: fix typo * ci: clone one commit * ci: fix condition * ci: set command timeout to 60m * ci: enable pip cache * ci: optimize container lifecycle * ci: split into many stages * ci(test-arealite): fix typo * ci: fix wrong env * ci: fix pytest * ci: uninstall transformer-engine * ci: uninstall transformer-engine * ci: fix model paths * ci: show stdout/stderr * ci: fix not clean up * ci: backup sglang * ci: remove tmp repo dir when run * ci: fix docker run exit 1 condition * ci(test-arealite): limit the concurrency and extend command timeout * . * merge fw/refactor * revert some changes * fix --------- Co-authored-by: meizhiyu.mzy Co-authored-by: Chayenne Co-authored-by: zhaochenyang Co-authored-by: Jayon02 Co-authored-by: root Co-authored-by: Zijian Zhang commit d48bf007cfb0821c35a18a673f3c2e8f51a8ee45 Merge: 42c717b b9dbd4a Author: 博惟 Date: Thu Jul 10 12:53:30 2025 +0800 Merge branch 'main' of https://github.com/inclusionAI/AReaL into lite commit 42c717b6e45b3b345f29cff5882ba5374b79faee Merge: c38cffc a203c7c Author: 博惟 Date: Thu Jul 10 11:15:01 2025 +0800 Merge branch 'lite' of https://github.com/inclusionAI/AReaL into lite commit c38cffc02385acf382196ba6b720b2d8b91a5675 Author: 博惟 Date: Thu Jul 10 11:10:10 2025 +0800 PullRequest: 340 [lite] Refactor trainer API into utilities and remove mb_spec in engine methods Merge branch fw/lite-dev of git@code.alipay.com:inclusionAI/AReaL.git into lite https://code.alipay.com/inclusionAI/AReaL/pull_requests/340 Reviewed-by: 晓雷 * support fsdp engine and sglang remote engine * minor fix * . * refactor trainer * add close * rm mb_spec * fix commit b9dbd4a2c18539648dca373c9c8c1347e4e9db68 Author: Wei Fu <36355462+garrett4wade@users.noreply.github.com> Date: Wed Jul 9 10:50:19 2025 +0800 Update to persistent wechat QR code. 
(#159) commit 17ea7fe94dc0bd9299082a069383d7372ed13ac9 Author: xssstory <33601810+xssstory@users.noreply.github.com> Date: Mon Jul 7 15:49:13 2025 +0800 fix math reward verifier (#156) * PullRequest: 293 fix get_param_realloc_path Merge branch xss/debug of git@code.alipay.com:inclusionAI/AReaL.git into gh https://code.alipay.com/inclusionAI/AReaL/pull_requests/293 Reviewed-by: 博惟 * fix get_param_realloc_path * PullRequest: 297 bugfix: reward is always -5 Merge branch xss/debug of git@code.alipay.com:inclusionAI/AReaL.git into gh https://code.alipay.com/inclusionAI/AReaL/pull_requests/297 Reviewed-by: 博惟 * bugfix: reward is always -5 * PullRequest: 321 fix checkpoint save dir Merge branch xss/debug of git@code.alipay.com:inclusionAI/AReaL.git into gh https://code.alipay.com/inclusionAI/AReaL/pull_requests/321 Reviewed-by: 博惟 * fix checkpoint save dir * PullRequest: 328 [Doc] update installation Merge branch sxj/doc of git@code.alipay.com:inclusionAI/AReaL.git into gh https://code.alipay.com/inclusionAI/AReaL/pull_requests/328 Reviewed-by: 博惟 * [Doc] update installation * PullRequest: 329 bugfix: math verifier blocks the async training Merge branch xss/debug of git@code.alipay.com:inclusionAI/AReaL.git into gh https://code.alipay.com/inclusionAI/AReaL/pull_requests/329 Reviewed-by: 博惟 * bugfix: math verifier block the async training * format --------- Co-authored-by: 冰临 Co-authored-by: garrett4wade --- arealite/README.md | 28 ++- arealite/api/cli_args.py | 209 +++++++----------- arealite/api/engine_api.py | 4 - arealite/api/trainer_api.py | 130 ----------- arealite/engine/fsdp_engine.py | 20 +- arealite/engine/sft/lm_engine.py | 94 ++++++++ arealite/tests/test_fsdp_engine.py | 35 +-- arealite/trainer/sft.py | 156 ------------- arealite/utils/data.py | 23 +- arealite/utils/evaluator.py | 36 +++ arealite/utils/fs.py | 9 + arealite/utils/logging.py | 9 - arealite/utils/saver.py | 68 ++++++ arealite/utils/stats_logger.py | 111 ++++++++++ assets/wechat_qrcode.png | Bin 166550 -> 2772 bytes examples/arealite/configs/gsm8k_sft.yaml | 58 +++-- examples/arealite/gsm8k_sft.py | 65 ++++-- functioncall/code/local_verify.py | 8 +- pyproject.toml | 2 +- realhf/api/core/data_api.py | 2 + realhf/base/stats_tracker.py | 14 ++ realhf/impl/dataset/math_parser.py | 11 +- .../environment/math_code_single_step_env.py | 2 + requirements.txt | 4 +- 24 files changed, 569 insertions(+), 529 deletions(-) delete mode 100644 arealite/api/trainer_api.py create mode 100644 arealite/engine/sft/lm_engine.py delete mode 100644 arealite/trainer/sft.py create mode 100644 arealite/utils/evaluator.py create mode 100644 arealite/utils/fs.py delete mode 100644 arealite/utils/logging.py create mode 100644 arealite/utils/saver.py create mode 100644 arealite/utils/stats_logger.py diff --git a/arealite/README.md b/arealite/README.md index d70b86ebc..f472c1bef 100644 --- a/arealite/README.md +++ b/arealite/README.md @@ -1,5 +1,18 @@ # AReaL v1.0.0 Design Doc +--- + +Update 20250710 + +SFT example: + +```bash +torchrun --nnodes 1 --nproc-per-node 8 examples/arealite/gsm8k_sft.py --config examples/arealite/configs/gsm8k_sft.yaml +``` + +--- + + We will provide both single-controller and SPMD user interfaces. The SPMD interface will be delivered with AReaLite, which is the paradigm most users are familiar with, just like using `torchrun` or `deepspeed`. However, this paradigm may lack some flexibility over global scheduling and control. 
To unlock the full potential with customized distributed execution, we will also provide a single-controller mode just like using Ray --- but our scheduler backend will not be restricted to Ray. Our code will be able to run with any scheduler in the cluster, such as native SLURM and K8S. However, we want the user code to stay the same for both modes. The following is a simple usage example: @@ -187,7 +200,6 @@ class TrainEngine(abc.ABC): def train_batch( self, input_: Dict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> Dict[str, float]: @@ -197,7 +209,6 @@ class TrainEngine(abc.ABC): def eval_batch( self, input_: Dict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> torch.Tensor | None: @@ -207,7 +218,6 @@ class TrainEngine(abc.ABC): def forward( self, input_: Dict, - mb_spec: MicroBatchSpec, output_seqlens: List[List[int]] | None = None, post_hook: Callable[[torch.Tensor, Dict], Any] | None = None, aggregate_fn: Callable[[List[Any]], Any] = torch.cat, @@ -323,7 +333,7 @@ Extended engines (such as Actor in PPO) provide convenient organization and call class Actor(Engine): @torch.no_grad() - def compute_logps(self, input_: Dict[str, Tensor], mb_spec: MicroBatchSpec) -> torch.Tensor: + def compute_logps(self, input_: Dict[str, Tensor]) -> torch.Tensor: ... # unpad logps = self.forward(xxx) ... # pad back @@ -332,8 +342,7 @@ class Actor(Engine): def compute_advantages_and_returns(self, input_: Dict) -> Dict: pass - def ppo_update(self, input_: Dict, - mb_spec: MicroBatchSpec) -> List[Dict[str, float]]: + def ppo_update(self, input_: Dict) -> List[Dict[str, float]]: ... all_stats = [] for _ in range(self.ppo_n_minibatches): @@ -344,11 +353,10 @@ class Actor(Engine): class Critic(Engine): @torch.no_grad() - def compute_values(self, input_: Dict, mb_spec: MicroBatchSpec) -> torch.Tensor: + def compute_values(self, input_: Dict) -> torch.Tensor: pass - def ppo_update(self, input_: Dict, - mb_spec: MicroBatchSpec) -> List[Dict[str, float]]: + def ppo_update(self, input_: Dict) -> List[Dict[str, float]]: ... 
all_stats = [] for _ in range(self.ppo_n_minibatches): @@ -742,4 +750,4 @@ dataloader = StatefulDataLoader( ) for data in dataloader: assert isinstance(data, list) -``` \ No newline at end of file +``` diff --git a/arealite/api/cli_args.py b/arealite/api/cli_args.py index 786cbe6f7..ca4b78bd8 100644 --- a/arealite/api/cli_args.py +++ b/arealite/api/cli_args.py @@ -1,14 +1,17 @@ import argparse -import getpass import os from dataclasses import asdict, dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple +import uvloop + +uvloop.install() from hydra import compose as hydra_compose from hydra import initialize as hydra_init from omegaconf import MISSING, OmegaConf +from arealite.utils.fs import get_user_tmp from realhf.api.cli_args import OptimizerConfig @@ -103,8 +106,8 @@ class FSDPEngineConfig: @dataclass class TrainEngineConfig: - experiment_name: str - trial_name: str + experiment_name: str = MISSING + trial_name: str = MISSING path: str = field(default="", metadata={"help": "Path to HuggingFace checkpoint"}) attn_impl: str = field( default="flash_attention_2", @@ -120,6 +123,8 @@ class TrainEngineConfig: default=False, metadata={"help": "Initialize critic/reward model from LM checkpoint"}, ) + # Runtime microbatch limit + mb_spec: MicroBatchSpec = field(default_factory=MicroBatchSpec) # Training Backend Configuration gradient_checkpointing: bool = field( @@ -307,94 +312,40 @@ class SGLangEngineConfig: @dataclass -class ExperimentSaveEvalControl: - """Controls the frequency of model saving and evaluation during training. - - Manages independent counters for epochs, steps, and seconds. The model will be saved - or evaluated when any specified frequency condition is met. - - Note: - - Epoch: Number of full passes through the training dataset - - Step: Number of individual training iterations - - Seconds: Wall-clock time duration - """ - - total_train_epochs: int = field( - default=1, metadata={"help": "Total number of epochs to train the model."} - ) - # Save control - save_freq_epochs: Optional[int] = field( - default=None, - metadata={ - "help": "Save frequency in epochs. None disables epoch-based saving." - }, - ) - save_freq_steps: Optional[int] = field( - default=None, - metadata={"help": "Save frequency in steps. None disables step-based saving."}, - ) - save_freq_secs: Optional[int] = field( +class _Timer: + experiment_name: str = MISSING + trial_name: str = MISSING + fileroot: str = MISSING + freq_epochs: Optional[int] = field( default=None, metadata={ - "help": "Save frequency in seconds. None disables time-based saving." + "help": "Trigger frequency in epochs. None disables epoch-based saving." }, ) - # Checkpointing control - ckpt_freq_epochs: Optional[int] = field( + freq_steps: Optional[int] = field( default=None, metadata={ - "help": "Checkpoint frequency in epochs. None uses save_freq_epochs. " - "Checkpointing is used for recover. Previous checkpoint is overwritten to save space." + "help": "Trigger frequency in steps. None disables step-based saving." }, ) - ckpt_freq_steps: Optional[int] = field( + freq_secs: Optional[int] = field( default=None, metadata={ - "help": "Checkpoint frequency in steps. None disables step-based checkpointing." - }, - ) - ckpt_freq_secs: Optional[int] = field( - default=None, - metadata={ - "help": "Checkpoint frequency in seconds. None disables time-based checkpointing." 
- }, - ) - # Evaluation control - eval_freq_epochs: Optional[int] = field( - default=None, - metadata={ - "help": "Evaluation frequency in epochs. None disables epoch-based evaluation." - }, - ) - eval_freq_steps: Optional[int] = field( - default=None, - metadata={ - "help": "Evaluation frequency in steps. None disables step-based evaluation." - }, - ) - eval_freq_secs: Optional[int] = field( - default=None, - metadata={ - "help": "Evaluation frequency in seconds. None disables time-based evaluation." - }, - ) - # Benchmark control - benchmark_steps: Optional[int] = field( - default=None, - metadata={ - "help": "Terminate training after this number of steps. " - "For benchmarking purposes only. None indicates normal training." - }, - ) - benchmark_n_seqs: Optional[int] = field( - default=None, - metadata={ - "help": "Terminate training after consuming this number of samples. " - "For benchmarking purposes only. None indicates normal training." + "help": "Trigger frequency in seconds. None disables time-based saving." }, ) +@dataclass +class EvaluatorConfig(_Timer): + pass + + +@dataclass +class SaverConfig(_Timer): + pass + + @dataclass class WandBConfig: mode: str = "disabled" @@ -423,11 +374,23 @@ class TensorBoardConfig: path: Optional[str] = None -def get_user_tmp(): - user = getpass.getuser() - user_tmp = os.path.join("/home", user, ".cache", "realhf") - os.makedirs(user_tmp, exist_ok=True) - return user_tmp +@dataclass +class StatsLoggerConfig: + experiment_name: str = MISSING + trial_name: str = MISSING + fileroot: str = MISSING + wandb: WandBConfig = field( + default_factory=WandBConfig, + metadata={"help": "Weights & Biases configuration."}, + ) + swanlab: SwanlabConfig = field( + default_factory=SwanlabConfig, + metadata={"help": "SwanLab configuration."}, + ) + tensorboard: TensorBoardConfig = field( + default_factory=TensorBoardConfig, + metadata={"help": "TensorBoard configuration. Only 'path' field required."}, + ) @dataclass @@ -498,7 +461,9 @@ class ClusterSpecConfig: @dataclass class DatasetConfig: - type: str = field(default="", metadata={"help": "Type of implemented dataset"}) + type: Optional[str] = field( + default=None, metadata={"help": "Type of implemented dataset"} + ) batch_size: int = field( default=1, metadata={"help": "Batch size of the dataloader"} ) @@ -517,51 +482,6 @@ class DatasetConfig: drop_last: bool = field(default=True) -@dataclass -class TrainerConfig: - experiment_name: str = field( - default=MISSING, - metadata={"help": "Name of the experiment (no '_' or '/'). Required."}, - ) - trial_name: str = field( - default=MISSING, - metadata={"help": "Name of the trial (no '-' or '/'). Required."}, - ) - fileroot: str = field( - default=get_user_tmp(), - metadata={ - "help": "Root for logs and checkpoints. Should be available to all nodes." - }, - ) - wandb: WandBConfig = field( - default_factory=WandBConfig, - metadata={"help": "Weights & Biases configuration."}, - ) - swanlab: SwanlabConfig = field( - default_factory=SwanlabConfig, - metadata={"help": "SwanLab configuration."}, - ) - tensorboard: TensorBoardConfig = field( - default_factory=TensorBoardConfig, - metadata={"help": "TensorBoard configuration. Only 'path' field required."}, - ) - allocation_mode: str = field( - default="", - metadata={ - "help": "GPU parallel strategy allocation mode. " - "Options: manual/heuristic or pattern-based." 
- }, - ) - seed: int = field(default=1, metadata={"help": "Random seed for reproducibility."}) - exp_ctrl: ExperimentSaveEvalControl = field( - default_factory=ExperimentSaveEvalControl, - metadata={"help": "Experiment save/evaluation control configuration."}, - ) - - tokenizer_path: str = field(default="") - mb_spec: MicroBatchSpec = field(default_factory=MicroBatchSpec) - - @dataclass class BaseExperimentConfig: # NOTE: we need this unified config class because different experiments @@ -585,14 +505,45 @@ class BaseExperimentConfig: n_gpus_per_node: int = field( default=8, metadata={"help": "Number of GPUs per node for this experiment."} ) + allocation_mode: str = field( + default="", + metadata={ + "help": "GPU parallel strategy allocation mode. " + "Options: manual/heuristic or pattern-based." + }, + ) + seed: int = field(default=1, metadata={"help": "Random seed for reproducibility."}) + total_train_epochs: int = field( + default=1, metadata={"help": "Total number of epochs to train the model."} + ) + total_train_steps: Optional[int] = field( + default=None, + metadata={ + "help": "Terminate training after this number of steps. " + "For benchmarking purposes only. None indicates normal training." + }, + ) + total_train_n_seqs: Optional[int] = field( + default=None, + metadata={ + "help": "Terminate training after consuming this number of samples. " + "For benchmarking purposes only. None indicates normal training." + }, + ) + tokenizer_path: str = field(default="") + train_dataset: DatasetConfig = field(default_factory=DatasetConfig) valid_dataset: DatasetConfig = field(default_factory=DatasetConfig) + saver: SaverConfig = field(default_factory=SaverConfig) + checkpointer: SaverConfig = field(default_factory=SaverConfig) + evaluator: EvaluatorConfig = field(default_factory=EvaluatorConfig) + stats_logger: StatsLoggerConfig = field(default_factory=StatsLoggerConfig) + @dataclass class SFTConfig(BaseExperimentConfig): model: TrainEngineConfig = field(default_factory=TrainEngineConfig) - trainer: TrainerConfig = field(default_factory=TrainerConfig) def load_expr_config(argv: List[str], config_cls) -> Tuple[BaseExperimentConfig, str]: diff --git a/arealite/api/engine_api.py b/arealite/api/engine_api.py index 919bc97e5..26e124aea 100644 --- a/arealite/api/engine_api.py +++ b/arealite/api/engine_api.py @@ -6,7 +6,6 @@ import torch from tensordict import TensorDict -from arealite.api.cli_args import MicroBatchSpec from arealite.api.io_struct import ( FinetuneSpec, LLMRequest, @@ -79,7 +78,6 @@ def step_lr_scheduler(self): def train_batch( self, input_: Dict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> Dict[str, float]: @@ -90,7 +88,6 @@ def train_batch( def eval_batch( self, input_: Dict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> torch.Tensor | None: @@ -101,7 +98,6 @@ def eval_batch( def forward( self, input_: Dict, - mb_spec: MicroBatchSpec, output_seqlens: List[List[int]] | None = None, post_hook: Callable[[torch.Tensor, Dict], Any] | None = None, aggregate_fn: Callable[[List[Any]], Any] = torch.cat, diff --git a/arealite/api/trainer_api.py b/arealite/api/trainer_api.py deleted file mode 100644 index 40bddaac6..000000000 --- a/arealite/api/trainer_api.py +++ /dev/null @@ -1,130 +0,0 @@ -import getpass -import os -from typing import Dict - -import torch.distributed as dist -import wandb -from tensorboardX import 
SummaryWriter -from torchdata.stateful_dataloader import StatefulDataLoader - -from arealite.api.cli_args import TrainerConfig -from arealite.api.engine_api import InferenceEngine, TrainEngine -from realhf.api.core.data_api import load_hf_tokenizer -from realhf.base import logging, timeutil - - -class Trainer: - def __init__( - self, - config: TrainerConfig, - train_dataloader: StatefulDataLoader, - valid_dataloader: StatefulDataLoader, - engine: TrainEngine, - inf_engine: InferenceEngine | None = None, - ): - self.config = config - - self.train_dataloader = train_dataloader - self.valid_dataloader = valid_dataloader - - self.engine = engine - self.inf_engine = inf_engine - - self.tokenizer = load_hf_tokenizer(config.tokenizer_path) - - self.save_ctl = timeutil.EpochStepTimeFreqCtl( - freq_epoch=config.exp_ctrl.save_freq_epochs, - freq_step=config.exp_ctrl.save_freq_steps, - freq_sec=config.exp_ctrl.save_freq_secs, - ) - self.eval_ctl = timeutil.EpochStepTimeFreqCtl( - freq_epoch=config.exp_ctrl.eval_freq_epochs, - freq_step=config.exp_ctrl.eval_freq_steps, - freq_sec=config.exp_ctrl.eval_freq_steps, - ) - self.logger = logging.getLogger(self.__class__.__name__) - self.init_stats_logging() - - def init_stats_logging(self): - """ - Initialize wandb and/or tensorboard according to config. - If torch.distributed is initialized - - Return: - tensorboard SummaryWriter if self.config.tensorboard.path is not None - """ - if dist.is_initialized() and dist.get_rank() != 0: - return - - # wandb init, connect to remote wandb host - if self.config.wandb.mode != "disabled": - wandb.login() - wandb.init( - mode=self.config.wandb.mode, - entity=self.config.wandb.entity, - project=self.config.wandb.project or self.config.experiment_name, - name=self.config.wandb.name or self.config.trial_name, - job_type=self.config.wandb.job_type, - group=self.config.wandb.group - or f"{self.config.experiment_name}_{self.config.trial_name}", - notes=self.config.wandb.notes, - tags=self.config.wandb.tags, - config=self.config.wandb.config, - dir=Trainer.get_log_path(self.config), - force=True, - id=f"{self.config.experiment_name}_{self.config.trial_name}_train", - resume="allow", - settings=wandb.Settings(start_method="fork"), - ) - # tensorboard logging - self.summary_writer = None - if self.config.tensorboard.path is not None: - self.summary_writer = SummaryWriter(log_dir=self.config.tensorboard.path) - - def log_wandb_tensorboard(self, step: int, data: Dict): - if dist.is_initialized() and dist.get_rank() != 0: - return - - wandb.log(data, step=step) - if self.summary_writer is not None: - for key, val in data.items(): - self.summary_writer.add_scalar(f"{key}", val, step) - - def close_wandb_tensorboard(self): - if dist.is_initialized() and dist.get_rank() != 0: - return - - wandb.finish() - if self.summary_writer is not None: - self.summary_writer.close() - - @staticmethod - def get_save_checkpoint_path( - config: TrainerConfig, - epoch: int, - step: int, - globalstep: int, - name: str = "default", - ): - path = os.path.join( - f"{config.fileroot}/checkpoints/{getpass.getuser()}/{config.experiment_name}/{config.trial_name}", - name, - f"epoch{epoch}epochstep{step}globalstep{globalstep}", - ) - os.makedirs(path, exist_ok=True) - return path - - @staticmethod - def get_log_path(config: TrainerConfig): - path = f"{config.fileroot}/logs/{getpass.getuser()}/{config.experiment_name}/{config.trial_name}" - os.makedirs(path, exist_ok=True) - return path - - def log(self, msg: str, level="info"): - if dist.is_initialized() and 
dist.get_rank() > 0: - return - log_fn = getattr(self.logger, level, "info") - return log_fn(msg) - - def train(self): - raise NotImplementedError() diff --git a/arealite/engine/fsdp_engine.py b/arealite/engine/fsdp_engine.py index e45a871f3..8537e02b5 100644 --- a/arealite/engine/fsdp_engine.py +++ b/arealite/engine/fsdp_engine.py @@ -21,7 +21,6 @@ from arealite.api.cli_args import TrainEngineConfig from arealite.api.engine_api import ( FinetuneSpec, - MicroBatchSpec, SaveLoadMeta, TrainEngine, WeightUpdateMeta, @@ -319,15 +318,15 @@ def step_lr_scheduler(self): assert self.lr_scheduler is not None self.lr_scheduler.step() - def _prepare_mb_list( - self, input_: TensorDict, mb_spec: MicroBatchSpec - ) -> MicroBatchList: + def _prepare_mb_list(self, input_: TensorDict) -> MicroBatchList: assert "attention_mask" in input_ and "input_ids" in input_ + if isinstance(input_, dict): + input_ = TensorDict(input_, batch_size=[input_["input_ids"].shape[0]]) input_ = amend_position_ids(input_) packed_input = pack_tensor_dict(input_) mb_list = split_packed_tensor_dict_into_mb_list( packed_input, - mb_spec, + self.config.mb_spec, ) mb_list = pad_mb_list(mb_list, pad_value=0.0) # NOTE: We unsqueeze here because huggingface transformer models requires @@ -338,7 +337,6 @@ def _prepare_mb_list( def train_batch( self, input_: TensorDict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> Dict[str, float]: @@ -349,7 +347,7 @@ def train_batch( assert self.lr_scheduler is not None self.optimizer.zero_grad() - mb_list = self._prepare_mb_list(input_, mb_spec) + mb_list = self._prepare_mb_list(input_) total_loss_weight = torch.tensor( sum([loss_weight_fn(mb) for mb in mb_list.mbs]), dtype=torch.float32 @@ -398,12 +396,12 @@ def train_batch( def eval_batch( self, input_: TensorDict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> torch.Tensor | None: """Evaluate on a batch.""" - mb_list = self._prepare_mb_list(input_, mb_spec) + input_ = input_.to(self.device) + mb_list = self._prepare_mb_list(input_) total_loss_weight = torch.tensor( sum([loss_weight_fn(mb) for mb in mb_list.mbs]), dtype=torch.float32 ) @@ -431,14 +429,14 @@ def eval_batch( def forward( self, input_: TensorDict, - mb_spec: MicroBatchSpec, output_seqlens: List[int] | None = None, post_hook: Callable[[torch.Tensor, Dict], Any] | None = None, aggregate_fn: Callable[[List[Any]], Any] = torch.cat, ) -> Any | None: """Forward pass with optional post-processing.""" + input_ = input_.to(self.device) cu_seqlens = pack_tensor_dict(input_)["cu_seqlens"] - mb_list = self._prepare_mb_list(input_, mb_spec) + mb_list = self._prepare_mb_list(input_) if output_seqlens is None: output_seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).cpu().numpy().tolist() diff --git a/arealite/engine/sft/lm_engine.py b/arealite/engine/sft/lm_engine.py new file mode 100644 index 000000000..1b1715da2 --- /dev/null +++ b/arealite/engine/sft/lm_engine.py @@ -0,0 +1,94 @@ +from typing import Dict + +import torch +import torch.utils.data +from tensordict import TensorDict + +from arealite.api.cli_args import TrainEngineConfig +from arealite.api.engine_api import TrainEngine +from arealite.engine.fsdp_engine import FSDPEngine +from arealite.utils.functional import gather_logprobs +from realhf.base import stats_tracker + + +class LMEngine: + def __init__(self, engine: TrainEngine): + self.engine = engine + + def train_lm(self, data: 
TensorDict): + self.engine.train() + return self.engine.train_batch( + input_=data, + loss_fn=compute_packed_sft_loss, + loss_weight_fn=lambda x: x["prompt_mask"].logical_not().count_nonzero(), + ) + + def evaluate_lm(self, data): + self.engine.eval() + self.engine.eval_batch( + input_=data, + loss_fn=compute_packed_sft_loss, + loss_weight_fn=lambda x: x["prompt_mask"].logical_not().count_nonzero(), + ) + + +class FSDPLMEngine(FSDPEngine): + def __init__(self, config: TrainEngineConfig): + super().__init__(config) + self.lm_engine = LMEngine(self) + + def train_lm(self, data): + return self.lm_engine.train_lm(data) + + def evaluate_lm(self, data): + return self.lm_engine.evaluate_lm(data) + + +def compute_packed_sft_loss( + logits: torch.Tensor, input_: Dict[str, torch.Tensor] +) -> torch.Tensor: + packed_input_ids: torch.Tensor = input_["input_ids"] + cu_seqlens: torch.Tensor = input_["cu_seqlens"] + prompt_mask = input_["prompt_mask"].bool() + + logprobs = gather_logprobs(logits, torch.roll(packed_input_ids, shifts=-1, dims=-1)) + prompt_mask = torch.roll(prompt_mask, shifts=-1, dims=-1) + logprobs = torch.where(prompt_mask, 0, logprobs) + + loss = -logprobs.sum() / prompt_mask.logical_not().count_nonzero() + + with torch.no_grad(): + seqlogp = torch.zeros( + cu_seqlens.shape[0] - 1, device=logits.device, dtype=torch.float64 + ) + for i in range(cu_seqlens.shape[0] - 1): + m = prompt_mask[cu_seqlens[i] - i : cu_seqlens[i + 1] - i - 1] + logp = logprobs[cu_seqlens[i] - i : cu_seqlens[i + 1] - i - 1] + assert cu_seqlens[i + 1] - i - 1 <= logprobs.shape[0], ( + cu_seqlens, + logprobs.shape, + ) + seqlogp[i] = torch.where(m, 0.0, logp.detach()).sum() / ( + m.numel() - m.count_nonzero() + ) + + ## Loggin stats + stats_tracker.denominator( + n_seqs=torch.ones( + cu_seqlens.shape[0] - 1, dtype=torch.bool, device=logprobs.device + ), + n_tokens=torch.ones(logits.shape[0], dtype=torch.bool, device=logits.device), + n_valid_tokens=prompt_mask.logical_not(), + prompt_tokens=prompt_mask, + ) + stats_tracker.stat(ppl=(-seqlogp).exp().float(), denominator="n_seqs") + stats_tracker.stat(loss=-logprobs.detach(), denominator="n_valid_tokens") + vocab_min_logits = logits.detach().min(-1).values.float() + vocab_max_logits = logits.detach().max(-1).values.float() + stats_tracker.stat( + vocab_min_logits=vocab_min_logits, + vocab_max_logits=vocab_max_logits, + denominator="n_tokens", + ) + + return loss diff --git a/arealite/tests/test_fsdp_engine.py b/arealite/tests/test_fsdp_engine.py index 4237be76e..d295408d3 100644 --- a/arealite/tests/test_fsdp_engine.py +++ b/arealite/tests/test_fsdp_engine.py @@ -81,22 +81,10 @@ def engine(): @torch.no_grad() def test_forward_microbatch(engine, mock_input): engine.eval() - x2 = ( - engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), - ) - .squeeze(0) - .mean(-1) - ) - x1 = ( - engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), - ) - .squeeze(0) - .mean(-1) - ) + engine.config.mb_spec = MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100) + x2 = engine.forward(input_=mock_input).squeeze(0).mean(-1) + engine.config.mb_spec = MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100) + x1 = engine.forward(input_=mock_input).squeeze(0).mean(-1) input_ids = mock_input["input_ids"] assert x1.shape[:1] == input_ids.shape[:1] assert x2.shape[:1] == input_ids.shape[:1] @@ -106,9 +94,9 @@ def test_forward_microbatch(engine, mock_input): @torch.no_grad() def test_eval_batch(engine, mock_input): engine.eval() + 
engine.config.mb_spec = MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100) eval_result = engine.eval_batch( input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), loss_fn=mock_loss_fn, loss_weight_fn=lambda x: x["cu_seqlens"][-1], ) @@ -120,9 +108,9 @@ def test_eval_batch(engine, mock_input): def test_train_batch(engine, mock_input): engine.train() + engine.config.mb_spec = MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100) train_result = engine.train_batch( input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), loss_fn=mock_loss_fn, loss_weight_fn=lambda x: x["cu_seqlens"][-1], ) @@ -144,18 +132,13 @@ def test_hf_save_load_weights(tmp_path_factory, engine, mock_input): base_model_path=None, ) - old = engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), - ) + engine.config.mb_spec = MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100) + old = engine.forward(input_=mock_input) engine.save(save_load_meta) for name, param in engine.model.named_parameters(): param.zero_() engine.load(save_load_meta) - new = engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), - ) + new = engine.forward(input_=mock_input) assert torch.allclose(old, new) diff --git a/arealite/trainer/sft.py b/arealite/trainer/sft.py deleted file mode 100644 index 4ae7cc70d..000000000 --- a/arealite/trainer/sft.py +++ /dev/null @@ -1,156 +0,0 @@ -import time -from typing import Dict - -import torch -import torch.distributed as dist -import torch.utils.data - -from arealite.api.io_struct import SaveLoadMeta -from arealite.api.trainer_api import Trainer -from arealite.utils.functional import gather_logprobs -from arealite.utils.logging import record_timing -from realhf.api.core.data_api import tabulate_stats -from realhf.base import logging, stats_tracker - - -def compute_packed_sft_loss( - logits: torch.Tensor, - input_: Dict[str, torch.Tensor], -) -> torch.Tensor: - packed_input_ids: torch.Tensor = input_["input_ids"] - cu_seqlens: torch.Tensor = input_["cu_seqlens"] - prompt_mask = input_["prompt_mask"].bool() - logits = logits.float() - - logprobs = gather_logprobs(logits, torch.roll(packed_input_ids, shifts=-1, dims=-1)) - prompt_mask = torch.roll(prompt_mask, shifts=-1, dims=-1) - logprobs = torch.where(prompt_mask, 0, logprobs) - - loss = -logprobs.sum() / prompt_mask.logical_not().count_nonzero() - - with torch.no_grad(): - seqlogp = torch.zeros( - cu_seqlens.shape[0] - 1, device=logits.device, dtype=torch.float64 - ) - for i in range(cu_seqlens.shape[0] - 1): - m = prompt_mask[cu_seqlens[i] - i : cu_seqlens[i + 1] - i - 1] - logp = logprobs[cu_seqlens[i] - i : cu_seqlens[i + 1] - i - 1] - assert cu_seqlens[i + 1] - i - 1 <= logprobs.shape[0], ( - cu_seqlens, - logprobs.shape, - ) - seqlogp[i] = torch.where(m, 0.0, logp.detach()).sum() / ( - m.numel() - m.count_nonzero() - ) - - ## Loggin stats - stats_tracker.denominator( - n_seqs=torch.ones( - cu_seqlens.shape[0] - 1, dtype=torch.bool, device=logprobs.device - ), - n_tokens=torch.ones(logits.shape[0], dtype=torch.bool, device=logits.device), - n_valid_tokens=prompt_mask.logical_not(), - prompt_tokens=prompt_mask, - ) - stats_tracker.stat(ppl=(-seqlogp).exp().float(), denominator="n_seqs") - stats_tracker.stat(loss=-logprobs.detach(), denominator="n_valid_tokens") - vocab_min_logits = logits.detach().min(-1).values.float() - vocab_max_logits = logits.detach().max(-1).values.float() - stats_tracker.stat( - vocab_min_logits=vocab_min_logits, - 
vocab_max_logits=vocab_max_logits, - denominator="n_tokens", - ) - - return loss - - -class SFTTrainer(Trainer): - - def train(self): - total_epochs = self.config.exp_ctrl.total_train_epochs - steps_per_epoch = len(self.train_dataloader) - - self.log(f"total_epochs={total_epochs} step_per_epoch={steps_per_epoch}") - global_step = 0 - start_time = time.monotonic() - for epoch in range(total_epochs): - for step, data in enumerate(self.train_dataloader): - self.engine.train() - timing_stats = {} - with record_timing("timeperf/train_step", timing_stats): - with stats_tracker.scope("sft"): - stats = self.engine.train_batch( - input_=data, - loss_fn=compute_packed_sft_loss, - loss_weight_fn=lambda x: x["prompt_mask"] - .logical_not() - .count_nonzero(), - mb_spec=self.config.mb_spec, - ) - self.engine.step_lr_scheduler() - stats_tracker.scalar(**stats) - - if self.save_ctl.check( - epochs=int(step == steps_per_epoch - 1), steps=1 - ): - self.log("Saving model ...") - - with record_timing("timeperf/save", timing_stats): - save_path = self.get_save_checkpoint_path( - self.config, epoch, step, global_step - ) - meta = SaveLoadMeta( - path=save_path, - weight_format="hf", - with_optim=False, - tokenizer=self.tokenizer, - base_model_path=self.config.tokenizer_path, - ) - self.engine.save(meta) - - if self.eval_ctl.check( - epochs=int(step == steps_per_epoch - 1), steps=1 - ): - if dist.get_rank() == 0: - self.log("Running evaluation ...") - with record_timing("timeperf/eval", timing_stats): - self.evaluate() - - stats = stats_tracker.export() - stats.update(timing_stats) - self.log_wandb_tensorboard(global_step, stats) - - self.log( - f"Epoch {epoch+1}/{total_epochs} " - f"Step {step+1}/{steps_per_epoch} " - f"Train step {global_step + 1}/{total_epochs * steps_per_epoch} done." - ) - self.log( - f"Detailed time stats: \n{tabulate_stats(timing_stats, floatfmt='.2f')}" - ) - self.log(f"SFT training stats:\n{tabulate_stats(stats)}") - global_step += 1 - - self.log( - f"Training completes! Total time elapsed {time.monotonic() - start_time:.2f}." - ) - - self.close_wandb_tensorboard() - - def evaluate(self): - if self.valid_dataloader is None: - return - self.engine.eval() - for data in self.valid_dataloader: - with stats_tracker.scope("sft-eval"): - # No need to log anything. Logging will be handled outside - # via stats_tracker.export(). - self.engine.eval_batch( - input_=data, - loss_fn=compute_packed_sft_loss, - loss_weight_fn=lambda x: x["prompt_mask"] - .logical_not() - .count_nonzero(), - mb_spec=self.config.mb_spec, - ) diff --git a/arealite/utils/data.py b/arealite/utils/data.py index d6c073606..0b322ad4b 100644 --- a/arealite/utils/data.py +++ b/arealite/utils/data.py @@ -277,7 +277,7 @@ def pad_and_stack_tensors_along_first_dim(tensor_list: List[torch.Tensor]): @dataclass class MicroBatchList: - data: Dict[str, Any] + data: TensorDict mb_spec: MicroBatchSpec mbs: List[TensorDict] forward_indices: List[int] @@ -379,7 +379,7 @@ def pad_packed_tensor_dict( data: TensorDict, pad_to_length: int, pad_value: float = 0.0, -) -> Tuple[Dict[str, Any], int]: +) -> Tuple[TensorDict, int]: """Pad a packed tensor dict to a specified length. This function assumes that the input data contains "cu_seqlens" and "max_seqlen" key, and all other tensors of shape [total_length, ] will be padded to `pad_to_length`. @@ -391,7 +391,7 @@ def pad_packed_tensor_dict( pad_to_length (int): The length to pad the tensors to. 
All tensors Returns: - Dict[str, torch.Tensor]: Dictionary with padded tensors and modified "cu_seqlens" and + TensorDict: Dictionary with padded tensors and modified "cu_seqlens" and "max_seqlen". int: The pad length. """ @@ -420,7 +420,7 @@ def pad_packed_tensor_dict( padded_data[key] = padded_tensor else: padded_data[key] = value - return padded_data, pad_length + return TensorDict(padded_data, batch_size=data.batch_size), pad_length def pad_mb_list( @@ -453,13 +453,17 @@ def unsqueeze_packed_tensor_dict(data: TensorDict) -> TensorDict: assert "max_seqlen" in data, "Input data must contain 'max_seqlen' key." total_length = data["cu_seqlens"][-1].item() + new_data = {} for key, value in data.items(): - if key == "cu_seqlens" or key == "max_seqlen": - continue + if ( + key not in ["cu_seqlens", "max_seqlen"] + and torch.is_tensor(value) + and value.numel() == total_length + ): + new_data[key] = value.unsqueeze(dim=0) else: - if torch.is_tensor(value) and value.numel() == total_length: - data[key] = value.unsqueeze(dim=0) - return data + new_data[key] = value + return TensorDict(new_data, batch_size=data.batch_size) def unsqueeze_mb_list( @@ -472,7 +476,6 @@ def unsqueeze_mb_list( new_mbs.append(unsqueeze_packed_tensor_dict(mb)) if mb_list.padded_mbs is not None: new_padded_mbs.append(unsqueeze_packed_tensor_dict(mb_list.padded_mbs[i])) - mb_list.mbs = new_mbs mb_list.padded_mbs = new_padded_mbs if mb_list.padded_mbs is not None else None return mb_list diff --git a/arealite/utils/evaluator.py b/arealite/utils/evaluator.py new file mode 100644 index 000000000..9d559ea63 --- /dev/null +++ b/arealite/utils/evaluator.py @@ -0,0 +1,36 @@ +from typing import TYPE_CHECKING, Any, Callable + +from arealite.api.cli_args import EvaluatorConfig +from arealite.api.io_struct import FinetuneSpec +from realhf.base import timeutil + +if TYPE_CHECKING: + from tensordict import TensorDict + from torchdata.stateful_dataloader import StatefulDataLoader + + +class Evaluator: + + def __init__(self, config: EvaluatorConfig, ft_spec: FinetuneSpec): + self.config = config + self.ft_sepc = ft_spec + self.freq_ctl = timeutil.EpochStepTimeFreqCtl( + freq_epoch=config.freq_epochs, + freq_step=config.freq_steps, + freq_sec=config.freq_secs, + ) + + def evaluate( + self, + valid_dataloader: "StatefulDataLoader", + evaluate_fn: Callable[["TensorDict"], Any], + epoch: int, + step: int, + global_step: int, + ): + if not self.freq_ctl.check( + epochs=int(step == self.ft_sepc.steps_per_epoch - 1), steps=1 + ): + return + for data in valid_dataloader: + evaluate_fn(data) diff --git a/arealite/utils/fs.py b/arealite/utils/fs.py new file mode 100644 index 000000000..f79040035 --- /dev/null +++ b/arealite/utils/fs.py @@ -0,0 +1,9 @@ +import getpass +import os + + +def get_user_tmp(): + user = getpass.getuser() + user_tmp = os.path.join("/home", user, ".cache", "realhf") + os.makedirs(user_tmp, exist_ok=True) + return user_tmp diff --git a/arealite/utils/logging.py b/arealite/utils/logging.py deleted file mode 100644 index 6bbc08b83..000000000 --- a/arealite/utils/logging.py +++ /dev/null @@ -1,9 +0,0 @@ -import time -from contextlib import contextmanager - - -@contextmanager -def record_timing(name, timing_stats): - start_time = time.perf_counter() - yield - timing_stats[name] = time.perf_counter() - start_time diff --git a/arealite/utils/saver.py b/arealite/utils/saver.py new file mode 100644 index 000000000..2692f0df8 --- /dev/null +++ b/arealite/utils/saver.py @@ -0,0 +1,68 @@ +import getpass +import os + +from transformers 
import PreTrainedTokenizerFast + +from arealite.api.cli_args import SaverConfig +from arealite.api.engine_api import TrainEngine +from arealite.api.io_struct import FinetuneSpec, SaveLoadMeta +from realhf.base import timeutil + + +class Saver: + + def __init__(self, config: SaverConfig, ft_spec: FinetuneSpec, for_recover: bool): + self.config = config + self.ft_sepc = ft_spec + self.for_recover = for_recover + self.freq_ctl = timeutil.EpochStepTimeFreqCtl( + freq_epoch=config.freq_epochs, + freq_step=config.freq_steps, + freq_sec=config.freq_secs, + ) + + @staticmethod + def get_save_checkpoint_path( + config: SaverConfig, + epoch: int, + step: int, + globalstep: int, + name: str = "default", + ): + path = os.path.join( + f"{config.fileroot}/checkpoints/{getpass.getuser()}/{config.experiment_name}/{config.trial_name}", + name, + f"epoch{epoch}epochstep{step}globalstep{globalstep}", + ) + os.makedirs(path, exist_ok=True) + return path + + def save( + self, + engine: TrainEngine, + epoch: int, + step: int, + global_step: int, + name: str = "default", + tokenizer: PreTrainedTokenizerFast | None = None, + base_model_path: str | None = None, + ): + if not self.freq_ctl.check( + epochs=int(step == self.ft_sepc.steps_per_epoch - 1), steps=1 + ): + return + path = self.get_save_checkpoint_path(epoch, step, global_step, name) + weight_format = "hf" + with_optim = False + if self.for_recover: + weight_format = "dcp" + with_optim = True + + meta = SaveLoadMeta( + path=path, + weight_format=weight_format, + with_optim=with_optim, + tokenizer=tokenizer, + base_model_path=base_model_path, + ) + engine.save(meta) diff --git a/arealite/utils/stats_logger.py b/arealite/utils/stats_logger.py new file mode 100644 index 000000000..62cc0ba9e --- /dev/null +++ b/arealite/utils/stats_logger.py @@ -0,0 +1,111 @@ +import getpass +import os +import time +from typing import Dict + +import torch.distributed as dist +import wandb +from tensorboardX import SummaryWriter + +from arealite.api.cli_args import StatsLoggerConfig +from arealite.api.io_struct import FinetuneSpec +from realhf.api.core.data_api import tabulate_stats +from realhf.base import logging + + +class StatsLogger: + + def __init__(self, config: StatsLoggerConfig, ft_spec: FinetuneSpec): + self.logger = logging.getLogger("StatsLogger", "system") + self.config = config + self.ft_spec = ft_spec + self.init() + + def init(self): + if dist.is_initialized() and dist.get_rank() != 0: + return + + self.start_time = time.perf_counter() + # wandb init, connect to remote wandb host + if self.config.wandb.mode != "disabled": + wandb.login() + wandb.init( + mode=self.config.wandb.mode, + entity=self.config.wandb.entity, + project=self.config.wandb.project or self.config.experiment_name, + name=self.config.wandb.name or self.config.trial_name, + job_type=self.config.wandb.job_type, + group=self.config.wandb.group + or f"{self.config.experiment_name}_{self.config.trial_name}", + notes=self.config.wandb.notes, + tags=self.config.wandb.tags, + config=self.config.wandb.config, + dir=self.get_log_path(self.config), + force=True, + id=f"{self.config.experiment_name}_{self.config.trial_name}_train", + resume="allow", + settings=wandb.Settings(start_method="fork"), + ) + # tensorboard logging + self.summary_writer = None + if self.config.tensorboard.path is not None: + self.summary_writer = SummaryWriter(log_dir=self.config.tensorboard.path) + + def close(self): + if dist.is_initialized() and dist.get_rank() != 0: + return + self.info( + f"Training completes! 
Total time elapsed {time.perf_counter() - self.start_time:.2f}."
+        )
+        wandb.finish()
+        if self.summary_writer is not None:
+            self.summary_writer.close()
+
+    def commit(self, epoch: int, step: int, global_step: int, data: Dict):
+        if dist.is_initialized() and dist.get_rank() != 0:
+            return
+        self.info(
+            f"Epoch {epoch+1}/{self.ft_spec.total_train_epochs} "
+            f"Step {step+1}/{self.ft_spec.steps_per_epoch} "
+            f"Train step {global_step + 1}/{self.ft_spec.total_train_steps} done."
+        )
+        self.info("Stats:")
+        self.print_stats(data)
+        wandb.log(data, step=global_step)
+        if self.summary_writer is not None:
+            for key, val in data.items():
+                self.summary_writer.add_scalar(f"{key}", val, global_step)
+
+    def print_stats(self, stats: Dict[str, float]):
+        self.info("\n" + tabulate_stats(stats))
+
+    @staticmethod
+    def get_log_path(config: StatsLoggerConfig):
+        path = f"{config.fileroot}/logs/{getpass.getuser()}/{config.experiment_name}/{config.trial_name}"
+        os.makedirs(path, exist_ok=True)
+        return path
+
+    def info(self, msg: str, *args, **kwargs):
+        if dist.is_initialized() and dist.get_rank() > 0:
+            return
+        self.logger.info(msg, *args, **kwargs)
+
+    def debug(self, msg: str, *args, **kwargs):
+        if dist.is_initialized() and dist.get_rank() > 0:
+            return
+        self.logger.debug(msg, *args, **kwargs)
+
+    def critical(self, msg: str, *args, **kwargs):
+        if dist.is_initialized() and dist.get_rank() > 0:
+            return
+        self.logger.critical(msg, *args, **kwargs)
+
+    def warning(self, msg: str, *args, **kwargs):
+        if dist.is_initialized() and dist.get_rank() > 0:
+            return
+        self.logger.warning(msg, *args, **kwargs)
+
+    def error(self, msg: str, *args, **kwargs):
+        if dist.is_initialized() and dist.get_rank() > 0:
+            return
+        self.logger.error(msg, *args, **kwargs)
diff --git a/assets/wechat_qrcode.png b/assets/wechat_qrcode.png
index bde130ade779d00e37c2ce18869a01ff72cd6d2b..1f867badc73046805479fe1a2b44c03d619f4fe7 100644
GIT binary patch
literal 2772
[base85-encoded binary payload omitted]

literal 166550
[base85-encoded binary payload omitted]
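
Note for reviewers: with arealite/trainer/sft.py removed, the SFT loop is expected to be assembled from the pieces added in this patch, namely FSDPLMEngine.train_lm / evaluate_lm plus the Saver, Evaluator, and StatsLogger utilities. The sketch below is only an illustration of how they compose; the import path of FSDPLMEngine, the config attribute names (config.train_engine, config.saver, ...), the dataloaders, and the engine initialization call are assumptions, not part of this diff.

from realhf.base import stats_tracker

from arealite.engine.fsdp_engine import FSDPLMEngine  # import path assumed
from arealite.utils.evaluator import Evaluator
from arealite.utils.saver import Saver
from arealite.utils.stats_logger import StatsLogger


def run_sft(config, ft_spec, train_dataloader, valid_dataloader, tokenizer):
    # `config` is assumed to bundle the sub-configs used by the new utilities.
    engine = FSDPLMEngine(config.train_engine)
    engine.initialize(None, ft_spec)  # TrainEngine API, assumed already set up

    saver = Saver(config.saver, ft_spec, for_recover=False)
    evaluator = Evaluator(config.evaluator, ft_spec)
    logger = StatsLogger(config.stats_logger, ft_spec)

    global_step = 0
    for epoch in range(ft_spec.total_train_epochs):
        for step, data in enumerate(train_dataloader):
            with stats_tracker.scope("sft"):
                stats = engine.train_lm(data)
                engine.step_lr_scheduler()
                stats_tracker.scalar(**stats)

            # Frequency control lives inside Saver/Evaluator, so these are
            # safe to call every step.
            saver.save(engine, epoch, step, global_step, tokenizer=tokenizer)
            evaluator.evaluate(
                valid_dataloader, engine.evaluate_lm, epoch, step, global_step
            )
            logger.commit(epoch, step, global_step, stats_tracker.export())
            global_step += 1

    logger.close()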
zLJSz}Kl~Rb9nbR3?#CP*t=u%<$HvDCsJ*<~F4*sD%fe~dE~?={&%onnl4+8wQ7TJX zq!NO=TPasYKd^eMfW3`79qQW60uiJp*6~*MT2>4!ujUnjv@5sMd*_ z9nTiT%*rw@cX9^B?bW9G@F0SmU&zu6_nanYE&^Rc?>67X47`1Y66Sr-A?HKY> z+eqygH%5C$a364jo@j=XR6@b1jRL!d&CLS|TP3$eDbK{p%o3cZ>2pykp|f-cC4kE3 zxg(lS8vx?m4z;aNP7i330kcm_=ctGi;1=r-0wl&|Sib*2s@AaEJ0K7{O1>1h!-$Th z51n)9U8xC8`St=FBz^D(BvchHA`@jP;TQ(ntR1tr%oD+*;zJu#57XF_PhSOituYJmI9mdu}C z0h^u=_3?tS8NR;V^Bj-AU^-B}dZ^;u!k|if+spv}&Me^>!k^Liqd}p-nkRd~K}#J) zG0H`2Q#h;t^PX($R1)#A8r4~=tFXrymIV*d3YG_Z!0h56`6IfXjYEkYAFmTGguWVhV+iC0nlcTnGDfc*5lI4m8Y6;8GB4&{0&py($ z(hGbG)qTC&zp9ctUo3aO;%M@OS&VYLQQ%9&-gj?w>*^YCLcFN+{2Xm&+e~h{o7Fu} zi-R<8YTcvv>S=wzhlX_?RVD2a?Z;^5J}^~KV2uZt6Apx6dQn|^sN77)$PYh$P)UDY z*Jb@_n>Ca%6eXTo0~v7d?8HzZz8!tgy%4t6rBi#Qn`v6ID>52=Q~*kNy8Jrfh_0HV zV+h(Ww=R1Eo_TOaWO$XP%@zVP&smicg%FtG=Q>7#V8xzy3|7Nvx3y8hNY!tioHx$z z)g%a5TJwJw)oe~iuBwVbL|kyR#!JU~wl|elS3@bioExeMXW$#`w61ak-9}@CbD0 z6pGda(G`f>gjm%!Pyd;jI%Kc2mR>IDa31)66f5ibBvOszaYbXM;KXp|#Ls8GM9POh zBhwhf?66N(xI6P-Jv#lGBaf3SZO&n&C75*Pa0ATe!kPc5&;+ZjPu%h5jv9T+WpNRP zBe0ELuO*ssoNG>NcJDO3a<%SG@Uc926o<_h*Y%h4R-O3o2)#tjZI=7TgNIRh zf9c5lG`_TAdEi+)6vs`ehR?2O!3UiAP@zpwXWysABN6BBEDi3qhs`$R zrsUdqY432(a>6wn(b>aKtwy=8i#iu2a2J#tE6XQ2Gb&lb-SIBpe3eQz#2*iLuZFL0 zOa>;hGmgD%Ae>V4DT!@@-oT@jlkG6sW%KAr9W*JK65J!TyEDrvCFPhJci4cO@Gf{B zj8_#Z2z5e<5bHlLLY8$Ds$K(15?wX+Szx1Nm~b8*Id;X|o^3VSU|0C5xI#3_Ds8Eq zrbxD0PH9aS=mL^es0UFIb&k~z(?Nbgo+xftyc!|#EGxCUjh{cw0wsyae+;0>K)SpSKg=z zU@2c$p7@a4`kqp|N9KThBO<&OuR0mY7riMRxKf$&5x)UGlcSPwPb=chS_vHc_^Tsk z)(_TZAl2;=F~NJ9ZrruMzUDRayVP^#hzesd3g|ZJ-&V&NfjC}jO#ZZYa7Lt(VxfKQ z3NH!23q>q{Ji7gs_miO0|2TL;5?I94lp_9mesx7*}Z=E+N~L9b8|Dszv`T+{ga`W@_l@W--xUS{>n;~NyaU&qqfsR z@Kkd`N{2+rU9J@98+-z);Ts0h46*(T#9~RcYAkJOmbWgrs=$ti1}oYW%yp%=D9YX2 z)9bQg`tTPmm{eB?%poE-z1K%Q@E)^cVp+sm z@ZH-$q1q;P9Fl0PcFDzo#;5{07^`!>@Vn6R3>d8=nFTld=n&x~cMPg;j|}oXS1*kP z)oGe{Q$Ct_Q?Sh`D_VST-=qeDr{0E>76=1TP=C`1v>2K2#XzwoJ5frL+iPQ`WSGd| z-<5}XzE&F=k{@{)0y4yZWSFu#md^>OrgBjA$?h1EXji%9nb^(pj7&{V`iMS%@7miv z-pw@SA<)iLQvwKw7=DT05*_l~ zSdFb_go4I0*kVF_%dKj0)zY3YQw!R+eGI8%^-FW#;K9snCsqXNTkGbVetgvNjzJ|> zfH0!cV#ePH+y-UAWFt-};BJ>a8;e$j4XZt=*y@1$Q7jR_1J##w=E_m`W~l+&gIJP!q3k#h1-)XGlypQ@zAjMF2DzP?Wctq-=0|XAX}~A zX&m99AhDSm*I|SRR5JffO9EZ0(qeVLiVO(;QuqmE`&znYwwruz$r0`o<1zc_E$f#d z8Jl$Og|RIm0JiE2fbhZ!5V-!eP692X18c~;c8U$nIxQ@{L|*38VeacuOQv1qY0~1H zl-^U}!#HZ9G;{MBH)q@fTGP|tU9x9_FV`A^ zAb+1E(V4|>y~p+I{XGsuWB@@~v#7%-y@ik4OTtRUe`Z;l&2WfE?Z!q&$+21}-)Olg z=qJScudkEItgi0b6KQBrrt=$riy_2eGG41es znkV~8b&(?CJ1Mj*BbcvRaLWCMsC5oyu|0)#Fg z2thzl=}J+g_uflDdY4WJy(UNr5P0_e{N6qLJ#%Kyo;myc;V>d(kbKL!*SfCjlS|>O zyQasms8tn^F{)7D0vs*=hUO{iDh5a(*b!=H^g#>o-WKaeqiUMt12nFO?{oo+%I+zD zzXARyqPkdPtw%0$VC<$(2UDxWw~F{y<|>zTw8#y%q0#)=5Z!$&mfQOVHD-B=L1yEr z&3|{vqt4w)2D)+}Xi+qVU%dg_Rq8e;;?0;D_C^HtsaJ>XvAu-RDD~LW6G_Fn;GeM9 zXk+uI?q?4?_(=V`>nDX2PKiFz%(kkcIJ2D!aGEo3PIpXK{z&_frbZLx8+$2HrXtiA z8m~acy6m~XrVSrpTU@cBeOnTAc$g$baDP4S&P=2|M6Zf+(k19(8lz zJI1g!{R!_-l@-E+4d}Jxi;ju)=0hR;8zfaf4gC65D)ASjAK-sle!hxQjo55}>&WPE z1sFc{bmx9`E^jP#dcWPSP9rei?6N`r8St0BI&^Mq8B+V%qp3*EZ%ncS`Xt3X86Zq1 z0E$(l>|iOysU>>7ZE>@!mTos1tR z%Y2%XqH8sNzT{_~ue*ZXe|WRxJcU z$GXGfY{3&A08jSt>-`$t>$%pr4B1fT+)syu+vq+|oF=vmsb|oYk)B;>JLKmZcqi_T z{aavL1Bbe*V(eOOjI>!ZM#-Q^U3I>WsWx_0{r;Z_c>)F8RmN_+AlB|7pgJHi&z8=V zFN`FRHlPuQOtPQFjz`rTa7~fh5 zmy!|i1GhR!nHO;8PMdYGD&GMx(T|D;efA1UjTQ%3#_TBs*sn9zArvqT^hFF#!Mn)K zOmg9gN5)%U3dKr$Vu+!>=8EsW?E@fK9MQKLx56@uyBkp8JZEfcy&~Q+TxB*__=s zQ&$?`ZJh_#&aOW~EOUp;cb#uFJuws@m^T;_yS?>tF*VXavJHPgxKU>xSrM3ES^~ z1Xu$3KmpCMy{eoV4$2?aDBeHee(@kVi;Lixfm~`IAPL8YF^J)qT!P(iav8r74R4`{ zaMdZa1nl8Hqy!1xcF)Xu6?1X#M8o!L{sBp;aVDa26F#p9q7N1xx_JHR^D-L=lOM29 
zrvzPUDiCDs!|tHQSo@Ififo>QBfr!s?=hF>`!c^ZNP{i};mx%{MM6HW0dq+D*D>Bn z*w2fYe?V{2!<7lv4H8@R#Nyw&EkApFi^zR)b5qv(%gS4r?4`aWb*H%E%Z9o0*k%-j z?Y-0F4uD;eQ9XR3?MZzj8g`xr^@jr8U$j}0vW99e$0LiWw-LoQu1_Akezp!B?uiXh7T>enpMiQA->WsGQ?DV zIDW9B;k3M?SeH}YY%-he%b2$zxL8elWbhQ&ve6;`W*BmQ44ad=HQ8Db0#?~2Qtavk zIK|a7!j-iEhfzYorLqylZL+|J}O6}#H@KIA}pB1~k zF2)JLfciwCz&@T!u)DfDPV&Xf7rnUdxEkSp zROV0_bFzZ$ZQ*>FPzIoSsnt1Wy5XN{@%A`nu~Wh8Vbs<;m|gKdAF_l~0{b7;vaeB6 z(y46~p?@GKmMMB>gG~C9sOD=D9vmDz#(*OmmNMZAf|Bc5Hv#*Mo zIW2w%fD;|>+jMTE5NrLG?+YqWrB#LwG-3U`0E3?1m(XZv1sNJJO2r@dpl-j*0>69- zw}X!oYG7{Yr~O+g%*g&w;joNOR*9ts3l+1;VlBK?fQ7&8QQ(>w!@QM_w{M=E2IUi* z`TRA3J%h=%sUFo6FIz!?xdpSkqLkVF4Bq1f0x$mo34$)?fN}W;oKd0koIk5-Os-X( z1#D!Z%0T3c4_OyU9;yLl3K78$^;v$7`D`$&EFzld=q>pbg)6;CwrrP$71EyOFA6YbWHrY zd543gis!5Io?vGTwHDgyACLmjA@>s_z~(=#Nk0L$Mq5Mud&doDtkIsg^-V}7O6-!} zuj4;wvql}vQH_E{Li?ScHMrMoA|s$zjAQ79Cl3~XeWTX(X~_e1evXHS@?%AA+rbb| zZt!8n--{0D|>Gy9^cHX4eDG&GufEd`X=W?+%x0=}ww;e?Q z&Mwj76UgY|*5rB_M9-O4vlfxdvZH!V+wd>xPK_xa8-OB-vjBo8A5j7v*3vK1RF)`n z_xq~`jj4vlzuhBYYw#w{ne)QH(1?FP{8a1UYa%UN9smby!iKF6>ALVYjf>^SFo=$9 zzBL+DjrR4sV7V@{qfuS;EY2gI(4Yss{J$tf{&z(S^#u~qeSOTfTeFG?V@Rwu4&YB82F5r8ubabNYDv!;vL{A01_Fz zJRI5dnI7wuVWxRQy_`{Lx0pct%!}j{(rxAu-&>&4{I$c$J9+x5xuM0~q~2=iH&ti& zBZ3r(kHOpfeIa;jV|R8^uv$7iW>q%z$(mi|4BC!rrKG%f24uQn1{l!<h9={lr2hn6~5f#}`hfylGq@yg}ZN^5xIgw18E43tm_- zC|>CDC9J+o))mmSZsLjI;UOsl1$y=xL|ev2)_(D0ks?`%Sh!`{t<+m*aguV|9)$1#lt=HjfeQ=?$gFl$t3PRq<`*8F_HG z>$5puYJqzqNf8F4zvTMBtknzwNRMv~>TqDp{dfq6i=oahOAX;WUkczlX)MeV@?6P(;x{^6 zsC=KaPrPjr62gk|GZDY68~{_)mg}$(qyl~^0h2a<0RbG!(`s4n7gWwUia zRBrMQh|Dz6v1Fg3WXUzGvF>Z8{j)~LCxAtrEn;9l*K!G5)AtVuSnR7%xq?XWm zcY^42uE;@{v3C$gOvptnZ~FvHdqE@XFKyMqU%5(TCI{2hG-}rf_5zLgaC%6+0+?ek z+_S>#nQoeQhyL_>`JZ~c3l6>CVi#_Cn@SUYuO`>HK4yz-Lz1_ekjM8Gr*|^^jO37{ zd{+)aqATS7WFh!#xly`&0kyWFPpI`8CA=1tK@!o<-B)1ItL%AhSS8AJ(mKZL0B}>+ zym5^c64>k?*re`T=@``WS>D!mfttk?9)>s6#3bmos7_Qz(3O-fMn58;Xwh1|^bhk! 
zqV|2{a}-AxSJZ)9cZYMLmBEa%&Ff~rCAs@6<67KtJ%lfZy^|3S1qguhGCCJ!8vu>m z&UFM&7GeX}I36J`K38z;SryjLX1J*Pi}tQLC@6ICZZ+-!a`77TsoJ^OLqCc?BCj$B zQzWF`QI@o_-;L-;B5a)rl={E~bs`jMtuyy#)>^OGHJv{Ee(=fGJSug79n}_S65@bi z>s`19WwJ8*H9s(AF+?uxTOy_OfE+~9By-{i;`@(Y?QK~SIn>GwW1D}j!a6-X6nS#@ zT>|I9at9EG>@dM9)jqv6GH;e@_ZVuhX8Yjn!zTfRXGEfi;k=?@W_s2!u+ z$#{A1j|-zefQS+nY77PRm@w~iim}VnMj#Tz2`8xVJ%w>m*pn?t{$Be!2(QnmW1@CM z>)$3`eG{*PWN)eWZi9zI{)b}!aQTv!U%B1VVkLCD zDgCiRzsVOsOb9Cc7b)L=NdMuUl+c6U)mgM5>g82Hn|M5AmDc)}+u*eP3D8!q#c)^_qhCSnJ^%YsWa?FoZcd6kaQC#lrNHukQU7EdZ5{dCKJ}xTNASzGNBU^7Uh}ut&?NZqefb&x zK*;N1>dd5-(-D)KytBjmVN|>6^$Ur$f3wx^FVaU0XqXaaHz9--&nk55fuq@*-lwN@ z_b5fc&$BjfKt-M(04Ing9m2GbG@O^s@G`#HpZH6UU~qNEjkky6iEXOnkp^)q(r+C@ zLXX|>sb!<0juYbNZ}McUIG!Q2ul%M1B)Xi~C8hP69>w_c@Kg}Ktx(7#dhPUbSXYWY zCHP;o1I;t^=PnVgUQTfB%&Qsu8YZ_bUQHhEKS6Zr1pHAzoOu%Z92jUwTIlc{1b8i_ zR-1py=c(Xz;IAT4s0hH?0rw4-KqFzVK8}~Mm_Cm8cS33YNA-J+Gz$inQ(|SO!Mbew z!NvK3e6ON=4vj&C$wyS6z!RWk{B`d;HfEb#V!%}OD~n8$`?|{OIcs-UswYSprq{0< zm;cfQZC7#5r0J@0=*{&eG22=WxT)M9}?FYb9-VS`8Vq>l68&XT$4i zqqLFCSs@1-L8c^KV{r>wQMGK@Qf3X!O664DIv|>?U&Vy49N0qsW=vrVF#!)TVo~L_ zA@^1=Kbj>ri`3%d%fZTbQ~Xgtz}b4~@<3S_v5Ulhj=QdS*T5BNgs7g%Qx+^r{Mxyg09p`p%zc>;)XK6qOtuUHFMXc!-2M9{N z`V~$he;knk>U+%}aY=pwj#{NumRqMG?kU+9*Y#5whJ)@(5}LfsqbhKc2Z9w}V+}BN zkI^0PzE9KzyH)YhX&bF2$a@W2WhE!vg`a{t*dT1supV2c*k|L&^jXJ7^RJeseqNm@ z1>OXQgrGU67ihDtsoU1`0P$Up|@kj&6b6uZ2{SXW&HJmN7 zx78vZMvde9W?jE*z6Qf@nPt5)Gur^%_?Oc+#T09%dsCja3&z{T#9y9=b-kEbt@aY zk=9t9W?64h6emuK>fm`)mY9CloG#D_co%wi3dd2r>XKe#v9w@SJJ5|>;>cIhehHOF zl=*<^GwQ16F^wl52P!;JMO(YUVAPrqFjUAWTC z9Izz%>+sx!QrtW%C#nvliylL2Mc0K9nY&t+)n1s%#XE{@HxsdN36QIGB4(>@MR3yN zwQ}Q`8W!#vXM6Jlg$rtOVa!W91E-@@Hdt>=l=?Uf{G!mc{-Rjr`(s&eaR7wv= zkf~OmYP_Xg+^Y#Qt=gtnQ%AFykxsHkc)YlQogEMX#7Pr*O9toFoGSYGE8M2Ma0xMu zthyveC&jO@)c_tG(b^H2%!3Hy=Vl5I8l60XJ}Nn7oPTLo#2WS|oL_yHU=iAQun;2E z+^V}C8<&T1N)Yw4(#{cJp%a+FG)6NIa@6dqvA6wO9)+;2t-j-=Yev_9_4~P?=jZF| z&VHip^mcPv7Gx~X(g)kL1ttJuiS6u_=>D&cKP^`aM+$4b4 zt|Vf_533KEEak8N6mGi38V_sngThtGPiPV$cc+vAP_SDFhpm!ZrnlUuQL7=P_gr^J zsij7OJicO3J*xBP>*sSuF@A}U*$|`_-mGrW5h-j zVejebi`cFpzqj3~9wd(u9BhdsW9L}*1=K(k2vyQnen+H;>4jcR#=_C;g6{hz{o~&c z-q7}*^$TsgUR1EcT+v5AkRiJIQz8`wO|1=3dR=A>n;sY5jZ@%+vZYWX14>U402D8vHUMqmKwhdHwg7Dd`x9U4{~mcLx^&o<>ot}xkA zK?N6wliWAaOj4M;#HKyp!G7rCu^Q*923Lg&_PBH}h(A*K z@P45CM#d(0iaA2?orLoFAn0|t3Jrth?e9gy+VshD!kiToK}_TC%8Lqun_Oj`vL$G> zp5)fa`q6K5RxP?3)$|@_dxNK^72li*Htjxi&}^Vh%Twpccqc{U#H+S{dtj<=wHmKj zkwOPRB1I^e9%_5>!(Av-Gw{{QG~(F9`6 z&^0}ZtC4T&^j@x54SlFSFvu97B^CscYhC!J?wfb8LpXPA>BoV(ikwB{>ybuF|0^dm zB8!M2I(0U0?96ctn5kBb8ml;^IpcmzIO{kqEl^%w_<>-MSsBGSlZ^Si{yG&U!Z`MH zBCpAa9yrKTET-?1ni}Te`)2oU1M0F(X@aQaKA-?j39xw8g- zXA7W9;a@|FEk^?V0z5ubZv_}qfc(QrnOP8Ju%k!R-`lpkz^pK(Lg+KSDS+s{(Nq)R zKK3bouZwuRbHfku)LV@mfe^#rX{U7;MLmL>8}AS6+zX^gD4HbY&t2cn^0u|IO~d4` zQ-4J{6!jFSdsSInCQRz%r&tl-sjJAIjRpO+{9g#5D+e{K+}hvo^D@hy8sw!*1d;e1-!>t=>(fNMG#QRu@df-Nc9Q>CpH@#+7)d2ssi(J?ox9!{2o%brtV8`w5d zxAYA_GjoSQ?3PIPR%SZ~gmwG27b=I5%`DFRkZE(zh+e?%g?nG6c{vSUu!Cx$|0=re zKUcn=>NOg;u>vn5LLcllt0v$c{c>eA!=JPG^rT2ws`q(49=j8F6X4EO%4NiHqSG2; zWP~!=bjr#B#!ij`;n5$#!b){sTJI4D*nO-8GBKdbG^4(PaL$bB&EaiRJ}JX*kZf{p zOIEIB!+J+z4tpDVrO?U2J@kS}_CzgGdxlB0&^H@V6e7ff4Cou7=?SH-o0PF~>k=u^ zCg_fsnaQdL$bwt_DnIeTi2$#ehH|Ib?>3fwwt{bcQ9DscyYyc&FK@!XWZun+E;}L( zPdeQnBTB!*CuN4in>chp;Zg@eqwQiiqY0cvfw{R08$e_6Z1mag%o~swC zi$bkNTavB>yFXE2QGaWm7Mc6>h~;WMTz>yJW`SLm#a&sIrIFR9uVP_X>5_T%6V)7x zep}%FN*lF76esB`M;l4Um!#C91;4M8GQov`+1q_05YG>$IsCdxQ9sl@&&8>gy9?^= zJgY*#?kLU$@*{G|1Z5XP^s?|DUJ6QvK+ZrucH|INXT{~wR+(g(KU2r7lf2XpgkqTN z){QGCh9(RXQZD-K81bGZJk3p{a5+oUF-&2J-OrdA1upYdrGN$Qf4VNkt~Z_-mkl6N(X%C~;O 
zYL$A=?51ijQirTszT^oLYf~e2Zd}6&O%BPiE*Z-)-!=n_Cf60to!IIt-#9fq0bRX- zP+&P|35td{DCy>W{+{)iWxIeWPjzm^_Hw|DE*n#C+@lxzItY)*-U%youFe(r`A^`W z=ns!eCtRN_&kY!>{{w0-iI@z1;F7+|=qE<6=j0`Qy%<&l61Slz>_PodQeZ);09Lh$ zqjqwvsG9{(LuJN-sWGETV{$wvg0h!Ah!uHD4#e;PQ-i3WX)2%6a7zeV%@~$ zRkYaQZmPx;I6$3YTa$6MG;jO(F}i7C$9qTfIjS*_rV?LNqRx9a{v_#r9c4Y=HyV7G(O(rOuM_Z+aVY{r$n;$u-~A=kzw$qd|%&8A~;3iV9@Ex7#t zry(u^QJ|w~0K--TU*s?604l027%37z-j=+=A`mxL8+3FyCZS!5qaCX;Q+`S|#Hoi? zGI%}j=KG<27^w7+j^%qxg4TWFO{(la5Nri6+MrJ_xa;bXOS;kgl9i`uB$D|Pe>v%k z{=jYNRd$%sEY7j0C^w9uu&#P=>r=fZ+n=|C!a=@Mz|(r~oD2Hp!piZwZ6eY117W3t z5V-tOmC^&mG``S>G8*cAvv$r|_c|MyG!OSo9}mDp6G>A=7!SJE_^xS8{(z(o=7%bA zF2%W&EiP)Xd-a4!-V^Bl1;BSzgj#{#^v!zG6^V;g8>~qlcw6`5CUB~NxWP?sYPUWy z`0asH)2Hh1DG24%a5AMz)$VYG1@KEi8E?=V$W?wigK9jaWj( zZwP=&f|TzpyUzNbBos^`*g_~ku7w6atZzOT);ISb-+P|Q*`loq;3pRv67NOxs7Gs% zw|7#KrFbDVq|g3Y7&+|W*q4RAiGw%FB`Q@W?VrI)0p;L)LOy@$Q-Yv)Da zxZ4@t*1@9Nrs2JNYW7Vm?Ed%5p<2i+!(|`?0Nd_)Smt5RF}y16)nF}nijqG)#c!i} zF3?Gjx0O4u%HSuqvJ8#Vos>oM)I|+b9R(2J_TOP>I*;j>R&@h83mwB$h7);9bv|q< zrzSY0?%F+?Py=&dlRDe^(0{~!?fUZasJ5%OLmID#K1vuC0v?yOgQzA0sK7gte{|yZB ze*XN=d0*UkHzvU6*$TSz*q(5!j=9iUSmIwcmg{8yHlE1Q4|{Vzh`_AJBXMeEb+hWHgTVU-aAs z+(XZM^dCm+;1;r!9zN!DA!wo?+1xUmy{sW4diFE7M6rl;1UqIrN!mUKMZlScQoZYe z(+FN){HkpAM#PWMM4hhuj>9eOJ5ICIQ3D`VDCpuJ(0u$PL7r=aCiC#t!Q)qga&Jg~ zc!Ws$+Tz&+gaa5l2&wM-+JVC#k!vx|N3LQCeLq_OInKo3DUEzb+t4&zj2_$I~^4WGQ!w zC*^S|Onc}2zRBsO7l@#EB;Wzs6CwO9Cgnp;VDtO1gK4~)YgsFG)kHDg9A5v(fmV$< zrGNRi-@14(72jlZh1^CI*H`vPw_?nHw|eB2P`lXb8H$vJ1=yecg>85*rQq0I-Sx28mHX(vjFo*ww?`f$n{mdEKjH^@Ea*!e ztFh1c<&u?&9EaZ9`MBI$^Vo^EA0?=*$-}dUSFT^z`oOQm0t@fc@z zL=faC&P4z&L58IT0HatpQvIY^-AFLTh_ClaC)m)OHY5?`HEkOEURp)Br}@jS^QkLi2c@b@M(Apz%~9FNg2a{OKzh6=jQ}GYC#I#pTsqCLlcUU+ zmS<1SjJwq3GXN=&%`LWStD6>y;|a~jsqc}l#e<47u2j$6YwPU@maN~?8i=6sn~WNN znskok4dV8`$VNW zMb7JFD;ugREy^iaG>7r2bx>Q@|5i~%egEk!YN5&IZW+>ZZ-;9L4q*Km)9fI1M0yZ!W(A;&s5~i_BE7|&WmYcu{ zn#QSqTigqy#Td%5*o9>Y$P5_9~AAi(zf#kE}$WPRKK(8%`9y*@0-!S4L-K*z_}cVOh>n&j8~fXU`q zk7rCq`8TAH%|jYySHJnXz>umE=6zE?W_d zw(A~wZAZB~_WI7<<>!Da9Rv(4*s2wO zdi;Ksx=snq4zhP0Xi>yYyJwH7Yn*-hxMD)tu$ z{X}R2rh@(zVt~k~h}oJp7C7x)<=u%+2uFF~?F6!HCk5@*dCuP|*+^~$iv(_D2rN9- zaqrHjm5O{s2WgbXTc=Okv!iOKt@wabx+x3%l_|gB4;Mhi243ouz0^jiL~CK>kdjb2 z^`z%@GnOZ+(i-<#t;mmdG|9#Q_`vYbiwd8qFemI*Wu)#%AS!!Ys#F|5FL|6!UDXz7 zVemsAv{IEKUYsA-A-rib_~+}YQVPU3{5X=HfHwA*RUHv_JiS6EN6Q}GtIR2_0%qTE zENI7zc+oS84;=m>YzOb(zp8&tr{JT_p#^?02FWrc8MmXIf7WTs^_q+3U>59=Ce|j&(Tyti-m$|vtQ~e#Mx1fM?I}aZI`sRW*N>As=WlmC!qVQ_;>Re35jj_N zM=%{2$xiG&rwg+lC%%CO^x7p@u0)`OM#S{$YOiHd1;H}9QUHTRoq4}6INB%l9@ZC` zd&iCJ*Ip1bW!Jm0gnr+{?Z^_2b~Cc3VETdgJf zoHNfUue_oT1XVrcSljs~j{V**9-~RSf;XN8OS-bZWmuOv>*(8umP6R~-4sdgEPiED z`9iB+ks|PKF-frZ4=D0!8M;S|y5Cf!jMfY*c zRs9d7Qi%R)Uvo?csJP{n3HNdo$1Tn) za8J2-PYEtc>|UkX(d{h2GTN&gi@JZO*y?{2YS4IXpAT7@8{b9eT4)3LaWC9LK%D^y z?Zre**8v2w$#Xjxp>MfYP#|={@wXN%KnVg&g&kf=cGLawy5J8Y{Tw z8wqlrD^la2=^W`ghX8I6#!)P+q}f^R;<7KWp=%Z78gvxxU2Fl(`Kyt*B|&=dCvsDZ zud*`$RS}fCKGxIr>@j+L#sl^&tQ6tSWeUf9Iau+JYDN4e!t@!ajZqD7Bcg4wSS@TYVqMzX7CSR_6TBaRf%l^_`f9};cOV@j3R7GJ6=gwt*4 zmsQKxV-L>tlR#1akDHrmLsDR40~sn%2+_4TaAtHXlvE709J+e15)lR1F?@Z79BsA& zg`~x*Mnhw}atU1_eDJGVL@$$~WY2q@T4Bydb0n1VVB5M$oCemh)Eo*qx$rz^vOfUF{%m_4u$r0FD*svgTLY*4apw97UZb*zOPlr)$=4D1I%FDpQm z80xyB#gI&cnKKG)_2tbTA0TG^2f2peP%$>&qNc8;J!fxk>v>(gRk6jn#~W&WZ5E*C z4w+;>J*`NGC`&f<9vXN+4hWKr=;n&M(3$3N`lMS%&gRVLlst__2awRTZN@N)BR|!+ zg`}8c8G!TTl(0PL|J!2YQEgOY&?Z3P0kS-|IxK~jMS*|&snJ^IQqeL@|B22jys!m7 zfK!!xvz-MdV8FUFwfC8|!~(&-MQFahq61>&=e73{AQ%y-Fa;7<{9q4|fDTm8qC9;* zbp&qi&n`toq|KiNOeq5O{=3lo|CzxMT+vCxw4zY959yQsX z&kvqm%T*SfJ96k4WaqLFTILq$_ZB0_OGl!tU8;kqzV(YnvvqND&f%m2iuYN4?DRbn 
z2u2*DO*EA8;5rVu-F58}FjaJa>YC7@){{w8byL0bSIhf6{;!^MM<^%AfZpcNU?Y%^ zkT!w}enUcm4s~!jxXVMy>Yv7>5LA-o$?5`AZ?k4cv5SQBT$OD|%7drXkLT4}%RwIR zaBaN=sK}0RYeU z#&~{i^9QzH2MJ@k`asY$hOO3BiCJN> zPGh@N=qE#{r=AT0G2#!~W3YTEJL=sPqK2tD4%6BGy%nvc)PcL)Un;qB@=5*zyOACy zfeuF>eV@G&mOUw~P2sJQA~q%0=GvY%lR{f{aH1~I;Sn?r_Dg!@14cpo)ypAo~Wp#un{p<;m4j^_q zp*}NNFiU55;{Wjv$g){1x`P@g61HX|d2KZLvZc96GezeLEOqnM!OcI@tb7%5(%fz?l#++G&f^=hDDajZWQdAC* z^_GVHh6+d zyKrSCCsY`j#UD$V!uArN9;AP&=4h&~8Aw8@juV7gMyN8kL!vEhKYwN`Fa5=|c_ ztT*G`!aJ`&tNq1qYwt=<>R7Y=73XrUto3GBIT zgX0ucilR_uW_nXbDU)POL&m0)Z>{B3wRQiEVxCo23&J-xDX6=+B%qF0g~+h*~rTBeUAC1Uf&0MPTU<^cVPGv zbbRo4J)7-es(Jma*vo1+VCoqNpLTDFU{QA3vL-)a1Gq5soREA&V8wT(;>hPdz-{$v01b?%cKi$#?nFQ&5!aM=i3 z-@;a0!Lmz+&Go|Ka;1yEy(}<(o_^)ml}A$mD99i6tlYiN@^k58(p3cy1$Ck#Oqow?vH~s1Tov#WkrWaqn@h$#`xr=^tw!`8rS$V>+Lie_JVY8Sd~l#f=!1F8 z+?xvJ*wejFv4ZE$dep_2&9)A4>J}!hS09#4vp2<1y^NOTsm#h8Q2~DOv0qC3N|`KtVYc=NQ%4!I-MQro+Mipw+TNQV9(T6<~UUR`F)3Mp7Psr;|{%#9xO>e4-lr1 zp};h|wzQx1XZ)0cDZZ53>NQZ)N>(y+<+M?U2+kpBSh2pwXx;_?uO*ANOG_i1(+yr& z@~*fyZ<)c_PB>~bf2>Rg>0q(zm5=R`N>49k0g^$jlNoa6gfa+kl`DXJf zA2j#LtQZGE=OnGW-B{|mj@y)_X!0b+k$o0RQR~-U>;nb;NNnfAK8G5ZYWoc-edPmg z<9ITHmYN5KAkHlyck`#R1A4Jr9Q({9XHhd|!CAyHjqiP>qhMidn9;~f7#B7KnQhSN zuLv(LL<9!BedXk0p7j!7&yjX^$Ad2S;xE8(TVnLlhdf}5LUPijR8AH=D*u-VjzPc@kf{;;`r8@}jTWXIp{Xm<{AM-`$UNBgz;q;i zTOeb(k+0AWi@v3J>v-E)&lV#|fe*V~49R z+k)j`Tk@o2+XP^VqcFjp(E9+>r0hq;nbo4meI=_*^QRL-cWHf;*mKlR^Jp@K40_}~ z?ym4m9+&ITS}ylUrLicPM972HEzMpLrpPktSIWy3T+Ttt1~C zjU-Nx!|`U=Q%Vw83#h(V5gL&OvpbVvddz3|I?PY6TO`~VdN3@0cQ&@GbA{I~09+io zD)7I5Os=fzs?9to9A-T?y2|)gSBwUFVL0`+aoDuDYEsg-!3SUMd=PjJI=bb-LKYTu zMmrq15_R9i8y@{R_@!{a_3s*g0!}p_Ykbo4eERU9oDs9t*Yl3D^cNoC+4T>ZwX-8t zd}SKsp)bG~POtgFw>f29%QM1H{_7W%;0C3}rpl`9`7eQzso6iX#L9NrR$7F-Ed+Vqu!?0g>aDY)dq*Fo^q`N~Y5kZuYROxOQat4rY5KvkLDFLOsVdxI&7&>Rj z0R}v;-}mja&)#dV^__Lr`GdtDtc3#4`##Te-@p61lH)S@x6GvVM`5%rEKq9=6w_hB z58K4qMZ+p(dTLg&omVec)O$OvTKscy|21*{fA|1T%MoFfk~hk{*_%?D>H;0#i0$MTrH2A3;dF6@W*V2hFA9I zy?X);`|rNnuye^mwv0z&Zn?|q*KSDPR!sQUd3Y`T9{dXu$xATeaDjkGT3V(yJ%FMY zFfK6a%kK!pJ8rMF&}B40k%cNO85v-0);!_gE9&4BMRSRqzmSw+mO4FSPT72;4QaG* zWv<#gpg*2Qs5MY7D2=f{->Y(>+#gK&NoU88c_L1=f419K zw@RI=N}VEN>#gQ%Jv;$uHf>;NWJ7G!fZ+{6UZ%XuG@p<|mfv4nXv1)bWo;SYU zV|>nljw4?@mg)18F|jpYnD^G+X<0lXhiantZ?pPQn{hTea@O{Xc#KB+C z?)P&OWwG>=eUn=Y_CU!VmZ;O5$lrO{pqc4sdMxihA4`AaU@4Tj1=_GI#DS0cen3@5 zUIJuI{l6gdMbaEgI!|^F|~mRNTHJ>tg|_5RmH>u;yi-c@&(z(4s+!vry;hi6+e zJ0lhTOwY8!=J>jF0U0$(fxEjg>_C!UJglmSAYQ;EMIXU>pCjgaMH@2mqoF)BkSn<( zn>XrQnvNg4J-7HR{N_E8>f)IsI1Z3xx~1&2R(`VQ>Y{lP)gw(ukV-e_&Dwj>&IXqS z_ykDC)N}Wj*`za+AA}+4cM9(^86bm915k}?Kbc8>w^UAg%sR2x*NHzzk=^|OdZ#EW z0XzpgT*@AgcIgb>6|K`_l67!A8Q>t0+6vB@2+p27d7#i+Xii5hy>)c(rI$$;$J~PVWvJ!q0Gw9|0r5gPaqilW^jk!CzSV|n=&9lpkn30DYF zr^mZZuXw>}%m4blv=koG)yWQ`)Eh8w&2jsSRzt^h^RjieoCwMO^Z{q_xyR(UED#T! 
z0Uzp#IF+Z`km2(y=HKoFnvQwPnuIYw6XzRw0!Rexnx@kPOc>M;enf2;@PFEVx_-)_ zRc3#nDaH}ivB<7n_`6US5ESXBLyI3}ATL4*l|s|U^(4Vb)H?SrR?NrYGK9C}i;mvH zHVsjQyMg^cG55{>ep&hE@e{`%^=Wjo6f_RLD;6{Jv*kP(S=`4+GZu$PeXIQ6Y9L3G zW0Wie3~acEdjPQMtI*O;S}K|d%<)U;5mx~RLB830f3;W(6mt_3IH+LQMaAc?n!L7( zJ#y~<&Wt|#ugSPGfLcIXE+b1MW!lV`)?oMEJi+}5`$SF?q`>UyT=PI@i_pK28~MxR zV#Oyh!t1H?tVjuNA8ZM-_!VN&fgW@nm)aswgH~y^ztiqmy4*?cQk+JTOZtvj=%mYV z@st>E^LL@~vqhb^i=S?BMI{UhUiGqszObm+5T0 z#0;yeVD^JYgXi zyjRE_c8C-8B52=5_%xRwwY3WW)AY8JhhICu-DCf1vWQY=v}!6}V~Lzcm_89dpXR4t z*ML4?k^Y5?l-TXc4%s;}&i9qyKciPi)3a@tR?O)cm#IlG#y?eHe3Q*Gy{3|6!lJE) z#I39@frEC{5zmhRxXqOoq2aP^>z_2=bWPUGfEXu_mB$P6jT1R$}7HC!}zc zG66KRn?f~K@7W4zgA|FhdpF1(Mk)75y$Gsw^iqr1PLB+T+WxH?afMC47^y$`cy77ZO<@XN3w%GiKHGI73G|Yu z<3Hkv;2Aj*|7S2#8-05HbG+NUBK~xa3Z?zVFoDk6MI^fDmd{@!%^1}_4hcUxovKgN z(DV(J&|x5u&P2+E>>;eLC3{L%ARfv@Gf5UfeQm~8rGcY@PjZO)O`qvM7U8^SoWj}G zi@ldinznrn5ISxS;RHP87?~)5m3u1csdsbYqN1WC2tfh*%1$}@8Q3;qVSH%`5OMO% z!S}c3`grEIt&-+in15>DAPq24UvrR2$A8{z$(LXQxZ;*gwrGAt{sU#3!|!tnU{KY^ zpq)`G_#-G{ZLk7ku3a15F;`t;tTgH~^J+g997CanyquQrD37ezVWtd!?o4852(L^W zv#fZ^+9~KS{&j$*lWJd1>=_JVuQ>Hqu3fO5j`0Co{ZWuw2@0X+sf({AQBN3)7;;U< zfF6MdZUwL^$z4*8q43dikIA?d6Y3VFpWTYu_kH~l@}VBn%T?5=qpS^M&eR`5ps|x> zhnNwGA=8^5>@28{2K{fGO_Zt37~$s0tN||a+?T^}SuwLH;sCVaNLS6=i&!F8LXjH1 z!Lo3d5k{v0Mv^Q@voG&R&po!#b!Orjm2TcI59aV8?tlDdQn5JCV3B%8W5z#)5)7;% z@7MJw9m8-1kc=Tqa5-Cmi(|@9xL^-?H8JNg>7+SxNJ7x7#ZDA>Q~)b?1n83X6=+E4 zkw=#F^X$j`H2t=GOkTyzc_25ff=jB6Z0=`Tl)F3h#l=g5jc?w87*}xKdH}Fg`YTO- z-O5RJW$&tN$tea2P6GkRUR05W0*j*Zwc_@sfW??|N^nf5$NQX8U=lRebGFGD$&>YR zZdf^KyBOh2%eShlz$BGPl&LRmT5>JYQvuyXRTMogkbcaQMEZrc_jDyuf!R|Gz*%)o z7~6WId!o}453+Psi#UMr05c})mT`>FqkNy|KvU0IM)s3s)FFrrShKgDr!wfDi9%{o z+x<^@h{9;M?FU<}ITTkk!gaL|V2_?axClNEf!tu!a$M@AP#sNHsNbszy|YguGri^z zd;E^8%%B|Si5~l1vp95K4q>&G1<}}N6N9qnIimRgnFmTO#Q7hu>;LvMP>&!~2QG>_ zdDFs$Nm+BI+$qqU za%LARn_py$G+&NWaPXwtVy^BRTY{7JC#3>tvTYffl>_}A!tF^uQ zK$*$gJa?Mo&p%|}pAn6qf1_?u8`JY~&cKbnpl3LwuMAn|8MznIrM&0r(e_<^j~R<5k;p4lsCF|-sbSUeyTNl&P* zXe|MLr0j1wkvt9XN~y7mXqsq8wyJ4#WFi)K{l6xs03HT9gY2oN-M11mO{Ty2YCS4caUMz_6{3U}f+o+0+yN@zbWYgWfkvnAH#;fi z_TW_<;}x=gSZL5Lk5DRH`Z_1(RwSK{?b`ow^YcSBG67_v#NOL%0*q%lbeb@_nZMWI zeL$&bX9`i5MG#zFf;FeqP4`rFf5?>*D*vjM;_DaZ**Ss0cRxfxiQ8%tSK5%zz?_O| z1eP~XuEd|$swxtmQo8=c90FHkbk;sm zKvOOG8#LEMuZagy8+}g6ZSdh(T5-@p!;MB687a5zrvY}bEW_$igeUC2Db$w%y4qCH zm-WM+4QF&6oZuz}&b-n)FFm-O;GYOu9j;eq- zlBrNdW!S9f-8_R&D$HtBpW|lDC$H1<->>o0KpPRho{RajX@(^1HQaYN>~KbyKt1Yb zbY)VFTEWTP&S-gMpLMm-szSPXFfU~&i;fz22`(G3-y-C2>8VL5v7dQcWa^IUpI;-y zq#2puuR0!4^-gAEHt01GM=ox&i@}g`(bH}p`SyY9w^)1RZz6PkhW3g1+{$GHv+=Jp z#yHi&T%vg9w^%)9%1QaEBafY*{uNFXBauW;k6?F^OfJ4wO%GezFv%j?P_UWpy4u*! zhYH3k6o7}KD^nkF`Y$W;-+lbE?-D+_*J2Y5oCHl zrW)Sj_?8WI*IiX#- zUJ1cWlO9mI;~FxD-%nOag4wC^b+j=BvACO$T3B2I_H`pH0ovgl-8;TBMx*`0?d5zz z;^?Yc@A<>}j1p%utPt8Qh!qIqwOQH>ucwgC96jC%mVaet+{vaz(MMVG9p2wRi|dIC zq*`omzLl}%tWp2TSG6f+SHF{aAZZ?Q6b8AEW9S?!Cz>RtRcrKPMPmO~D`cwJdL zX?1Ne-z7Gx5`^Q;>H?wT0*g*_wVBZ{Qo80B9TFWO)>FEm^lLoM^xLmTSmPcY9V{-+ z6GU<7sB`6(SJdKzfGZJ$7RG0d;fTfZEn;}7>OgES4m0+@W`vac5X!$UU4#7&dXseC9{4VRKZ+|U5!T`lrqkd-lp(&vkeq0Zx zW2%g*y;dAygb^RE`H-91elQrz!0UBU1Gj=jaMrKP+U2S3^r$c3e~g}NbC3@SLqMm! 
z1gbB6ryZ6E(AFQD9Df)XJ<`;(BDItu{p7x!1OaCTOZn+fv@q92?I}%c)h2SpmVTe> zRn!rS`>3A{y}BLek69w7Fi|SKn;?}2c0wsxPjKafG|%~CV5gL$tw%Yx9ZmDA2d!OV z=(4JG*|un%7qQBRy@Hia5+e{U$sS0#-IcXuITuFX&z`@lmGP&(pxVR&JEFRMsdyZp zXD=oOISm(XvbIKB`tuet%-;wZnQ=CqR<0?1zWnt(PB8N%)2bqn%FtgUxg%nUp}8VN zLr6aHd{}*Jlc)VZ9>jBb#u2905YhUFp0RSB0t*N*=Up)RVf}KDFoM66S4d)A#pXp3 zTZzSYpE8#KlDqOx6_|C=smloTPAdP}-n{xgQNFpDF>5lTGlSx=a9s1=fcuzvPnQm%@mk@vl)W zg`!Nv9jvv(fM$l-vLJ8Q=eBruTn~diWM}u!LSY$DC@EzSeJUjHFnZ z&(rJH$K_5^f9IYx5EL|C75EVQV7y6il_Azzy{XBT_pMCt;ag*&N<5?ut)#<60hSo( zATJX`-S%G0IMWa7HP}Ziez(>K#03nBk_TiD*&V`eyy{ z3Z7o&a0%gs2Rs5VlD4rB#<0}xUNR>kMs6id>5xpc7^80et2g5KPi-GgeIm{a#+UML z-)u<+ZmO*-FaCmhJz*$%=Uc$9i3#~Rj|uNF{83_2Rx=}X7!U1y2NF_sK#JhRat@Av zRezWQ6rKqMStkv?rNSxlHHs4O)bowq1U3`^As9qhSkTIC(1#-%CY5}k&vJt1zd5&~ z2gtK&DD%~uf|!g~bt6AZ}KOLMs8VA0-&wfp)DmUK#%YaOQ@trwP@vOy|0qE~^{h-qW={J*_`9t46 zf1#&wI97Z2hCC#Fvqc(nt$>#MQed#HuLT_)wDu;ic*O8L=mey`B`XLdVQxHI31v7* zW3X=;92Yz1=MjWj>?$~3pdh#w=wvd)IMu!eBf1tK-H|PH?Nm0z^s{CNC-Hu5s=CfS zaHeYp$qbt8C4KM-vVniD@)o-nkhuNX{B1zv0=hy>x2M?m#z zy8h@MRvmRy-faRiX!0rD7Ztg8e}wLV7arX^Aq8nDa?hlv77c4RS8Jl4D=~R721p(O zpm!f_&vUpur2>n<=~~jiHW82@_(V`l`KuF zv>p|elnN*KZ=kvVzRp54p_;>|uMlvL>PevEMwdXJFvTG|(R!xETFK*y_dv1;-B_el6O`?@?rg#*Bw_ehVqdf&Cflo zqlOP3FYqsM$I%8Ibq9z6UI8Lqlj^p-xUe|~U~>OH&Ph)EDJe$T-#Q?#-R(Hq`ZZ%| zr=us4@FCb-c8@|MI-J3xE@h_9nnV97THd4z%@rrgtN&6+v}!PtK}|sIjqb2CI40&K zYSRWheM{yq@C&UKzB%Q|q;y3z_(AQ!OJK}|IJWz-uI6WeU>Q={!>r6g` zGQIlNm}o3u1>We0Aq%W8k;lDdgSvONP^W3k@YDBO*4t90ivw^T4{p*I-ra9818A;} ztL5O$@hbcC=KA_1;p1o9WIXsBso*lf9vg7y_!@Nb$?M*bU;1sOX879$-ORkd#y9GY zV;3?EU!RbHxMD8a+I~LYmSeWpYhglXe<@xLx3~9EPkuK5dJ_W_DTY9P1=vTeq&b(- zdpQ^iK+^{$jr2gEjv_^gg(SZWWggxAnZn+w* zE#t;gsB{g=C0{2!-;&Xfo@vH%U@}$%7}}-{t$qhO9F`IVg*@A56p3jo`X}TVCQ$~t zo6*V9@7$7)m5OSLziP@dT`_9%e!;k2BESupCMOWhKFWUL2zeHaUspC`<%EZHO;g%| z(&`_T?ZU5bUA#f+(shV@=n%-Gl^82b6WwHFpJ%MaLDoyR4sGMDR#Br{q1+pmCH8xY zy@TrMIW9l*bYP@>VgcTZ&AjylLc;g3ik7Q63~zU(GNk6TbYk(=qjj=|RXrV=?s~W2RNy&Cf$&B6;R%9EbLgF0)MQed? 
z8ayW!xhj40TBEg@a`tP6!v@n^Rj2!m*ITG!pcE3wj)AE*&in<{SfzG*63#S*&OLOk zj$f2kd=Ngtd^YN@hRI*$Rc$v+&B?b@Io~~ICs0XfK+1P%fB?k-5KoZ&5gn1A2+95M zn8SzDalBF5oX$L1GygwZ(nW0PYLy#mJK4Kxx4KOCaX4xzH3uuzxvmE_RsZ{|!QbL>$XALJJ_B>kI#amXnPqQGL$BoidJ$i4-w00&n7g zDK$1YgPAgx5P*bvt`OSq4YBp_u5G^c?vvqpaA{l!b^(+u!=C%{_xxSg9QixBUQWnx z$<^9AvOAiA!OA>w#M=eFZU+Lxl>CtE;9dVdQtpWY$FCYm4fk!=e z=2~Fo`j~E{7WtkbRZY%Ms9pfc)Q=O5!ES8Nt2UNLU^=<#E<-3ow|MIB1tS}p<9{9j zvP_}u@PFBJNL3@R`5JSYb>Vz;h8I7(%CbjF>}LyaI&fTgbAZq-Ichyhw@E%92s%$> z!dA_m*oe>7!e!n&2FM0QolkIR6a?@B-iX0j&dH`+N(4$>%gpaaXE|Ix(3zD8t{js*mDg_?_|u=Z3q$F13Hx z2;kB63T_(pR{U5^tbeI`4Db-Xp}fP^p2f<@=YLWr5I0mofES`p2>I`Bb0Iet)6RA4 zE8_+Z>>X<_K0K|=%JHs87F63C`KzFRrjFHPoc-8^g6S+R^^k>l63nyZvbAVLB+c5w zRmC=u=VP2IM*;jLbC7?1k9(W+?|27oNYlv55wR9-Lf`6*utn6HgKA|7xsOEgr9Q1~ zT14Bmz7I=0eSR1&4yK39^B6zln5QpJ068F2C1rPSzoNN%ei@9gVWpdblEm2Fa$4O( zEU~ioGHJ2ZIfpEXps5pW3+HU*=+lhD)Nd!a55Ow!hTm z5pPunk6-jF#>4?F_i6VE!O3Bn0#a_1Q7 zA%Wbh;OB8ZiTtA<%&8J z-nQl8)f;p7KQvwGm&jGph-w%qChqmO9pay|gPfc-FT%Mcds!LZo%PD4>}0cH5`p1v zo=s;QYU*bZIQeC8YQTue*+bSIE71hux>DD{S0p5ADizZq!8SB|p39ANh{1sP3 zzWzhMjpOCe-@i-gnyu)#xW-X_myf_0<$=zsjZ-=)#oZARrH_#ceYH zSt=H5D}W+i+_ssa?Anihgdn0Bkbvx&>Y|;yd}|~8b>d5R9P>0B@5V`653dw#_Hhs$ zYrXFVxq9xmR)z)T+l|}nOXY_)@7LTuSKi7^V0B!_7O->KY|)i7mQAe2cWIR4X!>pZ z;XG(wG1)f$&co-G@JD`0k~Wvr3lqLdN-7@gS){N0D_HHxM!MvKT7yNCH@_ahA%#0j zh5k&anjrDc1MXdXZUgYB?krwu$RG47+ z{*XM3fw0O=ab+v2d<7OOZc2kXt4ZXGrP?(-B=6-TvbL3MXL$KieJeg6^cV(fCZ`~G8Xp^Q#KQ%R-;q1xb8#1rr#7==5s{}m4K{Lsd z(?>o0gaDl@JUwfwCf?G~)qz57qc+I=-F)yF$l^U81$%3*y%#AVKrh017)SGUGP% z6MzSu_zUXN7X_YFX4Z@~<=grKo6gq_lSm%UPtq0S3iH))**h^wl0e>ju{s_fYPF@_ z5TyxuNig=S--r!pV_1ShPDRl-0mSA0>gY^gQDiOL>8C7v#B`a5YqKJo%FS)5VCFw;!;lSIy7z1I? zL8Zw5c=xM@{Y=xSjhZ{APJ(_nNo;)N*7^vEQBeRuP#U-7EX1%;um(-I$$e+b{Oe_vk@ru zyLLhb7&n?_jhb6JR9n;RI?(Pi&$g$irDwA4$!2Y7sF>JjY-&h6w1}G%Qd&PjNYKe{ zPc|{FY4Qyno-t8e%`FLu4{&gBuOrK+;j$+yh)|DvEzGtv-cqtDL){~vkNaE+dI!CK z?Y6DFBY{wj1datVj-3KCcRLqzffG5zJq8hvBp=U%O9R^4Jx0nP_0IQ zL4v&qU(oT~(JjQ5KClvb4F$-H4N;FcK5=9gy^pc*VE=#mMqCm$cXd*pV$_zdMuee0F)WHid)u4Q5A`CyEZ(?4OnF+2H@l ztxFOy-Xfq4y3#Knx_+j)Arar{`tu{x&zaER?5@}l_Ib9Z;(_PWO|N0zK+VC}ba2q} zvr&(lQdjn(3yg2pARb=YE$?=7)3;o=vI(A#KhT8QJzP93To`Ut5w*-{ug9Big!ee z(NdIWJiDfua9o-mpK^K-G$UA2fv7u>G2fD)Z95p%|GsNVh?<)Eh| zl>64ACpzY+3gFhQmE#TvPNn(L1OIl)6A2xXQ6T}p89EfKr-fszN+VWY71cC%P^_`( zlTi6U<$W)gE$~L(i2@28aWY!PP#fEZf{$CH2V7sgwS3n|jZ4Awprz|ama7O-Tw2R;BqRC{s)<5)$ikJQVvU&s~I zUD{jDH`|8&-Vp&DrB+lV)WHLF3rKq4u0XNR$GMZ`o((CQv`1jzhvE|{hdrvQ*QA2% zde$TTvyj6W%JHJVpw4H5f;mKh!X*d+Bp+s^QG1BM2jM+ZL$?`UVGf$bIRHy!K`5mi z5bH{ST^U1iNqB9{49K{#%f*olRvnHYP3v_Y3Cx% z5U8GYk0s`(LiLoo)cOSz+dAU#%%KBoEYU#ND_CpAK1+=5o6SZ;%Ivg=^QR=SI7CP` zPZEl>N2B!~#*2wslUaJZboQgpy+H$_?g;tEBOE8)AkV)b7Oe2Mw@^RLrr+ZSj(l=U z2~M>!s-G-BD1t%9-*HdN)oW1BVWkJ(!(_oP9@B@KoEIJh?W%FyX!@U_8P=MYW1l!W zsAMpG6g!Cy>-wQ$4GP_|X#-^o4tY{9SBZ|d(=R~1B=dD~zBomNJFW9|#{bX+xdH#I zNUXdvy5nj79K!zodE)R6S;x_nT{R{M!N1djzAloV+)kbhtIss4C+)DxHr z^g?8>W~|X#9dgXfI8$Y8V?T4#m%vZlp5E!{?<`;i$J-sphcNTbRFN0XZnrR{nMCPS zN+~_BQqARr%aH&1H}MLXbg{6t#`t?ChNc)Hq-srlm(arC{tEbye-zFI*I6#f%xd_@ zCRyNe{4oAWx@gStf)U5Kx(Fe=v_PNaJFQW#GQ#t?QIP;XV&*-~iPHUzFL`lVYyv@_ zK&t%$!hng%UcAGMkQn6j z%&Wb3UCHv_)jKcps!$r$pcGr|SCjXHFc$9HfOiQ;AW*x*aRbg#kM1LA@iU`!j$%_oU1z3qtX=v6 zdGCy62Xj|hJK*(pmnXjEUcm6E*G(8{#NEz{5#{s?7>Zkr&*G~yQ zX>pEYfCJ9Vm;WztlB)lsD?EVxS_CNShXc>Wdph}3W6x^z%&ofDZ@||EJ9X0^T4F+b zHLiZ%(4^E!9kOC=C?oiN){bkvls|)YT{9y$C{3*_(e!kn6;D+vcCrKQEMS_cu4FwY26n7-HgdJwdUXe=Xmw1X zTW^Rcfvw{OP>&&qZ@VR(3Vg7a7UWE20UB}jbkU$tN5Xg2;bYi||+{cKHFsN@!F2#P8Cd}X7XZ5?!S4^Tu^u5h@i4GJ_ z>|qXMuDyuWQ(zgN$8W8!ks57>-%6eBXuA@bDPl{u;$xN=(Q!H74NU;6FnxDY|7)99 
zEUD`*UT|>>@Q8CyJ)u@)pkQT9)Y7uamk*&ztdC-Vd&`r&oUT!b$@NQE?@j@1CKxDk z_VM&JL~Wk}`ILwjf!dn7W^ww^)wzimE>u)*FChx|DdCb=Gr6_<7eztGdo`iWGO2$- z^dNKtcfj*;M{(`CUya(e)D=xSg$|*X9L#~=N=ENYr-e@DQ0zPf#Dp)x*>CZA#4=-a zLmqdMC<4D$4An-B*|94ygK~bC<@#lllYEq^XqZT>=jAyoLH#pxp#7zOHT@)zYZ?iZ zvugEqa;~+1Th%X~Fvgfq&?1?DCPYB40&B1ksogPvvYO8!+TCI~p-uy2hjC4&uqbAc zx?nX+H15Ius8_!t@`43il!E1QXEagPowvM``}}2C$#PRnJ*6v1MyJdUF@qSqNV<(4FOFFk!&@B_g>g63t_vMbGMipwDGj& z5$=uDb_H&M6_+dJRV9lZv0 z#b*UKm(J+iyD%$o4u#!dksmCe(K9ATD^cJJ`LIimwH2Ug(>+`w);Qnuwl zm{&y@gv#xt#=IYow_BpK&OT-}PTCzIe_Ml9!sr%3`PHL!QqPyp7kb$eRPvuwUO;6P z+kuSIB()iO3v%ObwqYh8IMI46W}9;cJR{Zgb@MEdast0AkK|MQWaV}5X#B%-nc%bl833gHp93(bt$0|5DH*M=iY2kn=f-)04JBp? zQ?*@zahi~BkWc=dMp#5Eu_!-y-f&^Tq|?XIZIgGb$fP_5u6PDImhY(h_a>Vi1i<;- zz%qIaOP`s5$d;@*a=mUID_?6FKQx>>X8)8z%KrN*UK=VpriFgmQ~8ub$NqxBgn+3q z$~CU*5jzlibN6k+Sz|_Uuu+dXI|lu_`c3QKerDwO$D|QYBT^tnGUgV+8k7nOAwxE(hCyq3O+(h@5D`~K? zQUy0Yumk1t2PIe3^@SLiw$TCop@v2%gLi?8ADP=->?8309y&u3SQ^=m*pd;L^(%l~ zv7sWracH(<$`SxnsH8$a0q3xT&TgSkF61=azA>Trjr<`k^yt{z5O?ofXsAatO@@t9 zu$ExkW!&9MKT=qmHstH=s4-Tg;XIoXfEUep=cDuINUuJG`~@*lt-xqy1d$s2Bv1Xy zp$T^9ILoQ+Q@`+Ccj=cOEiRaaAaIzqwQ zY*uq>>((+i_9?K?tA6OE|Km*{>x4zS=Op*t-b!uNEY+WH!Xa-=sP{ZCjeRC(M%(*sxc5=@b&5s#OC z`oc4|OZOO*oCAe~yy>^5c@<31S}CQL29vO1UGSXDK|Ddb---przd~ltftuEzCi$IB zt(<--ld7a=V^1It#?n|

MPv zcagi?-4|Usv}Uu-rNhnAPq|ZEilfwIRdj*P$KfVZem(0q0^2fxO)c|>ZLOxDPg8-l zoQUUtw0Z>Sbvj)Jn8S;;Z0~nT^MN0;x7=lADKLaZ_}j1XQyB7=RTgV{5{R9}rs#V6 zCV)n2Y`|cVDqR3IVsl}5TMHp5yok>kVV!3 z5=BQdI#EyhoGvr!-8uhh@vEu9no1(6N4g8L_XtR2eKzae+!a838qcFlQr=j8hC`;C zgXhl!rt$zg_gRUWg(coZ|9>s4e+dGdzaUPbk-2-Vr(m;JGre+LW1{il%)D`UO~;2T zy4i%ENx+YLA=#P1cjXI9c+na?!!fbtBjLD=Y}U+Oe5%0AZIwSTa}^&?1N?P9m~GnU zVM_jjIJN+>L%Dc)Dpqza%e^ych%dTu$mRt7P@w&CSdBI2`mDrvE57Mfxj}VY$xlt$ z5;8*>?r_A5129l;P+(lV|K^-J*^cJ?Tx?mid$5S;`4IeV1jxGfA)Gb8+&uo=Z|a4e z#SAZ1R7?;h8R?8@KTx3|Jc}M-T7wL9X}pf0+i3 zTy^P{1%XU1gkvM9RN!n9C65dMa$xpxIzDu?=KWo&@mGncwxDn^XMT$gq z8ZJQ(pvT-%;ku~okcYo!YA$LT_`@gj2dHw_v_)CleoUXl6Eo1cp&w8;P`d)2TU7eSz99hfxwpas< zKHy1c7x%4+X6i2O{;u-JmaZ!(^8qd;Vrf`49|Sd_I<*d!BgVYk>NIrh#){L3i&LYA zII`UzYCMtvk%8#i%B3;OD_K147gz2!!D_3`>f+CiTJ0a0D$1NReg$TC#mkLvG62dZ zle=#4`K|;-BFSGSpqMhQ*(5@liwSZ6ryS;%6kV_rfrtH6`ZuZi^p91Cfwj+&thO(c``O|Q}0Wuj59RYr>*7oQ%E_Q#^?(6eFtppoGAJCD3M;wZ%PHudGq9h>tGUSRDksi`AQvU%x zjBQY zttfiyV^v{wE2VAvoAIhwOU4|0JF#t)b1l&WXuf$c7VwOYVNyed!4?DnfCNW{HdCZIFd>0d8H3W@SHZgP{2X{Q;_QnU60>1z=e8@ z0>7P5g|k)J|Do{nSwCU-BG7lIXmV|5+9jQ10nnW&p|{K`xARP2V&8%j}{)FaqO&d0jcshSK4oDS! zy>a=)n&ESt+Z_)^|4E#6sxz+kSZhsrlBz=Go7xPga>nBEQ11E2cE}$f=M0IHBgN<- zIt=sAGn{3c7fY+s?r~)f3q={>QpPZTD8lk%Uagkex2MwoC?%Q^*H_F;6lPVRh37tI z{<1X<=K#k3{O)1pf4ntXdMes!ayieQAb6KNh>Hndjqwb-*QWTe*&7M#9HQ(<*A2am zW*oh&Wq#T-Y%KOBp)FgH7IC@NyAA5*1uo9_`~}U|mT6TBcKvSqe~TPOxLC}`r^$xP z0B7`a(|P%Jemr%;JUwp#0CuqFwqC?u*oOc}>PP#t#XHkWOq2U%{O{w|O};!=TYcFP zg+sS1bQg)`DZ)(rw2ek>y^cOW(eY8SM^Yw&XLhcnR4m8 zp)$Q>jDIc@0+P8^;Ji_aR$bK(J2k^IwVQiJIk))sAR+KFQ(^%JmmS=*t zqF1khgy@9MxI@;`+>-Jivm@>b9(aIjBOL(uJqTd9mbJU`g+BPXbdFmWw)>RBY1II~ z{dLG*h?(Ro1s1j2QTpM`C7h%o=Um%EuK0miMq#n(0T=0ap5+;Z<@u-1!AoTH$_4Yh zWqf=@b|a6!q`8y#&5=NWIajS!-m{tvG^J`oOPw04s>{fOj^MG1nCykbp#YPc(gN71 z%Eawo5I<%n7?ZP_9c_=+dN%6FyhbdqqmnWq{lzPcgNNB?epzB<)if0N!HfVZ6De?9 zXe;=4Nx$4f)F#+5i%ypva8k63?#DBftC}Ei{pdT(%`omLgvyk^$(lX!+k(29u>!JZ zLFY6J#`quV*73Gw?TgU`?MDQLQ2jSe69-$h4OKNsYvLzgdR}R~3w9lT1U6fV`~9P~ zGk}{N=1?*3lh_~23Z%s1NCF~6qpubB=pE9YA*R?^r3)5@|o0QwcnD3$SNEdUrOHcK9--7JO3`5mNU&_m0hb z!boKW;Yv_h_;*oXnl~DEa$Ktmd0hp~)9zohY=buqxfz(UCR zQf5tkw(*D0*&f31@~8lkY~tKuMr;Tz>Cv`r$WY_;uT7a24xV>C-uflE9tX<;1-#;` z%%AT#9w?m4b1kwm=U%~CF7*j-%ktBBzor&@O2{$yDeYCbWBxZToG?Ea_n)+toZxIp zswZXUR=5zOG*0zHAy}6GM6qE&_?yN`RX*NLXl+W5*KazYXd#VGememm?n-Iq57IRv zx?XYj(pEY_z;yfn;=Y-q3d~#6NpLZLDtElJ=u@M{dR)Epi_ro0>tsq00dZf`t2+ytL{LR zT?aVT&cNm9w&x0B{nZQXgtR>EzdvV-rK+J>Fv5#CC7dtcQD9b94xm{(3JIjBM)l;o z7tAjh851Y}uQeJ9+OTzfcR5X@hXK4!5|b z;WAQxiWilPDj&-$*dGuExU1LIL_s}yRHJlBVjh|*(m_h0W<4X}Qlz0(gqm6a6n3!; za3LMj=;Ilj$pnktN|v*N@8j&SR55?GF2tM>$98$&SsG*g^tD%SVP9WXIGAVXHf0h} z2_6(gHQ8F{?-K1f?2fCkuhhNDw65_>zj!N00DaQaU*j$KTL&DS)_)z7^e;&joK$A7 zXJ24kPM3?KA5n$mF!?ihG9uq{>flbh zr-`@LMD|nq#RUY8B^@_Y#}@N!E@?%w!32l0=cFUSuUP#>scE1%}IoHO-uROjTj9EIzip z`}bU#_IkI6?}u8N6yzAwPySz=y>(QS{on69bjr{nU800^we`vW?%L2wOG@Adtw)F_6mvIzio=)O_XSOSi|ADQ3P92lP&X&1{RDot6)gteQW zeU+3#dbX$a-vrP9IhOgqMl~DW|1YK3f2z)~4)XB39yqO#Bk#sPR;j!tvHbKd4VR3} zNck_*5ymv1H7FQwP7rg;=u^0hT>dV} zTb6nibz0 zC{`H&c4#NfC3pdWgZ(h{Y>{s>FYdzkLXq6(Q=pVSz5bhb33jT6RAEa%QMvl>%xW_fPI|6k=PG( z0cU19p~DHTcHdAT^{y_thPpP|)Oxi~_1Hn+H^y3yOgWRFfGHcJTZXTV@oVELQ;R*d za<9!^^k%&rpyh#sj0uJlMTX4egt;=mW~Lh?t*8!>s0NfPyvI8IOmnem+%);LZrJ@k z2dJVz3G^03d=z0Xp9?3KRWd;3IvpYu{Eaz+I=Yqq0BpO`|F6kPs6ltqJp`V<9 zPM`^3jAQcC5UYI6N?)F8Alt2+-<5aaH`lhf6hHN5C}dX$ZQTVV&}GqgXx3XJs9^tr zle?VfmnEx#bY+1}2$!2k6$xp$Jl`Lh#!ym1PMkX+ePV@PTE`FM3eAc`*`XY*@2O4R zeAO*bmO38tT>J{D`fIFjk76}%u8*{he5Fr;PfZyIXyno$>G{||P694G>rM)x#Y{vT zdaU+JC8`Y75nzRG;*X46_SEnVuu{Km?~cnBi~%~1=KU{XdIpg-YZ2i+-ZQZ0r%7|0 
zv+uFadeH}nwN)Te?#2P&y)^`D!-LE`Pf>!gf{hlCnEd$~yx9jk>c`-+Hn^a@gpqDk^>YbP+owD8M?!KZKN2rN_Q)p50&|;H3xuh z;)O<@>qQ&m#S%!Pqe|DUQ#bC348NFKZ@nAc_UHcm1S^v5A%pZm4p6<> z5{GCczMiY*OmS2DNE2Pg&v!^zRc#k8RnQ!3pNP%D8wz3u+~-b``zB~CpelnX7n8~F z0_{(cidmN_!mL6BHNrFEhg3nn$T$`DME6Y(qGu;cD}iL4``NpTez#(xFNeTa_;-ER~SIE&92<9;q5DsXU@*njEjx&2C$qT^?Y?{CL^ zJt6cTedt1kEx#rjsaxJ@)I@B!U3>lJ3WU zfktc3am4`l5yj|Kr7J&8SdRj_YVkLcxr(k!d`iHr(9Aabn)&1|*D|Ma-OIrkW5mo4@HP3FIZ;^5po|DgpidL&QIz zYoHT@kOE>^l+8O@W1t^IaR!03WI~@OZ85#lFkOgJt0RUSfa`%ew;U(1E&;|<06Gw* zr8N~~wr-_j(3C41{Mt?|#G1K~JtvYgVN=n@Wnaz?RoHJH9VIR+AS3&|=J8=s7|sxd z>K0#`SAe6u^q#fk#(|T7oO_M=;xi_p{y>f|ylX}(OaSnQEL6UGE&A^p4D4+RP_1TO zv^1x)90==w^s?t1Sge77>IN2Tt{8s?k(X7A+r08)b@qR)6bh=s4olBD8V|HzP1ZrP z+BwtpTk$#_exbDVPgB3aH|rR(%32%oh1;GN7x|7X8tvtgp!m1ZjB?X)hNAy4p3LnRU_R6h- zoAuf{PA-DKm}I$msc!SIpvQts1*#jS#PpX2P%XSaeh_lDh;MacHT--CTLbDN>S^ld z(gJfAn=$Y&;eo)PI-k~6iJq{7DE0?iZ8ycKh70pMI{gr`6;Z@~7F4c;M^lHW1Wr5q zFBVp++~I+{l!k5zB}#;fk->?~)eZJUxS9k?F3`n?c!3DhXNCJR=U0-SMr>l z*A5kc2U?iuI^)7iN*bzjFy_BHaF0`bZ7osNy%xel=J(+8oA7;pP$UZf!10{*3EURf=GyeGso1ljs zhuN?Nsn&=895$b8EYctqUT#sJv@>+fXO08~^294hKwn?>2P9^^GpV<f35I1fiuc~k%d}z&TqaQ@{wIO+}=Cg>! z0C$_H+*rfwC)~CoA)x@JR&Es#|BtZ_UNe_u6%Kk0y5aSAjBw%*{{S&ak01B5oNs2F z*&Nc8nkz;%w3h%2(n?e6X_yCK!vZWwZj+N*o#w9%jm=t60bs{Za`5J38G3;7!RaCN z(6pt#5RryC_m}>*&!E56F5inBjxb{~4kv)fg*ZXig{;Q`%to50(`oH|^UTb`40?XA z=n+xw*n=#mp`G7|y2~D#?e|#jCS91xxmx#df0#?IM!4Jf@~8!X9(4H}xcX=8mDA#j zYTt_yI7`6rc53EA19PjwE!-RBu2bKF_OD`aeQAHMHB&_QmE~Ku3o)z{E9*%gtfB+o z`|)bq@meuE3pupEdh2o*2KB{&d#x|%EizD*VMb3SNi9W6@p8_?TxU?=@#^|`@w&$0 zWBj~8_Y1vd{Aa{eWG1VQz(B00wU%|kjg;w<6?%Q^C_l;G{pE>Tr+3|DK47o|&}af^ z;KT030u9v=+5)lO7Xw0(q{~rWo>wMgHC`I$QngLBVoQ8WW&$;??3qOGi&DQ~Gw9e` zzq*k&HRp+NtLd6x)ZsoTR5zsMt!P&b{ssfmiV-~<-5}^p)i%(6{QVt5ls#7fE&3(N zF7hRZ)dz?*&i=dD`2Ww>v5s>|;h0CsBzp(;Yn6PJuVRfEYMAZ)5&{YE)kq;Bo=iE%E%P(Xj}!;7pdjlY0%5OVSKWNL?^{U; z!?N@VE?sxOq7#1t+ZngH1HlL1gG`*Ak$FRxzIB$pUlg-MqOIT*oVI5_>ai8xnb6sI z^!PnZAoF`Doexey$N()ycRGs+Z^(s}Dtw3HM?uEiH!xd@GHyZydeg+^A5a50#@DlW zFV9kA7$dKJR|4DgnL$n5YQYs&I|EMK?MCv+lGXl3j~(6>*1w&bbb`I6#V#q`46Vi& zr1_tOzwqE4Z${1xUbB3XC@9|@&z!uS7Wv84u2xPwura%V-6?xv3G`S5V3;*6554 z95IP68@g?MRYGM(iPW~F={+pfS8#Fbyc;jP(@~7Gx~&Ns%3I?}5Kxbuup#BXFvp?iUq(piCf8JjPf zX}99q>I{B`8zL@hjxqNM2IO#xone-L*PU_cq1tE{%g#}JMDoqKH5}e?CkmjLEn4dI7=#Ih-E3U;zE?P=L?-W^UDzdI;&R2e*Q~Cq2L8&E$f(N=D)4kWk+rt=&vr#5weQRf%O^1 z!y5mb1my`{TAN(T06^uOoo<&sNA%sgnyTSU!^lDyR$nWxj!O7)=fU^ge6T)Rxdyt8 zH*3(hXn4Q6Bh7#2u8^>j&xie6$8|zpel;vkW@o^^gHuih69y^Wn{MMjwGc|#knZho znp9^(1UP1|-hKFld2~-3ws)HotjAY@o&s8u!@w4*0m^}el%WdRmD%Ehhb8rn-bJ_Y zsprYqU)&xjDO(Ck?=%|JM6Y%K+&IMggb61QUf#vYohuGi4O37+NFJ^?R1kN@nZB;egKf!`|AMT~N)i zHrixETk{8FyA{o-=tJme`qKo%Gg-4R)|eWZuEpuEi?0fd$L&;7SGRzyM#};kv9=l~ z;P$v$JGa2)=@gMLhfIp{CM97n8v^wQ(hST1M4;&Iup7E>wskPP#doEOn~u}n2Y@(c z_ccIB6WDKy9I7l|cA=}fzrC%R{RAKDrT<2wG0>=#C+S_KYg{F9(sL#^U$C<1)5&~o z`~%y|B#ME7>vTd~YdDWuyq2Ng+1&@Pu}UJ%f1i!uBdF)4X>Mo^I0O<`2+Vt5tg?1! 
zF%hOYFTP2r!17hYOD@N%yE2jZ2SnuqNA;m=Y5xI13{Qd*^V(0kTj5g2Ip@cgubsv~ z0i&y|x2h&6pw*)N|z@}SC>}IC4R;` z8=K}iNxhOL$SV#=3z<~lpubnV_wG{G;cs5_-CNBZG>uYd^;_5W+A*5dWc(C`*F^dd zgr`f84+7hmj}-%c>gH(aYUss`N^3K+D;B@>Wxd<7EH3e8P_OB?qg@f{;U*VYJYbx~ zm`qy5!Z~@^u&khAGMe@)R#NDuAm;U==){}c^QDk+))O5@O!q5{mWhHw^R0!}@%14g z%RitFKq=B@GOS4HeC^s68!r)P|v zz^>3!BC?J^bjP=?Mv9r9eS%oXescPlVC~^LLiC*h*TEruLJpocrNrEvtnd(GE!xoB z&=EdAzp`Q@MTm8khl{O6HAyuC&)%)ZC_>Yuj3vL$yzz_ zK{sWG6;t_-g+_-B$pr< zYg9~#mbLed9&AMR32@#_1Lr4}IS8p|1?x*x8wl`=%edus5Uh2GOjBnLQB4l_qg0(F zB#>r8_zij~76&x{0Y*+ZJ1=0nEa=O-&9_*KE`UvSJ#C)XPG2UuYi#mI(n`)+<|kLt zJV-^&6X1W>oe!e1m&mhshj_blb$OW0eS4R{M8jD_V18YplR?5FL^Kwp$a6g%JO=Vx z5l6SyK%1YXYqmDkH#KIw|GvrhS_yy&zVsX+NY+F=di1aV_u=JT2$E(Ih!L1?2 zZBWog-A+>DFra4it1?H=sLj+gUd}z3XZ#tpzuR=asKoxwbYpu1zca8mR~A_b)zUEY zIUw=pY>bmk3XDvg%K(I@%KM4{;+z{g(+_7QzvcFJEm!k)E%`CwEv6{*@bz;qnbdIT zo)Q??ZhjG%C^Hz69ES4wfo5GB`ifDQJ^Q5X)6n`hELKoJ%iXqrix`U_70#0!aOt^G zOT>Ehu_5`EH2D@X+Cc(fG1O2xgC^-NdP|G@j0$6Ns~igB;L!lbvI(17YAnbgjO11| zNVZHAeQMBc7*-?!=e})yayuY79xR18wNW&g!#qM=MjG@7q-uxs`9hp2^cm*WUcX`k zwXa#z2xp<&}M8 zp2+%IeAAr%z~+g%5yEjP?%eq9a{;d}I#VeSR*(SO_1Rt}Y=j6g0cCkgwb`3-VtRd!iF7RuDF6RiKyxq-CG9D$rV%oeg!f4;tE=GPRsvHD#oS z%=a=NQd0qA);yaz08?CAZL_?To=ysXysy6Np~0jNt-##V8n|veFBfq4_UE4xrsp>M z&cNIvp7mc11#M>^PkXN{34`yho)n{A1A}JZGLC}c{CnjccI%KbhwxySQJOTCs%nQ< zRcSaOPtH91=Bd^*5O*Lvyo(MUr9*F4A=K3VgrD8W3B?TmbfypWxpl${B`J_na)Fc^jo85kJ zZ*$Pwul=oq5Uzw)P$86)<<>;MyS@Y0d)*m^B!!{g=26``uI=6WBW~igH5tpqmZGw^ zExfnC@QK)Nd8ePGhgs*y`L8|LTesp-Jio{Tpga5!-|W1w4huhf_YjozucltlH-Fxh z>#7p=G8k_fN_O#~Xbu6=0;DZtK!jAzYgv{r`H5(RN!LS{O!L)baaR@XtnsGoXA5jh zLWV|Mfes-?qmu-+x131Y#wxVuxt1V})o#jf%_q-Z+Xyy?k45Rh51ym@5!#TalWeoK z9Y%pQ%1kR}Nr#^vntGV<_vd)eH2Nsw$l0wefw7;J?wdCjXN+}ByII5<%SF8jZ{_67 zog*0TrgOTV7e3wpe6Cv1jjL5{$(1dgIDa#(b=X=I=;^u44CZovYt{-BfB_2I;{NoQ0c!!>32Lej#kV{P&E)N_GF0TA5TqsSxeZtbU2b!J%)i z3NtN58U&w0$)vfXr3sSfJ94|;qg6J5?(ePQOtqj-C7)Kjk<(_}`m4mS-#S<^BfU&< zpi^)xfZRg94R||$oo1~TDAmMRc(P!5BAbJh!DPUdZ{!MM?+wSq{Y?I-59cM5?tM(e za8V8z6ZR|3@}F6_%Ib$-s|(I=VMf=LV)7%>q$U9LP7LtcqzjT>p;$5tX31IhXhJV> zk-=p++pMf;eH#L3k`-@YWE84*jAYc48asf}c@yvM@@_N&8ro_J-=9ow0)TSC%X1c$a*qN|>JYmxym1suH ziyTe}?}*z|5Cl#*KO6A5tgL{`kE8uvj%PF-KRGFxgt>5fdgLYvnAa4;)J&0gNu@qD z!Jm)W%?9+A+=;6s1xHQ2Blawt2U(-jp!k85&Fd#CNQI%33n_;ogsS@RsF1hy0`g|q zn$8B#X>y`^!9v^JnVNFYQ)Fm=H%y0Ac}R_*rB?dxi=1}h!Srs^;NgMk?-P)P zD)e*rb^+OtMTNWgT;^ACo^QU3o0U?j=dfnNURl$+HqPLK7aO;UE_HtCn(p$sz5@Qo83$ z4|)&iS*^(bWwar%E;_-aHLpsnNEMe<`t`j^Aa3=g{xf?t(HO*n!$Zf#tol^cUnWBL6 z{h;ioYRA$ZLv241Wd`>)hF@5DZ?Z2&0A?Lj6C1-Fe0Q<%_xRuFqV9}%j#5nz_8oLR zbY} zTPt=kw$qJ#Ke^NK2B^dWQ8Q9<)BB5&6TgY zlEEk;7v}QqMfz6X>jOy-d${sO?*FLCTFWhmd_kY8&x#YtRXV-mPy;N@iX3hnP60oK zzcgEdJJK{5f*wdE4&so}eU;U)f$O1E&$7Wc*!i&zc!fU{mC1*EI74*?^(6FOuak=MgI-Shl^j$H2> zWy;28ijF4qBn~*(MRRt7{NV%Lk@`?korbOmLyKKE9@0v{gv=PA7|rFF(R0x4b!dHR_vi2G4V7cN73rP{d1wxGgPRa>sy<%tO4RId5*+~{ z`<;6TX0p#dkCKjN015|^}Bnz`S+|O2No}~-ZU+~ZY(ofs^z1m zdmNuWiy(h!cJ5AnAYlw|iySoygRDR=_3*`` zM@4K7ktVnZKZGtp-QbO!>Qu7mKn){~?~-t6a96;VoIDb46u<5RkFNGft{W?T>K3w0 zhFF&%X?DV89){vN;^gYUpb^7elyC8X9q1)#yPU6>D!y&bn%JSx&D8i=Q(KkmYkLE2 zon<#geOa=iFyhfK|6S;L{ykTNHIvxKZ*O9<*2ya0I;gT}_e&+cP&f$MiCB#|tNqMa z&Jxwt*^Z7_W3+JQQDC1bq9}Jwwd8Bv$k+AKX$*Q7a>Ht%M5Ohfa*h8@Nm>BJ{XZ?a zfo3(9^Iyv{bIt!2hJ)o)nYpN1>Icf6(P_)&%#rh`e>B`eUvX&+r%n4%lb=RJz|Ey! 
zeRUrc2eiKNY6H=t$eVuii7>>RxT_dIpVE5pyr$=&F)s*|1i%zKC_CNPvIcL!#L!n_ zu3FkHS>Jt~Hkvg2XE2PK#a(zqx-51U0>6FlE}L46u(o z`lCRN=4Dj3Pk$Qs+O$WIv(8a!-$PAN*;)r>vtFE)@b=-`YDJw~T&k*R4QWdRHV8xo z=aH^C&_Iu-JI_yRtywu2IarP1$`OQyBxEmU${Yi%@3P>wZp z9VHW;K{s)xT9jgYz` z8qFa9h48F9RN>y_?F(^8qSvG~qYbZ^s&}KG8yCVpm#-P1i2m&J?IND5sd`)X=SaXc z7g`+qN%p-1PMR`7v43fAG81qzaVr$KEE=s8(=1lCG+0fuKTmGITm4jPANNOrEy$d1 zg>shP4!Kh9a+xaLl6fQ#&}PED$6v_m;%D_uL@UtD2Hh*M*$>dSwz4WzKhZn{(2OA9 zNe~a9hk+`2|Fyv`rL%;qgS%kb_^a4hW7Xq`ybl4Gmz8Lo4b{(UtHcY=_cOD+xtd&b zHQWp5J?O6*=0>^PVuj0uEzLg#FqC)3T|vUV}uteX6~{=i(@Jz{)-2k8d}_m$XG`ok;PS zPL+@&@}DCU1PRFjC~&L-rvfi3c%?*u>zhFn1!*1>kN&EYan^TFiD?d)UC1;ssWKj) zheYXRQmERn0twDNZu9mT_dhA}TZvFqV=g~Tw`)nU`Z0}2L5_@Q-M~nqaDbPZBlF_Ay!&9rbkZoql{o_6i=hxZC zZye03wlxMsA3f<|+Td~shU~JUEmMF2s?v4vEdlF>5(^cIQ?+q2$g{#Z!0X#$FWX6f zP6~0<@T4QoV(a26%D2*U9q?DQaWS6fPg~JAvG|f@OY9S&W8N(_MZ5y z--Xw*#R{%;XyX3&++iF7&k2CS!y<>aUt_ev-@h~t&@}&TVg6mtYv?e;L0?T@l}sqO zQgKbOKbgcO1P?&08fa;?SLUC{jvNk;ngC0So+fz%oMdiYw^psDJ#*GRkbSa@SQgf; zU~=UN_XlBobkO08AL{rpt;3{a>J zwOEm?SDC{GKAQ}{@7puFYT#-Ut4j`5P(RuU4^htj3=p^H&yyorNYk}jO&gpLDFaKd z5`T%LP6EPBVN08ye?TCpyCpKvMP@u(@UivU#%NLxM~x6Qw!F)HHBiyh`1B%|Pk;qE z!S$wmhnkRx8L2Tm-TDtGVwDoM#*Ey|e%{87DcBlyFsau@FE2#9af2yxv(dJleeJ-*e#tRjC++fRyb-4>ryOMPq7&%m+Q7}rN(zQd1-T=;`*hK2#w`ug z`ip^qW;{uz%gubenampWQoS3+{;Yk9F~%6Ywr;vi{pF>V3d$*pP6p8=A|_^lsN&nj zkKQZZUt#W^X}EiQ&eI--00aAq9@tkel;i>@@ss=9qpfLYZ|~Wa?FX2zST0{B9rHD* zGL?a_g@g)GRe6bB*vN~-;yuX#`K({Lf#;2>_FgKSd`yU))aK-BL$n_%SqC{B>H5}H z>qYCwLoHt5%#Xs0f6HaNnLUm!Mj68g=L397K9)}4`Zi-9W+Ot)LfH*LI!rkLwCn(I zNFffQqxLMAEJ%MD`S@t-L)dK2c`YG&YHeAzUqKuqz`}`F^_6SISc7mNMQOrK)Vkgg zRlwaCr-saJnc(0{r&dl$Q;cjGxvXVC7q2G+z;%nq`TRxS)Hh6NAHn7y>c24cmF7vB z#ri(=Z*XJTINeAx(y^q)R~d7fIMp{QdX62+gRFihQyY6$Ewh_?Z~3=S*G$ynIQ7PA z_H`K058?tTer00|s0a87MyL3n2c6Wox1Ou3BbaEtaBNorXXB!N$X( z2PK>5ZK(CH-gCkbeKMF9TeJ6Z=Z6Z*+VuQ4#s?~Vi%dWmxK>Q zhPm*(M?m33%)*PM=+CHtpMzZtuB_TKj_6FF+{`(P-6c9-Ev+}++hoLBN>2br487b3X%%sv&Z(X zA{)9%*J3}G9`8|?cqpA+B{|{1gB+3ledK5pqg=R#mh)O;8bDQ9^7>Y8H=8=^J!p5! 
zhW1-aH0f!Q$t^A6-kO_AUD)(R5HTpxb0$0vX;@_lvemd1FSAft`)p0n8kSGF{Op?; znM9iUL@iEhRbHeGHGbmS!?)!P_f&8GB>Z^rEqN4Il=iBV%oIbPkB2@*xPzg7{SpD0 z@ul(7UGr?VNt$0B>?hMl_=e=Qp~V5+L$Db8fotvD_t^p$xs{TO_qBdq(6VcW7S3!- zD{7uPHCP!NnsLksa;Y+ys56DjD`SVsu)dRFPL8?tcDAb+de!vsM9g->(})W@U}Mil zxx23xx8r%MvQrPw@1yllIUcLVq<)L;`giruTs^te$7Um67Aj`wRg*;c*Sf&{Brd$P z#0P3+pJ9t0{$7wH&B6B6vvv-Me6J<2oH*9umKV^saRdnN(QAL?&ZG?-EgDN8}*9^Mn;fxtPekuwPgwraZB9z5zwnHVq^G##!-9BSsoO;|)E~a&;Yz&iU`7YNd2_m#xIzpi* z3A9l_OE#(@4|~O>?L`iMP1T}=YU~o}{b%}&T$^^Wf4prJ)2%0ffVx zWXS4drNsACc?&8iT2Fo)yfp1wuDE463kH?UinwRX|L{VdgZ z$QUci<4GEL)?WNnF|q@;4kle)S>A)c)DVn?gn3BDvP!Q6U&-Cf=f#8wplfZWd1l;s znvzz0me``0s}7a}&gAcaEb!i9Ly+YCv32>!(uQL0euS^=QhHiOIO<&j**)CWXhR$q zNs{?~-YlJ-Y^;i5UF>2h8G*2Ra2kU;QCF0Tv63O?k;|m~9>QrRwey_{3kpJa*#f6d z7EPpA-GNdVfkK9BPisyTjW}w9aTTo@_gRb6ccQhP_3>{QnM_E>cBu4^Yn}_oM8!3- z*$0N$s2KM13i z(^^k_)G(&%q(Aptw2I=7v+xM+CMwr+AJ1$OBLrBxO;?{C_#YNWEj&?u9q;YRjPE^3 z_SbD7d9YHQ8<=I~04$j;n$>>iAL*jV!DS3`f$&NMmJOGSwFpWlAxOJ;lxMZvVx;mF za|!7pqGGIMi=WHtS^)eX;dYA>xuGIx7YqUq#KEY7f#E$A8-vX zg6KYzNDO3}s*0IZXRve-T7-(9Org}`gIG6$HLs6c+xkVhf4#@okg*RN0`<}#CU}~h zD<)usx@UG8AfHS#UkQI?kA{Q>AT~FW&aeP8k3Yht{qcTD;;L!>S}863Dz;PyJrw{> z)@c=NtorOR09xbnl6r%aLH){X15cUBBS@Ob%&*AS|wH zUA+5zGm2}^p#7MFRhX&_C=n%j}Wuq;EuU{Pw#$`D_5$vZyQ9+x2LDkDnheA#cIA8_6 zK&jZTZQI88-gLo_^ibjDiTuIN1QGgjAcQP!UqHc!#Ph*bVT<+#>`AbSuR_aqHMDR& zEB02q=zEC^9XWSZ5d$v88-XMx9(mD;H6>QnTiIB9Bui*rnets84tWG9ou2DXLt5c{ zYqgP@x3K>tQje1{lAnNEEmMhS(gEXR-$Q~jz)&zN-biSR{^1b5aBp%PJ?W0lm$NSk zeAx<)m#~)YoIh)~p;p5ifP;o#X2SLteu9+tP95F6fs=Ooy-5iW@x-D0ms<8#b>x7E zQheOG8+?AJHYlmU6tzH!nRx&cM#gm5HGB7t3=v3EWBmxT(ZYP7}liu^#k9hNc(UZd#N5YUIXG8-3NZ z!80RYKhp^c03Q-jyCc{AoaS<6W8#D@Gk)4`%y0rg+t2qO9-akCtv^eor6#Uu0l6}P z_>&N{B)s_4E4gTf+hkk>TCCrJ>65Fk;a4lv3=I zG9x2;5GjanyLRz0;8+?}&16TKsaH=f+_E(kJOMH(*xN%T|1rKPneyJM37Nvwm!fR# z{c2?4nb6XHj0lQ#z-0P7X5RJXQ&Nc(c2hM~g9SB(tBC**Vz*Z76T&@#L)GC1Hz7YI z*2C-9ACY|TBS;=Hy1$kj{Y|TJCO?k-^@?Gw#|GBO9Ar(MV1tStz^S zeMUmlNczpPzw5R(S56OEH~?g^TXTZ;joB-dq}`4w^=$GZY@WGN#gl(8Fy(36nW`g! 
z;^t7-^S{yKb*|>KL(ZKix6rbQ*pU!(-*tz23p#d;Zkk=3`7{$&9K$ zer&LZF@?*C9w?nYcSJ920*uy*qC=&9dLYIB4B4#yLGNpg>s;dma;;XiO_mzt8Gd`mb|qfoskld+Os z-e2ZC{o!%-FF7wkWa@42t{N~v;Cdmx#Y%i=8^3Oo%*-D9+>bE65wu964iJ)36nRf; z(bm?06Gd(|41)tZKgdDuBnK(yL;SLc#I2;tgGmF4Nn|wf+7@;)e)a9&%QGxi~!Zp)*`?oc|e}>=epvL{jyk@bESL^vb zVRkMjYfq#044V{O+r1OcYc|$LJ%1v1tWG&iNL)q>qCENww!2)BHc37c_#ffVKI59u z3sQgYlmxHiBrthmo;jmGt#`)-Jp+zr>)*=E4FmpNPXig?Z9DRSE@Q2%=Cr??@YZ0R znV0#+w5)0M5&rJ&RUE*aLa^MkM=$j(!o#omA< zj^sj(Ey=lT3rH`J0QZE0J?A7M70`V8)UN@J=v?YK{DQ)9jx1+Y(pD3LS&H8dIOo(@ z7taQq`N7m>-#E*SRT)SDL$&~uk0Ij^D-nUQG}NLt>P2BcLFEZ~Lmfkx8;%KXu0^3b z&SdAjTuG!dI|=3Ad(Xs6kkZ$^2+hM0l$i#k- zjC;8}SnkxaG+BXexEMX7G+ixj5nC{CI`HlH?a5QavX|;ToVfO-=lS5G-jNY$nk^W( z)^!Wrzwt~f#;llvn)qskE^4?2j9&z(c{z*8y4c_ocO5)H7Gq7czWhLk1X^*U6}MFs zC{0}TgE)fx59u96GNN$4HyH$; zPNSddP_E820=@riZpfzAv5W>pHz-qWvtCZS!xJRZmPB9$L$Ou42%_+W^^aMwnYuqv^iv zhp@~Xp9^UI13IMB<=V7(Z>ZbPcJGId$^jrcs$6TRTSDI`XFAhL&0p}>H-WCA18&Hf z5+YI{TY2$79s00?$geb>q`aoOHUs%!=*(d$R|yo+VxMfj-raCq2UzE3v0kFOheBmw z3Eo_*l#N4CPd^52GCUz1NmORIV2e1mgsmxa<)mx;t574cD(!u=5xpLgFr!{qvuEPn zg#Y{nXs4A6>)6uB4NU^50N&*dE{3GInJ>FB9i@<_R1_!4BU=FH(hF?>3%bGq+$~seRCv})n&nFoEbQ8N;)>p9C2!E<9c)*;=T;K^5->vnkTii6zm>AZ zhJH1c9~OD*Fq)3=#hzOvOtNI+5ALJyq%aj}Uip@=GWl82N+qt04spRBxl zA8mH=MSa;)C*m<@y+>T-=N4&$^kH+*{9Vab4AfpLV$^`o`k3OVmzUG_g*+DD6?`IN z--ZFhth`1-BA_{8*wLX;EF@zsVP;Ht5J=zE`HyL>gZ-=N)-S3^yE8VHw4a6OaTgNXygwLn5F4;RK*dIdGHh0$f7(H$Q?d=EILBpzGoYd_7Fn6lM6X(6NlmuZ$o>Lz?UM5m zC&Bj|pnQns%)+%`8O*W89DC*oUW?Nqw;{ky55+PV2lLFg*2b)oY)s2(=bM_jWKj)Q zX&sw}2HkG( zFek}O5=<^uO)WWJ8t8DS-G8$6dC3WfdCdR#&jG(X@@y!nc%v;v1n0*YCugJXyaJ)~ z>Oz*{EBLwZ2h~BmfeKV6PGY|k&uNX$8eKn$J2lix+x*hFyYZA;nAcqD1zkB;L~chw zh#cSnbVwqmcPrJBxcMy2Y@MP~f`oiQiW$A#K1S>0CKfMUy{YjMs+TeKjoy_h!kPjF zS@S84>JSkRlsD0BCD$xNtw^P_Ct>M>dI@T4Pfwg`YdI3QObj@{ZIn+yw^;dLqq~yV zu^mPwLus6vnIC)zxV-@G21jW}tt@TE!iV$6UOMZ}4WLEyV+?taLL+J!5NTgTZqCnN zXfX`Xul!l6`;H5e$U*H{mBx4B?_QfkHIm#(8gx;a^_-TqJ{);+Z@?q}jHMHpZ$dNw z-AU8N(lm?g98w0KtG9)FFMA14Rf3Bg>x2!=v=6eyQyZUp6o2)77d&>xz;F#0M|Q8t z=XS60Sy|th&AKlBX*I3pBCIHgJI#=mCO}rL#=}Sv$ib4&{`M8iDa6pjIgNXbE$lgXZSBMfbN+(-j4b#scO-df3uHAcVj< z)qEfway@^hRQ2++5z%p@W)9d5GTg-jwa1W8(22jJ`}Q0ESxcOTR0_6^zD$amo)W9M zEHArbU&V=z5~x-PP2}gUc*{*fic%k|`2LU(krsW_%$y`u`#Bqf3cKeg%ZE)U(gZ%x zQ@$3F(Zxw#7-6FFH^ZBZy9Q1JL_5x>F9qK30 zf%HgGesB!wJJGc7gVw0E=YG?L4vm4{k2qe>0i#(RzwNASzdY(3RsNPICi$X-OJYvcGlQwo=*`806(V3Q|6iP*Q zS$YS?xQ42x!chv-z;DYf@s2zn9;~GhkN;;fveU=i<<89PAlu<|rQ-0C-k{th6&uDV zCAi1rl>`x|FrPP^qKOj-sMJma2M1iuq#QZ#VGQh}%gVBz1FM;ebZxG|M#D8nB{(oyO{MTlKo|Ymx=)L_{ z3@x;pK#4n!Y?58dY!)|fIpe@=6dNL^%F1q{4WJ1|ttAzR*9#yWC`U3KYmIopYG&u& zc6_+kc74*2?=zIe+Ilgd9Z9FTe+ZkYRY z!J}F@$K?E%{Lf`DW@Q%F4U;hT%iPw{Sdzy{#HWFR-6eUja@3MDp1})8YIXd}>f(Bf z3qGXWY-|4Z*r6=SClW1utW?r62VcC=}i1?L+( z`=_=;@S|sLr3|(5%yk zq@UF~cE~%so_i^>*3zjMAC6P+Fv_7G7dklC1wyBp<|cP_`mj`%em+G=pwk+K0FmQ- z?2A;k=IoWcD)0SX!4HRz%VA&~;KyH^zNiq&o8Iv$oZ(J$Hmcw$ApA*&0KGU@4x z_fP|y!d-&H$nzIg@Od+b)KVwPUXC37!0+G0++?YaIB3R*cD@iSu>a`E#yZ&A>MQe# zxjgq~)diKETml1F6;;Cgk3LVJwUf2{CN+lH8NF=v;)qt9cegD0EqBl4@OJmc10+#_ zr5mZWlkn$vmMX8&L87DDAZ-Aj#!~??#aH8aI!;?dTo0Q0_0F8fmtq-hVk2f{KJg|< zcPww2wou|LmMBr=8$do;QdM759Zh}}nU(TXw>X8=CMModCuW`&dx-9wyMHvS_NltQ zdT%983(0f``t9EcYTW_jK%0x(?~xIW-j#@|Vo!ci<8Z_Go~@C{Lx>A&-^?&)x3BZx zMei5+`}mzh#_%gwU@Ps+4M332xWYbGA|WC^9ca8|$~G7h4J$b%5rb@jD9&j}ff=}EUSwm?jB}1@VkR@2lPHJU zB#nL+sx|6bX=(TE_Fv7O7gx}fsPDBPGhXBXK7KW_i!sU zrv{%*J9%lpK8X*f^8D`2@q)vMM3L(!)(QiQJkXMVGHKlCbiJsLh-w^55_{QWtV4KH0YfhHOcrmaJpO`OU3cuD(P<{UaqrQr z8ZSi%1C~A5Lr31J2;aE`r{ zcFU@1d?jV&ej4nZGoLNk4en5>m+Naw%6P4Mq@=qif2wjfgJ}ILPBKM!g?lS$3pdiK 
z^Mck{)MSi0yUT}~dx8Cy*Yn4rc?Z3ByOFkmm$0yo2FByijAPvqVNsW7^^20s_BM2(0tP7wbA3$p&CeyTkFWUlp8XA z(W&|Qb3<*;tb{{o@Ma;w zjl6%Zmt}D>AHR$#ekl^bQ)W7~*|GTxGhxgVd)3V(T?5KX6q0qg@h=Z!#enPheYHp&nm%iFfcP>r=8n9l35q?48|iN`QdMTbcCes%X>;zr{vF*9AJh~c z-vwORV#zTAD>gpTB?!JTS!*hvGB-Holj;;@XvptR6#DP{FVL=L`61Ct?Qj~GRx~{| z8(5`mI(?hvslZAo1R#JiAUoXn`o;9C>VXc|gz}^1^j$1v07n^3fgi=l%XV`~IL&c> ztNBY&zMn#)#GAOlv78_ng}A?S0bQS9n~Z;4GcKI8c{xAHnjx#~R*#qr?Qdir4PEpZ zlPf9S6qzH1-ARtT^gw7C%GE_9I0%dm)s-7TQ6j=@PHg&cyPCQB^|k;XCEFfkNG%Wq zrJ;IO+5r~W$VlN9_-2}617B=KcY{#k04XIq*(}t6MD8!O(l(D;lyZ~SL4j}NP%zk( zUAFGMTPyNI9X~kyMi;O(a$saq34ik2It}#U(a9V3BtH-Sp8vJ@fr_m(W;)gP3ueMq zM%1*Mp||To`k>amtSe6`4yaWyyP^3X>ykLl&81^fqNShCu5l2hkRTpfs+=M0{ak7> zXVv4**Baxde-y>tHS%zoElVm6YnOML^MaEgO}8G@Md-jbK1IextXCx6;qao$yAs%* z2NT8fEdPN}uXE9-{WIZu(d(8bEU>`NX!}^^%75^d={sc>Lf?-ZIIC>eO40SWb37-$ z^jMQOWMjICg9_U$n`*WPTN4D^;)Ed94GQOr^RvwDsb`oAXwkFeN*CW8e zN-kg6qTj5!GGF~c`I^~FnlRtMUWY#ZRgOeO&f^$`8+ZjPO2>~T0B!5L{WL;4iT_gi zo084E$9DWVDY4ks7(|2wiezddqov?TuuLt{#8T>Ni1%YPqGjqh-@eMmo_R{_08-1) z{y!w~2uaT2Uy?$vT`gaTr46MWI zXddj`-moCRndRu^Xf>usIxLiKH;K9Y8||CzL_ZpGl(`#~!uU;OH_3tujmWX;7-*!N{8DDNlxg!B!-Atj+KcA~<0f*}9r}uY#B^syBLT!6l@1lwGz^wj%6Py3nSp8pL zD}sL<%v0nMrvhyJe-;J($LGJBNU$>|xIEu&3e%~A-%jcKq5d!7UFxg57ANP~xpK%X zTF^6vpMP$SR~&IZ5at^G=L@Nl16^1BZrmS;zHh}%2o{aE10Jv{z^|VPk?|@>>E_4Z0;#*?*#;P^!3|sxlyFI1GSN|nfx-q%&(!k9r+6)# zNU1ehK$%&6zOvu85qD7N7kiSKAhrNy9AErP;pfkr82!u5W|-n+Gres|?W7Sr#A<$F zctif~c+RcU?1`j|_(;C*sb8&qDa?1CrHo-c41)A#ZE$ECJYbUX4lBbCx`6+Nwz z(_2?GTX`R6gNP44r>@Ac(2am2;~!!RrvrhutMb6isg#z>KNUFZxaqyoCBG?;9A?BE zf-=qOaL2Y&%+fXB8%253_;~P zxW&0stp``FtiMD15i|rlFYK8`eVwzM`AO8{9CGs5v9O#vY3?{B#@WpU_-ezbh;W6R z+(&Jp=dT6lcF7ZCwx@t!10$~B;=W5Y$nb%^&ks#=XNaFwXM7ADUP|erFHKVmpQM%~ z#mx1Px|bLJW^JYj7*lnZ$~nF3E$S`gi&3yoRdXOrJduIisgwZd1dufr zTq8PoqtozTPpl52OeXG1pSAhgLC&?^as=_=6~$ezsr}AWE(YsIuUwm&Wtv<4C|gSh z<-OZZ5+^Nrkex7NOB{#rZvEO4cwP)67?@g}Bf*MR%cJ%_&mS7C?S#sI0atW-t4A_W z+1bpXY0O2re9}{Hs6;{cn-+(^r-8t~CO(Od_M5A}J?yT1@qpK2K5xaSM&<0@t#Hx{ zc0)HZOj@{-eBUJ+XpPQ9lR`9PQ_c##3BS^0aa8wW@5?tP7U9Pad7N3>Hd5s^rQ!EO zm)quVM(}4?(QMjBQQp2Y+{SroWOQ~nIOr;9%qn=GB+>8JNqzyap7^8$rR)|bf2R-I zZxM_)(`wkgE~0*lPEw+f5Hl2m0yjPh$VnbfS>o^Pt5!GI#%X1yKI8Pc#A*OZg7PEo zZ?@W10lsY(>YJtB$xXxKp77yLYB!q;lYBsId8G)y9` z&h>=<2GFG?14`^-izss6YMda&m0g0@UsI{AdqXcg^lRQI;luqBuI8=2Mi6{d@e`7@ z(5Jhwi*T!3-$}OfQZX+gE>COnKK@Wr|IDThLi4^BwM2sRIj8i?y&!JI8>Y2C2okUl zjtSmFt=FQ<*GVuI;_73EPPJ8|&qdH8JeP6=t_gi5YVRY7bKEId61}qFt#7d@!^_6~4~u${)zqIpW|(Sru-L5OsQd9#)AuFw=!51PJ|=Csr+x zD-}=SWp+(#suGvuFhA23S?8)hRwPmSyT=K_m!)DQn}d2fb;g^--SZVcr}s`%GNl9y zw%=@m^zuMU&=z>`=DbuT<<}VLHnk!t!J;L&SNBTTW zk|KKZ+O8eidOx>=?eJfBj_Gy(!TwPciK=8poBfQ#=3Ul@vw#4LMz_8+0vURkgQSOeb#nG+=*1p`=$}D&2lPwK$dT?+!-#sE*dw&w}){ z?&7k%=7AWwPoSx|f_SEL!r=uw#k!ueq#Vc%ye)(_S4OcG%`J_=Jw-$xN)G;hr>;)s=S%yGtaH!Ztxy>(ZYb?nR|&~Q;rXm zq!bqH9NqfVXXvdVMHbuebjd7K)&^IZhEea*p|il>-x=C!_sNiNp17LyYbpKBjo%8Y zkesgE4#4Mn20vjs1aW~5sCDO2>?jBl7HA}QWK5eO0k!Y(MqO;xB@H77T4#+rl`GDVSY+>5suapZ#>=L~8D*mtG)c+V#{U1O6 z4@Tw30$kO@U_a~H(ugM2Ifoz z;Z{yC2?$OgEEmvp#)I#c1>b!JDmHQmg0=_zF-YffHUB_%LB*Xyq1^|Z51@q&`XS+e z0kqf}0(eE~7#!zF6g~q13aq;qVY`4;1qL;7l6`@pyBC2Xi2tJ>?$KoL-P0&yKQC%O zcgW-V6|7K!SHhePvm!0flB5QWHQ5!bFIY@26NQk}QVG_g`Oec3E81hNzppKv-jq4~ z0Kw1nF6_x8!!A0E5U01xXlmua+vJ&yLj2|ftE_P6$9S#%hegxPScNd!@(Z;VI)jYGu*axe|HtdC{^ZK-J#NP*7 zUsy)b%UqflQ4@Q!O|(_|qHoE1s2*HCr8P%w-!3&7&fhR}o9*Q5o&IOuRGODtUka|2 z3_@7bzq8BJ*oD_1W}zw{$lnw*6>cZHM^Rp`9&n2r?rvPA3)RHH;6NkmZS`cr%R$7s zmhAR(jZWH8S;gV+lSWIsp=`gt6qq^+qipR(3h2u9U7k^>J;L7ZYe(Ngql4g1Y+l2m zRcaAOb2Y}D^g&oEu{=X~2&pb~*zzjg6OGsDI#tUB#&H&obVPO%bPQe5ibD@55)(Xk 
z;9yYG4HS(_QQp8p1vp=!Aek=Wv>!(kafe9UdqBgQD8v-7Of!1{e$jX+fwy5G#8i{C zOTH!0Md&>u#pcENp5V$Np;7~(1z@c6(@k%l=N9W*E7?AQIKMfK9x5e{8}V@(re!!$ z>Zyc~Wb4n(%}dRHb=~oAL|jNnK#0OX3*+93&*RL1S*p_cwxn1`L0mu8%e-JOxp|~I zUt)7>`ddEe*{T#xkdyAYDz?&n0ptgg-wLjQWA8$`{P|VPW=79b$-FyA5-_&eRg~eh zgYr}S$Tcm+O{pOj_NutvZQVaqF}^8oC{Z_(4B1dYA`03^9v|}yny5L= z#hr&{WIlo|1!|-F_MiZegeBoD8PqeBaiR>eG&C?XGoy}(ej5fXA>dVs>DKMYL=<}T zNghiVp1I&6rEbB+ll2<}`x_9De;=n?FP|L&{b|4TJL}SdwM7|?^4N(jsyAWp%zE+R zD*^3y?2p1gW3|(VCY3?`AqgX_T__p<>X2Q6&jh&!CuE^ix?kpI^rS6pZ3q&H4SAvW zM)ARKKnC^G_FVjfwt~9Q@sh;B|v)KqfX^?Iiv(J8#f}GdDtQ2xJ~aR1&cr0JF}v zE!)*S2Wzl-Yeb_lt(=D_7 zM18&dBIUKJhZ#Ryb9DWWS$}byOv?6DOJn^7R}}f^8|eySN?K-4oBD@H=cx3 zn#@d3dQ++66yAWWkSV&hZkmu;h9~8kN*UXE&E~)${C8Kh|KEPE`2XD4_J0Q`{#Sed z-w;swA2<#FPekay!wdhn_~QSmKTimI`_B>M|8HpH|5K>qe}(h@gj4>j{OP~*Ykxpr zs|rAzV{!tS=%1#P|J5GYs#%5#Pr diff --git a/examples/arealite/configs/gsm8k_sft.yaml b/examples/arealite/configs/gsm8k_sft.yaml index 75a94b488..b9bad0956 100644 --- a/examples/arealite/configs/gsm8k_sft.yaml +++ b/examples/arealite/configs/gsm8k_sft.yaml @@ -6,13 +6,19 @@ cluster: name_resolve: type: nfs nfs_record_root: /tmp/areal/name_resolve +seed: 1 +total_train_epochs: 1 +tokenizer_path: ${model.path} model: + experiment_name: ${experiment_name} + trial_name: ${trial_name} path: /storage/openpsi/models/Qwen__Qwen3-1.7B/ init_from_scratch: false - pad_mbs_to_max_tokens: true gradient_checkpointing: false bf16: true + mb_spec: + max_tokens_per_mb: 4096 optimizer: type: adam lr: 2e-5 @@ -24,30 +30,46 @@ model: gradient_clipping: 1.0 backend: fsdp -trainer: - experiment_name: ${experiment_name} - trial_name: ${trial_name} - fileroot: ${cluster.fileroot} - wandb: - mode: disabled - seed: 1 - exp_ctrl: - total_train_epochs: 1 - eval_freq_steps: 1 - tokenizer_path: ${model.path} - mb_spec: - max_tokens_per_mb: 4096 - train_dataset: - type: gsm8k-sft batch_size: 128 shuffle: true pin_memory: true num_workers: 4 valid_dataset: - type: gsm8k-sft batch_size: 128 shuffle: true pin_memory: true - num_workers: 4 \ No newline at end of file + num_workers: 4 + +# Utilities +saver: + experiment_name: ${experiment_name} + trial_name: ${trial_name} + fileroot: ${cluster.fileroot} + freq_epochs: 1 + freq_steps: null + freq_secs: null + +checkpointer: + experiment_name: ${experiment_name} + trial_name: ${trial_name} + fileroot: ${cluster.fileroot} + freq_epochs: 1 + freq_steps: null + freq_secs: 3600 + +evaluator: + experiment_name: ${experiment_name} + trial_name: ${trial_name} + fileroot: ${cluster.fileroot} + freq_epochs: null + freq_steps: 1 + freq_secs: null + +stats_logger: + experiment_name: ${experiment_name} + trial_name: ${trial_name} + fileroot: ${cluster.fileroot} + wandb: + mode: disabled \ No newline at end of file diff --git a/examples/arealite/gsm8k_sft.py b/examples/arealite/gsm8k_sft.py index 2fbbc71e0..a9b5380a5 100644 --- a/examples/arealite/gsm8k_sft.py +++ b/examples/arealite/gsm8k_sft.py @@ -1,18 +1,19 @@ import os import sys -import torch from datasets import Dataset, load_dataset from datasets.distributed import split_dataset_by_node from torchdata.stateful_dataloader import StatefulDataLoader -from transformers import DataCollatorWithPadding from arealite.api.cli_args import SFTConfig, load_expr_config from arealite.api.io_struct import FinetuneSpec -from arealite.engine.fsdp_engine import FSDPEngine -from arealite.trainer.sft import SFTTrainer +from arealite.engine.sft.lm_engine import FSDPLMEngine from arealite.utils.data import pad_sequences_to_tensors +from arealite.utils.evaluator 
import Evaluator +from arealite.utils.saver import Saver +from arealite.utils.stats_logger import StatsLogger from realhf.api.core.data_api import load_hf_tokenizer +from realhf.base import stats_tracker def process_gsm8k_sft_dataset(dataset: Dataset, tokenizer): @@ -42,10 +43,9 @@ def main_sft(): rank = int(os.getenv("RANK")) world_size = int(os.getenv("WORLD_SIZE")) - tokenizer = load_hf_tokenizer(config.trainer.tokenizer_path) + tokenizer = load_hf_tokenizer(config.tokenizer_path) # Create dataset and dataloaders - assert config.train_dataset == "gsm8k-sft" train_dataloader = StatefulDataLoader( get_gsm8k_dataset("train", tokenizer, rank, world_size), batch_size=config.train_dataset.batch_size // world_size, @@ -54,7 +54,6 @@ def main_sft(): collate_fn=pad_sequences_to_tensors, drop_last=config.train_dataset.drop_last, ) - assert config.valid_dataset == "gsm8k-sft" valid_dataloader = StatefulDataLoader( get_gsm8k_dataset("test", tokenizer, rank, world_size), batch_size=config.valid_dataset.batch_size // world_size, @@ -66,22 +65,52 @@ def main_sft(): # Initialize engine ft_spec = FinetuneSpec( - total_train_epochs=config.trainer.exp_ctrl.total_train_epochs, - dataset_size=len(train_dataloader), + total_train_epochs=config.total_train_epochs, + dataset_size=len(train_dataloader) * config.train_dataset.batch_size, train_batch_size=config.train_dataset.batch_size, ) - engine = FSDPEngine(config=config.model) + engine = FSDPLMEngine(config=config.model) engine.initialize(None, ft_spec) # Run training. - trainer = SFTTrainer( - config=config.trainer, - train_dataloader=train_dataloader, - valid_dataloader=valid_dataloader, - engine=engine, - inf_engine=None, - ) - trainer.train() + saver = Saver(config.saver, ft_spec, for_recover=False) + logger = StatsLogger(config.stats_logger, ft_spec) + evaluator = Evaluator(config.evaluator, ft_spec) + + total_epochs = config.total_train_epochs + steps_per_epoch = len(train_dataloader) + + logger.info(f"total_epochs={total_epochs} step_per_epoch={steps_per_epoch}") + global_step = 0 + for epoch in range(total_epochs): + for step, data in enumerate(train_dataloader): + with ( + stats_tracker.record_timing("train_step"), + stats_tracker.scope("sft"), + ): + stats = engine.train_lm(data) + engine.step_lr_scheduler() + stats_tracker.scalar(**stats) + + with stats_tracker.record_timing("save"): + saver.save(engine, epoch, step, global_step) + + with stats_tracker.record_timing("eval"), stats_tracker.scope("sft-eval"): + # No need to log anything. Logging will be handled outside + # via stats_tracker.export(). 
+ evaluator.evaluate( + valid_dataloader, + engine.evaluate_lm, + epoch, + step, + global_step, + ) + + logger.commit(epoch, step, global_step, stats_tracker.export()) + global_step += 1 + + engine.destroy() + logger.close() if __name__ == "__main__": diff --git a/functioncall/code/local_verify.py b/functioncall/code/local_verify.py index d8c9ff6c4..a174ebb13 100644 --- a/functioncall/code/local_verify.py +++ b/functioncall/code/local_verify.py @@ -95,7 +95,7 @@ def call_verify(problem, generation, debug, timeout=SINGLE_CASE_EXEC_TIMEOUT): return result["result"], result["info"] -def code_verify(id2info, generateds, query_ids, debug=False): +def code_verify(id2info, generateds, query_ids, max_workers=None, debug=False): assert len(generateds) == len(query_ids) problems = [id2info[qid] for qid in query_ids] @@ -106,8 +106,10 @@ def code_verify(id2info, generateds, query_ids, debug=False): infer_args.append((problem, generated, debug, SINGLE_CASE_EXEC_TIMEOUT)) run_results = [] - num_process = max(1, os.cpu_count() // 8) - with concurrent.futures.ProcessPoolExecutor(num_process) as executor: + if max_workers is None: + max_workers = max(1, os.cpu_count() // 8) + + with concurrent.futures.ProcessPoolExecutor(max_workers) as executor: run_results = executor.map(call_verify, *zip(*infer_args)) for run_result in run_results: diff --git a/pyproject.toml b/pyproject.toml index 962c9b9bf..4f752ebc5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,9 +53,9 @@ dependencies = [ "hydra-core==1.4.0.dev1", "packaging", "tabulate", + "gymnasium>=1.1.1", "torchdata", "autoflake", - "gymnasium", "tensordict", # Monitoring and logging diff --git a/realhf/api/core/data_api.py b/realhf/api/core/data_api.py index ce6d9bf95..f698a76d6 100644 --- a/realhf/api/core/data_api.py +++ b/realhf/api/core/data_api.py @@ -8,6 +8,7 @@ import random import time from contextlib import contextmanager +from functools import lru_cache # NOTE: We don't sue wildcard importing here because the type # `Sequence` has a very similar name to `SequenceSample`. 
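For reference, the timing/scoping pattern used by the SFT training loop above reduces to the minimal sketch below. It relies on the `record_timing` context manager that the `stats_tracker.py` diff further down adds; the `"sft"` scope and `"train_step"` key come from the example script, while the `0.42` loss value is a made-up placeholder.

import torch
from realhf.base import stats_tracker

# Timings are recorded under the fixed "timeperf" scope, while scalars follow
# the active scope stack, so the two kinds of keys never collide.
with stats_tracker.record_timing("train_step"), stats_tracker.scope("sft"):
    stats_tracker.scalar(loss=0.42)  # recorded under the active "sft" scope

# export() returns the aggregated statistics collected so far; the SFT example
# passes this result to StatsLogger.commit(...) once per training step.
stats = stats_tracker.export()

Because timings live under the dedicated "timeperf" prefix, they appear alongside the scoped training scalars in a single export() call and need no extra logging code in the loop itself.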
@@ -47,6 +48,7 @@ RL_TASKS = ["math", "code", "rlhf", "stem"] +@lru_cache(maxsize=8) def load_hf_tokenizer( model_name_or_path: str, fast_tokenizer=True, diff --git a/realhf/base/stats_tracker.py b/realhf/base/stats_tracker.py index 0ecc7af8b..7fea2935d 100644 --- a/realhf/base/stats_tracker.py +++ b/realhf/base/stats_tracker.py @@ -1,4 +1,6 @@ +import time from collections import defaultdict +from contextlib import contextmanager from enum import Enum, auto from typing import Dict @@ -49,6 +51,17 @@ def _get_full_key(self, key): return key return "/".join(self.scope_stack + [key]) + @contextmanager + def record_timing(self, key): + start_time = time.perf_counter() + try: + yield + finally: + # NOTE: timing records are fixed under the "timeperf" scope + full_key = f"timeperf/{key}" + self._set_reduce_type(full_key, ReduceType.SCALAR) + self.stats[full_key].append(time.perf_counter() - start_time) + def denominator(self, **kwargs): for key, value in kwargs.items(): if not isinstance(value, torch.Tensor) or value.dtype != torch.bool: @@ -252,3 +265,4 @@ def _max_of(self, key, reduce_group): export = DEFAULT_TRACKER.export scope = DEFAULT_TRACKER.scope scalar = DEFAULT_TRACKER.scalar +record_timing = DEFAULT_TRACKER.record_timing diff --git a/realhf/impl/dataset/math_parser.py b/realhf/impl/dataset/math_parser.py index 5189c5db1..30e8bd1d4 100644 --- a/realhf/impl/dataset/math_parser.py +++ b/realhf/impl/dataset/math_parser.py @@ -7,7 +7,7 @@ import regex from latex2sympy2 import latex2sympy -from pebble import ProcessPool +from pebble import ProcessExpired, ProcessPool from sympy import N, simplify from sympy.parsing.latex import parse_latex from sympy.parsing.sympy_parser import parse_expr @@ -289,6 +289,7 @@ def strip_string(string, skip_unit=False): # remove percentage string = string.replace("\\%", "") + string = string.replace("\%", "") string = string.replace("%", "") # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." 
is the start of the string @@ -398,7 +399,7 @@ def extract_answer(pred_str, data_name, use_last_number=True): pred = pred_str.split("答案是")[1].strip().split("\n\n")[0].strip() else: # use the last number if use_last_number: - pattern = r"-?\d*\.?\d+" + pattern = "-?\d*\.?\d+" pred = re.findall(pattern, pred_str.replace(",", "")) if len(pred) >= 1: pred = pred[-1] @@ -836,6 +837,12 @@ def parse_lines_in_parallel( # print("[debug: timeout]") logger.warning(f"Timeout occurred while justifying the math answer.") x = (0, "timeout", "timeout") + except ProcessExpired as e: + logger.warning(f"Process terminated abnormally: {e}") + x = (0, "error", "error") + except Exception as e: + logger.warning(f"Other error occurred: {e.__class__.__name__}, {e}") + x = (0, "error", "error") label = label or x[0] labels.append(label) return labels diff --git a/realhf/impl/environment/math_code_single_step_env.py b/realhf/impl/environment/math_code_single_step_env.py index c07278f0a..ce9061efe 100644 --- a/realhf/impl/environment/math_code_single_step_env.py +++ b/realhf/impl/environment/math_code_single_step_env.py @@ -57,6 +57,7 @@ async def step(self, action: Tuple[str, List[str]]): self.id2info, answers, [qid for _ in range(group_size)], + max_workers=1, ) elif cur_task == "code": answers = [extract_code(x) for x in answers] @@ -65,6 +66,7 @@ async def step(self, action: Tuple[str, List[str]]): self.id2info, answers, [qid for _ in range(group_size)], + max_workers=1, ) else: raise NotImplementedError() diff --git a/requirements.txt b/requirements.txt index 0af83fcaa..5318511cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,8 +69,8 @@ word2number Pebble timeout-decorator prettytable +gymnasium>=1.1.1 swanlab[dashboard] torchdata autoflake -gymnasium -tensordict \ No newline at end of file +tensordict From e2745b921aecb69ad1da8b146d0cc1c4e077423e Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Fri, 11 Jul 2025 08:52:26 -0700 Subject: [PATCH 05/10] add autotp for hf --- arealite/api/cli_args.py | 9 +++ arealite/api/io_struct.py | 1 + arealite/engine/hf_engine.py | 112 ++++++++++++++++++++++++----------- arealite/utils/save_load.py | 30 +++++++--- 4 files changed, 109 insertions(+), 43 deletions(-) diff --git a/arealite/api/cli_args.py b/arealite/api/cli_args.py index ca4b78bd8..2b5c5f6aa 100644 --- a/arealite/api/cli_args.py +++ b/arealite/api/cli_args.py @@ -104,6 +104,14 @@ class FSDPEngineConfig: ) +@dataclass +class HFEngineConfig: + autotp_size: Optional[int] = field( + default=1, + metadata={"help": "DeepSpeed AutoTP size"}, + ) + + @dataclass class TrainEngineConfig: experiment_name: str = MISSING @@ -136,6 +144,7 @@ class TrainEngineConfig: ) backend: str = "" fsdp: FSDPEngineConfig = field(default_factory=FSDPEngineConfig) + hf: HFEngineConfig = field(default_factory=HFEngineConfig) @dataclass diff --git a/arealite/api/io_struct.py b/arealite/api/io_struct.py index 3033af8c3..b9144e877 100644 --- a/arealite/api/io_struct.py +++ b/arealite/api/io_struct.py @@ -175,6 +175,7 @@ class SaveLoadMeta: with_optim: bool tokenizer: PreTrainedTokenizerFast | None base_model_path: str | None + distribute: bool = False @dataclass diff --git a/arealite/engine/hf_engine.py b/arealite/engine/hf_engine.py index ea25709db..d264ed88f 100644 --- a/arealite/engine/hf_engine.py +++ b/arealite/engine/hf_engine.py @@ -3,9 +3,11 @@ import time from typing import Any, Callable, Dict, List, Optional +import deepspeed import torch import torch.distributed as dist import transformers +from 
safetensors.torch import save_file from tensordict import TensorDict from transformers import ( AutoConfig, @@ -14,10 +16,9 @@ get_linear_schedule_with_warmup, ) -from arealite.api.cli_args import TrainEngineConfig +from arealite.api.cli_args import MicroBatchSpec, TrainEngineConfig from arealite.api.engine_api import ( FinetuneSpec, - MicroBatchSpec, SaveLoadMeta, TrainEngine, WeightUpdateMeta, @@ -34,7 +35,10 @@ unsqueeze_mb_list, ) from arealite.utils.fsdp import get_cosine_schedule_with_warmup -from arealite.utils.save_load import get_state_dict_from_repo_id_or_path +from arealite.utils.save_load import ( + get_state_dict_from_repo_id_or_path, + is_existing_local_path, +) from realhf.api.core.data_api import load_hf_tokenizer from realhf.base import logging, name_resolve, names @@ -54,6 +58,7 @@ def __init__(self, config: TrainEngineConfig): # initialization self.initialized = False self.weight_update_group_initialized = False + self.local_rank = int(os.environ.get("LOCAL_RANK", 0)) self.world_size = int(os.environ.get("WORLD_SIZE", 1)) def train(self, mode: bool = True): @@ -67,15 +72,10 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): """Initialize distributed communication and model.""" if not dist.is_initialized(): - dist.init_process_group(backend="nccl") - if dist.get_world_size() > 1: - raise RuntimeError( - "Distributed training is not supported in this engine. " - "Please use FSDP for distributed training." - ) + deepspeed.init_distributed(dist_backend="nccl", world_size=self.world_size) - torch.cuda.set_device(int(os.environ.get("LOCAL_RANK", 0))) - self.device = torch.device(int(os.environ.get("LOCAL_RANK", 0))) + torch.cuda.set_device(self.local_rank) + self.device = torch.device(f"cuda:{self.local_rank}") dtype = torch.bfloat16 if self.config.bf16 else torch.float16 self.model_config = AutoConfig.from_pretrained( @@ -83,15 +83,13 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): trust_remote_code=True, ) self.tokenizer = load_hf_tokenizer(self.config.path) - with torch.device("cuda"): - # initialize scratch model from config - model = AutoModelForCausalLM.from_config( - self.model_config, - torch_dtype=dtype, - attn_implementation=self.config.attn_impl, - ) + model = AutoModelForCausalLM.from_config( + self.model_config, + torch_dtype=dtype, + attn_implementation=self.config.attn_impl, + ) - self.model = model.to("cuda") + self.model = model if not self.config.init_from_scratch: # Load model from a initial checkpoint path, @@ -102,9 +100,20 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): with_optim=False, tokenizer=None, base_model_path=self.config.path, + distribute=False, ) self.load(load_meta) + if self.world_size > 1: + if self._check_autotp(): + self.model = deepspeed.tp_model_init( + self.model, tp_size=self.config.hf.autotp_size, dtype=dtype + ) + else: + raise RuntimeError("DeepSpeed AutoTP configuration error in HFEngine. 
") + + self.model = self.model.to(device=self.device, non_blocking=True) + # Set up optimizer if self.optimizer_config is not None: assert ( @@ -153,6 +162,21 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): self.initialized = True + def _check_autotp(self): + tp_size = self.config.hf.autotp_size + config = self.model_config + num_attention_heads = config.num_attention_heads + num_key_value_heads = config.num_key_value_heads + hidden_size = config.hidden_size + intermediate_size = config.intermediate_size + + return ( + num_attention_heads % tp_size == 0 + and num_key_value_heads % tp_size == 0 + and hidden_size % tp_size == 0 + and intermediate_size % tp_size == 0 + ) + def destroy(self): """Destroy the engine and release GPU memory.""" self.model = None @@ -164,7 +188,7 @@ def destroy(self): def save(self, meta: SaveLoadMeta): if meta.weight_format == "hf": - self._save_model_to_hf(meta.path, meta.tokenizer) + self._save_model_to_hf(meta.path, meta.tokenizer, meta.distribute) elif meta.weight_format == "dcp": # TODO: implement DCP save/load for HF raise NotImplementedError("DCP format saving is not implemented yet. ") @@ -176,7 +200,7 @@ def save(self, meta: SaveLoadMeta): def load(self, meta: SaveLoadMeta): if meta.weight_format == "hf": - self._load_model_from_hf(meta.path) + self._load_model_from_hf(meta.path, meta.distribute) elif meta.weight_format == "dcp": # TODO: implement DCP save/load for HF raise NotImplementedError("DCP format loading is not implemented yet. ") @@ -198,27 +222,47 @@ def _load_optimizer_state(self, path: str): self.optimizer.load_state_dict(optimizer_state_dict) def _save_model_to_hf( - self, path: str, tokenizer: Optional[transformers.PreTrainedTokenizerFast] + self, + path: str, + tokenizer: Optional[transformers.PreTrainedTokenizerFast], + distribute: bool = False, ): """Save model in HuggingFace format.""" if self.model is None: raise RuntimeError("Model not initialized") - os.makedirs(path, exist_ok=True) + + if self.local_rank == 0: + os.makedirs(path, exist_ok=True) + + if self.world_size > 1: + dist.barrier() state_dict = {k: v.cpu() for k, v in self.model.state_dict().items()} - self.model.save_pretrained(path, state_dict=state_dict) - self.model_config.save_pretrained(path) - if tokenizer is not None: - tokenizer.save_pretrained(path) - def _load_model_from_hf(self, path: str): + if distribute: + save_file( + state_dict, f"{path}/tp_rank_{self.local_rank:02d}_model.safetensors" + ) + else: + self.model.save_pretrained(path, state_dict=state_dict) + + if self.local_rank == 0: + self.model_config.save_pretrained(path) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(path) + + def _load_model_from_hf(self, path: str, distribute: bool = False): """Load model from HuggingFace format.""" - full_state = get_state_dict_from_repo_id_or_path(path) - self.model.load_state_dict( - full_state, strict=not self.model_config.tie_word_embeddings - ) - if self.model_config.tie_word_embeddings: - self.model.tie_weights() + if self.local_rank == 0 or is_existing_local_path(path): + if distribute: + path = f"{path}/tp_rank_{self.local_rank:02d}_model.safetensors" + full_state = get_state_dict_from_repo_id_or_path(path) + self.model.load_state_dict( + full_state, strict=not self.model_config.tie_word_embeddings + ) + + if self.model_config.tie_word_embeddings: + self.model.tie_weights() def upload_weights(self, meta: WeightUpdateMeta): if meta.type == "nccl": diff --git a/arealite/utils/save_load.py b/arealite/utils/save_load.py 
index 7df9a3f7b..19bc9850d 100644 --- a/arealite/utils/save_load.py +++ b/arealite/utils/save_load.py @@ -1,4 +1,5 @@ import os +from pathlib import Path from typing import Dict import torch @@ -41,18 +42,21 @@ def get_state_dict_from_repo_id_or_path(repo_id_or_path: str) -> Dict: else: # Assume it's a local path local_path = repo_id_or_path - if not os.path.isdir(local_path): - raise ValueError( - f"Local path {local_path} does not exist or is not a directory, " - f"or {local_path} is a huggingface repo id but huggingface_hub is not installed." - ) # Step 3: Load all .safetensors and .bin files file_paths_to_load = [] - for filename in os.listdir(local_path): - filepath = os.path.join(local_path, filename) - if filename.endswith(".safetensors") or filename.endswith(".bin"): - file_paths_to_load.append(filepath) + if os.path.isdir(local_path): + for filename in os.listdir(local_path): + filepath = os.path.join(local_path, filename) + if filename.endswith(".safetensors") or filename.endswith(".bin"): + file_paths_to_load.append(filepath) + elif os.path.isfile(local_path): + file_paths_to_load.append(local_path) + else: + raise ValueError( + f"Local path {local_path} does not exist or is not a valid path, " + f"or {local_path} is a huggingface repo id but huggingface_hub is not installed." + ) def _load(filepath: str): if filepath.endswith(".safetensors"): @@ -82,3 +86,11 @@ def _load(filepath: str): except Exception as e: raise RuntimeError(f"Error loading checkpoint from {path}: {e}") return state_dict + + +def is_existing_local_path(path: str) -> bool: + try: + path_obj = Path(path) + return path_obj.exists() and (path_obj.is_file() or path_obj.is_dir()) + except (ValueError, OSError): + return False From f35262f86209b56471f2d98862e03813b715d43a Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Fri, 11 Jul 2025 10:11:42 -0700 Subject: [PATCH 06/10] refactor test --- arealite/engine/hf_engine.py | 17 +- .../{test_fsdp_engine.py => test_engine.py} | 50 ++++-- arealite/tests/test_hf_engine.py | 161 ------------------ 3 files changed, 41 insertions(+), 187 deletions(-) rename arealite/tests/{test_fsdp_engine.py => test_engine.py} (81%) delete mode 100644 arealite/tests/test_hf_engine.py diff --git a/arealite/engine/hf_engine.py b/arealite/engine/hf_engine.py index d264ed88f..68d7fde0e 100644 --- a/arealite/engine/hf_engine.py +++ b/arealite/engine/hf_engine.py @@ -294,15 +294,15 @@ def step_lr_scheduler(self): assert self.lr_scheduler is not None return self.lr_scheduler.step() - def _prepare_mb_list( - self, input_: TensorDict, mb_spec: MicroBatchSpec - ) -> MicroBatchList: + def _prepare_mb_list(self, input_: TensorDict) -> MicroBatchList: assert "attention_mask" in input_ and "input_ids" in input_ + if isinstance(input_, dict): + input_ = TensorDict(input_, batch_size=[input_["input_ids"].shape[0]]) input_ = amend_position_ids(input_) packed_input = pack_tensor_dict(input_) mb_list = split_packed_tensor_dict_into_mb_list( packed_input, - mb_spec, + self.config.mb_spec, ) mb_list = pad_mb_list(mb_list, pad_value=0.0) # NOTE: We unsqueeze here because huggingface transformer models requires @@ -313,7 +313,6 @@ def _prepare_mb_list( def train_batch( self, input_: TensorDict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> Dict[str, float]: @@ -324,7 +323,7 @@ def train_batch( assert self.lr_scheduler is not None self.optimizer.zero_grad() - mb_list = self._prepare_mb_list(input_, 
mb_spec) + mb_list = self._prepare_mb_list(input_) total_loss_weight = torch.tensor( sum([loss_weight_fn(mb) for mb in mb_list.mbs]), dtype=torch.float32 @@ -372,12 +371,11 @@ def train_batch( def eval_batch( self, input_: TensorDict, - mb_spec: MicroBatchSpec, loss_fn: Callable[[torch.Tensor, Dict], torch.Tensor], loss_weight_fn: Callable[[Dict], float], ) -> torch.Tensor | None: """Evaluate on a batch.""" - mb_list = self._prepare_mb_list(input_, mb_spec) + mb_list = self._prepare_mb_list(input_) total_loss_weight = torch.tensor( sum([loss_weight_fn(mb) for mb in mb_list.mbs]), dtype=torch.float32 ) @@ -405,14 +403,13 @@ def eval_batch( def forward( self, input_: TensorDict, - mb_spec: MicroBatchSpec, output_seqlens: List[int] | None = None, post_hook: Callable[[torch.Tensor, Dict], Any] | None = None, aggregate_fn: Callable[[List[Any]], Any] = torch.cat, ) -> Any | None: """Forward pass with optional post-processing.""" cu_seqlens = pack_tensor_dict(input_)["cu_seqlens"] - mb_list = self._prepare_mb_list(input_, mb_spec) + mb_list = self._prepare_mb_list(input_) if output_seqlens is None: output_seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).cpu().numpy().tolist() diff --git a/arealite/tests/test_fsdp_engine.py b/arealite/tests/test_engine.py similarity index 81% rename from arealite/tests/test_fsdp_engine.py rename to arealite/tests/test_engine.py index d295408d3..0d741fd30 100644 --- a/arealite/tests/test_fsdp_engine.py +++ b/arealite/tests/test_engine.py @@ -1,7 +1,7 @@ # Copyright 2025 Ant Group Inc. # Licensed under the Apache License, Version 2.0 -"""Test script for HF Engine implementation.""" +"""Test script for Engine implementation.""" import os from typing import Dict @@ -52,29 +52,47 @@ def mock_input( ) -def mock_loss_fn(logits: torch.Tensor, input_data: Dict) -> torch.Tensor: - """Mock loss function for testing.""" - return torch.mean(logits) - +def get_engine(engine_type: str, model_path: str): + from arealite.engine.fsdp_engine import FSDPEngine + from arealite.engine.hf_engine import HFEngine -@pytest.fixture(scope="module") -def engine(): - os.environ["WORLD_SIZE"] = "1" - os.environ["RANK"] = "0" - os.environ["LOCAL_RANK"] = "0" - os.environ["MASTER_ADDR"] = "localhost" - os.environ["MASTER_PORT"] = "7777" + engine_cls = {"hf": HFEngine, "fsdp": FSDPEngine}[engine_type] engine_config = TrainEngineConfig( - experiment_name="test-fsdp-engine", + experiment_name=f"test-{engine_type}-engine", trial_name="test0", - path=MODEL_PATH, + path=model_path, optimizer=OptimizerConfig(), ) - engine = FSDPEngine(engine_config) + engine = engine_cls(engine_config) ft_spec = FinetuneSpec(total_train_epochs=1, dataset_size=100, train_batch_size=2) engine.initialize(None, ft_spec) - print("✓ Engine created successfully") + return engine + + +def mock_loss_fn(logits: torch.Tensor, input_data: Dict) -> torch.Tensor: + """Mock loss function for testing.""" + return torch.mean(logits) + + +@pytest.fixture(scope="module", params=["fsdp", "hf"]) +def engine(request): + os.environ.update( + { + "WORLD_SIZE": "1", + "RANK": "0", + "LOCAL_RANK": "0", + "MASTER_ADDR": "localhost", + "MASTER_PORT": "7777", + } + ) + + model_path = "/storage/testing/models/Qwen__Qwen3-1.7B/" + if not os.path.exists(model_path): + model_path = "Qwen/Qwen2-0.5B" + + engine = get_engine(request.param, model_path) + print(f"✓ {request.param.upper()} Engine created successfully") yield engine diff --git a/arealite/tests/test_hf_engine.py b/arealite/tests/test_hf_engine.py deleted file mode 100644 index 
927b3e985..000000000 --- a/arealite/tests/test_hf_engine.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2025 Ant Group Inc. -# Licensed under the Apache License, Version 2.0 - -"""Test script for HF Engine implementation.""" - -import os -from typing import Dict - -import pytest -import torch -from tensordict import TensorDict -from transformers import AutoTokenizer - -from arealite.api.cli_args import MicroBatchSpec, OptimizerConfig, TrainEngineConfig -from arealite.api.io_struct import FinetuneSpec, SaveLoadMeta -from arealite.engine.hf_engine import HFEngine - -VOCAB_SIZE = 100 -MODEL_PATH = "/storage/testing/models/Qwen__Qwen3-1.7B/" -if not os.path.exists(MODEL_PATH): - MODEL_PATH = "Qwen/Qwen2-0.5B" - - -@pytest.fixture(scope="module") -def mock_input( - batch_size=5, - min_seqlen=10, - max_seqlen=20, - device="cuda:0", -) -> Dict: - """Create mock padded input data (same format for huggingface) for testing. - Returns a dict with input_ids, attention_mask, and position_ids. - """ - pad_token_id = 0 - seqlens = torch.randint( - min_seqlen, max_seqlen, (batch_size,), dtype=torch.int, device=device - ) - max_seqlen = int(max(seqlens)) - input_ids = torch.randint( - 0, VOCAB_SIZE, (batch_size, max_seqlen), dtype=torch.long, device=device - ) - attn_mask = torch.zeros((batch_size, max_seqlen), dtype=torch.bool, device=device) - - attn_mask[ - torch.arange(0, max_seqlen, device=device).unsqueeze(0) < seqlens.unsqueeze(1) - ] = 1 - input_ids.masked_fill_(~attn_mask, pad_token_id) - - return TensorDict( - input_ids=input_ids, - attention_mask=attn_mask, - ) - - -def mock_loss_fn(logits: torch.Tensor, input_data: Dict) -> torch.Tensor: - """Mock loss function for testing.""" - return torch.mean(logits) - - -@pytest.fixture(scope="module") -def engine(): - os.environ["WORLD_SIZE"] = "1" - os.environ["RANK"] = "0" - os.environ["LOCAL_RANK"] = "0" - os.environ["MASTER_ADDR"] = "localhost" - os.environ["MASTER_PORT"] = "7777" - - engine_config = TrainEngineConfig( - experiment_name="test-hf-engine", - trial_name="test0", - path=MODEL_PATH, - optimizer=OptimizerConfig(), - ) - engine = HFEngine(engine_config) - ft_spec = FinetuneSpec(total_train_epochs=1, dataset_size=100, train_batch_size=2) - engine.initialize(None, ft_spec) - print("✓ Engine created successfully") - yield engine - - -@torch.no_grad() -def test_forward_microbatch(engine, mock_input): - engine.eval() - x2 = ( - engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), - ) - .squeeze(0) - .mean(-1) - ) - x1 = ( - engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), - ) - .squeeze(0) - .mean(-1) - ) - input_ids = mock_input["input_ids"] - assert x1.shape[:1] == input_ids.shape[:1] - assert x2.shape[:1] == input_ids.shape[:1] - assert torch.allclose(x1, x2, atol=1e-1, rtol=1e-2), (x1 - x2).abs().max().item() - - -@torch.no_grad() -def test_eval_batch(engine, mock_input): - engine.eval() - eval_result = engine.eval_batch( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), - loss_fn=mock_loss_fn, - loss_weight_fn=lambda x: x["cu_seqlens"][-1], - ) - assert isinstance(eval_result, torch.Tensor), "Evaluation should return a tensor" - assert eval_result.is_cuda, "Evaluation tensor should be on CUDA device" - assert eval_result is not None, "Evaluation should return a loss value" - print(f"✓ Evaluation successful, loss: {eval_result.item()}") - - -def test_train_batch(engine, mock_input): - engine.train() - train_result = 
engine.train_batch( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=2, max_tokens_per_mb=100), - loss_fn=mock_loss_fn, - loss_weight_fn=lambda x: x["cu_seqlens"][-1], - ) - assert isinstance(train_result, dict), "Training should return a dictionary" - assert train_result["grad_norm"] is not None - assert train_result["lr"] is not None - print("✓ Training successful") - - -@torch.no_grad() -def test_hf_save_load_weights(tmp_path_factory, engine, mock_input): - tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH) - path = tmp_path_factory.mktemp("hf_engine_test") - save_load_meta = SaveLoadMeta( - path=path, - weight_format="hf", - tokenizer=tokenizer, - with_optim=True, - base_model_path=None, - ) - - old = engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), - ) - engine.save(save_load_meta) - - for name, param in engine.model.named_parameters(): - param.zero_() - - engine.load(save_load_meta) - new = engine.forward( - input_=mock_input, - mb_spec=MicroBatchSpec(n_mbs=1, max_tokens_per_mb=100), - ) - assert torch.allclose(old, new) From decf3c1799762624becb1f107952ece9fd061ef8 Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Fri, 11 Jul 2025 10:16:17 -0700 Subject: [PATCH 07/10] fix bugs --- arealite/tests/test_engine.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arealite/tests/test_engine.py b/arealite/tests/test_engine.py index 0d741fd30..6c5d07a3f 100644 --- a/arealite/tests/test_engine.py +++ b/arealite/tests/test_engine.py @@ -87,11 +87,7 @@ def engine(request): } ) - model_path = "/storage/testing/models/Qwen__Qwen3-1.7B/" - if not os.path.exists(model_path): - model_path = "Qwen/Qwen2-0.5B" - - engine = get_engine(request.param, model_path) + engine = get_engine(request.param, MODEL_PATH) print(f"✓ {request.param.upper()} Engine created successfully") yield engine From fbccf6de2e4779a1f62e78beea114aaeee4b1792 Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Tue, 15 Jul 2025 10:17:03 -0700 Subject: [PATCH 08/10] fix issues --- arealite/api/io_struct.py | 2 +- arealite/engine/hf_engine.py | 105 +++++++++++++++++++++++------------ pyproject.toml | 1 + requirements.txt | 1 + 4 files changed, 71 insertions(+), 38 deletions(-) diff --git a/arealite/api/io_struct.py b/arealite/api/io_struct.py index b9144e877..40ff81f67 100644 --- a/arealite/api/io_struct.py +++ b/arealite/api/io_struct.py @@ -175,7 +175,7 @@ class SaveLoadMeta: with_optim: bool tokenizer: PreTrainedTokenizerFast | None base_model_path: str | None - distribute: bool = False + naive_distributed: bool = False @dataclass diff --git a/arealite/engine/hf_engine.py b/arealite/engine/hf_engine.py index 68d7fde0e..6f865c231 100644 --- a/arealite/engine/hf_engine.py +++ b/arealite/engine/hf_engine.py @@ -2,8 +2,6 @@ import os import time from typing import Any, Callable, Dict, List, Optional - -import deepspeed import torch import torch.distributed as dist import transformers @@ -58,8 +56,6 @@ def __init__(self, config: TrainEngineConfig): # initialization self.initialized = False self.weight_update_group_initialized = False - self.local_rank = int(os.environ.get("LOCAL_RANK", 0)) - self.world_size = int(os.environ.get("WORLD_SIZE", 1)) def train(self, mode: bool = True): assert self.model is not None @@ -67,15 +63,20 @@ def train(self, mode: bool = True): return self def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): - # Initialize distributed enviroments and load model. 
+ """Initialize distributed communication and model.""" assert addr is None, "HFEngine does not support remote initialization." - """Initialize distributed communication and model.""" - if not dist.is_initialized(): - deepspeed.init_distributed(dist_backend="nccl", world_size=self.world_size) + world_size = int(os.environ.get("WORLD_SIZE", 0)) + if not dist.is_initialized() and world_size > 1: + try: + import deepspeed + except ImportError: + print("Warning: deepspeed is not installed. Some functionality may be disabled.") + deepspeed.init_distributed(dist_backend="nccl", world_size=world_size) - torch.cuda.set_device(self.local_rank) - self.device = torch.device(f"cuda:{self.local_rank}") + local_rank = int(os.environ.get("LOCAL_RANK", 0)) + torch.cuda.set_device(local_rank) + self.device = torch.device(f"cuda:{local_rank}") dtype = torch.bfloat16 if self.config.bf16 else torch.float16 self.model_config = AutoConfig.from_pretrained( @@ -83,13 +84,12 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): trust_remote_code=True, ) self.tokenizer = load_hf_tokenizer(self.config.path) - model = AutoModelForCausalLM.from_config( + + self.model = AutoModelForCausalLM.from_config( self.model_config, torch_dtype=dtype, attn_implementation=self.config.attn_impl, - ) - - self.model = model + ).to(f"cuda:{local_rank}") if not self.config.init_from_scratch: # Load model from a initial checkpoint path, @@ -100,20 +100,19 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): with_optim=False, tokenizer=None, base_model_path=self.config.path, - distribute=False, + naive_distributed=False ) + self.load(load_meta) - if self.world_size > 1: + if world_size > 1: if self._check_autotp(): self.model = deepspeed.tp_model_init( - self.model, tp_size=self.config.hf.autotp_size, dtype=dtype + self.model, tp_size=self.config.hf.autotp_size, dtype=dtype ) else: raise RuntimeError("DeepSpeed AutoTP configuration error in HFEngine. ") - self.model = self.model.to(device=self.device, non_blocking=True) - # Set up optimizer if self.optimizer_config is not None: assert ( @@ -188,7 +187,7 @@ def destroy(self): def save(self, meta: SaveLoadMeta): if meta.weight_format == "hf": - self._save_model_to_hf(meta.path, meta.tokenizer, meta.distribute) + self._save_model_to_hf(meta.path, meta.tokenizer, meta.naive_distributed) elif meta.weight_format == "dcp": # TODO: implement DCP save/load for HF raise NotImplementedError("DCP format saving is not implemented yet. ") @@ -200,7 +199,7 @@ def save(self, meta: SaveLoadMeta): def load(self, meta: SaveLoadMeta): if meta.weight_format == "hf": - self._load_model_from_hf(meta.path, meta.distribute) + self._load_model_from_hf(meta.path, meta.naive_distributed) elif meta.weight_format == "dcp": # TODO: implement DCP save/load for HF raise NotImplementedError("DCP format loading is not implemented yet. 
") @@ -225,38 +224,70 @@ def _save_model_to_hf( self, path: str, tokenizer: Optional[transformers.PreTrainedTokenizerFast], - distribute: bool = False, + naive_distributed: bool ): """Save model in HuggingFace format.""" if self.model is None: raise RuntimeError("Model not initialized") - if self.local_rank == 0: + rank = dist.get_rank() + world_size = dist.get_world_size() + if rank == 0: os.makedirs(path, exist_ok=True) + self.model_config.save_pretrained(path) + if tokenizer is not None: + tokenizer.save_pretrained(path) - if self.world_size > 1: + if world_size > 1: dist.barrier() - state_dict = {k: v.cpu() for k, v in self.model.state_dict().items()} + state_dict = self.model.state_dict() - if distribute: - save_file( - state_dict, f"{path}/tp_rank_{self.local_rank:02d}_model.safetensors" + if hasattr(self.model, "module"): + state_dict = { + k.replace("module.", "", 1) if k.startswith("module.") else k: v.cpu() + for k, v in state_dict.items() + } + else: + state_dict = {k: v.cpu() for k, v in state_dict.items()} + + if world_size > 1 and naive_distributed: + # Only support store parameters from model partitions respectively + gathered_state_dicts = None + if rank == 0: + gathered_state_dicts = [None for _ in range(world_size)] + + dist.gather_object( + obj=state_dict, + object_gather_list=gathered_state_dicts, + dst=0 ) + + if rank == 0: + for i, state_dict in enumerate(gathered_state_dicts): + save_file( + state_dict, + f"{path}/rank_{i:02d}_model.safetensors" + ) else: self.model.save_pretrained(path, state_dict=state_dict) - if self.local_rank == 0: - self.model_config.save_pretrained(path) - if self.tokenizer is not None: - self.tokenizer.save_pretrained(path) + if world_size > 1: + dist.barrier() - def _load_model_from_hf(self, path: str, distribute: bool = False): + def _load_model_from_hf(self, path: str, naive_distributed: bool): """Load model from HuggingFace format.""" - if self.local_rank == 0 or is_existing_local_path(path): - if distribute: - path = f"{path}/tp_rank_{self.local_rank:02d}_model.safetensors" + + rank = dist.get_rank() + # Only support load full model parameters from huggingface + # and load model partition locally + if rank == 0 or is_existing_local_path(path): + if naive_distributed: + path = f"{path}/rank_{rank:02d}_model.safetensors" full_state = get_state_dict_from_repo_id_or_path(path) + + if hasattr(self.model, "module") and not hasattr(full_state): + full_state = {f"module.{k}" if not k.startswith("module.") else k: v for k, v in full_state.items()} self.model.load_state_dict( full_state, strict=not self.model_config.tie_word_embeddings ) @@ -270,7 +301,7 @@ def upload_weights(self, meta: WeightUpdateMeta): self._init_distributed_weight_update(meta) self._update_weights_from_distributed() elif meta.type == "disk": - self._save_model_to_hf(meta.path, self.tokenizer) + self._save_model_to_hf(meta.path, self.tokenizer, meta.naive_distributed) update_name = names.update_weights_from_disk( self.config.experiment_name, self.config.trial_name, diff --git a/pyproject.toml b/pyproject.toml index 4f752ebc5..cd4e26dae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,6 +86,7 @@ dependencies = [ # Distributed computing "ray", "redis", + "deepspeed>=0.17.2", # Web frameworks "fastapi>=0.115.12", diff --git a/requirements.txt b/requirements.txt index 5318511cd..178e95835 100644 --- a/requirements.txt +++ b/requirements.txt @@ -74,3 +74,4 @@ swanlab[dashboard] torchdata autoflake tensordict +deepspeed>=0.17.2 From 24390d776f58b12342e35a21f45fb89f4e5a2287 
Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Tue, 15 Jul 2025 10:22:32 -0700 Subject: [PATCH 09/10] format files --- arealite/engine/hf_engine.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/arealite/engine/hf_engine.py b/arealite/engine/hf_engine.py index 6f865c231..b3c0b6532 100644 --- a/arealite/engine/hf_engine.py +++ b/arealite/engine/hf_engine.py @@ -2,6 +2,7 @@ import os import time from typing import Any, Callable, Dict, List, Optional + import torch import torch.distributed as dist import transformers @@ -71,7 +72,9 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): try: import deepspeed except ImportError: - print("Warning: deepspeed is not installed. Some functionality may be disabled.") + print( + "Warning: deepspeed is not installed. Some functionality may be disabled." + ) deepspeed.init_distributed(dist_backend="nccl", world_size=world_size) local_rank = int(os.environ.get("LOCAL_RANK", 0)) @@ -100,7 +103,7 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): with_optim=False, tokenizer=None, base_model_path=self.config.path, - naive_distributed=False + naive_distributed=False, ) self.load(load_meta) @@ -108,7 +111,7 @@ def initialize(self, addr: str | None, ft_spec: FinetuneSpec | None): if world_size > 1: if self._check_autotp(): self.model = deepspeed.tp_model_init( - self.model, tp_size=self.config.hf.autotp_size, dtype=dtype + self.model, tp_size=self.config.hf.autotp_size, dtype=dtype ) else: raise RuntimeError("DeepSpeed AutoTP configuration error in HFEngine. ") @@ -224,7 +227,7 @@ def _save_model_to_hf( self, path: str, tokenizer: Optional[transformers.PreTrainedTokenizerFast], - naive_distributed: bool + naive_distributed: bool, ): """Save model in HuggingFace format.""" if self.model is None: @@ -258,17 +261,12 @@ def _save_model_to_hf( gathered_state_dicts = [None for _ in range(world_size)] dist.gather_object( - obj=state_dict, - object_gather_list=gathered_state_dicts, - dst=0 + obj=state_dict, object_gather_list=gathered_state_dicts, dst=0 ) if rank == 0: for i, state_dict in enumerate(gathered_state_dicts): - save_file( - state_dict, - f"{path}/rank_{i:02d}_model.safetensors" - ) + save_file(state_dict, f"{path}/rank_{i:02d}_model.safetensors") else: self.model.save_pretrained(path, state_dict=state_dict) @@ -280,14 +278,17 @@ def _load_model_from_hf(self, path: str, naive_distributed: bool): rank = dist.get_rank() # Only support load full model parameters from huggingface - # and load model partition locally + # and load model partition locally if rank == 0 or is_existing_local_path(path): if naive_distributed: path = f"{path}/rank_{rank:02d}_model.safetensors" full_state = get_state_dict_from_repo_id_or_path(path) if hasattr(self.model, "module") and not hasattr(full_state): - full_state = {f"module.{k}" if not k.startswith("module.") else k: v for k, v in full_state.items()} + full_state = { + f"module.{k}" if not k.startswith("module.") else k: v + for k, v in full_state.items() + } self.model.load_state_dict( full_state, strict=not self.model_config.tie_word_embeddings ) From 4f0ac55b4a0c30e58b18b46561b12ed957ce8583 Mon Sep 17 00:00:00 2001 From: Jayon02 <12012211@mail..sustech.edu.cn> Date: Tue, 15 Jul 2025 10:31:38 -0700 Subject: [PATCH 10/10] Squashed commit of the following: commit 9ed043f6ab83cf9c9b38e0251ad08009a73243e8 Author: Wei Fu <36355462+garrett4wade@users.noreply.github.com> Date: Tue Jul 15 10:24:48 2025 +0800 format 
(#174) commit 8cc9b1feb5ed03cfa42f3ed588dd5f6ec16e5e55 Author: Night <32424487+PrinsYin@users.noreply.github.com> Date: Mon Jul 14 19:22:00 2025 -0700 added LocalSGlangEngine and test (#170) * added LocalSGLangEngine * upload test file * add build args * fix sgl_local generate * improved sgl local robustness * test * test updated * added fallback when sgl engine isn't initialized * finish test local engine * added LocalSGlangEngine and test * format and fix format and fix, raise when generate missing field format * change cli_args.py * add comment header format --------- Co-authored-by: ChangyiYang --- arealite/api/cli_args.py | 48 ++- arealite/engine/sglang_engine.py | 377 ++++++++++++++++++++- arealite/tests/test_sglang_local_engine.py | 144 ++++++++ 3 files changed, 551 insertions(+), 18 deletions(-) create mode 100644 arealite/tests/test_sglang_local_engine.py diff --git a/arealite/api/cli_args.py b/arealite/api/cli_args.py index 2b5c5f6aa..4763d5134 100644 --- a/arealite/api/cli_args.py +++ b/arealite/api/cli_args.py @@ -213,6 +213,39 @@ def build_cmd( dist_init_addr: Optional[str] = None, served_model_name: Optional[str] = None, skip_tokenizer_init: bool = True, + ): + args = SGLangConfig.build_args( + sglang_config=sglang_config, + model_path=model_path, + tp_size=tp_size, + base_gpu_id=base_gpu_id, + dist_init_addr=dist_init_addr, + served_model_name=served_model_name, + skip_tokenizer_init=skip_tokenizer_init, + ) + + # convert to flags + flags = [] + for k, v in args.items(): + if v is None or v is False or v == "": + continue + if v is True: + flags.append(f"--{k.replace('_','-')}") + elif isinstance(v, list): + flags.append(f"--{k.replace('_','-')} {' '.join(map(str, v))}") + else: + flags.append(f"--{k.replace('_','-')} {v}") + return f"python3 -m sglang.launch_server {' '.join(flags)}" + + @staticmethod + def build_args( + sglang_config: "SGLangConfig", + model_path, + tp_size, + base_gpu_id, + dist_init_addr: Optional[str] = None, + served_model_name: Optional[str] = None, + skip_tokenizer_init: bool = True, ): from realhf.base import network, pkg_version, seeding from realhf.experiments.common.utils import asdict as conf_as_dict @@ -256,20 +289,7 @@ def build_cmd( args.pop("allow_auto_truncate") args.pop("file_storage_path") - flags = [] - for k, v in args.items(): - if v is None or v is False or v == "": - continue - if v is True: - flags.append(f"--{k.replace('_','-')} ") - continue - if isinstance(v, list): - values = " ".join(map(str, v)) - flags.append(f"--{k.replace('_','-')} {values}") - continue - flags.append(f"--{k.replace('_','-')} {v}") - flags = " ".join(flags) - return f"python3 -m sglang.launch_server {flags}" + return args @dataclass diff --git a/arealite/engine/sglang_engine.py b/arealite/engine/sglang_engine.py index 6d8ee10be..777091e8d 100644 --- a/arealite/engine/sglang_engine.py +++ b/arealite/engine/sglang_engine.py @@ -1,6 +1,375 @@ -from arealite.api.cli_args import SGLangEngineConfig +import asyncio +import threading +import time +import traceback +from concurrent.futures import ThreadPoolExecutor +from queue import Empty, Full, Queue +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional +import sglang as sgl +import torch.distributed as dist +from tensordict import TensorDict -class SGLangEngine: - def __init__(self, config: SGLangEngineConfig): - pass +from arealite.api.cli_args import InferenceEngineConfig +from arealite.api.engine_api import InferenceEngine +from arealite.api.io_struct import ( + LLMRequest, + LLMResponse, + 
RolloutStat, + WeightUpdateMeta, +) +from realhf.base import logging, name_resolve, names, pkg_version + +if TYPE_CHECKING: + from arealite.api.workflow_api import RolloutWorkflow +logger = logging.getLogger(__name__) + +if pkg_version.is_available("sglang"): + if pkg_version.is_version_greater_or_equal("sglang", "0.4.4"): + SGLANG_TOKEN_OUTPUT_IDENTIFIER = "output_ids" + else: + SGLANG_TOKEN_OUTPUT_IDENTIFIER = "token_ids" + +ROLLOUT_POLL_WAIT_TIME = 0.4 +RID_CACHE_SIZE = 128 + +""" +Local SGLang Inference Engine +SGLangEngine currently only supports single-controller. Cannot be used in SPMD +""" + + +class SGLangEngine(InferenceEngine): + + def __init__( + self, + config: InferenceEngineConfig, + engine_args: Optional[Dict[str, Any]] = None, + ): + config.max_concurrent_rollouts = ( + config.max_concurrent_rollouts or config.consumer_batch_size + ) + self.config = config + self.engine_args = engine_args or {} + + qsize = config.queue_size or config.max_concurrent_rollouts * 10 + self.input_queue = Queue(maxsize=qsize) + self.output_queue = Queue(maxsize=qsize) + self.result_cache = [] + + self.exiting = threading.Event() + self.lock = threading.Lock() + + self.rollout_stat = RolloutStat() + + self._version = 0 + + def initialize(self, addr: str | None, ft_spec: Optional[Dict[str, Any]] = None): + self.engine = sgl.Engine(**self.engine_args) + + self.rollout_thread = threading.Thread(target=self._rollout_thread) + self.rollout_thread.start() + + def destroy(self): + self.exiting.set() + self.rollout_thread.join() + + if hasattr(self, "engine") and self.engine is not None: + try: + self.engine.shutdown() + except Exception as e: + logger.warning(f"Error shutting down engine: {e}") + + def set_version(self, version): + with self.lock: + self._version = version + + def get_version(self): + with self.lock: + return self._version + + def _rollout_thread(self): + """Thread that runs the rollout loop.""" + try: + asyncio.run(self._rollout_thread_async()) + except Exception as e: + traceback.print_exc() + + async def _rollout_thread_async(self): + data = None + + rollout_tasks: Dict[str, asyncio.Task] = {} + rid = 0 + + try: + while not self.exiting.is_set(): + # Load next data from controller + if data is None: + try: + data, workflow = self.input_queue.get_nowait() + logger.info(f"Get data from puller: {data}") + except Empty: + logger.debug(f"No data from puller stream.") + + # Check capacity + if dist.is_initialized(): + world_size = dist.get_world_size() + else: + world_size = 1 + + cannot_rollout_reason = [] + capacity = max(1, self.config.max_concurrent_rollouts // world_size) + can_rollout = len(rollout_tasks) < capacity + if not can_rollout: + cannot_rollout_reason.append( + f"Exceeding capacity: # running tasks {len(rollout_tasks)} >= capacity {capacity}" + ) + + # Staleness control + version = self.get_version() + ofp = self.config.max_head_offpolicyness + with self.lock: + sample_cnt = self.rollout_stat.accepted + self.rollout_stat.running + expected_version = sample_cnt // self.config.consumer_batch_size + not_staled = expected_version <= ofp + version + can_rollout &= not_staled + if not not_staled: + cannot_rollout_reason.append( + f"Staled: expected version ({expected_version}) = " + f"global sample cnt ({sample_cnt}) // batch size ({self.config.consumer_batch_size}), " + f"current latest version {version}, " + f"offpolicyness {self.config.max_head_offpolicyness}." + ) + + if not can_rollout: + logger.debug( + f"Cannot submit new rollouts. 
" + + "\n".join(cannot_rollout_reason) + ) + + # Create new rollout task + if can_rollout and data is not None: + task = asyncio.create_task( + workflow.arun_episode(self, data), name=str(rid) + ) + rollout_tasks[str(rid)] = task + + with self.lock: + self.rollout_stat.submitted += 1 + self.rollout_stat.running += 1 + logger.info( + f"Submit rollout rid {rid}. " + f"Submit: {self.rollout_stat.submitted}, " + f"running: {self.rollout_stat.running}, " + f"accepted: {self.rollout_stat.accepted}." + ) + + rid += 1 + data = None + + # Wait for rollout completion + tasks = list(rollout_tasks.values()) + done = [] + if tasks: + done, _ = await asyncio.wait( + tasks, + timeout=ROLLOUT_POLL_WAIT_TIME, + return_when=asyncio.FIRST_COMPLETED, + ) + else: + await asyncio.sleep(ROLLOUT_POLL_WAIT_TIME) + + # Collect done results + for task in done: + traj = await task + traj: TensorDict + task_rid = task.get_name() + rollout_tasks.pop(task_rid) + self.rollout_stat.accepted += 1 + + try: + self.output_queue.put_nowait(traj) + except Full: + raise RuntimeError( + "Output queue full. Please increase queue_size." + ) + + with self.lock: + self.rollout_stat.running -= 1 + logger.info( + f"Finish rollout {task_rid}. " + f"Submit: {self.rollout_stat.submitted}, " + f"running: {self.rollout_stat.running}, " + f"accepted: {self.rollout_stat.accepted}." + ) + finally: + # Cancel remaining tasks + for task in rollout_tasks.values(): + if not task.done(): + task.cancel() + try: + await task + except asyncio.CancelledError: + pass + + async def agenerate(self, req: LLMRequest) -> LLMResponse: + """Async version of generate using local sglang engine.""" + if not hasattr(self, "engine") or self.engine is None: + raise RuntimeError( + "Local SGLang engine is not initialized, cannot generate." + ) + + # Prepare request payload + gconfig = req.gconfig + stop_token_ids = gconfig.stop_token_ids + + if gconfig.n_samples != 1: + raise ValueError( + "LocalSGLangEngine does not support n_samples > 1. " + "Please call generate for multiple times with n_samples = 1." 
+ ) + sample_params = { + "top_p": gconfig.top_p, + "top_k": gconfig.top_k, + "max_new_tokens": gconfig.max_new_tokens, + "temperature": 0.0 if gconfig.greedy else gconfig.temperature, + "stop_token_ids": stop_token_ids, + } + + completions = "" + prompt = req.text if req.text else None + input_ids = req.input_ids if req.input_ids else None + + # Make request + start_time = time.perf_counter() + accumulated_output_tokens = [] + accumulated_output_logprobs = [] + accumulated_versions = [] + stop_reason = "length" + while ( + stop_reason != "stop" + and len(accumulated_output_tokens) < gconfig.max_new_tokens + ): + + try: + outputs = await self.engine.async_generate( + prompt=prompt, + input_ids=input_ids, + sampling_params=sample_params, + return_logprob=True, + ) + + completions += outputs["text"] + if prompt is None: + prompt = outputs["text"] + else: + prompt += outputs["text"] + + meta_info = outputs["meta_info"] + output_tokens = [x[1] for x in meta_info["output_token_logprobs"]] + output_logprobs = [x[0] for x in meta_info["output_token_logprobs"]] + + finish_reason = meta_info.get("finish_reason", {}) + stop_reason = finish_reason.get("type", "length") + + accumulated_output_tokens.extend(output_tokens) + accumulated_output_logprobs.extend(output_logprobs) + accumulated_versions.extend([-1] * len(output_tokens)) + + except Exception as e: + raise RuntimeError(f"Local SGLang engine generation failed: {e}") + + latency = time.perf_counter() - start_time + + return LLMResponse( + completions=completions, + input_tokens=req.input_ids if req.input_ids else [], + output_tokens=accumulated_output_tokens, + output_logprobs=accumulated_output_logprobs, + output_versions=accumulated_versions, + stop_reason=stop_reason, + latency=latency, + ttft=latency, + ) + + def update_weights(self, meta): + executor = ThreadPoolExecutor(max_workers=1) + return executor.submit(self._update_weights, meta) + + def _update_weights(self, meta: WeightUpdateMeta): + if not hasattr(self, "engine") or self.engine is None: + raise RuntimeError( + "Local SGLang engine is not initialized, cannot update weights." + ) + if meta.type == "disk": + try: + update_name = names.update_weights_from_disk( + self.config.experiment_name, + self.config.trial_name, + meta.model_version, + ) + save_timestamp = int(name_resolve.wait(update_name, timeout=120)) + load_timestamp = time.time_ns() + logger.info( + f"Begin update weights from {meta.path}, responded in {(load_timestamp - save_timestamp)/1e6:.2f} ms" + ) + # Update weights from disk, + self.engine.update_weights_from_disk(model_path=meta.path) + + logger.info( + f"Loading weights done in {(time.time_ns() - load_timestamp)/1e6:.2f} ms" + ) + self.set_version(meta.model_version) + except Exception as e: + logger.error(f"Failed to update weights: {e}") + raise + else: + raise NotImplementedError(f"Unsupported weight update type: {meta.type}") + + def submit(self, data: Dict[str, Any], workflow: "RolloutWorkflow") -> None: + try: + self.input_queue.put_nowait((data, workflow)) + except Full: + raise RuntimeError("Input queue full. 
Please increase queue_size.") + + def wait(self, count: int, timeout: float, should_accept: Callable) -> TensorDict: + tik = time.perf_counter() + accepted = len(self.result_cache) + while ( + accepted < count + and not self.exiting.is_set() + and time.perf_counter() - tik < timeout + ): + try: + result = self.output_queue.get(timeout=ROLLOUT_POLL_WAIT_TIME) + if should_accept(result): + self.result_cache.append(result) + accepted += 1 + else: + with self.lock: + self.rollout_stat.accepted -= 1 + except Empty: + time.sleep(ROLLOUT_POLL_WAIT_TIME) + if self.exiting.is_set(): + raise RuntimeError("Rollout engine is exiting, cannot wait for results.") + if accepted < count: + raise TimeoutError( + f"Timed out waiting for {count} rollouts, " f"only received {accepted}." + ) + results, self.result_cache = ( + self.result_cache[:count], + self.result_cache[count:], + ) + return TensorDict.cat(results, dim=0) + + def rollout( + self, data: List[Dict[str, Any]], workflow: "RolloutWorkflow" + ) -> TensorDict: + """Submit a batch of requests to the inference engine and wait for the results.""" + for item in data: + self.submit(item, workflow) + return self.wait( + count=len(data), + timeout=self.config.request_timeout, + should_accept=lambda x: True, + ) diff --git a/arealite/tests/test_sglang_local_engine.py b/arealite/tests/test_sglang_local_engine.py new file mode 100644 index 000000000..2cf4dce0b --- /dev/null +++ b/arealite/tests/test_sglang_local_engine.py @@ -0,0 +1,144 @@ +# ================================================================ +# NOTE: This test file is dedicated to LocalSGLangEngine testing. +# +# Unlike remote engine setup which is managed via a pytest fixture, +# the LocalSGLangEngine requires explicit `initialize()` to construct +# the engine instance at runtime. Therefore, each test must manually +# create and destroy the engine. +# +# Because of this lifecycle difference, tests for local and remote +# engines cannot be placed in the same test file. +# ================================================================ +import os +import time +import uuid + +import pytest +import torch +from tensordict import TensorDict + +from arealite.api.cli_args import ( + GenerationHyperparameters, + InferenceEngineConfig, + SGLangConfig, +) +from arealite.api.io_struct import LLMRequest, LLMResponse +from arealite.engine.sglang_engine import SGLangEngine +from arealite.workflow.rlvr import RLVRWorkflow +from realhf.api.core.data_api import load_hf_tokenizer +from realhf.base import seeding + +EXPR_NAME = "test_sglang_local_engine" +TRIAL_NAME = "trial_0" +MODEL_PATH = "/storage/testing/models/Qwen__Qwen3-1.7B/" +if not os.path.exists(MODEL_PATH): + MODEL_PATH = "Qwen/Qwen2-0.5B" + + +def build_engine_config(**kwargs): + return InferenceEngineConfig( + experiment_name=EXPR_NAME, + trial_name=TRIAL_NAME, + **kwargs, + ) + + +def build_engine_args(): + return SGLangConfig.build_args( + sglang_config=SGLangConfig(mem_fraction_static=0.3, enable_metrics=False), + model_path=MODEL_PATH, + tp_size=1, + base_gpu_id=0, + served_model_name=MODEL_PATH, + skip_tokenizer_init=False, + ) + + +@pytest.mark.asyncio +async def test_local_sglang_generate(): + seeding.set_random_seed(1, EXPR_NAME) + config = build_engine_config() + engine = SGLangEngine(config, engine_args=build_engine_args()) + engine.initialize(None, None) + + req = LLMRequest( + rid=str(uuid.uuid4()), + text="hello! 
how are you today", + gconfig=GenerationHyperparameters(max_new_tokens=16), + ) + resp = await engine.agenerate(req) + print(resp.completions) + + assert isinstance(resp, LLMResponse) + assert resp.input_tokens == req.input_ids + assert ( + len(resp.output_logprobs) + == len(resp.output_tokens) + == len(resp.output_versions) + ) + assert isinstance(resp.completions, str) + + time.sleep(5) + engine.destroy() + + +@pytest.mark.parametrize("n_samples", [1, 2, 4]) +def test_local_sglang_rollout(n_samples): + seeding.set_random_seed(1, EXPR_NAME) + config = build_engine_config(max_concurrent_rollouts=2, consumer_batch_size=2) + engine = SGLangEngine(config, engine_args=build_engine_args()) + engine.initialize(None, None) + + gconfig = GenerationHyperparameters( + max_new_tokens=16, greedy=False, n_samples=n_samples + ) + tokenizer = load_hf_tokenizer(MODEL_PATH) + + workflow = RLVRWorkflow( + reward_fn=lambda **kwargs: 1.0, + gconfig=gconfig, + tokenizer=tokenizer, + ) + + data = {"messages": [{"role": "user", "content": "Hello, how are you?"}]} + result = engine.rollout([data] * 2, workflow=workflow) + + print("Here is the result ", result) + assert isinstance(result, TensorDict) + assert result.batch_size == torch.Size([2 * n_samples]) + engine.destroy() + + +@pytest.mark.parametrize("ofp", [1, 2, 4, 8, 16]) +@pytest.mark.parametrize("bs", [2, 4]) +@pytest.mark.parametrize("n_samples", [2, 1]) +def test_local_sglang_staleness_control(bs, ofp, n_samples): + seeding.set_random_seed(1, EXPR_NAME) + config = build_engine_config(consumer_batch_size=bs, max_head_offpolicyness=ofp) + engine = SGLangEngine(config, engine_args=build_engine_args()) + engine.initialize(None, None) + + gconfig = GenerationHyperparameters( + max_new_tokens=16, greedy=False, n_samples=n_samples + ) + tokenizer = load_hf_tokenizer(MODEL_PATH) + + workflow = RLVRWorkflow( + reward_fn=lambda **kwargs: 1.0, + gconfig=gconfig, + tokenizer=tokenizer, + ) + + data = {"messages": [{"role": "user", "content": "Hello, how are you?"}]} + for _ in range(bs * 2): + engine.submit(data, workflow=workflow) + time.sleep(5) + assert engine.output_queue.qsize() == min(bs * 2, bs * (ofp + 1)) + + engine.set_version(1) + for _ in range(bs * 2): + engine.submit(data, workflow=workflow) + time.sleep(5) + assert engine.output_queue.qsize() == min(bs * 4, bs * (ofp + 2)) + + engine.destroy()
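
Note on the `build_cmd`/`build_args` split in `arealite/api/cli_args.py`: `build_args` now returns the raw argument dict, and `build_cmd` renders it into CLI flags (skip None/False/empty values, emit a bare flag for True, space-join lists, replace underscores with hyphens); the refactor also drops the stray trailing space the old code appended after boolean flags. A minimal standalone restatement of that rendering rule, using the hypothetical helper name `dict_to_flags`:

def dict_to_flags(args: dict) -> str:
    """Render an sglang argument dict into launch_server CLI flags."""
    flags = []
    for k, v in args.items():
        if v is None or v is False or v == "":
            continue  # omitted entirely
        key = f"--{k.replace('_', '-')}"
        if v is True:
            flags.append(key)  # bare switch, no value
        elif isinstance(v, list):
            flags.append(f"{key} {' '.join(map(str, v))}")
        else:
            flags.append(f"{key} {v}")
    return " ".join(flags)


# e.g. {"tp_size": 1, "enable_metrics": False, "trust_remote_code": True}
# renders to "--tp-size 1 --trust-remote-code".
assert (
    dict_to_flags({"tp_size": 1, "enable_metrics": False, "trust_remote_code": True})
    == "--tp-size 1 --trust-remote-code"
)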
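
The staleness control in `_rollout_thread_async` admits a new rollout only while `(accepted + running) // consumer_batch_size <= max_head_offpolicyness + current_version`, which is exactly the bound behind the `min(bs * 2, bs * (ofp + 1))` assertions in `test_local_sglang_staleness_control`. A minimal sketch of that admission rule in isolation; the function and variable names below are illustrative, not part of the patch:

def can_admit(sample_cnt: int, batch_size: int, offpolicyness: int, version: int) -> bool:
    """True when the next rollout would not run too far ahead of the trainer.

    sample_cnt counts rollouts already accepted plus those still running, so
    sample_cnt // batch_size is the weight version at which the next sample is
    expected to be consumed.
    """
    expected_version = sample_cnt // batch_size
    return expected_version <= offpolicyness + version


# With version 0, batch size bs and offpolicyness ofp, at most bs * (ofp + 1)
# samples are admitted -- the same bound asserted on output_queue.qsize().
bs, ofp = 2, 1
admitted = sum(can_admit(n, bs, ofp, version=0) for n in range(bs * 4))
assert admitted == min(bs * 4, bs * (ofp + 1))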
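
For reference, a possible end-to-end driver for the local engine outside pytest, following the same calling conventions as `test_local_sglang_generate`; the experiment/trial names and generation settings are placeholders rather than values required by the patch:

import asyncio
import uuid

from arealite.api.cli_args import (
    GenerationHyperparameters,
    InferenceEngineConfig,
    SGLangConfig,
)
from arealite.api.io_struct import LLMRequest
from arealite.engine.sglang_engine import SGLangEngine


async def main():
    config = InferenceEngineConfig(experiment_name="demo", trial_name="trial_0")
    engine_args = SGLangConfig.build_args(
        sglang_config=SGLangConfig(mem_fraction_static=0.3, enable_metrics=False),
        model_path="Qwen/Qwen2-0.5B",  # placeholder model path
        tp_size=1,
        base_gpu_id=0,
        served_model_name="Qwen/Qwen2-0.5B",
        skip_tokenizer_init=False,
    )
    engine = SGLangEngine(config, engine_args=engine_args)
    engine.initialize(None, None)  # local engine: no remote address
    try:
        req = LLMRequest(
            rid=str(uuid.uuid4()),
            text="hello! how are you today",
            gconfig=GenerationHyperparameters(max_new_tokens=16),
        )
        resp = await engine.agenerate(req)
        print(resp.completions)
    finally:
        engine.destroy()


if __name__ == "__main__":
    asyncio.run(main())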