From a38c1041432b2c2bfaa30cbdc959c5e751c3493a Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 3 Jul 2025 15:49:23 -0700 Subject: [PATCH 01/47] adding support for Bradley-Terry reward model training Signed-off-by: Julien Veron Vialard --- docs/guides/rm.md | 21 ++ examples/configs/rm.yaml | 140 +++++++++++ examples/run_sft.py | 153 ++++++++---- nemo_rl/algorithms/loss_functions.py | 140 ++++++++--- nemo_rl/algorithms/sft.py | 220 ++++++++++++++++-- nemo_rl/data/datasets.py | 34 ++- .../models/policy/dtensor_policy_worker.py | 48 +++- tests/functional/rm.sh | 43 ++++ 8 files changed, 692 insertions(+), 107 deletions(-) create mode 100644 docs/guides/rm.md create mode 100644 examples/configs/rm.yaml create mode 100644 tests/functional/rm.sh diff --git a/docs/guides/rm.md b/docs/guides/rm.md new file mode 100644 index 0000000000..234555d60d --- /dev/null +++ b/docs/guides/rm.md @@ -0,0 +1,21 @@ +# Reward Model Training in NeMo RL + +This document explains how to train reward models (RM) within NeMo RL. Currently, only Bradley-Terry reward models are supported. + +## Launch a Training Job + +The script, [examples/run_sft.py](../../examples/run_sft.py), is used to train a Bradley-Terry reward model. This script can be launched either locally or via Slurm. For details on how to set up Ray and launch a job using Slurm, refer to the [cluster documentation](../cluster.md). + +Be sure to launch the job using `uv`. The command to launch a training job is as follows: + +```bash +uv run examples/run_sft.py --config +``` + +The YAML config must be specified. It uses the same base template as the SFT config but includes a new `reward_model_type` key that triggers Reward Model training. An example RM config file can be found at [examples/configs/rm.yaml](../../examples/configs/rm.yaml). + +**Reminder**: Don't forget to set your `HF_HOME`, `WANDB_API_KEY`, and `HF_DATASETS_CACHE` (if needed). You'll need to do a `huggingface-cli login` as well for Llama models. 
+ +## Datasets + +By default, NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml new file mode 100644 index 0000000000..c01d78c781 --- /dev/null +++ b/examples/configs/rm.yaml @@ -0,0 +1,140 @@ +# Bradley-Terry (BT) Reward Model Training Configuration +# (uses same base template as the SFT config but includes a new `reward_model_type` key that triggers Reward Model training) +sft: + ## total number of steps to train will equal + ## min((max_num_epochs * len(train_dataloader)), max_num_steps) + max_num_epochs: 1 + max_num_steps: -1 # by default, train for 1 epoch + + val_period: 16 + val_batches: -1 + val_global_batch_size: 32 + val_micro_batch_size: 1 + val_at_start: false + seed: 42 + +checkpointing: + enabled: true + checkpoint_dir: "results/rm" + metric_name: "val_loss" + higher_is_better: false + keep_top_k: 3 + save_period: ${sft.val_period} + +policy: + model_name: "meta-llama/Llama-3.2-1B-Instruct" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + # We don't use the "default" chat template because the Llama tokenizer inserts the current + # date in the system prompt, which could make the reward model's output date-dependent. + chat_template: "{{- bos_token }}\n\n{#- This block extracts the system message, so we can slot it into the right place. 
#}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = '' %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id>\n\n' }}\n{%- endif %}" + reward_model_type: "bradley_terry" + train_global_batch_size: 128 + train_micro_batch_size: 1 + max_total_sequence_length: 8192 + precision: "bfloat16" + fsdp_offload_enabled: false + activation_checkpointing_enabled: false + + dtensor_cfg: + enabled: true + cpu_offload: false + sequence_parallel: false + activation_checkpointing: false + tensor_parallel_size: 1 + context_parallel_size: 1 + custom_parallel_plan: null + + dynamic_batching: + enabled: false + + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size} + max_grad_norm: 1.0 + + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 2.0e-6 + weight_decay: 0.1 + betas: [0.9, 0.98] + eps: 1e-5 + # when using Dtensor, we need to set `foreach` and `fused` to false + foreach: false + fused: false + + ## ignored since enabled=false, but needed for testing purposes + megatron_cfg: + enabled: false + empty_unused_memory_level: 1 + activation_checkpointing: false + tensor_model_parallel_size: 2 + pipeline_model_parallel_size: 2 + context_parallel_size: 1 + pipeline_dtype: ${policy.precision} + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + sequence_parallel: false + + optimizer: + optimizer: "adam" + lr: 2.0e-6 + min_lr: 1.9999e-6 + 
weight_decay: 0.1 + bf16: false + fp16: false + params_dtype: "float32" + + #adam + adam_beta1: 0.9 + adam_beta2: 0.98 + adam_eps: 1e-5 + + #sgd + sgd_momentum: 0.9 + + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 50 + lr_warmup_init: 1.9999e-6 + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: false + average_in_collective: true + data_parallel_sharding_strategy: "optim_grads_params" + + +data: + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "HelpSteer3" + +logger: + log_dir: "logs" # Base directory for all logs + wandb_enabled: true # Make sure you do a ``wandb login [Your API key]'' before running + tensorboard_enabled: true + monitor_gpus: true # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "rm-dev" + name: "rm-dev-${data.dataset_name}" + tensorboard: + log_dir: "tb_logs-rm-dev-${data.dataset_name}" + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 1 + num_nodes: 1 diff --git a/examples/run_sft.py b/examples/run_sft.py index ce5b258b0c..fb1eb1aaee 100644 --- a/examples/run_sft.py +++ b/examples/run_sft.py @@ -13,6 +13,7 @@ # limitations under the License. 
import argparse +import logging import os import pprint from functools import partial @@ -89,36 +90,118 @@ def sft_preprocessor( return output -def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): +def rm_preprocessor( + datum_dict: dict[str, Any], + task_data_spec: TaskDataSpec, + tokenizer, + max_seq_length: int, + idx: int, +) -> DatumSpec: + """Process a datum dictionary for RM training.""" + messages_chosen = datum_dict["prompt"] + [ + {"role": "assistant", "content": datum_dict["chosen_response"]} + ] + messages_rejected = datum_dict["prompt"] + [ + {"role": "assistant", "content": datum_dict["rejected_response"]} + ] + + message_log_chosen = get_formatted_message_log( + messages_chosen, tokenizer, task_data_spec + ) + message_log_rejected = get_formatted_message_log( + messages_rejected, tokenizer, task_data_spec + ) + + length_chosen = sum(len(m["token_ids"]) for m in message_log_chosen) + length_rejected = sum(len(m["token_ids"]) for m in message_log_rejected) + + loss_multiplier = 1.0 + if max(length_chosen, length_rejected) > max_seq_length: + # make smaller and mask out + logging.warning( + f"Truncating chosen and rejected messages to {max_seq_length} tokens" + ) + for message in message_log_chosen: + message["token_ids"] = message["token_ids"][ + : min(4, max_seq_length // len(message_log_chosen)) + ] + for message in message_log_rejected: + message["token_ids"] = message["token_ids"][ + : min(4, max_seq_length // len(message_log_rejected)) + ] + loss_multiplier = 0.0 + + length_chosen = sum(len(m["token_ids"]) for m in message_log_chosen) + length_rejected = sum(len(m["token_ids"]) for m in message_log_rejected) + + # safeguard against edge case where there are too many turns to fit within the max length + assert max(length_chosen, length_rejected) <= max_seq_length + + output = { + "message_log_chosen": message_log_chosen, + "length_chosen": length_chosen, + "message_log_rejected": message_log_rejected, + "length_rejected": 
length_rejected, + "extra_env_info": None, + "loss_multiplier": loss_multiplier, + "idx": idx, + } + return output + + +def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig, model_type: str): print("\nā–¶ Setting up data...") data_cls = data_config["dataset_name"] - if data_cls == "open_assistant": - data = hf_datasets.OasstDataset(output_dir="/tmp/open_assistant") - elif data_cls == "squad": - data = hf_datasets.SquadDataset() - elif data_cls == "prompt_response_dataset": - data = hf_datasets.PromptResponseDataset( - data_config["train_data_path"], - data_config["val_data_path"], - data_config["input_key"], - data_config["output_key"], - ) - elif data_cls == "openmathinstruct2": - data = hf_datasets.OpenMathInstruct2Dataset( - split=data_config["split"], - output_key=data_config["output_key"], - prompt_file=data_config["prompt_file"], - ) - elif data_cls == "openai_format": - data = hf_datasets.OpenAIFormatDataset( - data_config["train_data_path"], - data_config["val_data_path"], - data_config["chat_key"], - data_config["system_key"], - data_config["system_prompt"], + + if model_type == "lm": + data_preprocessor = partial( + sft_preprocessor, + add_bos=data_config["add_bos"], + add_eos=data_config["add_eos"], + add_generation_prompt=data_config["add_generation_prompt"], ) + + if data_cls == "open_assistant": + data = hf_datasets.OasstDataset(output_dir="/tmp/open_assistant") + elif data_cls == "squad": + data = hf_datasets.SquadDataset() + elif data_cls == "prompt_response_dataset": + data = hf_datasets.PromptResponseDataset( + data_config["train_data_path"], + data_config["val_data_path"], + data_config["input_key"], + data_config["output_key"], + ) + elif data_cls == "openmathinstruct2": + data = hf_datasets.OpenMathInstruct2Dataset( + split=data_config["split"], + output_key=data_config["output_key"], + prompt_file=data_config["prompt_file"], + ) + elif data_cls == "openai_format": + data = hf_datasets.OpenAIFormatDataset( + 
data_config["train_data_path"], + data_config["val_data_path"], + data_config["chat_key"], + data_config["system_key"], + data_config["system_prompt"], + ) + else: + raise ValueError( + f"Unknown dataset class: {data_cls} for model_type: {model_type}" + ) + elif model_type == "reward": + data_preprocessor = rm_preprocessor + + if data_cls == "HelpSteer3": + data = hf_datasets.HelpSteer3Dataset() + else: + raise ValueError( + f"Unknown dataset class: {data_cls} for model_type: {model_type}" + ) else: - raise ValueError(f"Unknown dataset class: {data_cls}") + raise ValueError(f"Unknown model type: {model_type}") + print( f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." ) @@ -131,12 +214,7 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): train_dataset, tokenizer, sft_task_spec, - partial( - sft_preprocessor, - add_bos=data_config["add_bos"], - add_eos=data_config["add_eos"], - add_generation_prompt=data_config["add_generation_prompt"], - ), + data_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) @@ -144,12 +222,7 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): val_dataset, tokenizer, sft_task_spec, - partial( - sft_preprocessor, - add_bos=data_config.get("add_bos", True), - add_eos=data_config.get("add_eos", True), - add_generation_prompt=data_config["add_generation_prompt"], - ), + data_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) @@ -178,6 +251,8 @@ def main(): print("Final config:") pprint.pprint(config) + model_type = "reward" if "reward_model_type" in config["policy"] else "lm" + config["logger"]["log_dir"] = get_next_experiment_dir(config["logger"]["log_dir"]) print(f"šŸ“Š Using log directory: {config['logger']['log_dir']}") if config["checkpointing"]["enabled"]: @@ -195,7 +270,7 @@ def main(): dataset, val_dataset, sft_task_spec, - ) = setup_data(tokenizer, config["data"]) + 
) = setup_data(tokenizer, config["data"], model_type) ( policy, diff --git a/nemo_rl/algorithms/loss_functions.py b/nemo_rl/algorithms/loss_functions.py index 1078da5fa3..4d16f87adb 100644 --- a/nemo_rl/algorithms/loss_functions.py +++ b/nemo_rl/algorithms/loss_functions.py @@ -372,6 +372,107 @@ def __call__( } +class PreferenceLossDataDict(TypedDict): + """Required keys for the preference loss function.""" + + input_ids: torch.Tensor + token_mask: torch.Tensor + sample_mask: torch.Tensor + + +class PreferenceLoss(LossFunction): + """Preference Loss function. + + Optimizes the model to prefer chosen responses over rejected ones + + The preference loss is computed as: + L_pref(Īø) = -E[log(σ(β * (r_chosen - r_rejected)))] + + where: + - σ is the sigmoid function + - β is a scaling factor (ex: `reference_policy_kl_penalty` in DPO) + - r_chosen and r_rejected are the rewards for chosen and rejected responses + + Returns: + tuple[torch.Tensor, dict]: A tuple containing: + - The preference loss value + - A dictionary with metrics including: + - loss: Preference loss + - accuracy: Fraction of examples where chosen response has higher reward + """ + + def __init__(self): + self.loss_type = LossType.SEQUENCE_LEVEL + + def split_output_tensor(self, tensor: Tensor) -> tuple[Tensor, Tensor]: + return tensor[::2], tensor[1::2] + + def _preference_loss( + self, + rewards: Tensor, + sample_mask: Tensor, + global_valid_seqs: Tensor, + beta: float = 1.0, + ) -> tuple[Tensor, Tensor, Tensor, Tensor]: + rewards_chosen, rewards_rejected = self.split_output_tensor(rewards) + rewards_delta = rewards_chosen - rewards_rejected + + per_sample_loss = ( + -torch.nn.functional.logsigmoid(beta * rewards_delta) * sample_mask[::2] + ) ## zero out invalid samples + + ## divide by 2 because each preference example corresponds to 2 samples (chosen, rejected) + return ( + masked_mean( + per_sample_loss, + sample_mask[::2], + global_normalization_factor=global_valid_seqs / 2, + ), + masked_mean( + 
rewards_chosen > rewards_rejected, + sample_mask[::2], + global_normalization_factor=global_valid_seqs / 2, + ), + masked_mean( + rewards_chosen, + sample_mask[::2], + global_normalization_factor=global_valid_seqs / 2, + ), + masked_mean( + rewards_rejected, + sample_mask[1::2], + global_normalization_factor=global_valid_seqs / 2, + ), + ) + + def __call__( + self, + rewards: Tensor, + data: BatchedDataDict[PreferenceLossDataDict], + global_valid_seqs: Tensor, + global_valid_toks: Tensor | None, + ) -> tuple[torch.Tensor, dict[str, Any]]: + sample_mask = data["sample_mask"] + + ( + preference_loss, + accuracy, + rewards_chosen_mean, + rewards_rejected_mean, + ) = self._preference_loss(rewards, sample_mask, global_valid_seqs) + + ## divide by 2 because we're summing over (chosen, rejected) pairs + num_valid_samples = sample_mask.sum() / 2 + + return preference_loss, { + "loss": preference_loss.item(), + "accuracy": accuracy.item(), + "rewards_chosen_mean": rewards_chosen_mean.item(), + "rewards_rejected_mean": rewards_rejected_mean.item(), + "num_valid_samples": num_valid_samples.item(), + } + + class DPOLossConfig(TypedDict): reference_policy_kl_penalty: float preference_loss_weight: float @@ -389,7 +490,7 @@ class DPOLossDataDict(TypedDict): sample_mask: torch.Tensor -class DPOLossFn(LossFunction): +class DPOLossFn(PreferenceLoss): """Direct Preference Optimization (DPO) loss function. 
This loss function implements the DPO algorithm as described in: @@ -455,9 +556,6 @@ def __init__(self, cfg: DPOLossConfig): self.loss_type = LossType.SEQUENCE_LEVEL - def split_output_tensor(self, tensor: Tensor) -> tuple[Tensor, Tensor]: - return tensor[::2], tensor[1::2] - def _preference_loss( self, next_token_logits: Tensor, @@ -505,38 +603,8 @@ def _preference_loss( if self.preference_average_log_probs: rewards = rewards / token_mask.sum(-1).clamp(min=1) - rewards_chosen, rewards_rejected = self.split_output_tensor(rewards) - rewards_delta = rewards_chosen - rewards_rejected - - per_sample_loss = ( - -torch.nn.functional.logsigmoid( - self.reference_policy_kl_penalty * rewards_delta - ) - * sample_mask[::2] - ) ## zero out invalid samples - - ## divide by 2 because each preference example corresponds to 2 samples (chosen, rejected) - return ( - masked_mean( - per_sample_loss, - sample_mask[::2], - global_normalization_factor=global_valid_seqs / 2, - ), - masked_mean( - rewards_chosen > rewards_rejected, - sample_mask[::2], - global_normalization_factor=global_valid_seqs / 2, - ), - masked_mean( - rewards_chosen, - sample_mask[::2], - global_normalization_factor=global_valid_seqs / 2, - ), - masked_mean( - rewards_rejected, - sample_mask[1::2], - global_normalization_factor=global_valid_seqs / 2, - ), + return super()._preference_loss( + rewards, sample_mask, global_valid_seqs, self.reference_policy_kl_penalty ) def __call__( diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index f988e7788b..1c5242526d 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -22,11 +22,17 @@ from transformers import AutoTokenizer from nemo_rl.algorithms.loss_functions import ( + LossFunction, NLLLoss, + PreferenceLoss, ) from nemo_rl.algorithms.utils import set_seed from nemo_rl.data import DataConfig -from nemo_rl.data.datasets import AllTaskProcessedDataset, rl_collate_fn +from nemo_rl.data.datasets import ( + AllTaskProcessedDataset, + 
preference_collate_fn, + rl_collate_fn, +) from nemo_rl.data.interfaces import TaskDataSpec from nemo_rl.data.llm_message_utils import ( add_loss_mask_to_message_log, @@ -80,6 +86,18 @@ class MasterConfig(TypedDict): checkpointing: CheckpointingConfig +class SFTValMetrics(TypedDict): + val_loss: float + + +class RMValMetrics(TypedDict): + val_loss: float + accuracy: float + rewards_chosen_mean: float + rewards_rejected_mean: float + num_valid_samples: float + + # ======================================================= # Setup & Initialization # ======================================================= @@ -93,7 +111,7 @@ def setup( RayVirtualCluster, StatefulDataLoader, StatefulDataLoader, - NLLLoss, + LossFunction, MasterConfig, Logger, TaskDataSpec, @@ -104,6 +122,19 @@ def setup( Returns: Tuple of policy, cluster, dataloader, tokenizer, loss_fn, math_env, master_config, logger """ + model_type = "reward" if "reward_model_type" in master_config["policy"] else "lm" + + if model_type == "lm": + collate_fn = rl_collate_fn + loss_fn_class = NLLLoss + elif model_type == "reward": + collate_fn = preference_collate_fn + loss_fn_class = PreferenceLoss + else: + raise NotImplementedError( + f"Model type {model_type} not implemented for SFT training." 
+ ) + set_seed(master_config["sft"]["seed"]) # Extract individual configs for easier access @@ -147,7 +178,7 @@ def setup( train_dataset, batch_size=policy_config["train_global_batch_size"], shuffle=True, - collate_fn=rl_collate_fn, + collate_fn=collate_fn, drop_last=True, ) @@ -161,7 +192,7 @@ def setup( val_dataset, batch_size=sft_config["val_global_batch_size"], shuffle=False, - collate_fn=rl_collate_fn, + collate_fn=collate_fn, drop_last=True, ) @@ -196,7 +227,7 @@ def setup( init_optimizer=True, init_reference_model=False, ) - loss_fn = NLLLoss() + loss_fn = loss_fn_class() print(" āœ“ Model initialized") print("\n" + "=" * 60) @@ -230,6 +261,7 @@ def validate( val_batches: int, val_batch_size: int, val_mbs: int, + model_type: str, ): """Run validation on the validation dataset.""" if val_dataloader is None: @@ -244,7 +276,8 @@ def validate( # Show a progress indicator for validation # val_total = len(val_dataloader) - val_metrics = {"val_loss": 0.0} + list_of_val_metrics = [] + num_valid_batches = 0 policy.prepare_for_training() @@ -273,13 +306,29 @@ def validate( ) ## just run model fwd - val_results = policy.train( - val_data, - loss_fn, - eval_mode=True, - gbs=val_batch_size, - mbs=val_mbs, - ) + + if model_type == "lm": + val_results = policy.train( + val_data, + loss_fn, + eval_mode=True, + gbs=val_batch_size, + mbs=val_mbs, + ) + elif model_type == "reward": + val_results = policy.train( + val_data, + loss_fn, + eval_mode=True, + ## NOTE: we double the batch size here because each preference example corresponds to a pair of + ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. + gbs=val_batch_size * 2, + mbs=val_mbs * 2, + ) + else: + raise NotImplementedError( + f"Model type {model_type} not implemented for SFT training." + ) if len(val_results["all_mb_metrics"]) == 0: warnings.warn( @@ -287,19 +336,92 @@ def validate( " This is likely because there were no valid samples." 
) else: - val_metrics["val_loss"] += float(val_results["loss"]) + if model_type == "lm": + list_of_val_metrics.append( + SFTValMetrics(val_loss=float(val_results["loss"])) + ) + elif model_type == "reward": + list_of_val_metrics.append( + RMValMetrics( + val_loss=sum(val_results["all_mb_metrics"]["loss"]), + accuracy=sum(val_results["all_mb_metrics"]["accuracy"]), + rewards_chosen_mean=sum( + val_results["all_mb_metrics"]["rewards_chosen_mean"] + ), + rewards_rejected_mean=sum( + val_results["all_mb_metrics"]["rewards_rejected_mean"] + ), + num_valid_samples=sum( + val_results["all_mb_metrics"]["num_valid_samples"] + ), + ) + ) + else: + raise NotImplementedError( + f"Model type {model_type} not implemented for SFT training." + ) + num_valid_batches += 1 if val_batches > 0 and batch_idx >= val_batches - 1: break if num_valid_batches > 0: - val_metrics["val_loss"] /= num_valid_batches + if model_type == "lm": + val_metrics = SFTValMetrics( + val_loss=sum([m["val_loss"] for m in list_of_val_metrics]) + ) + val_metrics["val_loss"] /= num_valid_batches + elif model_type == "reward": + sum_num_valid_samples = sum( + [m["num_valid_samples"] for m in list_of_val_metrics] + ) + val_metrics = RMValMetrics( + val_loss=sum( + [ + m["val_loss"] * m["num_valid_samples"] + for m in list_of_val_metrics + ] + ) + / sum_num_valid_samples, + accuracy=sum( + [ + m["accuracy"] * m["num_valid_samples"] + for m in list_of_val_metrics + ] + ) + / sum_num_valid_samples, + rewards_chosen_mean=sum( + [ + m["rewards_chosen_mean"] * m["num_valid_samples"] + for m in list_of_val_metrics + ] + ) + / sum_num_valid_samples, + rewards_rejected_mean=sum( + [ + m["rewards_rejected_mean"] * m["num_valid_samples"] + for m in list_of_val_metrics + ] + ) + / sum_num_valid_samples, + num_valid_samples=sum_num_valid_samples, + ) else: warnings.warn( "No validation metrics were collected." " This is likely because there were no valid samples in the validation set." 
) + if model_type == "lm": + val_metrics = SFTValMetrics(val_loss=0.0) + elif model_type == "reward": + val_metrics = RMValMetrics( + val_loss=0.0, + accuracy=0.0, + rewards_chosen_mean=0.0, + rewards_rejected_mean=0.0, + num_valid_samples=0.0, + ) # Calculate validation metrics policy.prepare_for_training() @@ -313,6 +435,18 @@ def validate( print("\nšŸ“Š Validation Results:") print(f" • Validation loss: {val_metrics['val_loss']:.4f}") + if model_type == "reward": + print(f" • Validation accuracy: {val_metrics['accuracy']:.4f}") + print( + f" • Validation rewards chosen mean: {val_metrics['rewards_chosen_mean']:.4f}" + ) + print( + f" • Validation rewards rejected mean: {val_metrics['rewards_rejected_mean']:.4f}" + ) + print( + f" • Validation num valid samples: {val_metrics['num_valid_samples']:.0f}" + ) + # Print timing information print("\n ā±ļø Validation Timing:") validation_time = timing_metrics.get("total_validation_time", 0) @@ -355,6 +489,8 @@ def sft_train( val_at_start = sft_config["val_at_start"] max_num_epochs = sft_config["max_num_epochs"] + model_type = "reward" if "reward_model_type" in master_config["policy"] else "lm" + # Run validation at the start if configured if val_at_start and total_steps == 0: print("\nšŸ” Running initial validation...") @@ -369,6 +505,7 @@ def sft_train( val_batches=sft_config["val_batches"], val_batch_size=sft_config["val_global_batch_size"], val_mbs=sft_config["val_micro_batch_size"], + model_type=model_type, ) logger.log_metrics(val_metrics, total_steps, prefix="validation") @@ -376,15 +513,15 @@ def sft_train( policy.prepare_for_training() - while ( - current_epoch < max_num_epochs - and total_steps < master_config["sft"]["max_num_steps"] + while current_epoch < max_num_epochs and ( + master_config["sft"]["max_num_steps"] == -1 + or total_steps < master_config["sft"]["max_num_steps"] ): print(f"\n{'=' * 25} Epoch {current_epoch + 1}/{max_num_epochs} {'=' * 25}") for batch in train_dataloader: print( - f"\n{'=' * 
25} Step {current_step + 1}/{min(len(train_dataloader), master_config['sft']['max_num_steps'])} {'=' * 25}" + f"\n{'=' * 25} Step {current_step + 1}/{min(len(train_dataloader), master_config['sft']['max_num_steps'] if master_config['sft']['max_num_steps'] != -1 else len(train_dataloader))} {'=' * 25}" ) maybe_gpu_profile_step(policy, total_steps + 1) val_metrics, validation_timings = None, None @@ -417,11 +554,28 @@ def sft_train( ) print("ā–¶ Taking a training step...") - train_results = policy.train(train_data, loss_fn) - is_last_step = total_steps + 1 >= master_config["sft"][ - "max_num_steps" - ] or ( + if model_type == "lm": + train_results = policy.train(train_data, loss_fn) + elif model_type == "reward": + train_results = policy.train( + train_data, + loss_fn, + eval_mode=False, + ## NOTE: we double the batch size here because each preference example corresponds to a pair of + ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. + gbs=master_config["policy"]["train_global_batch_size"] * 2, + mbs=master_config["policy"]["train_micro_batch_size"] * 2, + ) + else: + raise NotImplementedError( + f"Model type {model_type} not implemented for SFT training." 
+ ) + + is_last_step = ( + master_config["sft"]["max_num_steps"] != -1 + and total_steps + 1 >= master_config["sft"]["max_num_steps"] + ) or ( current_epoch + 1 == max_num_epochs and current_step + 1 == len(train_dataloader) ) @@ -441,6 +595,7 @@ def sft_train( val_batches=sft_config["val_batches"], val_batch_size=sft_config["val_global_batch_size"], val_mbs=sft_config["val_micro_batch_size"], + model_type=model_type, ) logger.log_metrics( validation_timings, total_steps + 1, prefix="timing/validation" @@ -490,6 +645,7 @@ def sft_train( "loss": train_results["loss"].numpy(), "grad_norm": train_results["grad_norm"].numpy(), } + metrics.update(train_results["all_mb_metrics"]) for k, v in metrics.items(): if k in {"lr", "wd", "global_valid_seqs", "global_valid_toks"}: @@ -500,6 +656,19 @@ def sft_train( print("\nšŸ“Š Training Results:") print(f" • Loss: {float(metrics['loss']):.4f}") + + if model_type == "reward": + print(f" • Accuracy: {float(metrics['accuracy']):.4f}") + print( + f" • Rewards chosen mean: {float(metrics['rewards_chosen_mean']):.4f}" + ) + print( + f" • Rewards rejected mean: {float(metrics['rewards_rejected_mean']):.4f}" + ) + print( + f" • Num valid samples: {float(metrics['num_valid_samples']):.0f}" + ) + print("\nā±ļø Timing:") # Display total time first, separately total_time = timing_metrics.get("total_step_time", 0) @@ -520,7 +689,10 @@ def sft_train( current_step += 1 total_steps += 1 - if total_steps >= master_config["sft"]["max_num_steps"]: + if ( + master_config["sft"]["max_num_steps"] != -1 + and total_steps >= master_config["sft"]["max_num_steps"] + ): return current_epoch += 1 diff --git a/nemo_rl/data/datasets.py b/nemo_rl/data/datasets.py index d9427d29a1..647c16c937 100644 --- a/nemo_rl/data/datasets.py +++ b/nemo_rl/data/datasets.py @@ -197,16 +197,20 @@ def eval_collate_fn(data_batch: list[DatumSpec]) -> BatchedDataDict[Any]: return output -def dpo_collate_fn( +def preference_collate_fn( data_batch: list[DPODatumSpec], - 
tokenizer: TokenizerType, - make_sequence_length_divisible_by: int, ) -> BatchedDataDict[Any]: - """Collate function for DPO training. + """Collate function for preference data training. This function separates the chosen and rejected responses to create two examples per prompt. The chosen and rejected examples are interleaved along the batch dimension, resulting in a batch size of 2 * len(data_batch). + + Args: + data_batch: List of data samples with message_log_chosen, message_log_rejected, length_chosen, length_rejected, loss_multiplier, idx, and task_name fields. + + Returns: + BatchedDataDict with message_log, length, loss_multiplier, task_name, and idx fields. """ message_log = [] length = [] @@ -236,6 +240,26 @@ def dpo_collate_fn( batch_max_length=batch_max_length, ) + return batch + + +def dpo_collate_fn( + data_batch: list[DPODatumSpec], + tokenizer: TokenizerType, + make_sequence_length_divisible_by: int, +) -> BatchedDataDict[Any]: + """Collate function for DPO training. + + Args: + data_batch: List of data samples with message_log_chosen, message_log_rejected, length_chosen, length_rejected, loss_multiplier, idx, and task_name fields. + tokenizer: Tokenizer for text processing + make_sequence_length_divisible_by: Make the sequence length divisible by this value + + Returns: + BatchedDataDict with input_ids, input_lengths, token_mask, and sample_mask fields. 
+ """ + batch = preference_collate_fn(data_batch) + ## add loss mask based on role to every message add_loss_mask_to_message_log( batch["message_log"], @@ -253,7 +277,7 @@ def dpo_collate_fn( "input_ids": cat_and_padded["token_ids"], "input_lengths": input_lengths, "token_mask": cat_and_padded["token_loss_mask"], - "sample_mask": loss_multiplier_batch, + "sample_mask": batch["loss_multiplier"], } ) diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 6872250d10..6a27e3c998 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -35,7 +35,12 @@ from torch.distributed.tensor.experimental._attention import ( set_rotate_method, ) -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoModelForSequenceClassification, + AutoTokenizer, +) from transformers.integrations.accelerate import find_tied_parameters from transformers.models.gemma3.modeling_gemma3 import Gemma3ForCausalLM @@ -164,6 +169,8 @@ def __init__( else: raise ValueError(f"Unknown precision: {self.cfg['precision']}") + print(f"[Rank {self.rank}] Loading model {model_name} on CPU...") + model_config = AutoConfig.from_pretrained( model_name, # Always load the model in float32 to keep master weights in float32. @@ -175,15 +182,40 @@ def __init__( ), # due to https://github.com/huggingface/transformers/issues/38002 ) + if "reward_model_type" in self.cfg: + if self.cfg["reward_model_type"] == "bradley_terry": + model_class = AutoModelForSequenceClassification + if model_config.num_labels != 1: + # For Bradley-Terry reward models, the linear head has a single output. + # In the transformers library, the default setting for model_config.num_labels is 2 + # (https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/configuration_utils.py#L259). 
+ # Since num_labels is used as the out_features for the linear head + # (https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/llama/modeling_llama.py#L738) + # if num_labels is not 1, we set it to 1. This change may trigger a warning that some weights are not initialized + # from the model checkpoint and are instead initialized using model_config.initializer_range + # (https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/llama/configuration_llama.py#L62). + print( + "model_config.num_labels is not 1. Setting it to 1 since this value is used as the out_features " + "for the linear head of Bradley-Terry reward models." + ) + model_config.num_labels = 1 + else: + raise ValueError( + f"Unknown reward model type: {self.cfg['reward_model_type']}" + ) + else: + model_class = AutoModelForCausalLM + full_state_dict = None if self.rank == 0: print(f"[Rank {self.rank}] Loading model {model_name} on CPU...") - model = AutoModelForCausalLM.from_pretrained( + model = model_class.from_pretrained( model_name, device_map="cpu", # load weights onto CPU initially trust_remote_code=True, config=model_config, ) + full_state_dict = model.state_dict() del model @@ -192,10 +224,20 @@ def __init__( # The actual weights will be broadcast from rank 0. 
with init_empty_weights(): - self.model = AutoModelForCausalLM.from_config( + self.model = model_class.from_config( model_config, ) + if self.model.config.pad_token_id is None: + if isinstance(self.model.config.eos_token_id, int): + self.model.config.pad_token_id = self.model.config.eos_token_id + elif isinstance(self.model.config.eos_token_id, list): + self.model.config.pad_token_id = self.model.config.eos_token_id[0] + else: + raise ValueError( + f"Unknown eos_token_id type: {type(self.model.config.eos_token_id)}" + ) + # caching since this property is not always preserved after FSDP self.num_tied_weights = len(find_tied_parameters(self.model)) self.skip_tie_check = os.environ.get( diff --git a/tests/functional/rm.sh b/tests/functional/rm.sh new file mode 100644 index 0000000000..88129c9744 --- /dev/null +++ b/tests/functional/rm.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# clean up checkpoint directory on exit +trap "rm -rf /tmp/rm_checkpoints" EXIT + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +PROJECT_ROOT=$(realpath $SCRIPT_DIR/../..) 
+# Mark the current repo as safe, since wandb fetches metadata about the repo +git config --global --add safe.directory $PROJECT_ROOT + +set -eou pipefail + +EXP_NAME=$(basename $0 .sh) +EXP_DIR=$SCRIPT_DIR/$EXP_NAME +LOG_DIR=$EXP_DIR/logs +JSON_METRICS=$EXP_DIR/metrics.json +RUN_LOG=$EXP_DIR/run.log +export PYTHONPATH=${PROJECT_ROOT}:${PYTHONPATH:-} + +rm -rf $EXP_DIR $LOG_DIR +mkdir -p $EXP_DIR $LOG_DIR + +cd $PROJECT_ROOT +uv run $PROJECT_ROOT/examples/run_sft.py \ + --config examples/configs/rm.yaml \ + cluster.gpus_per_node=2 \ + sft.max_num_steps=3 \ + sft.val_batches=1 \ + sft.val_period=3 \ + logger.tensorboard_enabled=true \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=false \ + logger.monitor_gpus=true \ + checkpointing.enabled=true \ + checkpointing.save_period=3 \ + checkpointing.checkpoint_dir=/tmp/rm_checkpoints \ + $@ \ + 2>&1 | tee $RUN_LOG + +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/accuracy"]["3"] > 0.1' From 5b9e9768c98346ae608ab9aab452fc5179788809 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 15 Jul 2025 10:03:55 -0700 Subject: [PATCH 02/47] update docs Signed-off-by: Julien Veron Vialard --- README.md | 48 +++++++++++++++++++++++++++++++++++++++++++++++ docs/guides/rm.md | 5 ++++- docs/index.md | 1 + 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 3cd472c6ad..7356b63b32 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,9 @@ - [DPO](#dpo) - [DPO Single Node](#dpo-single-node) - [DPO Multi-node](#dpo-multi-node) + - [RM](#rm) + - [RM Single Node](#rm-single-node) + - [RM Multi-node](#rm-multi-node) - [Evaluation](#evaluation) - [Convert Model Format (Optional)](#convert-model-format-optional) - [Run Evaluation](#run-evaluation) @@ -341,6 +344,51 @@ sbatch \ ray.sub ``` +## RM + +We provide a sample RM experiment that uses the [HelpSteer3 
dataset](https://huggingface.co/datasets/nvidia/HelpSteer3) for preference-based training. + +### RM Single Node + +The default RM experiment is configured to run on a single GPU. To launch the experiment: + +```sh +uv run python examples/run_rm.py +``` + +This trains a RM based on `meta-llama/Llama-3.2-1B-Instruct` on one GPU. + +If you have access to more GPUs, you can update the experiment accordingly. To run on 8 GPUs, we update the cluster configuration: + +```sh +uv run python examples/run_rm.py cluster.gpus_per_node=8 +``` + +Refer to the [RM documentation](docs/guides/rm.md) for more information. + +### RM Multi-node + +For distributed RM training across multiple nodes, modify the following script for your use case: + +```sh +# Run from the root of NeMo RL repo +## number of nodes to use for your job +NUM_ACTOR_NODES=2 + +COMMAND="uv run ./examples/run_rm.py --config examples/configs/rm.yaml cluster.num_nodes=2 cluster.gpus_per_node=8 checkpointing.checkpoint_dir='results/rm_llama1b_2nodes' logger.wandb_enabled=True logger.wandb.name='rm-llama1b-2nodes'" \ +RAY_DEDUP_LOGS=0 \ +CONTAINER=YOUR_CONTAINER \ +MOUNTS="$PWD:$PWD" \ +sbatch \ + --nodes=${NUM_ACTOR_NODES} \ + --account=YOUR_ACCOUNT \ + --job-name=YOUR_JOBNAME \ + --partition=YOUR_PARTITION \ + --time=4:0:0 \ + --gres=gpu:8 \ + ray.sub +``` + ## Evaluation We provide evaluation tools to assess model capabilities. diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 234555d60d..3d65bfb828 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -9,7 +9,10 @@ The script, [examples/run_sft.py](../../examples/run_sft.py), is used to train a Be sure to launch the job using `uv`. 
The command to launch a training job is as follows: ```bash -uv run examples/run_sft.py --config +uv run examples/run_rm.py --config examples/configs/rm.yaml + +# Can also add overrides on CLI, like changing the model +uv run examples/run_rm.py --config examples/configs/rm.yaml policy.model_name=Qwen/Qwen2.5-1.5B ``` The YAML config must be specified. It uses the same base template as the SFT config but includes a new `reward_model_type` key that triggers Reward Model training. An example RM config file can be found at [examples/configs/rm.yaml](../../examples/configs/rm.yaml). diff --git a/docs/index.md b/docs/index.md index f9252656e5..72bda9f36b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -28,6 +28,7 @@ guides/sft.md guides/dpo.md guides/grpo.md guides/grpo-deepscaler.md +guides/rm.md guides/eval.md guides/deepseek.md model-quirks.md From 68e96ea21c054e80d854ff6a5be44110fa8d0c46 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 15 Jul 2025 10:52:29 -0700 Subject: [PATCH 03/47] add separate run_rm.py and unit tests Signed-off-by: Julien Veron Vialard --- examples/run_rm.py | 212 +++++++++++++++++++++++++++++++ examples/run_sft.py | 153 ++++++---------------- tests/unit/algorithms/test_rm.py | 163 ++++++++++++++++++++++++ 3 files changed, 414 insertions(+), 114 deletions(-) create mode 100644 examples/run_rm.py create mode 100644 tests/unit/algorithms/test_rm.py diff --git a/examples/run_rm.py b/examples/run_rm.py new file mode 100644 index 0000000000..5c3abfaffe --- /dev/null +++ b/examples/run_rm.py @@ -0,0 +1,212 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +import os +import pprint +from typing import Any + +from omegaconf import OmegaConf +from transformers import AutoTokenizer + +from nemo_rl.algorithms.sft import MasterConfig, setup, sft_train +from nemo_rl.algorithms.utils import get_tokenizer +from nemo_rl.data import DataConfig, hf_datasets +from nemo_rl.data.datasets import AllTaskProcessedDataset +from nemo_rl.data.interfaces import DatumSpec, TaskDataSpec +from nemo_rl.data.llm_message_utils import get_formatted_message_log +from nemo_rl.distributed.virtual_cluster import init_ray +from nemo_rl.utils.config import load_config, parse_hydra_overrides +from nemo_rl.utils.logger import get_next_experiment_dir + + +def parse_args(): + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Run RM training with configuration") + parser.add_argument( + "--config", type=str, default=None, help="Path to YAML config file" + ) + + # Parse known args for the script + args, overrides = parser.parse_known_args() + + return args, overrides + + +# ======================================================= +# Data Processing +# ======================================================= +def rm_preprocessor( + datum_dict: dict[str, Any], + task_data_spec: TaskDataSpec, + tokenizer, + max_seq_length: int, + idx: int, +) -> DatumSpec: + """Process a datum dictionary for RM training.""" + messages_chosen = datum_dict["prompt"] + [ + {"role": "assistant", "content": datum_dict["chosen_response"]} + ] + messages_rejected = datum_dict["prompt"] + [ + {"role": 
"assistant", "content": datum_dict["rejected_response"]} + ] + + message_log_chosen = get_formatted_message_log( + messages_chosen, tokenizer, task_data_spec + ) + message_log_rejected = get_formatted_message_log( + messages_rejected, tokenizer, task_data_spec + ) + + length_chosen = sum(len(m["token_ids"]) for m in message_log_chosen) + length_rejected = sum(len(m["token_ids"]) for m in message_log_rejected) + + loss_multiplier = 1.0 + if max(length_chosen, length_rejected) > max_seq_length: + # make smaller and mask out + logging.warning( + f"Truncating chosen and rejected messages to {max_seq_length} tokens" + ) + for message in message_log_chosen: + message["token_ids"] = message["token_ids"][ + : min(4, max_seq_length // len(message_log_chosen)) + ] + for message in message_log_rejected: + message["token_ids"] = message["token_ids"][ + : min(4, max_seq_length // len(message_log_rejected)) + ] + loss_multiplier = 0.0 + + length_chosen = sum(len(m["token_ids"]) for m in message_log_chosen) + length_rejected = sum(len(m["token_ids"]) for m in message_log_rejected) + + # safeguard against edge case where there are too many turns to fit within the max length + assert max(length_chosen, length_rejected) <= max_seq_length + + output = { + "message_log_chosen": message_log_chosen, + "length_chosen": length_chosen, + "message_log_rejected": message_log_rejected, + "length_rejected": length_rejected, + "extra_env_info": None, + "loss_multiplier": loss_multiplier, + "idx": idx, + } + return output + + +def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): + print("\nā–¶ Setting up data...") + data_cls = data_config["dataset_name"] + + if data_cls == "HelpSteer3": + data = hf_datasets.HelpSteer3Dataset() + else: + raise ValueError(f"Unknown dataset class: {data_cls}") + print( + f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." 
+ ) + + train_dataset = data.formatted_ds["train"] + val_dataset = data.formatted_ds["validation"] + sft_task_spec = data.task_spec + + train_dataset = AllTaskProcessedDataset( + train_dataset, + tokenizer, + sft_task_spec, + rm_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + + val_dataset = AllTaskProcessedDataset( + val_dataset, + tokenizer, + sft_task_spec, + rm_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + + return train_dataset, val_dataset, sft_task_spec + + +def main(): + """Main entry point.""" + # Parse arguments + args, overrides = parse_args() + + if not args.config: + args.config = os.path.join(os.path.dirname(__file__), "configs", "rm.yaml") + + config = load_config(args.config) + print(f"Loaded configuration from: {args.config}") + + if overrides: + print(f"Overrides: {overrides}") + config = parse_hydra_overrides(config, overrides) + + config: MasterConfig = OmegaConf.to_container(config, resolve=True) + print("Applied CLI overrides") + + # Print config + print("Final config:") + pprint.pprint(config) + + config["logger"]["log_dir"] = get_next_experiment_dir(config["logger"]["log_dir"]) + print(f"šŸ“Š Using log directory: {config['logger']['log_dir']}") + if config["checkpointing"]["enabled"]: + print( + f"šŸ“Š Using checkpoint directory: {config['checkpointing']['checkpoint_dir']}" + ) + + init_ray() + + # setup tokenizer + tokenizer = get_tokenizer(config["policy"]["tokenizer"]) + + # setup data + ( + dataset, + val_dataset, + sft_task_spec, + ) = setup_data(tokenizer, config["data"]) + + ( + policy, + cluster, + train_dataloader, + val_dataloader, + loss_fn, + logger, + checkpointer, + sft_save_state, + master_config, + ) = setup(config, tokenizer, dataset, val_dataset) + sft_train( + policy, + train_dataloader, + val_dataloader, + tokenizer, + loss_fn, + master_config, + logger, + sft_task_spec, + checkpointer, + sft_save_state, + ) + + +if __name__ == "__main__": + main() diff --git 
a/examples/run_sft.py b/examples/run_sft.py index fb1eb1aaee..ce5b258b0c 100644 --- a/examples/run_sft.py +++ b/examples/run_sft.py @@ -13,7 +13,6 @@ # limitations under the License. import argparse -import logging import os import pprint from functools import partial @@ -90,118 +89,36 @@ def sft_preprocessor( return output -def rm_preprocessor( - datum_dict: dict[str, Any], - task_data_spec: TaskDataSpec, - tokenizer, - max_seq_length: int, - idx: int, -) -> DatumSpec: - """Process a datum dictionary for RM training.""" - messages_chosen = datum_dict["prompt"] + [ - {"role": "assistant", "content": datum_dict["chosen_response"]} - ] - messages_rejected = datum_dict["prompt"] + [ - {"role": "assistant", "content": datum_dict["rejected_response"]} - ] - - message_log_chosen = get_formatted_message_log( - messages_chosen, tokenizer, task_data_spec - ) - message_log_rejected = get_formatted_message_log( - messages_rejected, tokenizer, task_data_spec - ) - - length_chosen = sum(len(m["token_ids"]) for m in message_log_chosen) - length_rejected = sum(len(m["token_ids"]) for m in message_log_rejected) - - loss_multiplier = 1.0 - if max(length_chosen, length_rejected) > max_seq_length: - # make smaller and mask out - logging.warning( - f"Truncating chosen and rejected messages to {max_seq_length} tokens" - ) - for message in message_log_chosen: - message["token_ids"] = message["token_ids"][ - : min(4, max_seq_length // len(message_log_chosen)) - ] - for message in message_log_rejected: - message["token_ids"] = message["token_ids"][ - : min(4, max_seq_length // len(message_log_rejected)) - ] - loss_multiplier = 0.0 - - length_chosen = sum(len(m["token_ids"]) for m in message_log_chosen) - length_rejected = sum(len(m["token_ids"]) for m in message_log_rejected) - - # safeguard against edge case where there are too many turns to fit within the max length - assert max(length_chosen, length_rejected) <= max_seq_length - - output = { - "message_log_chosen": message_log_chosen, 
- "length_chosen": length_chosen, - "message_log_rejected": message_log_rejected, - "length_rejected": length_rejected, - "extra_env_info": None, - "loss_multiplier": loss_multiplier, - "idx": idx, - } - return output - - -def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig, model_type: str): +def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): print("\nā–¶ Setting up data...") data_cls = data_config["dataset_name"] - - if model_type == "lm": - data_preprocessor = partial( - sft_preprocessor, - add_bos=data_config["add_bos"], - add_eos=data_config["add_eos"], - add_generation_prompt=data_config["add_generation_prompt"], + if data_cls == "open_assistant": + data = hf_datasets.OasstDataset(output_dir="/tmp/open_assistant") + elif data_cls == "squad": + data = hf_datasets.SquadDataset() + elif data_cls == "prompt_response_dataset": + data = hf_datasets.PromptResponseDataset( + data_config["train_data_path"], + data_config["val_data_path"], + data_config["input_key"], + data_config["output_key"], + ) + elif data_cls == "openmathinstruct2": + data = hf_datasets.OpenMathInstruct2Dataset( + split=data_config["split"], + output_key=data_config["output_key"], + prompt_file=data_config["prompt_file"], + ) + elif data_cls == "openai_format": + data = hf_datasets.OpenAIFormatDataset( + data_config["train_data_path"], + data_config["val_data_path"], + data_config["chat_key"], + data_config["system_key"], + data_config["system_prompt"], ) - - if data_cls == "open_assistant": - data = hf_datasets.OasstDataset(output_dir="/tmp/open_assistant") - elif data_cls == "squad": - data = hf_datasets.SquadDataset() - elif data_cls == "prompt_response_dataset": - data = hf_datasets.PromptResponseDataset( - data_config["train_data_path"], - data_config["val_data_path"], - data_config["input_key"], - data_config["output_key"], - ) - elif data_cls == "openmathinstruct2": - data = hf_datasets.OpenMathInstruct2Dataset( - split=data_config["split"], - 
output_key=data_config["output_key"], - prompt_file=data_config["prompt_file"], - ) - elif data_cls == "openai_format": - data = hf_datasets.OpenAIFormatDataset( - data_config["train_data_path"], - data_config["val_data_path"], - data_config["chat_key"], - data_config["system_key"], - data_config["system_prompt"], - ) - else: - raise ValueError( - f"Unknown dataset class: {data_cls} for model_type: {model_type}" - ) - elif model_type == "reward": - data_preprocessor = rm_preprocessor - - if data_cls == "HelpSteer3": - data = hf_datasets.HelpSteer3Dataset() - else: - raise ValueError( - f"Unknown dataset class: {data_cls} for model_type: {model_type}" - ) else: - raise ValueError(f"Unknown model type: {model_type}") - + raise ValueError(f"Unknown dataset class: {data_cls}") print( f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." ) @@ -214,7 +131,12 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig, model_type: st train_dataset, tokenizer, sft_task_spec, - data_preprocessor, + partial( + sft_preprocessor, + add_bos=data_config["add_bos"], + add_eos=data_config["add_eos"], + add_generation_prompt=data_config["add_generation_prompt"], + ), max_seq_length=data_config["max_input_seq_length"], ) @@ -222,7 +144,12 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig, model_type: st val_dataset, tokenizer, sft_task_spec, - data_preprocessor, + partial( + sft_preprocessor, + add_bos=data_config.get("add_bos", True), + add_eos=data_config.get("add_eos", True), + add_generation_prompt=data_config["add_generation_prompt"], + ), max_seq_length=data_config["max_input_seq_length"], ) @@ -251,8 +178,6 @@ def main(): print("Final config:") pprint.pprint(config) - model_type = "reward" if "reward_model_type" in config["policy"] else "lm" - config["logger"]["log_dir"] = get_next_experiment_dir(config["logger"]["log_dir"]) print(f"šŸ“Š Using log 
directory: {config['logger']['log_dir']}") if config["checkpointing"]["enabled"]: @@ -270,7 +195,7 @@ def main(): dataset, val_dataset, sft_task_spec, - ) = setup_data(tokenizer, config["data"], model_type) + ) = setup_data(tokenizer, config["data"]) ( policy, diff --git a/tests/unit/algorithms/test_rm.py b/tests/unit/algorithms/test_rm.py new file mode 100644 index 0000000000..0f1f8af059 --- /dev/null +++ b/tests/unit/algorithms/test_rm.py @@ -0,0 +1,163 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest.mock import MagicMock + +import pytest +import torch +from torchdata.stateful_dataloader import StatefulDataLoader + +from nemo_rl.algorithms.loss_functions import PreferenceLoss +from nemo_rl.algorithms.sft import _default_sft_save_state, sft_train + + +@pytest.fixture +def mock_components(): + # Create mock components + policy = MagicMock() + policy.train.return_value = { + "loss": torch.tensor(0.5), + "grad_norm": torch.tensor(1.0), + "all_mb_metrics": { + "loss": [0.5], + "accuracy": [1.0], + "rewards_chosen_mean": [4.5], + "rewards_rejected_mean": [3.5], + "num_valid_samples": [1.0], + }, + } + + # Create a proper message log structure with token_ids + mock_batch = { + "message_log": [ + [ # chosen + {"role": "user", "token_ids": torch.tensor([1, 2, 3])}, + {"role": "assistant", "token_ids": torch.tensor([4, 5, 6])}, + ], + [ # rejected + {"role": "user", "token_ids": torch.tensor([1, 2, 3])}, + {"role": "assistant", "token_ids": torch.tensor([7, 8, 9, 10, 11])}, + ], + ], + "length": torch.tensor([6, 8]), + "loss_multiplier": torch.tensor([1.0, 1.0]), + } + + # Create mock dataloader with 10 batches that can be iterated multiple times + train_dataloader = MagicMock(spec=StatefulDataLoader) + + def train_iter(self): + return iter([mock_batch] * 10) + + train_dataloader.__iter__ = train_iter + train_dataloader.__len__ = MagicMock(return_value=10) + + val_dataloader = MagicMock(spec=StatefulDataLoader) + + def val_iter(self): + return iter([mock_batch] * 10) + + val_dataloader.__iter__ = val_iter + val_dataloader.__len__ = MagicMock(return_value=10) + + tokenizer = MagicMock() + tokenizer.pad_token_id = 0 + + loss_fn = PreferenceLoss() + logger = MagicMock() + checkpointer = MagicMock() + sft_task_spec = MagicMock() + + # Create mock master config + master_config = { + "sft": { + "max_num_steps": 5, + "max_num_epochs": 2, + "val_period": 100, + "val_batches": 1, + "val_global_batch_size": 1, + "val_micro_batch_size": 1, + "val_at_start": False, 
+ }, + "policy": { + "train_global_batch_size": 1, + "make_sequence_length_divisible_by": 1, + "reward_model_type": "bradley_terry", + "train_micro_batch_size": 1, + }, + "checkpointing": {"enabled": False}, + } + + return { + "policy": policy, + "train_dataloader": train_dataloader, + "val_dataloader": val_dataloader, + "tokenizer": tokenizer, + "loss_fn": loss_fn, + "logger": logger, + "checkpointer": checkpointer, + "sft_task_spec": sft_task_spec, + "master_config": master_config, + } + + +def test_exit_on_max_steps(mock_components): + """Test that training loop exits when max_num_steps is reached""" + # Set max steps to 12, which is less than len(train_dataloader) * max_num_epochs + mock_components["master_config"]["sft"]["max_num_steps"] = 12 + + sft_save_state = _default_sft_save_state() + + # Run training + sft_train( + mock_components["policy"], + mock_components["train_dataloader"], + mock_components["val_dataloader"], + mock_components["tokenizer"], + mock_components["loss_fn"], + mock_components["master_config"], + mock_components["logger"], + mock_components["sft_task_spec"], + mock_components["checkpointer"], + sft_save_state, + ) + + # Verify we only trained for 12 steps. 
+ assert mock_components["policy"].train.call_count == 12 + + +def test_exit_on_max_epochs(mock_components): + """Test that training loop exits when max_num_epochs is reached""" + # Set max epochs to 2 and max steps to a large number + mock_components["master_config"]["sft"]["max_num_epochs"] = 2 + mock_components["master_config"]["sft"]["max_num_steps"] = 100 + + sft_save_state = _default_sft_save_state() + + # Run training + sft_train( + mock_components["policy"], + mock_components["train_dataloader"], + mock_components["val_dataloader"], + mock_components["tokenizer"], + mock_components["loss_fn"], + mock_components["master_config"], + mock_components["logger"], + mock_components["sft_task_spec"], + mock_components["checkpointer"], + sft_save_state, + ) + + # Verify we trained for exactly two epochs (20 batches). + assert mock_components["policy"].train.call_count == 20 From 21d67a0d9e8a222ec2f7092e7c6f07b89620eed7 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 15 Jul 2025 11:39:28 -0700 Subject: [PATCH 04/47] fix small typos and nit changes Signed-off-by: Julien Veron Vialard --- docs/guides/rm.md | 2 +- nemo_rl/algorithms/sft.py | 1 - nemo_rl/models/policy/dtensor_policy_worker.py | 3 --- tests/functional/rm.sh | 2 +- 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 3d65bfb828..7accacb2fb 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -4,7 +4,7 @@ This document explains how to train reward models (RM) within NeMo RL. Currently ## Launch a Training Job -The script, [examples/run_sft.py](../../examples/run_sft.py), is used to train a Bradley-Terry reward model. This script can be launched either locally or via Slurm. For details on how to set up Ray and launch a job using Slurm, refer to the [cluster documentation](../cluster.md). +The script, [examples/run_rm.py](../../examples/run_rm.py), is used to train a Bradley-Terry reward model. 
This script can be launched either locally or via Slurm. For details on how to set up Ray and launch a job using Slurm, refer to the [cluster documentation](../cluster.md). Be sure to launch the job using `uv`. The command to launch a training job is as follows: diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index fbbec48809..012f3501de 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -647,7 +647,6 @@ def sft_train( "loss": train_results["loss"].numpy(), "grad_norm": train_results["grad_norm"].numpy(), } - metrics.update(train_results["all_mb_metrics"]) for k, v in metrics.items(): if k in {"lr", "wd", "global_valid_seqs", "global_valid_toks"}: diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 27131ce57a..a6c4fa8274 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -175,8 +175,6 @@ def __init__( else: raise ValueError(f"Unknown precision: {self.cfg['precision']}") - print(f"[Rank {self.rank}] Loading model {model_name} on CPU...") - model_config = AutoConfig.from_pretrained( model_name, # Always load the model in float32 to keep master weights in float32. 
@@ -221,7 +219,6 @@ def __init__( trust_remote_code=True, config=model_config, ) - full_state_dict = model.state_dict() del model diff --git a/tests/functional/rm.sh b/tests/functional/rm.sh index 88129c9744..b7d23e7132 100644 --- a/tests/functional/rm.sh +++ b/tests/functional/rm.sh @@ -21,7 +21,7 @@ rm -rf $EXP_DIR $LOG_DIR mkdir -p $EXP_DIR $LOG_DIR cd $PROJECT_ROOT -uv run $PROJECT_ROOT/examples/run_sft.py \ +uv run $PROJECT_ROOT/examples/run_rm.py \ --config examples/configs/rm.yaml \ cluster.gpus_per_node=2 \ sft.max_num_steps=3 \ From 0aff4508dd5dc4a9b18a125d070f33f3197f3c9a Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 15 Jul 2025 15:43:26 -0700 Subject: [PATCH 05/47] adding generic preference dataset class and support for multiple validation preference datasets Signed-off-by: Julien Veron Vialard --- examples/configs/rm.yaml | 2 + examples/run_rm.py | 91 +++++++++++++++---- nemo_rl/algorithms/sft.py | 65 +++++++++---- nemo_rl/data/hf_datasets/__init__.py | 2 + .../data/hf_datasets/preference_dataset.py | 44 +++++++++ 5 files changed, 168 insertions(+), 36 deletions(-) create mode 100644 nemo_rl/data/hf_datasets/preference_dataset.py diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index c01d78c781..f6e0106c03 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -120,6 +120,8 @@ policy: data: max_input_seq_length: ${policy.max_total_sequence_length} dataset_name: "HelpSteer3" + # You can optionally specify a list of validation preference datasets: + # validation_dataset_name: ["PreferenceData::"] logger: log_dir: "logs" # Base directory for all logs diff --git a/examples/run_rm.py b/examples/run_rm.py index 5c3abfaffe..bd7a32a420 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -56,12 +56,27 @@ def rm_preprocessor( idx: int, ) -> DatumSpec: """Process a datum dictionary for RM training.""" - messages_chosen = datum_dict["prompt"] + [ - {"role": "assistant", "content": 
datum_dict["chosen_response"]} - ] - messages_rejected = datum_dict["prompt"] + [ - {"role": "assistant", "content": datum_dict["rejected_response"]} - ] + # Generic preference dataset format + if task_data_spec.task_name == "PreferenceData": + assert len(datum_dict["completions"]) == 2 # Currently only supporting 2 responses + if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: + chosen_completion = datum_dict["completions"][0] + rejected_completion = datum_dict["completions"][1] + else: + chosen_completion = datum_dict["completions"][1] + rejected_completion = datum_dict["completions"][0] + messages_chosen = datum_dict["context"] + chosen_completion["completion"] + messages_rejected = datum_dict["context"] + rejected_completion["completion"] + # Legacy dataset format + elif task_data_spec.task_name == "HelpSteer3": + messages_chosen = datum_dict["prompt"] + [ + {"role": "assistant", "content": datum_dict["chosen_response"]} + ] + messages_rejected = datum_dict["prompt"] + [ + {"role": "assistant", "content": datum_dict["rejected_response"]} + ] + else: + raise ValueError(f"Unknown task name: {task_data_spec.task_name}") message_log_chosen = get_formatted_message_log( messages_chosen, tokenizer, task_data_spec @@ -111,16 +126,26 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): print("\nā–¶ Setting up data...") data_cls = data_config["dataset_name"] - if data_cls == "HelpSteer3": + # Generic preference dataset format + if data_cls.startswith("PreferenceData:"): + _, _, data_path = data_cls.split(":") + data = hf_datasets.PreferenceDataset(data_path) + train_dataset = data.formatted_ds["local"] + val_dataset = None + print( + f" āœ“ Training dataset loaded with {len(data.formatted_ds['local'])} samples." 
+ ) + # Legacy dataset format + elif data_cls == "HelpSteer3": data = hf_datasets.HelpSteer3Dataset() + train_dataset = data.formatted_ds["train"] + val_dataset = data.formatted_ds["validation"] + print( + f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." + ) else: raise ValueError(f"Unknown dataset class: {data_cls}") - print( - f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." - ) - train_dataset = data.formatted_ds["train"] - val_dataset = data.formatted_ds["validation"] sft_task_spec = data.task_spec train_dataset = AllTaskProcessedDataset( @@ -131,13 +156,41 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): max_seq_length=data_config["max_input_seq_length"], ) - val_dataset = AllTaskProcessedDataset( - val_dataset, - tokenizer, - sft_task_spec, - rm_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) + if "validation_dataset_name" in data_config: + # Only supported for generic preference dataset format + if isinstance(data_config["validation_dataset_name"], list): + val_dataset = { + "validation": AllTaskProcessedDataset( + val_dataset, + tokenizer, + sft_task_spec, + rm_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + } if val_dataset else {} + for val_data_cls in data_config["validation_dataset_name"]: + assert val_data_cls.startswith("PreferenceData:") + _, val_dataset_name, val_data_path = val_data_cls.split(":") + val_data = hf_datasets.PreferenceDataset(val_data_path) + val_dataset.update({ + val_dataset_name: AllTaskProcessedDataset( + val_data.formatted_ds["local"], + tokenizer, + val_data.task_spec, + rm_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + }) + else: + raise ValueError(f"Invalid type for validation_dataset_name: 
{type(data_config['validation_dataset_name'])}") + else: + val_dataset = AllTaskProcessedDataset( + val_dataset, + tokenizer, + sft_task_spec, + rm_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) return train_dataset, val_dataset, sft_task_spec diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index 012f3501de..bf1058654f 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -176,13 +176,24 @@ def setup( ) train_dataloader.load_state_dict(dataloader_state_dict) - val_dataloader = StatefulDataLoader( - val_dataset, - batch_size=sft_config["val_global_batch_size"], - shuffle=False, - collate_fn=collate_fn, - drop_last=True, - ) + if isinstance(val_dataset, dict): + val_dataloader = { + k: StatefulDataLoader( + v, + batch_size=sft_config["val_global_batch_size"], + shuffle=False, + collate_fn=collate_fn, + drop_last=True, + ) for k, v in val_dataset.items() + } + else: + val_dataloader = StatefulDataLoader( + val_dataset, + batch_size=sft_config["val_global_batch_size"], + shuffle=False, + collate_fn=collate_fn, + drop_last=True, + ) # ========================== # Cluster @@ -250,8 +261,35 @@ def validate( val_batch_size: int, val_mbs: int, model_type: str, + logger: Logger, +): + if isinstance(val_dataloader, dict): + for k, v in val_dataloader.items(): + k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, sft_task_spec, val_batches, val_batch_size, val_mbs, model_type) + logger.log_metrics(k_val_metrics, step, prefix=f"validation-{k}") + logger.log_metrics(k_validation_timings, step, prefix=f"timing/validation-{k}") + else: + val_metrics, validation_timings = validate_one_dataset(policy, val_dataloader, tokenizer, loss_fn, step, master_config, sft_task_spec, val_batches, val_batch_size, val_mbs, model_type) + logger.log_metrics(val_metrics, step, prefix="validation") + logger.log_metrics(validation_timings, step, prefix="timing/validation") + + 
return None, None + + +def validate_one_dataset( + policy: PolicyInterface, + val_dataloader: StatefulDataLoader, + tokenizer, + loss_fn, + step: int, + master_config: MasterConfig, + sft_task_spec: TaskDataSpec, + val_batches: int, + val_batch_size: int, + val_mbs: int, + model_type: str, ): - """Run validation on the validation dataset.""" + """Run validation on one validation dataset.""" if val_dataloader is None: print(" āš ļø No validation dataloader provided, skipping validation") return @@ -494,11 +532,9 @@ def sft_train( val_batch_size=sft_config["val_global_batch_size"], val_mbs=sft_config["val_micro_batch_size"], model_type=model_type, + logger=logger, ) - logger.log_metrics(val_metrics, total_steps, prefix="validation") - logger.log_metrics(validation_timings, total_steps, prefix="timing/validation") - policy.prepare_for_training() while current_epoch < max_num_epochs and ( @@ -582,12 +618,7 @@ def sft_train( val_batch_size=sft_config["val_global_batch_size"], val_mbs=sft_config["val_micro_batch_size"], model_type=model_type, - ) - logger.log_metrics( - validation_timings, total_steps + 1, prefix="timing/validation" - ) - logger.log_metrics( - val_metrics, total_steps + 1, prefix="validation" + logger=logger, ) ## Checkpointing diff --git a/nemo_rl/data/hf_datasets/__init__.py b/nemo_rl/data/hf_datasets/__init__.py index aa5596397c..107769494f 100644 --- a/nemo_rl/data/hf_datasets/__init__.py +++ b/nemo_rl/data/hf_datasets/__init__.py @@ -18,6 +18,7 @@ from nemo_rl.data.hf_datasets.oai_format_dataset import OpenAIFormatDataset from nemo_rl.data.hf_datasets.oasst import OasstDataset from nemo_rl.data.hf_datasets.openmathinstruct2 import OpenMathInstruct2Dataset +from nemo_rl.data.hf_datasets.preference_dataset import PreferenceDataset from nemo_rl.data.hf_datasets.prompt_response_dataset import ( PromptResponseDataset, ) @@ -29,6 +30,7 @@ "OasstDataset", "OpenAIFormatDataset", "OpenMathInstruct2Dataset", + "PreferenceDataset", "PromptResponseDataset", 
"SquadDataset", "COMMON_CHAT_TEMPLATES", diff --git a/nemo_rl/data/hf_datasets/preference_dataset.py b/nemo_rl/data/hf_datasets/preference_dataset.py new file mode 100644 index 0000000000..3b96151c0c --- /dev/null +++ b/nemo_rl/data/hf_datasets/preference_dataset.py @@ -0,0 +1,44 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any + +from absl import logging +from collections import defaultdict +from datasets import Dataset, DatasetDict, load_dataset + +from nemo_rl.data.interfaces import TaskDataSpec + + +class PreferenceDataset: + """Preference dataset. + + This class handles loading of preference data. 
+ The input JSON files should contain examples with the following structure: + { + "context": list of dicts, # The input message + "completions": list of dicts, # The list of completions + { + "rank": int, # The rank of the completion (lowest is preferred) + "label": float, # The label of the completion + "completion": list of dicts, # The completion message + } + } + """ + + def __init__(self, dataset_path: str) -> None: + self.formatted_ds = DatasetDict({"local": load_dataset("json", data_files=dataset_path, split="train")}) + + self.task_spec = TaskDataSpec( + task_name="PreferenceData", + ) \ No newline at end of file From 8a28af76f7431947c062e61e3028aae330449228 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 15 Jul 2025 16:01:08 -0700 Subject: [PATCH 06/47] rewards tensor shape Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/loss_functions.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nemo_rl/algorithms/loss_functions.py b/nemo_rl/algorithms/loss_functions.py index 6e48babda8..4e46a8a85c 100644 --- a/nemo_rl/algorithms/loss_functions.py +++ b/nemo_rl/algorithms/loss_functions.py @@ -406,6 +406,7 @@ def __init__(self): self.loss_type = LossType.SEQUENCE_LEVEL def split_output_tensor(self, tensor: Tensor) -> tuple[Tensor, Tensor]: + # tensor is of shape (2*micro_batch_size,) return tensor[::2], tensor[1::2] def _preference_loss( @@ -455,6 +456,8 @@ def __call__( ) -> tuple[torch.Tensor, dict[str, Any]]: sample_mask = data["sample_mask"] + rewards = rewards.squeeze(-1) + ( preference_loss, accuracy, From 7de3b93404bbbc4f3863c652ddeec0097037e6fe Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 15 Jul 2025 16:16:41 -0700 Subject: [PATCH 07/47] adding unit tests Signed-off-by: Julien Veron Vialard --- .../data/hf_datasets/preference_dataset.py | 1 - .../hf_datasets/test_preference_dataset.py | 117 ++++++++++++++++++ 2 files changed, 117 insertions(+), 1 deletion(-) create mode 100644 
tests/unit/data/hf_datasets/test_preference_dataset.py diff --git a/nemo_rl/data/hf_datasets/preference_dataset.py b/nemo_rl/data/hf_datasets/preference_dataset.py index 3b96151c0c..962867f613 100644 --- a/nemo_rl/data/hf_datasets/preference_dataset.py +++ b/nemo_rl/data/hf_datasets/preference_dataset.py @@ -30,7 +30,6 @@ class PreferenceDataset: "completions": list of dicts, # The list of completions { "rank": int, # The rank of the completion (lowest is preferred) - "label": float, # The label of the completion "completion": list of dicts, # The completion message } } diff --git a/tests/unit/data/hf_datasets/test_preference_dataset.py b/tests/unit/data/hf_datasets/test_preference_dataset.py new file mode 100644 index 0000000000..e2f04de379 --- /dev/null +++ b/tests/unit/data/hf_datasets/test_preference_dataset.py @@ -0,0 +1,117 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +import tempfile + +import pytest + +from nemo_rl.data.hf_datasets.preference_dataset import PreferenceDataset + + +@pytest.fixture +def mock_preference_data(): + """Create temporary preference dataset files with sample data.""" + preference_data = [ + { + "context": [ + {"role": "user", "content": "What is 2+2?"} + ], + "completions": [ + { + "rank": 1, + "completion": [ + {"role": "assistant", "content": "The answer is 4."} + ] + }, + { + "rank": 2, + "completion": [ + {"role": "assistant", "content": "I don't know."} + ] + } + ] + }, + { + "context": [ + {"role": "user", "content": "What is the capital of France?"} + ], + "completions": [ + { + "rank": 1, + "completion": [ + {"role": "assistant", "content": "The capital of France is Paris."} + ] + }, + { + "rank": 2, + "completion": [ + {"role": "assistant", "content": "The capital of France is London."} + ] + }, + ] + } + ] + + with tempfile.NamedTemporaryFile( + mode="w", suffix=".json", delete=False + ) as preference_file: + json.dump(preference_data, preference_file) + preference_path = preference_file.name + + yield preference_path + # Cleanup + os.unlink(preference_path) + + +def test_preference_dataset_initialization(mock_preference_data): + """Test that PreferenceDataset initializes correctly with valid data files.""" + preference_path = mock_preference_data + + dataset = PreferenceDataset(dataset_path=preference_path) + + # Verify dataset initialization + assert dataset.task_spec.task_name == "PreferenceData" + + # Verify formatted_ds structure + assert "local" in dataset.formatted_ds + assert len(dataset.formatted_ds["local"]) == 2 + + +def test_preference_dataset_data_format(mock_preference_data): + """Test that PreferenceDataset correctly loads and formats the data.""" + preference_path = mock_preference_data + dataset = PreferenceDataset(dataset_path=preference_path) + + # Verify data format + sample = dataset.formatted_ds["local"][0] + assert "context" in sample + assert 
"completions" in sample + + # Verify context structure + assert isinstance(sample["context"], list) + assert len(sample["context"]) == 1 + assert "role" in sample["context"][0] + assert "content" in sample["context"][0] + + # Verify completions structure + assert isinstance(sample["completions"], list) + assert len(sample["completions"]) == 2 + + for completion in sample["completions"]: + assert "rank" in completion + assert "completion" in completion + assert isinstance(completion["rank"], int) + assert isinstance(completion["completion"], list) From 63dd1f3ae3ad04f0d9cc3c2d7e0b6d92b1e12d7d Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 15 Jul 2025 16:24:49 -0700 Subject: [PATCH 08/47] updating docs Signed-off-by: Julien Veron Vialard --- docs/guides/rm.md | 29 +++++++++++++++++++ .../data/hf_datasets/preference_dataset.py | 2 +- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 7accacb2fb..ec77434a0d 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -22,3 +22,32 @@ The YAML config must be specified. It uses the same base template as the SFT con ## Datasets By default, NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. + +You can also use custom preference datasets by configuring as follows: +``` +data: + dataset_name: "PreferenceData::" + validation_dataset_name: ["PreferenceData::"] +``` + +Each custom preference dataset should be a JSON file formatted as: +``` +{ + "context": list of dicts, # The input message + "completions": list of dicts, # The list of completions + { + "rank": int, # The rank of the completion (lower rank is preferred) + "completion": list of dicts, # The completion message + } +} +``` + +NeMo RL supports using multiple custom validation preference datasets during RM training. 
+``` +data: + dataset_name: "PreferenceData::" + validation_dataset_name: [ + "PreferenceData::", + "PreferenceData::", + ] +``` \ No newline at end of file diff --git a/nemo_rl/data/hf_datasets/preference_dataset.py b/nemo_rl/data/hf_datasets/preference_dataset.py index 962867f613..b1c1dc08ea 100644 --- a/nemo_rl/data/hf_datasets/preference_dataset.py +++ b/nemo_rl/data/hf_datasets/preference_dataset.py @@ -29,7 +29,7 @@ class PreferenceDataset: "context": list of dicts, # The input message "completions": list of dicts, # The list of completions { - "rank": int, # The rank of the completion (lowest is preferred) + "rank": int, # The rank of the completion (lower rank is preferred) "completion": list of dicts, # The completion message } } From 8fb280b65f5aee006d83a0cc242c3a319970c2f0 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 16 Jul 2025 10:48:04 -0700 Subject: [PATCH 09/47] update config and skip is_tied_lm_head for RM Signed-off-by: Julien Veron Vialard --- examples/configs/rm.yaml | 5 ++--- examples/run_rm.py | 3 +++ nemo_rl/models/policy/dtensor_policy_worker.py | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index c01d78c781..75d9355c0d 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -1,6 +1,5 @@ # Bradley-Terry (BT) Reward Model Training Configuration -# (uses same base template as the SFT config but includes a new `reward_model_type` key that triggers Reward Model training) -sft: +rm: ## total number of steps to train will equal ## min((max_num_epochs * len(train_dataloader)), max_num_steps) max_num_epochs: 1 @@ -19,7 +18,7 @@ checkpointing: metric_name: "val_loss" higher_is_better: false keep_top_k: 3 - save_period: ${sft.val_period} + save_period: ${rm.val_period} policy: model_name: "meta-llama/Llama-3.2-1B-Instruct" diff --git a/examples/run_rm.py b/examples/run_rm.py index 5c3abfaffe..5a232c5aa0 100644 --- a/examples/run_rm.py +++ 
b/examples/run_rm.py @@ -153,6 +153,9 @@ def main(): config = load_config(args.config) print(f"Loaded configuration from: {args.config}") + # Uses the same base template as the SFT config but includes a new `reward_model_type` key that triggers Reward Model training + config.sft = "${.rm}" + if overrides: print(f"Overrides: {overrides}") config = parse_hydra_overrides(config, overrides) diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 485a5330bc..8cf69947c1 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -318,7 +318,7 @@ def __init__( embed_tokens_weight = param break - if embed_tokens_weight is not None: + if embed_tokens_weight is not None and hasattr(self.model, "lm_head"): self.model.lm_head.weight = embed_tokens_weight # Manually broadcast buffers From 3e3b03a21674739862e4569ecf86eb0407c66213 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 16 Jul 2025 11:20:32 -0700 Subject: [PATCH 10/47] use tokenizer.pad_token_id if model.config.pad_token_id is not defined Signed-off-by: Julien Veron Vialard --- nemo_rl/models/policy/dtensor_policy_worker.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 8cf69947c1..81e593aac9 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -232,14 +232,7 @@ def __init__( ) if self.model.config.pad_token_id is None: - if isinstance(self.model.config.eos_token_id, int): - self.model.config.pad_token_id = self.model.config.eos_token_id - elif isinstance(self.model.config.eos_token_id, list): - self.model.config.pad_token_id = self.model.config.eos_token_id[0] - else: - raise ValueError( - f"Unknown eos_token_id type: {type(self.model.config.eos_token_id)}" - ) + self.model.config.pad_token_id = 
tokenizer.pad_token_id # caching since this property is not always preserved after FSDP self.num_tied_weights = len(find_tied_parameters(self.model)) From ed24aea0fb25baea3bad935c544ed2371c254e43 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 16 Jul 2025 11:48:14 -0700 Subject: [PATCH 11/47] nit Signed-off-by: Julien Veron Vialard --- nemo_rl/models/policy/dtensor_policy_worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 81e593aac9..1886fe9ccf 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -301,7 +301,7 @@ def __init__( # Handle tied word embeddings after loading the state dict # We need to actually tie the parameters at the model level - is_tied_lm_head = getattr( + is_tied_lm_head = hasattr(self.model, "lm_head") and getattr( getattr(self.model, "config", {}), "tie_word_embeddings", False ) if is_tied_lm_head: @@ -311,7 +311,7 @@ def __init__( embed_tokens_weight = param break - if embed_tokens_weight is not None and hasattr(self.model, "lm_head"): + if embed_tokens_weight is not None: self.model.lm_head.weight = embed_tokens_weight # Manually broadcast buffers From af173149b57964e36ae791924a5bfb4847016caf Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 16 Jul 2025 13:51:08 -0700 Subject: [PATCH 12/47] update functional test and cicd Signed-off-by: Julien Veron Vialard --- .github/workflows/cicd-main.yml | 1 + tests/functional/rm.sh | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 06a2022bbe..80d4813040 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ -231,6 +231,7 @@ jobs: time uv run --no-sync bash ./tests/functional/grpo_multiturn.sh time uv run --no-sync bash ./tests/functional/grpo_non_colocated.sh 
time uv run --no-sync bash ./tests/functional/dpo.sh + time uv run --no-sync bash ./tests/functional/rm.sh time uv run --no-sync bash ./tests/functional/eval.sh time uv run --no-sync bash ./tests/functional/eval_async.sh time uv run --no-sync bash ./tests/functional/test_mcore_extra_installed_correctly.sh diff --git a/tests/functional/rm.sh b/tests/functional/rm.sh index b7d23e7132..1bfe666d95 100644 --- a/tests/functional/rm.sh +++ b/tests/functional/rm.sh @@ -24,9 +24,9 @@ cd $PROJECT_ROOT uv run $PROJECT_ROOT/examples/run_rm.py \ --config examples/configs/rm.yaml \ cluster.gpus_per_node=2 \ - sft.max_num_steps=3 \ - sft.val_batches=1 \ - sft.val_period=3 \ + rm.max_num_steps=3 \ + rm.val_batches=1 \ + rm.val_period=3 \ logger.tensorboard_enabled=true \ logger.log_dir=$LOG_DIR \ logger.wandb_enabled=false \ From 1034634a519c2ed23769e667d50fd5e5bb4b2511 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 17 Jul 2025 08:49:46 -0700 Subject: [PATCH 13/47] nit docs Signed-off-by: Julien Veron Vialard --- docs/guides/rm.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 7accacb2fb..3a42f55806 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -15,9 +15,9 @@ uv run examples/run_rm.py --config examples/configs/rm.yaml uv run examples/run_rm.py --config examples/configs/rm.yaml policy.model_name=Qwen/Qwen2.5-1.5B ``` -The YAML config must be specified. It uses the same base template as the SFT config but includes a new `reward_model_type` key that triggers Reward Model training. An example RM config file can be found at [examples/configs/rm.yaml](../../examples/configs/rm.yaml). +You must specify the YAML config. It shares the same base template as the SFT config but adds a new `reward_model_type` key to trigger RM training. You can find an example RM config file at [examples/configs/rm.yaml](../../examples/configs/rm.yaml). 
-**Reminder**: Don't forget to set your `HF_HOME`, `WANDB_API_KEY`, and `HF_DATASETS_CACHE` (if needed). You'll need to do a `huggingface-cli login` as well for Llama models. +**Reminder**: Set your `HF_HOME`, `WANDB_API_KEY`, and `HF_DATASETS_CACHE` (if needed). Make sure to log in using `huggingface-cli` if you're working with Llama models. ## Datasets From 02687ce15f59513b1a3b9baff8ff3a80327421b6 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 17 Jul 2025 09:08:42 -0700 Subject: [PATCH 14/47] keep individual metrics then aggregate on the entire dataset Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/sft.py | 88 ++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 52 deletions(-) diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index bf1058654f..b1abeb42fd 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from collections import defaultdict import os import warnings from pathlib import Path @@ -302,7 +303,7 @@ def validate_one_dataset( # Show a progress indicator for validation # val_total = len(val_dataloader) - list_of_val_metrics = [] + dict_val_metrics = defaultdict(list) num_valid_batches = 0 @@ -363,25 +364,23 @@ def validate_one_dataset( ) else: if model_type == "lm": - list_of_val_metrics.append( - SFTValMetrics(val_loss=float(val_results["loss"])) - ) + dict_val_metrics["val_loss"].append(float(val_results["loss"])) elif model_type == "reward": - list_of_val_metrics.append( - RMValMetrics( - val_loss=sum(val_results["all_mb_metrics"]["loss"]), - accuracy=sum(val_results["all_mb_metrics"]["accuracy"]), - rewards_chosen_mean=sum( - val_results["all_mb_metrics"]["rewards_chosen_mean"] - ), - rewards_rejected_mean=sum( - val_results["all_mb_metrics"]["rewards_rejected_mean"] - ), - num_valid_samples=sum( - val_results["all_mb_metrics"]["num_valid_samples"] - ), - ) - ) + sum_num_valid_samples = sum(val_results["all_mb_metrics"]["num_valid_samples"]) + + dict_val_metrics["val_loss"] += [ + value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["loss"] + ] + dict_val_metrics["accuracy"] += [ + value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["accuracy"] + ] + dict_val_metrics["rewards_chosen_mean"] += [ + value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["rewards_chosen_mean"] + ] + dict_val_metrics["rewards_rejected_mean"] += [ + value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["rewards_rejected_mean"] + ] + dict_val_metrics["num_valid_samples"] += val_results["all_mb_metrics"]["num_valid_samples"] else: raise NotImplementedError( f"Model type {model_type} not implemented for SFT training." 
@@ -395,42 +394,27 @@ def validate_one_dataset( if num_valid_batches > 0: if model_type == "lm": val_metrics = SFTValMetrics( - val_loss=sum([m["val_loss"] for m in list_of_val_metrics]) + val_loss=sum(dict_val_metrics["val_loss"]) / num_valid_batches ) - val_metrics["val_loss"] /= num_valid_batches elif model_type == "reward": - sum_num_valid_samples = sum( - [m["num_valid_samples"] for m in list_of_val_metrics] - ) + assert len(dict_val_metrics["val_loss"]) == len(dict_val_metrics["accuracy"]) \ + == len(dict_val_metrics["rewards_chosen_mean"]) == len(dict_val_metrics["rewards_rejected_mean"]) \ + == len(dict_val_metrics["num_valid_samples"]) + + sum_num_valid_samples = sum(dict_val_metrics["num_valid_samples"]) val_metrics = RMValMetrics( - val_loss=sum( - [ - m["val_loss"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, - accuracy=sum( - [ - m["accuracy"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, - rewards_chosen_mean=sum( - [ - m["rewards_chosen_mean"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, - rewards_rejected_mean=sum( - [ - m["rewards_rejected_mean"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, + val_loss=sum([ + value * weight for value, weight in zip(dict_val_metrics["val_loss"], dict_val_metrics["num_valid_samples"]) + ]) / sum_num_valid_samples, + accuracy=sum([ + value * weight for value, weight in zip(dict_val_metrics["accuracy"], dict_val_metrics["num_valid_samples"]) + ]) / sum_num_valid_samples, + rewards_chosen_mean=sum([ + value * weight for value, weight in zip(dict_val_metrics["rewards_chosen_mean"], dict_val_metrics["num_valid_samples"]) + ]) / sum_num_valid_samples, + rewards_rejected_mean=sum([ + value * weight for value, weight in zip(dict_val_metrics["rewards_rejected_mean"], dict_val_metrics["num_valid_samples"]) + ]) / sum_num_valid_samples, 
num_valid_samples=sum_num_valid_samples, ) else: From 24c5fd0d3f2b0387140d931c02447d0728c9d855 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Fri, 18 Jul 2025 10:58:01 -0700 Subject: [PATCH 15/47] nit code and doc changes Signed-off-by: Julien Veron Vialard --- docs/guides/rm.md | 38 +++++++--- examples/configs/rm.yaml | 7 +- examples/run_rm.py | 72 +++++++++---------- nemo_rl/algorithms/sft.py | 67 +++++++---------- .../data/hf_datasets/preference_dataset.py | 8 +-- .../hf_datasets/test_preference_dataset.py | 8 ++- 6 files changed, 103 insertions(+), 97 deletions(-) diff --git a/docs/guides/rm.md b/docs/guides/rm.md index ec77434a0d..224bb899ae 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -23,31 +23,47 @@ The YAML config must be specified. It uses the same base template as the SFT con By default, NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. -You can also use custom preference datasets by configuring as follows: +You can also configure custom preference datasets (for training and/or validation) as follows: ``` data: - dataset_name: "PreferenceData::" - validation_dataset_name: ["PreferenceData::"] + dataset_name: "PreferenceData::" + val_dataset_name: ["PreferenceData::"] ``` +Note: +- The name of any custom preference dataset must not contain `:`. +- If you are using a custom preference dataset for training, you must specify a custom preference dataset for validation. +- If you are using a logger, the prefix used for the custom validation preference dataset will be `validation-`. -Each custom preference dataset should be a JSON file formatted as: +When using `HelpSteer3` as the training dataset, the default validation set is also used and logged under the prefix `validation`. 
You can replace it with a custom preference dataset as follows: +``` +data: + dataset_name: "HelpSteer3" + val_dataset_name: ["PreferenceData:validation:"] +``` + +Each custom preference dataset should be a JSONL file, with each line containing a valid JSON object formatted like this: ``` { - "context": list of dicts, # The input message + "context": list of dicts, # The prompt message (including previous turns, if any) "completions": list of dicts, # The list of completions { "rank": int, # The rank of the completion (lower rank is preferred) - "completion": list of dicts, # The completion message + "completion": list of dicts, # The completion message(s) } } ``` -NeMo RL supports using multiple custom validation preference datasets during RM training. +Currently, RM training supports only two completions (where rank 0 is preferred and rank 1 is rejected), with each completion being a single response. For example: +``` +{"context": [{"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "The capital of France is Paris."}, {"role": "user", "content": "Thanks! 
And what's the capital of Germany?"}], "completions": [{"rank": 0, "completion": [{"role": "assistant", "content": "The capital of Germany is Berlin."}]}, {"rank": 1, "completion": [{"role": "assistant", "content": "The capital of Germany is Munich."}]}]} +``` + +NeMo RL supports using multiple custom validation preference datasets during RM training: ``` data: - dataset_name: "PreferenceData::" - validation_dataset_name: [ - "PreferenceData::", - "PreferenceData::", + dataset_name: "PreferenceData::" + val_dataset_name: [ + "PreferenceData::", + "PreferenceData::", ] ``` \ No newline at end of file diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index f6e0106c03..eade1bef5c 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -120,8 +120,11 @@ policy: data: max_input_seq_length: ${policy.max_total_sequence_length} dataset_name: "HelpSteer3" - # You can optionally specify a list of validation preference datasets: - # validation_dataset_name: ["PreferenceData::"] + # You can optionally configure (multiple) custom validation preference datasets as follows: + # val_dataset_name: [ + # "PreferenceData::", + # "PreferenceData::", + # ] logger: log_dir: "logs" # Base directory for all logs diff --git a/examples/run_rm.py b/examples/run_rm.py index bd7a32a420..91aceabfcb 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -56,18 +56,21 @@ def rm_preprocessor( idx: int, ) -> DatumSpec: """Process a datum dictionary for RM training.""" - # Generic preference dataset format + # Custom preference dataset if task_data_spec.task_name == "PreferenceData": - assert len(datum_dict["completions"]) == 2 # Currently only supporting 2 responses + assert len(datum_dict["completions"]) == 2 # Currently only supporting 2 completions + # Lower rank is preferred if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: chosen_completion = datum_dict["completions"][0] rejected_completion = datum_dict["completions"][1] - else: 
+ elif datum_dict["completions"][0]["rank"] > datum_dict["completions"][1]["rank"]: chosen_completion = datum_dict["completions"][1] rejected_completion = datum_dict["completions"][0] + else: + raise NotImplementedError("Ties are not supported yet.") messages_chosen = datum_dict["context"] + chosen_completion["completion"] messages_rejected = datum_dict["context"] + rejected_completion["completion"] - # Legacy dataset format + # Legacy dataset elif task_data_spec.task_name == "HelpSteer3": messages_chosen = datum_dict["prompt"] + [ {"role": "assistant", "content": datum_dict["chosen_response"]} @@ -126,16 +129,16 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): print("\nā–¶ Setting up data...") data_cls = data_config["dataset_name"] - # Generic preference dataset format + # Custom preference dataset if data_cls.startswith("PreferenceData:"): - _, _, data_path = data_cls.split(":") + _, _, data_path = data_cls.split(":", 2) data = hf_datasets.PreferenceDataset(data_path) train_dataset = data.formatted_ds["local"] val_dataset = None print( f" āœ“ Training dataset loaded with {len(data.formatted_ds['local'])} samples." 
) - # Legacy dataset format + # Legacy dataset elif data_cls == "HelpSteer3": data = hf_datasets.HelpSteer3Dataset() train_dataset = data.formatted_ds["train"] @@ -156,41 +159,38 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): max_seq_length=data_config["max_input_seq_length"], ) - if "validation_dataset_name" in data_config: - # Only supported for generic preference dataset format - if isinstance(data_config["validation_dataset_name"], list): - val_dataset = { - "validation": AllTaskProcessedDataset( - val_dataset, - tokenizer, - sft_task_spec, - rm_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) - } if val_dataset else {} - for val_data_cls in data_config["validation_dataset_name"]: - assert val_data_cls.startswith("PreferenceData:") - _, val_dataset_name, val_data_path = val_data_cls.split(":") - val_data = hf_datasets.PreferenceDataset(val_data_path) - val_dataset.update({ - val_dataset_name: AllTaskProcessedDataset( - val_data.formatted_ds["local"], - tokenizer, - val_data.task_spec, - rm_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) - }) - else: - raise ValueError(f"Invalid type for validation_dataset_name: {type(data_config['validation_dataset_name'])}") - else: - val_dataset = AllTaskProcessedDataset( + val_dataset = { + "validation": AllTaskProcessedDataset( val_dataset, tokenizer, sft_task_spec, rm_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) + } if val_dataset else {} + + if data_config.get("val_dataset_name") is not None: + # Only supported for custom preference datasets + assert isinstance(data_config["val_dataset_name"], list), f"Invalid type for val_dataset_name: {type(data_config['val_dataset_name'])}" + for val_data_cls in data_config["val_dataset_name"]: + assert val_data_cls.startswith("PreferenceData:") + _, val_dataset_name, val_data_path = val_data_cls.split(":", 2) + assert val_dataset_name not in val_dataset or val_dataset_name == "validation" # 
Users can override the default "validation" set + if val_dataset_name == "validation": + print(f" āœ“ Overriding the default validation dataset") + val_data = hf_datasets.PreferenceDataset(val_data_path) + print( + f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["local"])} samples." + ) + val_dataset[val_dataset_name] = AllTaskProcessedDataset( + val_data.formatted_ds["local"], + tokenizer, + val_data.task_spec, + rm_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + else: + assert len(val_dataset) == 1, f"Expected 1 validation dataset, got {len(val_dataset)}" return train_dataset, val_dataset, sft_task_spec diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index b1abeb42fd..559daea935 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -188,13 +188,15 @@ def setup( ) for k, v in val_dataset.items() } else: - val_dataloader = StatefulDataLoader( - val_dataset, - batch_size=sft_config["val_global_batch_size"], - shuffle=False, - collate_fn=collate_fn, - drop_last=True, - ) + val_dataloader = { + "validation": StatefulDataLoader( + val_dataset, + batch_size=sft_config["val_global_batch_size"], + shuffle=False, + collate_fn=collate_fn, + drop_last=True, + ) + } # ========================== # Cluster @@ -264,16 +266,15 @@ def validate( model_type: str, logger: Logger, ): - if isinstance(val_dataloader, dict): - for k, v in val_dataloader.items(): - k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, sft_task_spec, val_batches, val_batch_size, val_mbs, model_type) - logger.log_metrics(k_val_metrics, step, prefix=f"validation-{k}") - logger.log_metrics(k_validation_timings, step, prefix=f"timing/validation-{k}") - else: - val_metrics, validation_timings = validate_one_dataset(policy, val_dataloader, tokenizer, loss_fn, step, master_config, sft_task_spec, val_batches, val_batch_size, val_mbs, model_type) - 
logger.log_metrics(val_metrics, step, prefix="validation") - logger.log_metrics(validation_timings, step, prefix="timing/validation") + for k, v in val_dataloader.items(): + k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, sft_task_spec, val_batches, val_batch_size, val_mbs, model_type) + if k == "validation": + prefix = "validation" + else: + prefix = f"validation-{k}" + logger.log_metrics(k_val_metrics, step, prefix=prefix) + logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") return None, None @@ -367,19 +368,10 @@ def validate_one_dataset( dict_val_metrics["val_loss"].append(float(val_results["loss"])) elif model_type == "reward": sum_num_valid_samples = sum(val_results["all_mb_metrics"]["num_valid_samples"]) - - dict_val_metrics["val_loss"] += [ - value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["loss"] - ] - dict_val_metrics["accuracy"] += [ - value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["accuracy"] - ] - dict_val_metrics["rewards_chosen_mean"] += [ - value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["rewards_chosen_mean"] - ] - dict_val_metrics["rewards_rejected_mean"] += [ - value * sum_num_valid_samples for value in val_results["all_mb_metrics"]["rewards_rejected_mean"] - ] + for k in ["loss", "accuracy", "rewards_chosen_mean", "rewards_rejected_mean"]: + dict_val_metrics[k if k != "loss" else "val_loss"] += [ + value * sum_num_valid_samples for value in val_results["all_mb_metrics"][k] + ] dict_val_metrics["num_valid_samples"] += val_results["all_mb_metrics"]["num_valid_samples"] else: raise NotImplementedError( @@ -403,19 +395,12 @@ def validate_one_dataset( sum_num_valid_samples = sum(dict_val_metrics["num_valid_samples"]) val_metrics = RMValMetrics( - val_loss=sum([ - value * weight for value, weight in zip(dict_val_metrics["val_loss"], dict_val_metrics["num_valid_samples"]) - ]) / 
sum_num_valid_samples, - accuracy=sum([ - value * weight for value, weight in zip(dict_val_metrics["accuracy"], dict_val_metrics["num_valid_samples"]) - ]) / sum_num_valid_samples, - rewards_chosen_mean=sum([ - value * weight for value, weight in zip(dict_val_metrics["rewards_chosen_mean"], dict_val_metrics["num_valid_samples"]) - ]) / sum_num_valid_samples, - rewards_rejected_mean=sum([ - value * weight for value, weight in zip(dict_val_metrics["rewards_rejected_mean"], dict_val_metrics["num_valid_samples"]) - ]) / sum_num_valid_samples, num_valid_samples=sum_num_valid_samples, + **{ + k: sum([value * weight for value, weight in zip(dict_val_metrics[k], dict_val_metrics["num_valid_samples"])]) + / sum_num_valid_samples + for k in ["val_loss", "accuracy", "rewards_chosen_mean", "rewards_rejected_mean"] + } ) else: warnings.warn( diff --git a/nemo_rl/data/hf_datasets/preference_dataset.py b/nemo_rl/data/hf_datasets/preference_dataset.py index b1c1dc08ea..292b507f30 100644 --- a/nemo_rl/data/hf_datasets/preference_dataset.py +++ b/nemo_rl/data/hf_datasets/preference_dataset.py @@ -23,14 +23,14 @@ class PreferenceDataset: """Preference dataset. - This class handles loading of preference data. - The input JSON files should contain examples with the following structure: + This class handles loading of custom preference data. 
+ The input JSONL file should contain valid JSON objects formatted like this: { - "context": list of dicts, # The input message + "context": list of dicts, # The prompt message (including previous turns, if any) "completions": list of dicts, # The list of completions { "rank": int, # The rank of the completion (lower rank is preferred) - "completion": list of dicts, # The completion message + "completion": list of dicts, # The completion message(s) } } """ diff --git a/tests/unit/data/hf_datasets/test_preference_dataset.py b/tests/unit/data/hf_datasets/test_preference_dataset.py index e2f04de379..e15d88dfa1 100644 --- a/tests/unit/data/hf_datasets/test_preference_dataset.py +++ b/tests/unit/data/hf_datasets/test_preference_dataset.py @@ -71,9 +71,11 @@ def mock_preference_data(): json.dump(preference_data, preference_file) preference_path = preference_file.name - yield preference_path - # Cleanup - os.unlink(preference_path) + try: + yield preference_path + finally: + # Cleanup + os.unlink(preference_path) def test_preference_dataset_initialization(mock_preference_data): From 24807c3f6d5b7817c6041fd0acef268657ae369c Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Mon, 21 Jul 2025 11:07:45 -0700 Subject: [PATCH 16/47] split sft.py and rm.py Signed-off-by: Julien Veron Vialard --- examples/run_rm.py | 23 +- nemo_rl/algorithms/rm.py | 630 +++++++++++++++++++++++++++++++ nemo_rl/algorithms/sft.py | 219 ++--------- tests/unit/algorithms/test_rm.py | 30 +- 4 files changed, 679 insertions(+), 223 deletions(-) create mode 100644 nemo_rl/algorithms/rm.py diff --git a/examples/run_rm.py b/examples/run_rm.py index 5a232c5aa0..952626197f 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -21,7 +21,7 @@ from omegaconf import OmegaConf from transformers import AutoTokenizer -from nemo_rl.algorithms.sft import MasterConfig, setup, sft_train +from nemo_rl.algorithms.rm import MasterConfig, rm_train, setup from nemo_rl.algorithms.utils import get_tokenizer from 
nemo_rl.data import DataConfig, hf_datasets from nemo_rl.data.datasets import AllTaskProcessedDataset @@ -121,12 +121,12 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): train_dataset = data.formatted_ds["train"] val_dataset = data.formatted_ds["validation"] - sft_task_spec = data.task_spec + rm_task_spec = data.task_spec train_dataset = AllTaskProcessedDataset( train_dataset, tokenizer, - sft_task_spec, + rm_task_spec, rm_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) @@ -134,12 +134,12 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): val_dataset = AllTaskProcessedDataset( val_dataset, tokenizer, - sft_task_spec, + rm_task_spec, rm_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) - return train_dataset, val_dataset, sft_task_spec + return train_dataset, val_dataset, rm_task_spec def main(): @@ -153,9 +153,6 @@ def main(): config = load_config(args.config) print(f"Loaded configuration from: {args.config}") - # Uses the same base template as the SFT config but includes a new `reward_model_type` key that triggers Reward Model training - config.sft = "${.rm}" - if overrides: print(f"Overrides: {overrides}") config = parse_hydra_overrides(config, overrides) @@ -183,7 +180,7 @@ def main(): ( dataset, val_dataset, - sft_task_spec, + rm_task_spec, ) = setup_data(tokenizer, config["data"]) ( @@ -194,10 +191,10 @@ def main(): loss_fn, logger, checkpointer, - sft_save_state, + rm_save_state, master_config, ) = setup(config, tokenizer, dataset, val_dataset) - sft_train( + rm_train( policy, train_dataloader, val_dataloader, @@ -205,9 +202,9 @@ def main(): loss_fn, master_config, logger, - sft_task_spec, + rm_task_spec, checkpointer, - sft_save_state, + rm_save_state, ) diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py new file mode 100644 index 0000000000..9732c84259 --- /dev/null +++ b/nemo_rl/algorithms/rm.py @@ -0,0 +1,630 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import warnings +from pathlib import Path +from typing import Optional, TypedDict + +import numpy as np +import torch +from torchdata.stateful_dataloader import StatefulDataLoader +from transformers import AutoTokenizer + +from nemo_rl.algorithms.loss_functions import ( + PreferenceLoss, +) +from nemo_rl.algorithms.utils import set_seed +from nemo_rl.data import DataConfig +from nemo_rl.data.datasets import ( + AllTaskProcessedDataset, + preference_collate_fn, +) +from nemo_rl.data.interfaces import TaskDataSpec +from nemo_rl.data.llm_message_utils import ( + add_loss_mask_to_message_log, + batched_message_log_to_flat_message, +) +from nemo_rl.distributed.batched_data_dict import BatchedDataDict +from nemo_rl.distributed.virtual_cluster import ClusterConfig, RayVirtualCluster +from nemo_rl.models.policy import PolicyConfig +from nemo_rl.models.policy.interfaces import PolicyInterface +from nemo_rl.models.policy.lm_policy import Policy +from nemo_rl.utils.checkpoint import CheckpointingConfig, CheckpointManager +from nemo_rl.utils.logger import Logger, LoggerConfig +from nemo_rl.utils.nsys import maybe_gpu_profile_step +from nemo_rl.utils.timer import Timer + + +class RMSaveState(TypedDict): + epoch: int # Track current epoch + step: int # Track step within current epoch + total_steps: int # Track total number of steps across all epochs + val_loss: float + consumed_samples: int + + +def 
_default_rm_save_state() -> RMSaveState: + return { + "epoch": 0, + "step": 0, + "total_steps": 0, + "consumed_samples": 0, + } + + +class RMConfig(TypedDict): + max_num_steps: int + max_num_epochs: int + val_period: int + val_batches: int + val_global_batch_size: int + val_micro_batch_size: int + val_at_start: bool + seed: int + + +class MasterConfig(TypedDict): + policy: PolicyConfig + data: DataConfig + rm: RMConfig + logger: LoggerConfig + cluster: ClusterConfig + checkpointing: CheckpointingConfig + + +class RMValMetrics(TypedDict): + val_loss: float + accuracy: float + rewards_chosen_mean: float + rewards_rejected_mean: float + num_valid_samples: float + + +# ======================================================= +# Setup & Initialization +# ======================================================= +def setup( + master_config: MasterConfig, + tokenizer: AutoTokenizer, + train_dataset: AllTaskProcessedDataset, + val_dataset: AllTaskProcessedDataset, +) -> tuple[ + Policy, + RayVirtualCluster, + StatefulDataLoader, + StatefulDataLoader, + PreferenceLoss, + MasterConfig, + Logger, + TaskDataSpec, + RMSaveState, +]: + """Main entry point for running RM algorithm. 
+ + Returns: + Tuple of policy, cluster, dataloader, tokenizer, loss_fn, math_env, master_config, logger + """ + set_seed(master_config["rm"]["seed"]) + + # Extract individual configs for easier access + policy_config = master_config["policy"] + data_config = master_config["data"] + logger_config = master_config["logger"] + cluster_config = master_config["cluster"] + rm_config = master_config["rm"] + + # ========================== + # Logger + # ========================== + logger = Logger(logger_config) + logger.log_hyperparams(master_config) + + # ========================== + # Checkpointing + # ========================== + checkpointer = CheckpointManager(master_config["checkpointing"]) + last_checkpoint_path = checkpointer.get_latest_checkpoint_path() + rm_save_state: Optional[RMSaveState] = checkpointer.load_training_info( + last_checkpoint_path + ) + + # ========================== + # Data + # ========================== + train_dataloader = StatefulDataLoader( + train_dataset, + batch_size=policy_config["train_global_batch_size"], + shuffle=True, + collate_fn=preference_collate_fn, + drop_last=True, + ) + + if last_checkpoint_path is not None: + dataloader_state_dict = torch.load( + os.path.join(last_checkpoint_path, "train_dataloader.pt") + ) + train_dataloader.load_state_dict(dataloader_state_dict) + + val_dataloader = StatefulDataLoader( + val_dataset, + batch_size=rm_config["val_global_batch_size"], + shuffle=False, + collate_fn=preference_collate_fn, + drop_last=True, + ) + + # ========================== + # Cluster + # ========================== + print("\nā–¶ Setting up compute cluster...") + cluster = RayVirtualCluster( + name="rm_cluster", + bundle_ct_per_node_list=[cluster_config["gpus_per_node"]] + * cluster_config["num_nodes"], + use_gpus=True, + num_gpus_per_node=cluster_config["gpus_per_node"], + max_colocated_worker_groups=1, + ) + print(f" āœ“ Ray cluster initialized with {cluster_config['num_nodes']} nodes") + + # ========================== + 
# Training + # ========================== + print("\nā–¶ Setting up model...") + policy = Policy( + cluster=cluster, + config=policy_config, + tokenizer=tokenizer, + weights_path=Path(last_checkpoint_path) / "policy" / "weights" + if last_checkpoint_path + else None, + optimizer_path=Path(last_checkpoint_path) / "policy" / "optimizer" + if last_checkpoint_path + else None, + init_optimizer=True, + init_reference_model=False, + ) + loss_fn = PreferenceLoss() + print(" āœ“ Model initialized") + + print("\n" + "=" * 60) + print(" " * 18 + "SETUP COMPLETE") + print("=" * 60 + "\n") + + return ( + policy, + cluster, + train_dataloader, + val_dataloader, + loss_fn, + logger, + checkpointer, + rm_save_state, + master_config, + ) + + +# ======================================================= +# Training & Validation +# ======================================================= +def validate( + policy: PolicyInterface, + val_dataloader: StatefulDataLoader, + tokenizer, + loss_fn, + step: int, + master_config: MasterConfig, + rm_task_spec: TaskDataSpec, + val_batches: int, + val_batch_size: int, + val_mbs: int, +): + """Run validation on the validation dataset.""" + if val_dataloader is None: + print(" āš ļø No validation dataloader provided, skipping validation") + return + + timer = Timer() + + with timer.time("total_validation_time"): + print(f"ā–¶ Starting validation at step {step}...") + + # Show a progress indicator for validation + # val_total = len(val_dataloader) + + list_of_val_metrics = [] + + num_valid_batches = 0 + + policy.prepare_for_training() + for batch_idx, val_batch in enumerate(val_dataloader): + ## add loss mask based on role to every message + add_loss_mask_to_message_log( + val_batch["message_log"], + roles_to_train_on=["assistant"], + ) + + cat_and_padded, input_lengths = batched_message_log_to_flat_message( + val_batch["message_log"], + pad_value_dict={"token_ids": tokenizer.pad_token_id}, + make_sequence_length_divisible_by=master_config["policy"][ + 
"make_sequence_length_divisible_by" + ], + ) + + val_data: BatchedDataDict = BatchedDataDict( + { + "input_ids": cat_and_padded["token_ids"], + "input_lengths": input_lengths, + "token_mask": cat_and_padded["token_loss_mask"], + "sample_mask": val_batch["loss_multiplier"], + } + ) + + ## just run model fwd + val_results = policy.train( + val_data, + loss_fn, + eval_mode=True, + ## NOTE: we double the batch size here because each preference example corresponds to a pair of + ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. + gbs=val_batch_size * 2, + mbs=val_mbs * 2, + ) + + if len(val_results["all_mb_metrics"]) == 0: + warnings.warn( + "No validation metrics were collected for this batch." + " This is likely because there were no valid samples." + ) + else: + list_of_val_metrics.append( + RMValMetrics( + val_loss=sum(val_results["all_mb_metrics"]["loss"]), + accuracy=sum(val_results["all_mb_metrics"]["accuracy"]), + rewards_chosen_mean=sum( + val_results["all_mb_metrics"]["rewards_chosen_mean"] + ), + rewards_rejected_mean=sum( + val_results["all_mb_metrics"]["rewards_rejected_mean"] + ), + num_valid_samples=sum( + val_results["all_mb_metrics"]["num_valid_samples"] + ), + ) + ) + + num_valid_batches += 1 + + if val_batches > 0 and batch_idx >= val_batches - 1: + break + + if num_valid_batches > 0: + sum_num_valid_samples = sum( + [m["num_valid_samples"] for m in list_of_val_metrics] + ) + val_metrics = RMValMetrics( + val_loss=sum( + [ + m["val_loss"] * m["num_valid_samples"] + for m in list_of_val_metrics + ] + ) + / sum_num_valid_samples, + accuracy=sum( + [ + m["accuracy"] * m["num_valid_samples"] + for m in list_of_val_metrics + ] + ) + / sum_num_valid_samples, + rewards_chosen_mean=sum( + [ + m["rewards_chosen_mean"] * m["num_valid_samples"] + for m in list_of_val_metrics + ] + ) + / sum_num_valid_samples, + rewards_rejected_mean=sum( + [ + m["rewards_rejected_mean"] * m["num_valid_samples"] + for m in 
list_of_val_metrics + ] + ) + / sum_num_valid_samples, + num_valid_samples=sum_num_valid_samples, + ) + else: + warnings.warn( + "No validation metrics were collected." + " This is likely because there were no valid samples in the validation set." + ) + val_metrics = RMValMetrics( + val_loss=0.0, + accuracy=0.0, + rewards_chosen_mean=0.0, + rewards_rejected_mean=0.0, + num_valid_samples=0.0, + ) + + # Calculate validation metrics + policy.prepare_for_training() + + # Get timing metrics + timing_metrics = timer.get_timing_metrics(reduction_op="sum") + validation_time = timing_metrics.get("total_validation_time", 0) + + if num_valid_batches > 0: + # Print summary of validation results + print("\nšŸ“Š Validation Results:") + print(f" • Validation loss: {val_metrics['val_loss']:.4f}") + print(f" • Validation accuracy: {val_metrics['accuracy']:.4f}") + print( + f" • Validation rewards chosen mean: {val_metrics['rewards_chosen_mean']:.4f}" + ) + print( + f" • Validation rewards rejected mean: {val_metrics['rewards_rejected_mean']:.4f}" + ) + print( + f" • Validation num valid samples: {val_metrics['num_valid_samples']:.0f}" + ) + + # Print timing information + print("\n ā±ļø Validation Timing:") + validation_time = timing_metrics.get("total_validation_time", 0) + print(f" • Total validation time: {validation_time:.2f}s") + + # Make sure to reset the timer after validation + timer.reset() + + return val_metrics, timing_metrics + + +def rm_train( + policy, + train_dataloader, + val_dataloader, + tokenizer, + loss_fn, + master_config, + logger, + rm_task_spec, + checkpointer, + rm_save_state, +): + # Run basic rm training + timer = Timer() + + if rm_save_state is None: + rm_save_state = _default_rm_save_state() + current_epoch = 0 + current_step = 0 + total_steps = 0 + else: + current_epoch = rm_save_state["epoch"] + current_step = rm_save_state["step"] + total_steps = rm_save_state["total_steps"] + + rm_config = master_config["rm"] + # Validation configuration + 
val_period = rm_config["val_period"] + val_at_start = rm_config["val_at_start"] + max_num_epochs = rm_config["max_num_epochs"] + + # Run validation at the start if configured + if val_at_start and total_steps == 0: + print("\nšŸ” Running initial validation...") + val_metrics, validation_timings = validate( + policy, + val_dataloader, + tokenizer, + loss_fn, + step=0, + master_config=master_config, + rm_task_spec=rm_task_spec, + val_batches=rm_config["val_batches"], + val_batch_size=rm_config["val_global_batch_size"], + val_mbs=rm_config["val_micro_batch_size"], + ) + + logger.log_metrics(val_metrics, total_steps, prefix="validation") + logger.log_metrics(validation_timings, total_steps, prefix="timing/validation") + + policy.prepare_for_training() + + while current_epoch < max_num_epochs and ( + master_config["rm"]["max_num_steps"] == -1 + or total_steps < master_config["rm"]["max_num_steps"] + ): + print(f"\n{'=' * 25} Epoch {current_epoch + 1}/{max_num_epochs} {'=' * 25}") + + for batch in train_dataloader: + print( + f"\n{'=' * 25} Step {current_step + 1}/{min(len(train_dataloader), master_config['rm']['max_num_steps'] if master_config['rm']['max_num_steps'] != -1 else len(train_dataloader))} {'=' * 25}" + ) + maybe_gpu_profile_step(policy, total_steps + 1) + val_metrics, validation_timings = None, None + + with timer.time("total_step_time"): + # Prepare batch and generate responses + print("ā–¶ Preparing batch...") + with timer.time("data_processing"): + ## add loss mask based on role to every message + add_loss_mask_to_message_log( + batch["message_log"], + roles_to_train_on=["assistant"], + ) + + cat_and_padded, input_lengths = batched_message_log_to_flat_message( + batch["message_log"], + pad_value_dict={"token_ids": tokenizer.pad_token_id}, + make_sequence_length_divisible_by=master_config["policy"][ + "make_sequence_length_divisible_by" + ], + ) + + train_data: BatchedDataDict = BatchedDataDict( + { + "input_ids": cat_and_padded["token_ids"], + 
"input_lengths": input_lengths, + "token_mask": cat_and_padded["token_loss_mask"], + "sample_mask": batch["loss_multiplier"], + } + ) + + print("ā–¶ Taking a training step...") + + train_results = policy.train( + train_data, + loss_fn, + eval_mode=False, + ## NOTE: we double the batch size here because each preference example corresponds to a pair of + ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. + gbs=master_config["policy"]["train_global_batch_size"] * 2, + mbs=master_config["policy"]["train_micro_batch_size"] * 2, + ) + + is_last_step = ( + master_config["rm"]["max_num_steps"] != -1 + and total_steps + 1 >= master_config["rm"]["max_num_steps"] + ) or ( + current_epoch + 1 == max_num_epochs + and current_step + 1 == len(train_dataloader) + ) + + # Run validation if it's a validation step + if val_period > 0 and (total_steps + 1) % val_period == 0: + val_metrics, validation_timings = validate( + policy, + val_dataloader, + tokenizer, + loss_fn, + step=total_steps + 1, + master_config=master_config, + rm_task_spec=rm_task_spec, + val_batches=rm_config["val_batches"], + val_batch_size=rm_config["val_global_batch_size"], + val_mbs=rm_config["val_micro_batch_size"], + ) + logger.log_metrics( + validation_timings, total_steps + 1, prefix="timing/validation" + ) + logger.log_metrics( + val_metrics, total_steps + 1, prefix="validation" + ) + + ## Checkpointing + rm_save_state["consumed_samples"] += master_config["policy"][ + "train_global_batch_size" + ] + if master_config["checkpointing"]["enabled"] and ( + is_last_step + or (total_steps + 1) % master_config["checkpointing"]["save_period"] + == 0 + ): + ## +1 because step is 0-indexed + rm_save_state["step"] = (current_step + 1) % len(train_dataloader) + rm_save_state["total_steps"] = total_steps + 1 + rm_save_state["epoch"] = current_epoch + if val_metrics is not None: + rm_save_state["val_loss"] = val_metrics["val_loss"] + elif "val_loss" in rm_save_state: + del 
rm_save_state["val_loss"] + + if master_config["checkpointing"]["metric_name"] is not None: + if ( + master_config["checkpointing"]["metric_name"] + not in rm_save_state + ): + warnings.warn( + f"You asked to save checkpoints based on {master_config['checkpointing']['metric_name']} but the metric is not found in the save state. " + "Saving most recent k checkpoints instead." + ) + master_config["checkpointing"]["metric_name"] = None + + with timer.time("checkpointing"): + print(f"Saving checkpoint for step {total_steps + 1}...") + checkpoint_path = checkpointer.init_tmp_checkpoint( + total_steps + 1, rm_save_state, master_config + ) + + policy.save_checkpoint( + weights_path=os.path.join( + checkpoint_path, "policy", "weights" + ), + optimizer_path=os.path.join( + checkpoint_path, "policy", "optimizer" + ), + tokenizer_path=os.path.join( + checkpoint_path, "policy", "tokenizer" + ), + ) + torch.save( + train_dataloader.state_dict(), + os.path.join(checkpoint_path, "train_dataloader.pt"), + ) + checkpointer.finalize_checkpoint(checkpoint_path) + + losses = train_results["loss"] + metrics = { + "loss": train_results["loss"].numpy(), + "grad_norm": train_results["grad_norm"].numpy(), + } + metrics.update(train_results["all_mb_metrics"]) + for k, v in metrics.items(): + if k in {"lr", "wd", "global_valid_seqs", "global_valid_toks"}: + metrics[k] = np.mean(v).item() + else: + metrics[k] = np.sum(v).item() + timing_metrics = timer.get_timing_metrics(reduction_op="sum") + + print("\nšŸ“Š Training Results:") + print(f" • Loss: {float(metrics['loss']):.4f}") + print(f" • Accuracy: {float(metrics['accuracy']):.4f}") + print( + f" • Rewards chosen mean: {float(metrics['rewards_chosen_mean']):.4f}" + ) + print( + f" • Rewards rejected mean: {float(metrics['rewards_rejected_mean']):.4f}" + ) + print(f" • Num valid samples: {float(metrics['num_valid_samples']):.0f}") + + print("\nā±ļø Timing:") + # Display total time first, separately + total_time = 
timing_metrics.get("total_step_time", 0) + print(f" • Total step time: {total_time:.2f}s") + + # Display all other timing metrics (if any) + for k, v in sorted( + timing_metrics.items(), key=lambda item: item[1], reverse=True + ): + if k != "total_step_time": + percent = (v / total_time * 100) if total_time > 0 else 0 + print(f" • {k}: {v:.2f}s ({percent:.1f}%)") + + logger.log_metrics(metrics, total_steps + 1, prefix="train") + logger.log_metrics(timing_metrics, total_steps + 1, prefix="timing/train") + + timer.reset() + current_step += 1 + total_steps += 1 + + if ( + master_config["rm"]["max_num_steps"] != -1 + and total_steps >= master_config["rm"]["max_num_steps"] + ): + return + + current_epoch += 1 + current_step = 0 # Reset step counter for new epoch diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index 012f3501de..ee227e0aa6 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -22,17 +22,11 @@ from transformers import AutoTokenizer from nemo_rl.algorithms.loss_functions import ( - LossFunction, NLLLoss, - PreferenceLoss, ) from nemo_rl.algorithms.utils import set_seed from nemo_rl.data import DataConfig -from nemo_rl.data.datasets import ( - AllTaskProcessedDataset, - preference_collate_fn, - rl_collate_fn, -) +from nemo_rl.data.datasets import AllTaskProcessedDataset, rl_collate_fn from nemo_rl.data.interfaces import TaskDataSpec from nemo_rl.data.llm_message_utils import ( add_loss_mask_to_message_log, @@ -86,18 +80,6 @@ class MasterConfig(TypedDict): checkpointing: CheckpointingConfig -class SFTValMetrics(TypedDict): - val_loss: float - - -class RMValMetrics(TypedDict): - val_loss: float - accuracy: float - rewards_chosen_mean: float - rewards_rejected_mean: float - num_valid_samples: float - - # ======================================================= # Setup & Initialization # ======================================================= @@ -111,7 +93,7 @@ def setup( RayVirtualCluster, StatefulDataLoader, 
StatefulDataLoader, - LossFunction, + NLLLoss, MasterConfig, Logger, TaskDataSpec, @@ -122,19 +104,6 @@ def setup( Returns: Tuple of policy, cluster, dataloader, tokenizer, loss_fn, math_env, master_config, logger """ - model_type = "reward" if "reward_model_type" in master_config["policy"] else "lm" - - if model_type == "lm": - collate_fn = rl_collate_fn - loss_fn_class = NLLLoss - elif model_type == "reward": - collate_fn = preference_collate_fn - loss_fn_class = PreferenceLoss - else: - raise NotImplementedError( - f"Model type {model_type} not implemented for SFT training." - ) - set_seed(master_config["sft"]["seed"]) # Extract individual configs for easier access @@ -166,7 +135,7 @@ def setup( train_dataset, batch_size=policy_config["train_global_batch_size"], shuffle=True, - collate_fn=collate_fn, + collate_fn=rl_collate_fn, drop_last=True, ) @@ -180,7 +149,7 @@ def setup( val_dataset, batch_size=sft_config["val_global_batch_size"], shuffle=False, - collate_fn=collate_fn, + collate_fn=rl_collate_fn, drop_last=True, ) @@ -215,7 +184,7 @@ def setup( init_optimizer=True, init_reference_model=False, ) - loss_fn = loss_fn_class() + loss_fn = NLLLoss() print(" āœ“ Model initialized") print("\n" + "=" * 60) @@ -249,7 +218,6 @@ def validate( val_batches: int, val_batch_size: int, val_mbs: int, - model_type: str, ): """Run validation on the validation dataset.""" if val_dataloader is None: @@ -264,8 +232,7 @@ def validate( # Show a progress indicator for validation # val_total = len(val_dataloader) - list_of_val_metrics = [] - + val_metrics = {"val_loss": 0.0} num_valid_batches = 0 policy.prepare_for_training() @@ -294,29 +261,13 @@ def validate( ) ## just run model fwd - - if model_type == "lm": - val_results = policy.train( - val_data, - loss_fn, - eval_mode=True, - gbs=val_batch_size, - mbs=val_mbs, - ) - elif model_type == "reward": - val_results = policy.train( - val_data, - loss_fn, - eval_mode=True, - ## NOTE: we double the batch size here because each 
preference example corresponds to a pair of - ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. - gbs=val_batch_size * 2, - mbs=val_mbs * 2, - ) - else: - raise NotImplementedError( - f"Model type {model_type} not implemented for SFT training." - ) + val_results = policy.train( + val_data, + loss_fn, + eval_mode=True, + gbs=val_batch_size, + mbs=val_mbs, + ) if len(val_results["all_mb_metrics"]) == 0: warnings.warn( @@ -324,92 +275,19 @@ def validate( " This is likely because there were no valid samples." ) else: - if model_type == "lm": - list_of_val_metrics.append( - SFTValMetrics(val_loss=float(val_results["loss"])) - ) - elif model_type == "reward": - list_of_val_metrics.append( - RMValMetrics( - val_loss=sum(val_results["all_mb_metrics"]["loss"]), - accuracy=sum(val_results["all_mb_metrics"]["accuracy"]), - rewards_chosen_mean=sum( - val_results["all_mb_metrics"]["rewards_chosen_mean"] - ), - rewards_rejected_mean=sum( - val_results["all_mb_metrics"]["rewards_rejected_mean"] - ), - num_valid_samples=sum( - val_results["all_mb_metrics"]["num_valid_samples"] - ), - ) - ) - else: - raise NotImplementedError( - f"Model type {model_type} not implemented for SFT training." 
- ) - + val_metrics["val_loss"] += float(val_results["loss"]) num_valid_batches += 1 if val_batches > 0 and batch_idx >= val_batches - 1: break if num_valid_batches > 0: - if model_type == "lm": - val_metrics = SFTValMetrics( - val_loss=sum([m["val_loss"] for m in list_of_val_metrics]) - ) - val_metrics["val_loss"] /= num_valid_batches - elif model_type == "reward": - sum_num_valid_samples = sum( - [m["num_valid_samples"] for m in list_of_val_metrics] - ) - val_metrics = RMValMetrics( - val_loss=sum( - [ - m["val_loss"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, - accuracy=sum( - [ - m["accuracy"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, - rewards_chosen_mean=sum( - [ - m["rewards_chosen_mean"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, - rewards_rejected_mean=sum( - [ - m["rewards_rejected_mean"] * m["num_valid_samples"] - for m in list_of_val_metrics - ] - ) - / sum_num_valid_samples, - num_valid_samples=sum_num_valid_samples, - ) + val_metrics["val_loss"] /= num_valid_batches else: warnings.warn( "No validation metrics were collected." " This is likely because there were no valid samples in the validation set." 
) - if model_type == "lm": - val_metrics = SFTValMetrics(val_loss=0.0) - elif model_type == "reward": - val_metrics = RMValMetrics( - val_loss=0.0, - accuracy=0.0, - rewards_chosen_mean=0.0, - rewards_rejected_mean=0.0, - num_valid_samples=0.0, - ) # Calculate validation metrics policy.prepare_for_training() @@ -423,18 +301,6 @@ def validate( print("\nšŸ“Š Validation Results:") print(f" • Validation loss: {val_metrics['val_loss']:.4f}") - if model_type == "reward": - print(f" • Validation accuracy: {val_metrics['accuracy']:.4f}") - print( - f" • Validation rewards chosen mean: {val_metrics['rewards_chosen_mean']:.4f}" - ) - print( - f" • Validation rewards rejected mean: {val_metrics['rewards_rejected_mean']:.4f}" - ) - print( - f" • Validation num valid samples: {val_metrics['num_valid_samples']:.0f}" - ) - # Print timing information print("\n ā±ļø Validation Timing:") validation_time = timing_metrics.get("total_validation_time", 0) @@ -477,8 +343,6 @@ def sft_train( val_at_start = sft_config["val_at_start"] max_num_epochs = sft_config["max_num_epochs"] - model_type = "reward" if "reward_model_type" in master_config["policy"] else "lm" - # Run validation at the start if configured if val_at_start and total_steps == 0: print("\nšŸ” Running initial validation...") @@ -493,7 +357,6 @@ def sft_train( val_batches=sft_config["val_batches"], val_batch_size=sft_config["val_global_batch_size"], val_mbs=sft_config["val_micro_batch_size"], - model_type=model_type, ) logger.log_metrics(val_metrics, total_steps, prefix="validation") @@ -501,15 +364,15 @@ def sft_train( policy.prepare_for_training() - while current_epoch < max_num_epochs and ( - master_config["sft"]["max_num_steps"] == -1 - or total_steps < master_config["sft"]["max_num_steps"] + while ( + current_epoch < max_num_epochs + and total_steps < master_config["sft"]["max_num_steps"] ): print(f"\n{'=' * 25} Epoch {current_epoch + 1}/{max_num_epochs} {'=' * 25}") for batch in train_dataloader: print( - f"\n{'=' * 
25} Step {current_step + 1}/{min(len(train_dataloader), master_config['sft']['max_num_steps'] if master_config['sft']['max_num_steps'] != -1 else len(train_dataloader))} {'=' * 25}" + f"\n{'=' * 25} Step {current_step + 1}/{min(len(train_dataloader), master_config['sft']['max_num_steps'])} {'=' * 25}" ) maybe_gpu_profile_step(policy, total_steps + 1) val_metrics, validation_timings = None, None @@ -542,28 +405,11 @@ def sft_train( ) print("ā–¶ Taking a training step...") + train_results = policy.train(train_data, loss_fn) - if model_type == "lm": - train_results = policy.train(train_data, loss_fn) - elif model_type == "reward": - train_results = policy.train( - train_data, - loss_fn, - eval_mode=False, - ## NOTE: we double the batch size here because each preference example corresponds to a pair of - ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. - gbs=master_config["policy"]["train_global_batch_size"] * 2, - mbs=master_config["policy"]["train_micro_batch_size"] * 2, - ) - else: - raise NotImplementedError( - f"Model type {model_type} not implemented for SFT training." 
- ) - - is_last_step = ( - master_config["sft"]["max_num_steps"] != -1 - and total_steps + 1 >= master_config["sft"]["max_num_steps"] - ) or ( + is_last_step = total_steps + 1 >= master_config["sft"][ + "max_num_steps" + ] or ( current_epoch + 1 == max_num_epochs and current_step + 1 == len(train_dataloader) ) @@ -581,7 +427,6 @@ def sft_train( val_batches=sft_config["val_batches"], val_batch_size=sft_config["val_global_batch_size"], val_mbs=sft_config["val_micro_batch_size"], - model_type=model_type, ) logger.log_metrics( validation_timings, total_steps + 1, prefix="timing/validation" @@ -657,19 +502,6 @@ def sft_train( print("\nšŸ“Š Training Results:") print(f" • Loss: {float(metrics['loss']):.4f}") - - if model_type == "reward": - print(f" • Accuracy: {float(metrics['accuracy']):.4f}") - print( - f" • Rewards chosen mean: {float(metrics['rewards_chosen_mean']):.4f}" - ) - print( - f" • Rewards rejected mean: {float(metrics['rewards_rejected_mean']):.4f}" - ) - print( - f" • Num valid samples: {float(metrics['num_valid_samples']):.0f}" - ) - print("\nā±ļø Timing:") # Display total time first, separately total_time = timing_metrics.get("total_step_time", 0) @@ -690,10 +522,7 @@ def sft_train( current_step += 1 total_steps += 1 - if ( - master_config["sft"]["max_num_steps"] != -1 - and total_steps >= master_config["sft"]["max_num_steps"] - ): + if total_steps >= master_config["sft"]["max_num_steps"]: return current_epoch += 1 diff --git a/tests/unit/algorithms/test_rm.py b/tests/unit/algorithms/test_rm.py index 0f1f8af059..278b6be05a 100644 --- a/tests/unit/algorithms/test_rm.py +++ b/tests/unit/algorithms/test_rm.py @@ -19,7 +19,7 @@ from torchdata.stateful_dataloader import StatefulDataLoader from nemo_rl.algorithms.loss_functions import PreferenceLoss -from nemo_rl.algorithms.sft import _default_sft_save_state, sft_train +from nemo_rl.algorithms.rm import _default_rm_save_state, rm_train @pytest.fixture @@ -77,11 +77,11 @@ def val_iter(self): loss_fn = 
PreferenceLoss() logger = MagicMock() checkpointer = MagicMock() - sft_task_spec = MagicMock() + rm_task_spec = MagicMock() # Create mock master config master_config = { - "sft": { + "rm": { "max_num_steps": 5, "max_num_epochs": 2, "val_period": 100, @@ -107,7 +107,7 @@ def val_iter(self): "loss_fn": loss_fn, "logger": logger, "checkpointer": checkpointer, - "sft_task_spec": sft_task_spec, + "rm_task_spec": rm_task_spec, "master_config": master_config, } @@ -115,12 +115,12 @@ def val_iter(self): def test_exit_on_max_steps(mock_components): """Test that training loop exits when max_num_steps is reached""" # Set max steps to 12, which is less than len(train_dataloader) * max_num_epochs - mock_components["master_config"]["sft"]["max_num_steps"] = 12 + mock_components["master_config"]["rm"]["max_num_steps"] = 12 - sft_save_state = _default_sft_save_state() + rm_save_state = _default_rm_save_state() # Run training - sft_train( + rm_train( mock_components["policy"], mock_components["train_dataloader"], mock_components["val_dataloader"], @@ -128,9 +128,9 @@ def test_exit_on_max_steps(mock_components): mock_components["loss_fn"], mock_components["master_config"], mock_components["logger"], - mock_components["sft_task_spec"], + mock_components["rm_task_spec"], mock_components["checkpointer"], - sft_save_state, + rm_save_state, ) # Verify we only trained for 12 steps. 
@@ -140,13 +140,13 @@ def test_exit_on_max_steps(mock_components): def test_exit_on_max_epochs(mock_components): """Test that training loop exits when max_num_epochs is reached""" # Set max epochs to 2 and max steps to a large number - mock_components["master_config"]["sft"]["max_num_epochs"] = 2 - mock_components["master_config"]["sft"]["max_num_steps"] = 100 + mock_components["master_config"]["rm"]["max_num_epochs"] = 2 + mock_components["master_config"]["rm"]["max_num_steps"] = 100 - sft_save_state = _default_sft_save_state() + rm_save_state = _default_rm_save_state() # Run training - sft_train( + rm_train( mock_components["policy"], mock_components["train_dataloader"], mock_components["val_dataloader"], @@ -154,9 +154,9 @@ def test_exit_on_max_epochs(mock_components): mock_components["loss_fn"], mock_components["master_config"], mock_components["logger"], - mock_components["sft_task_spec"], + mock_components["rm_task_spec"], mock_components["checkpointer"], - sft_save_state, + rm_save_state, ) # Verify we trained for exactly two epochs (20 batches). From 5c764652aa8c6cb48b3741825cbca903cc3a9f50 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Mon, 21 Jul 2025 13:00:37 -0700 Subject: [PATCH 17/47] nit code and doc changes Signed-off-by: Julien Veron Vialard --- docs/guides/rm.md | 39 ++++++++++++++++++- examples/run_rm.py | 14 +++---- nemo_rl/algorithms/sft.py | 38 +++++++----------- .../data/hf_datasets/preference_dataset.py | 5 ++- 4 files changed, 62 insertions(+), 34 deletions(-) diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 224bb899ae..6ebbe9fce5 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -53,9 +53,44 @@ Each custom preference dataset should be a JSONL file, with each line containing } ``` -Currently, RM training supports only two completions (where rank 0 is preferred and rank 1 is rejected), with each completion being a single response. 
For example: +Currently, RM training supports only two completions (where the lowest rank is preferred and the highest one is rejected), with each completion being a single response. For example: ``` -{"context": [{"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "The capital of France is Paris."}, {"role": "user", "content": "Thanks! And what's the capital of Germany?"}], "completions": [{"rank": 0, "completion": [{"role": "assistant", "content": "The capital of Germany is Berlin."}]}, {"rank": 1, "completion": [{"role": "assistant", "content": "The capital of Germany is Munich."}]}]} +{ + "context": [ + { + "role": "user", + "content": "What's the capital of France?" + }, + { + "role": "assistant", + "content": "The capital of France is Paris." + }, + { + "role": "user", + "content": "Thanks! And what's the capital of Germany?" + } + ], + "completions": [ + { + "rank": 0, + "completion": [ + { + "role": "assistant", + "content": "The capital of Germany is Berlin." + } + ] + }, + { + "rank": 1, + "completion": [ + { + "role": "assistant", + "content": "The capital of Germany is Munich." + } + ] + } + ] +} ``` NeMo RL supports using multiple custom validation preference datasets during RM training: diff --git a/examples/run_rm.py b/examples/run_rm.py index 91aceabfcb..4cd26af4f6 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -132,11 +132,11 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): # Custom preference dataset if data_cls.startswith("PreferenceData:"): _, _, data_path = data_cls.split(":", 2) - data = hf_datasets.PreferenceDataset(data_path) - train_dataset = data.formatted_ds["local"] + data = hf_datasets.PreferenceDataset(data_path, split="train") + train_dataset = data.formatted_ds["train"] val_dataset = None print( - f" āœ“ Training dataset loaded with {len(data.formatted_ds['local'])} samples." + f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." 
) # Legacy dataset elif data_cls == "HelpSteer3": @@ -176,14 +176,14 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): assert val_data_cls.startswith("PreferenceData:") _, val_dataset_name, val_data_path = val_data_cls.split(":", 2) assert val_dataset_name not in val_dataset or val_dataset_name == "validation" # Users can override the default "validation" set - if val_dataset_name == "validation": + if val_dataset_name == "validation" and "validation" in val_dataset: print(f" āœ“ Overriding the default validation dataset") - val_data = hf_datasets.PreferenceDataset(val_data_path) + val_data = hf_datasets.PreferenceDataset(val_data_path, split="validation") print( - f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["local"])} samples." + f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["validation"])} samples." ) val_dataset[val_dataset_name] = AllTaskProcessedDataset( - val_data.formatted_ds["local"], + val_data.formatted_ds["validation"], tokenizer, val_data.task_spec, rm_preprocessor, diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index 559daea935..e49077a193 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -106,12 +106,12 @@ def setup( master_config: MasterConfig, tokenizer: AutoTokenizer, train_dataset: AllTaskProcessedDataset, - val_dataset: AllTaskProcessedDataset, + val_dataset: AllTaskProcessedDataset | dict[str, AllTaskProcessedDataset], ) -> tuple[ Policy, RayVirtualCluster, StatefulDataLoader, - StatefulDataLoader, + StatefulDataLoader | dict[str, StatefulDataLoader], LossFunction, MasterConfig, Logger, @@ -177,26 +177,18 @@ def setup( ) train_dataloader.load_state_dict(dataloader_state_dict) - if isinstance(val_dataset, dict): - val_dataloader = { - k: StatefulDataLoader( - v, - batch_size=sft_config["val_global_batch_size"], - shuffle=False, - collate_fn=collate_fn, - drop_last=True, - ) for k, v in 
val_dataset.items() - } - else: - val_dataloader = { - "validation": StatefulDataLoader( - val_dataset, - batch_size=sft_config["val_global_batch_size"], - shuffle=False, - collate_fn=collate_fn, - drop_last=True, - ) - } + if not isinstance(val_dataset, dict): + val_dataset = {"validation": val_dataset} + + val_dataloader = { + k: StatefulDataLoader( + v, + batch_size=sft_config["val_global_batch_size"], + shuffle=False, + collate_fn=collate_fn, + drop_last=True, + ) for k, v in val_dataset.items() + } # ========================== # Cluster @@ -254,7 +246,7 @@ def setup( # ======================================================= def validate( policy: PolicyInterface, - val_dataloader: StatefulDataLoader, + val_dataloader: StatefulDataLoader | dict[str, StatefulDataLoader], tokenizer, loss_fn, step: int, diff --git a/nemo_rl/data/hf_datasets/preference_dataset.py b/nemo_rl/data/hf_datasets/preference_dataset.py index 292b507f30..d864297488 100644 --- a/nemo_rl/data/hf_datasets/preference_dataset.py +++ b/nemo_rl/data/hf_datasets/preference_dataset.py @@ -35,8 +35,9 @@ class PreferenceDataset: } """ - def __init__(self, dataset_path: str) -> None: - self.formatted_ds = DatasetDict({"local": load_dataset("json", data_files=dataset_path, split="train")}) + def __init__(self, dataset_path: str, split: str) -> None: + # Specifying split="train" returns Dataset instead of DatasetDict({"train": Dataset}) + self.formatted_ds = DatasetDict({split: load_dataset("json", data_files=dataset_path, split="train")}) self.task_spec = TaskDataSpec( task_name="PreferenceData", From 0aaf29674fcb43fd828574cd063fadf32df4ead4 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 22 Jul 2025 13:52:39 -0700 Subject: [PATCH 18/47] pull from main Signed-off-by: Julien Veron Vialard --- examples/configs/rm.yaml | 4 ++++ .../models/policy/dtensor_policy_worker.py | 23 ++++++++++++------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/examples/configs/rm.yaml 
b/examples/configs/rm.yaml index 75d9355c0d..cf37750c08 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -47,6 +47,9 @@ policy: dynamic_batching: enabled: false + sequence_packing: + enabled: false + # makes the training sequence length divisible by the tensor parallel size # this is useful for sequence parallel training make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size} @@ -124,6 +127,7 @@ logger: log_dir: "logs" # Base directory for all logs wandb_enabled: true # Make sure you do a ``wandb login [Your API key]'' before running tensorboard_enabled: true + mlflow_enabled: false monitor_gpus: true # If true, will monitor GPU usage and log to wandb and/or tensorboard wandb: project: "rm-dev" diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 7952e65dd5..45d9584f8d 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -672,14 +672,21 @@ def train( with DTensorPolicyWorker.train_context(context_parallel_ctx): with torch.autocast(device_type="cuda", dtype=self.dtype): - outputs = self.model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - use_cache=False, - flash_attn_kwargs=flash_attn_kwargs, - ) - + if self.cfg["reward_model_type"] == "bradley_terry": + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + use_cache=False, + ) + else: + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + use_cache=False, + flash_attn_kwargs=flash_attn_kwargs, + ) # Get logprobs if not hasattr(outputs, "logits"): logits = self.model.lm_head(outputs.last_hidden_state) From 5b3f1ad949b1fc480f9d341ae6d6753dd645be71 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 10:55:39 -0400 Subject: [PATCH 19/47] 
Update docs/guides/rm.md Co-authored-by: Terry Kong Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- docs/guides/rm.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 3a42f55806..f4b8171914 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -1,6 +1,6 @@ # Reward Model Training in NeMo RL -This document explains how to train reward models (RM) within NeMo RL. Currently, only Bradley-Terry reward models are supported. +This document explains how to train reward models (RM) within NeMo RL. Currently, only Bradley-Terry reward models are supported on the DTensor backend. Megatron backend support is tracked [here](https://github.com/NVIDIA-NeMo/RL/issues/720). ## Launch a Training Job From 6534c7c7522ae5f1b33059dcdaa2d83820897c93 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 10:58:52 -0400 Subject: [PATCH 20/47] Remove the `-RAY_DEDUP_LOGS=0` examples in the README Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 6065c11ff2..b1f720fdd7 100644 --- a/README.md +++ b/README.md @@ -331,7 +331,6 @@ For distributed DPO training across multiple nodes, modify the following script NUM_ACTOR_NODES=2 COMMAND="uv run ./examples/run_dpo.py --config examples/configs/dpo.yaml cluster.num_nodes=2 cluster.gpus_per_node=8 dpo.val_global_batch_size=32 checkpointing.checkpoint_dir='results/dpo_llama81_2nodes' logger.wandb_enabled=True logger.wandb.name='dpo-llama1b'" \ -RAY_DEDUP_LOGS=0 \ CONTAINER=YOUR_CONTAINER \ MOUNTS="$PWD:$PWD" \ sbatch \ @@ -376,7 +375,6 @@ For distributed RM training across multiple nodes, modify the following script f NUM_ACTOR_NODES=2 COMMAND="uv run ./examples/run_rm.py --config examples/configs/rm.yaml cluster.num_nodes=2 cluster.gpus_per_node=8 
checkpointing.checkpoint_dir='results/rm_llama1b_2nodes' logger.wandb_enabled=True logger.wandb.name='rm-llama1b-2nodes'" \ -RAY_DEDUP_LOGS=0 \ CONTAINER=YOUR_CONTAINER \ MOUNTS="$PWD:$PWD" \ sbatch \ From b79d0ee23623bd60e245f822d0981362340a4560 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 11:28:15 -0400 Subject: [PATCH 21/47] Refactor RM config to include a dedicated `reward_model_cfg` section Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- docs/guides/rm.md | 2 +- examples/configs/rm.yaml | 5 ++++- examples/run_rm.py | 2 ++ nemo_rl/models/policy/__init__.py | 5 +++++ nemo_rl/models/policy/dtensor_policy_worker.py | 11 +++++++---- tests/unit/algorithms/test_rm.py | 5 ++++- 6 files changed, 23 insertions(+), 7 deletions(-) diff --git a/docs/guides/rm.md b/docs/guides/rm.md index f4b8171914..287e422e3c 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -15,7 +15,7 @@ uv run examples/run_rm.py --config examples/configs/rm.yaml uv run examples/run_rm.py --config examples/configs/rm.yaml policy.model_name=Qwen/Qwen2.5-1.5B ``` -You must specify the YAML config. It shares the same base template as the SFT config but adds a new `reward_model_type` key to trigger RM training. You can find an example RM config file at [examples/configs/rm.yaml](../../examples/configs/rm.yaml). +You must specify the YAML config. It shares the same base template as the SFT config but includes a new `reward_model_cfg` section with `enabled: true` to load the model as a Reward Model. You can find an example RM config file at [examples/configs/rm.yaml](../../examples/configs/rm.yaml). **Reminder**: Set your `HF_HOME`, `WANDB_API_KEY`, and `HF_DATASETS_CACHE` (if needed). Make sure to log in using `huggingface-cli` if you're working with Llama models. 
diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index cf37750c08..06abcce233 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -27,7 +27,6 @@ policy: # We don't use the "default" chat template because the Llama tokenizer inserts the current # date in the system prompt, which could make the reward model's output date-dependent. chat_template: "{{- bos_token }}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = '' %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id>\n\n' }}\n{%- endif %}" - reward_model_type: "bradley_terry" train_global_batch_size: 128 train_micro_batch_size: 1 max_total_sequence_length: 8192 @@ -35,6 +34,10 @@ policy: fsdp_offload_enabled: false activation_checkpointing_enabled: false + reward_model_cfg: + enabled: true # loads model as a Reward Model (do not change) + reward_model_type: "bradley_terry" # only "bradley_terry" is currently supported + dtensor_cfg: enabled: true cpu_offload: false diff --git a/examples/run_rm.py b/examples/run_rm.py index 952626197f..6586d8edb7 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -164,6 +164,8 @@ def main(): print("Final config:") pprint.pprint(config) + assert config["policy"]["reward_model_cfg"]["enabled"] + config["logger"]["log_dir"] = get_next_experiment_dir(config["logger"]["log_dir"]) print(f"šŸ“Š Using log directory: {config['logger']['log_dir']}") if config["checkpointing"]["enabled"]: diff --git 
a/nemo_rl/models/policy/__init__.py b/nemo_rl/models/policy/__init__.py index 384a8bf5a5..188e00b359 100644 --- a/nemo_rl/models/policy/__init__.py +++ b/nemo_rl/models/policy/__init__.py @@ -34,6 +34,10 @@ class SequencePackingConfig(TypedDict): algorithm: str +class RewardModelConfig(TypedDict): + enabled: bool + reward_model_type: str + class MegatronOptimizerConfig(TypedDict): optimizer: str lr: float @@ -134,6 +138,7 @@ class PolicyConfig(TypedDict): int ] # used in static batched (framework) generation precision: str + reward_model_cfg: NotRequired[RewardModelConfig] dtensor_cfg: DTensorConfig megatron_cfg: NotRequired[MegatronConfig] dynamic_batching: DynamicBatchingConfig diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 45d9584f8d..26069ffb68 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -203,8 +203,11 @@ def __init__( else None, ) - if "reward_model_type" in self.cfg: - if self.cfg["reward_model_type"] == "bradley_terry": + self._is_reward_model = self.cfg.get("reward_model_cfg", {}).get("enabled", False) + if self._is_reward_model: + # Load model as a Reward Model. + rm_type = self.cfg["reward_model_cfg"]["reward_model_type"] + if rm_type == "bradley_terry": model_class = AutoModelForSequenceClassification if model_config.num_labels != 1: # For Bradley-Terry reward models, the linear head has a single output. 
@@ -222,7 +225,7 @@ def __init__( model_config.num_labels = 1 else: raise ValueError( - f"Unknown reward model type: {self.cfg['reward_model_type']}" + f"Unknown reward model type: {rm_type}" ) else: model_class = AutoModelForCausalLM @@ -672,7 +675,7 @@ def train( with DTensorPolicyWorker.train_context(context_parallel_ctx): with torch.autocast(device_type="cuda", dtype=self.dtype): - if self.cfg["reward_model_type"] == "bradley_terry": + if self._is_reward_model: outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, diff --git a/tests/unit/algorithms/test_rm.py b/tests/unit/algorithms/test_rm.py index 278b6be05a..a373cfc12f 100644 --- a/tests/unit/algorithms/test_rm.py +++ b/tests/unit/algorithms/test_rm.py @@ -93,7 +93,10 @@ def val_iter(self): "policy": { "train_global_batch_size": 1, "make_sequence_length_divisible_by": 1, - "reward_model_type": "bradley_terry", + "reward_model_cfg": { + "enabled": True, + "reward_model_type": "bradley_terry", + }, "train_micro_batch_size": 1, }, "checkpointing": {"enabled": False}, From 51cc9f832b73bee14539a7a0d2a23623b07d7e51 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 11:35:11 -0400 Subject: [PATCH 22/47] Provide user-friendly error message regarding unsupported RMs in mcore Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- nemo_rl/models/policy/megatron_policy_worker.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nemo_rl/models/policy/megatron_policy_worker.py b/nemo_rl/models/policy/megatron_policy_worker.py index 867f27ea1d..ba1948c561 100644 --- a/nemo_rl/models/policy/megatron_policy_worker.py +++ b/nemo_rl/models/policy/megatron_policy_worker.py @@ -369,6 +369,13 @@ def __init__( } self.dtype = dtype_map[self.cfg["precision"]] + # Reward models are not yet supported with Megatron. 
+ if self.cfg.get("reward_model_cfg", {}).get("enabled", False): + raise NotImplementedError( + "Reward models are not yet supported with the Megatron backend, this issue is " + "tracked in https://github.com/NVIDIA-NeMo/RL/issues/720" + ) + # Only enable expandable_segments on Hopper and newer architectures (compute capability 9.x+) configure_expandable_segments() From 597d5eb36561fe102baac4b3e941631656b8ec77 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 11:44:18 -0400 Subject: [PATCH 23/47] Simplify code and guard against enabling sequence packing in RMs Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- .../models/policy/dtensor_policy_worker.py | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 26069ffb68..3ab88b4222 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -205,6 +205,9 @@ def __init__( self._is_reward_model = self.cfg.get("reward_model_cfg", {}).get("enabled", False) if self._is_reward_model: + # Ensure sequence packing is disabled. + if self.enable_seq_packing: + raise NotImplementedError("Sequence packing is not supported for reward models") # Load model as a Reward Model. 
rm_type = self.cfg["reward_model_cfg"]["reward_model_type"] if rm_type == "bradley_terry": @@ -676,20 +679,16 @@ def train( with DTensorPolicyWorker.train_context(context_parallel_ctx): with torch.autocast(device_type="cuda", dtype=self.dtype): if self._is_reward_model: - outputs = self.model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - use_cache=False, - ) - else: - outputs = self.model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - use_cache=False, - flash_attn_kwargs=flash_attn_kwargs, - ) + # Reward models don't support sequence packing so there + # should be no specific flash attention arguments. + assert not flash_attn_kwargs + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + use_cache=False, + flash_attn_kwargs=flash_attn_kwargs, + ) # Get logprobs if not hasattr(outputs, "logits"): logits = self.model.lm_head(outputs.last_hidden_state) From ba2e4b6b0b1ed7d8e0b66acf00fc328d40ebcba0 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 12:12:15 -0400 Subject: [PATCH 24/47] Fix likely crash with Reward Models introduced in previous commit Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- nemo_rl/models/policy/dtensor_policy_worker.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 3ab88b4222..8f6f88bcca 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -678,17 +678,23 @@ def train( with DTensorPolicyWorker.train_context(context_parallel_ctx): with torch.autocast(device_type="cuda", dtype=self.dtype): - if self._is_reward_model: - # Reward models don't support sequence packing so there - # should be no specific flash attention 
arguments. - assert not flash_attn_kwargs - outputs = self.model( + model_args = dict( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False, flash_attn_kwargs=flash_attn_kwargs, ) + + if self._is_reward_model: + # `flash_attn_kwarg` is not supported for `LlamaForSequenceClassification`. + # Note that it should be empty anyway since sequence packing + # is not supported for reward models. + assert not flash_attn_kwargs + del model_args["flash_attn_kwargs"] + + outputs = self.model(**model_args) + # Get logprobs if not hasattr(outputs, "logits"): logits = self.model.lm_head(outputs.last_hidden_state) From 47337178d69c78f03eb5c1e27ede0c06bfadf6af Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 12:17:03 -0400 Subject: [PATCH 25/47] Fix linting issues Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- nemo_rl/models/policy/__init__.py | 1 + nemo_rl/models/policy/dtensor_policy_worker.py | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/nemo_rl/models/policy/__init__.py b/nemo_rl/models/policy/__init__.py index 188e00b359..94b472d1b4 100644 --- a/nemo_rl/models/policy/__init__.py +++ b/nemo_rl/models/policy/__init__.py @@ -38,6 +38,7 @@ class RewardModelConfig(TypedDict): enabled: bool reward_model_type: str + class MegatronOptimizerConfig(TypedDict): optimizer: str lr: float diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 8f6f88bcca..0532602703 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -203,11 +203,15 @@ def __init__( else None, ) - self._is_reward_model = self.cfg.get("reward_model_cfg", {}).get("enabled", False) + self._is_reward_model = self.cfg.get("reward_model_cfg", {}).get( + "enabled", False + ) if self._is_reward_model: # Ensure sequence packing is 
disabled. if self.enable_seq_packing: - raise NotImplementedError("Sequence packing is not supported for reward models") + raise NotImplementedError( + "Sequence packing is not supported for reward models" + ) # Load model as a Reward Model. rm_type = self.cfg["reward_model_cfg"]["reward_model_type"] if rm_type == "bradley_terry": @@ -227,9 +231,7 @@ def __init__( ) model_config.num_labels = 1 else: - raise ValueError( - f"Unknown reward model type: {rm_type}" - ) + raise ValueError(f"Unknown reward model type: {rm_type}") else: model_class = AutoModelForCausalLM From 3297cd111b23546f81126bff33f12887211eb548 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: Wed, 23 Jul 2025 13:27:30 -0400 Subject: [PATCH 26/47] Fix a typing issue Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- nemo_rl/algorithms/loss_functions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nemo_rl/algorithms/loss_functions.py b/nemo_rl/algorithms/loss_functions.py index c8bd2a523f..aeb7181333 100644 --- a/nemo_rl/algorithms/loss_functions.py +++ b/nemo_rl/algorithms/loss_functions.py @@ -568,7 +568,7 @@ def __init__(self, cfg: DPOLossConfig): self.loss_type = LossType.SEQUENCE_LEVEL - def _preference_loss( + def _dpo_loss( self, next_token_logits: Tensor, data: BatchedDataDict[DPOLossDataDict], @@ -619,7 +619,7 @@ def _preference_loss( if self.preference_average_log_probs: rewards = rewards / token_mask.sum(-1).clamp(min=1) - return super()._preference_loss( + return self._preference_loss( rewards, sample_mask, global_valid_seqs, self.reference_policy_kl_penalty ) @@ -661,7 +661,7 @@ def __call__( accuracy, rewards_chosen_mean, rewards_rejected_mean, - ) = self._preference_loss( + ) = self._dpo_loss( next_token_logits, data, global_valid_seqs, From 179767e35d547562d6caf1dfcbf37b046055c6a2 Mon Sep 17 00:00:00 2001 From: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> Date: 
Fri, 25 Jul 2025 10:42:42 -0400 Subject: [PATCH 27/47] Quick fix to typing issue (with TODO item for better fix) Signed-off-by: Olivier Delalleau <507137+odelalleau@users.noreply.github.com> --- nemo_rl/algorithms/loss_functions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nemo_rl/algorithms/loss_functions.py b/nemo_rl/algorithms/loss_functions.py index aeb7181333..b875dd20f3 100644 --- a/nemo_rl/algorithms/loss_functions.py +++ b/nemo_rl/algorithms/loss_functions.py @@ -623,7 +623,8 @@ def _dpo_loss( rewards, sample_mask, global_valid_seqs, self.reference_policy_kl_penalty ) - def __call__( + # TODO a cleaner typing fix would be required (probably that DPOLossFn should not inherit from PreferenceLoss) + def __call__( # type: ignore self, next_token_logits: Tensor, data: BatchedDataDict[DPOLossDataDict], From 76f77d88c14db7e9babed3ce3a99c9271c78727a Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Mon, 28 Jul 2025 11:19:10 -0700 Subject: [PATCH 28/47] unify data logic between DPO and RM training Signed-off-by: Julien Veron Vialard --- docs/guides/dpo.md | 191 +++++++----------- docs/guides/rm.md | 55 ++--- examples/configs/rm.yaml | 11 +- examples/run_dpo.py | 124 +++++++----- examples/run_rm.py | 51 ++--- nemo_rl/algorithms/dpo.py | 104 +++++++--- nemo_rl/algorithms/rm.py | 6 +- nemo_rl/data/datasets.py | 45 ----- nemo_rl/data/hf_datasets/dpo.py | 23 ++- nemo_rl/data/hf_datasets/helpsteer3.py | 12 +- .../unit/data/hf_datasets/test_dpo_dataset.py | 14 +- tests/unit/data/hf_datasets/test_helpsteer.py | 60 ++++-- .../hf_datasets/test_preference_dataset.py | 10 +- tests/unit/data/test_datasets.py | 76 +++---- 14 files changed, 387 insertions(+), 395 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index 46c4d56197..179c8f34e5 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -32,129 +32,90 @@ uv run examples/run_dpo.py \ ## Datasets -Each class representing a NeMo RL DPO dataset is expected to have the 
following attributes: -1. `formatted_ds`: The dictionary of formatted datasets. This dictionary should contain `train` and `validation` splits, and each split should conform to the format described below. +Each DPO dataset class is expected to have the following attributes: +1. `formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like +``` +{ + "context": list of dicts, # The prompt message (including previous turns, if any) + "completions": list of dicts, # The list of completions + { + "rank": int, # The rank of the completion (lower rank is preferred) + "completion": list of dicts, # The completion message(s) + } +} +``` 2. `task_spec`: The `TaskDataSpec` for this dataset. This should specify the name you choose for this dataset. -DPO datasets are expected to follow a specific format with three key fields: -- `prompt`: The input prompt/context -- `chosen_response`: The preferred/winning response -- `rejected_response`: The non-preferred/losing response - -[data/hf_datasets/helpsteer3.py](../../nemo_rl/data/hf_datasets/helpsteer3.py) provides an example of how to format data for DPO: - -```python -def format_helpsteer3(data): - response_1 = data["response1"] - response_2 = data["response2"] - overall_preference = data["overall_preference"] - - if overall_preference < 0: - chosen = response_1 - rejected = response_2 - elif overall_preference == 0: - chosen = response_1 - rejected = response_1 - else: - chosen = response_2 - rejected = response_1 - - return { - "prompt": data["context"], - "chosen_response": chosen, - "rejected_response": rejected, - } +Currently, DPO training supports only two completions (where the lowest rank is preferred and the highest one is rejected), with each completion being a single response. For example: ``` - -We also provide a [DPODataset](../../nemo_rl/data/hf_datasets/dpo.py) class that is compatible with jsonl-formatted preference datsets. 
This class assumes train and validation datasets have been split and processed into the expected format offline. The jsonl files should consist of examples with `prompt`, `chosen_response`, and `rejected_response` keys. - -## Adding Custom DPO Datasets - -Adding a new DPO dataset is straightforward. Your custom dataset class should: -1. Implement the required format conversion in the constructor -2. Set up the appropriate `task_spec` - -Here's a minimal example which simply re-keys an existing jsonl dataset: - -```{testcode} -from datasets import load_dataset -from nemo_rl.data.interfaces import TaskDataSpec -from docs.helpers import make_dpo_dataset - -class CustomDPODataset: - def preprocess_dataset( - self, - data, - prompt_key: str = "context", - chosen_key: str = "chosen", - rejected_key: str = "rejected" - ): - return { - "prompt": data[prompt_key], - "chosen_response": data[chosen_key], - "rejected_response": data[rejected_key], +{ + "context": [ + { + "role": "user", + "content": "What's the capital of France?" + }, + { + "role": "assistant", + "content": "The capital of France is Paris." + }, + { + "role": "user", + "content": "Thanks! And what's the capital of Germany?" } - - def __init__( - self, - train_data_path: str, - val_data_path: str, - prompt_key: str, - chosen_key: str, - rejected_key: str, - ): - # Load and format your dataset - fn_kwargs={ - "prompt_key": prompt_key, - "chosen_key": chosen_key, - "rejected_key": rejected_key - } - formatted_ds = { - "train": load_dataset("json", data_files=train_data_path, split="train").map( - self.preprocess_dataset, - fn_kwargs=fn_kwargs, - ), - "validation": load_dataset("json", data_files=val_data_path, split="train").map( - self.preprocess_dataset, - fn_kwargs=fn_kwargs, - ), + ], + "completions": [ + { + "rank": 0, + "completion": [ + { + "role": "assistant", + "content": "The capital of Germany is Berlin." 
+ } + ] + }, + { + "rank": 1, + "completion": [ + { + "role": "assistant", + "content": "The capital of Germany is Munich." + } + ] } - - # Initialize task spec with dataset name - self.task_spec = TaskDataSpec( - task_name="custom_dpo", - ) - self.formatted_ds = formatted_ds - -# Create temporary files using helper function -train_file, val_file = make_dpo_dataset() - -# Initialize dataset -dataset = CustomDPODataset( - train_data_path=train_file.name, - val_data_path=val_file.name, - prompt_key="context", - chosen_key="chosen", - rejected_key="rejected" -) - -# Test dataset properties -print(f"Task name: {dataset.task_spec.task_name}") -print(f"Train examples: {len(dataset.formatted_ds['train'])}") -print(f"Validation examples: {len(dataset.formatted_ds['validation'])}") -print(f"First train example prompt: {dataset.formatted_ds['train'][0]['prompt']}") -print(f"First train example chosen response: {dataset.formatted_ds['train'][0]['chosen_response']}") -print(f"First train example rejected response: {dataset.formatted_ds['train'][0]['rejected_response']}") + ] +} +``` + +NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. + +We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows: +``` +data: + dataset_name: PreferenceData + train_data_path: + val_datasets: + - dataset_name: PreferenceData + val_data_name: + val_data_path: + - dataset_name: PreferenceData + val_data_name: + val_data_path: ``` +Note: +- If you are using a custom preference dataset for training, you must specify a custom preference dataset for validation. +- If you are using a logger, the prefix used for the custom validation preference dataset will be `validation-`. 
-```{testoutput} -Task name: custom_dpo -Train examples: 2 -Validation examples: 2 -First train example prompt: What is 2+2? -First train example chosen response: 4 -First train example rejected response: 5 +When using `HelpSteer3` as the training dataset, the default validation set is also used and logged under the prefix `validation`. You can replace it with a custom preference dataset as follows: ``` +data: + dataset_name: HelpSteer3 + val_datasets: + - dataset_name: PreferenceData + val_data_name: validation + val_data_path: +``` + +[DPODataset](../../nemo_rl/data/hf_datasets/dpo.py) class is deprecated. This class is also compatible with JSONL-formatted preference datsets. It assumes train and validation datasets have been split and processed into the expected format offline. The JSONL files should consist of examples with `prompt`, `chosen_response`, and `rejected_response` keys. ## DPO-Specific Parameters diff --git a/docs/guides/rm.md b/docs/guides/rm.md index fe86ce330b..fa07279a14 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -21,27 +21,8 @@ You must specify the YAML config. It shares the same base template as the SFT co ## Datasets -By default, NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. - -You can also configure custom preference datasets (for training and/or validation) as follows: -``` -data: - dataset_name: "PreferenceData::" - val_dataset_name: ["PreferenceData::"] -``` -Note: -- The name of any custom preference dataset must not contain `:`. -- If you are using a custom preference dataset for training, you must specify a custom preference dataset for validation. -- If you are using a logger, the prefix used for the custom validation preference dataset will be `validation-`. 
- -When using `HelpSteer3` as the training dataset, the default validation set is also used and logged under the prefix `validation`. You can replace it with a custom preference dataset as follows: -``` -data: - dataset_name: "HelpSteer3" - val_dataset_name: ["PreferenceData:validation:"] -``` - -Each custom preference dataset should be a JSONL file, with each line containing a valid JSON object formatted like this: +Each RM dataset class is expected to have the following attributes: +1. `formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like ``` { "context": list of dicts, # The prompt message (including previous turns, if any) @@ -52,6 +33,7 @@ Each custom preference dataset should be a JSONL file, with each line containing } } ``` +2. `task_spec`: The `TaskDataSpec` for this dataset. This should specify the name you choose for this dataset. Currently, RM training supports only two completions (where the lowest rank is preferred and the highest one is rejected), with each completion being a single response. For example: ``` @@ -93,12 +75,31 @@ Currently, RM training supports only two completions (where the lowest rank is p } ``` -NeMo RL supports using multiple custom validation preference datasets during RM training: +NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. + +We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. 
You can modify your config as follows: +``` +data: + dataset_name: PreferenceData + train_data_path: + val_datasets: + - dataset_name: PreferenceData + val_data_name: + val_data_path: + - dataset_name: PreferenceData + val_data_name: + val_data_path: +``` +Note: +- If you are using a custom preference dataset for training, you must specify a custom preference dataset for validation. +- If you are using a logger, the prefix used for the custom validation preference dataset will be `validation-`. + +When using `HelpSteer3` as the training dataset, the default validation set is also used and logged under the prefix `validation`. You can replace it with a custom preference dataset as follows: ``` data: - dataset_name: "PreferenceData::" - val_dataset_name: [ - "PreferenceData::", - "PreferenceData::", - ] + dataset_name: HelpSteer3 + val_datasets: + - dataset_name: PreferenceData + val_data_name: validation + val_data_path: ``` \ No newline at end of file diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index 5c56f1311c..872e92009a 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -126,10 +126,13 @@ data: max_input_seq_length: ${policy.max_total_sequence_length} dataset_name: "HelpSteer3" # You can optionally configure (multiple) custom validation preference datasets as follows: - # val_dataset_name: [ - # "PreferenceData::", - # "PreferenceData::", - # ] + # val_datasets: + # - dataset_name: PreferenceData + # val_data_name: + # val_data_path: + # - dataset_name: PreferenceData + # val_data_name: + # val_data_path: logger: log_dir: "logs" # Base directory for all logs diff --git a/examples/run_dpo.py b/examples/run_dpo.py index a9702ed93e..66a6b98707 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -69,9 +69,11 @@ def dpo_preprocessor( >>> task_spec = TaskDataSpec(task_name="test_dpo") >>> >>> datum = { - ... "prompt": "What is 2+2?", - ... "chosen_response": "4", - ... "rejected_response": "5" + ... 
"context": [{"role": "user", "content": "What is 2+2?"}], + ... "completions": [ + ... {"rank": 0, "completion": [{"role": "assistant", "content": "4"}]}, + ... {"rank": 1, "completion": [{"role": "assistant", "content": "5"}]} + ... ] ... } >>> >>> processed = dpo_preprocessor(datum, task_spec, tokenizer, max_seq_length=128, idx=0) @@ -84,11 +86,13 @@ def dpo_preprocessor( >>> processed["message_log_rejected"][-1]["content"] '5<|eot_id|>' >>> - >>> # prompt can also be a list with multiple messages + >>> # context can also contain multiple turns >>> datum = { - ... "prompt": [{"role": "user", "content": "I have a question."}, {"role": "assistant", "content": "Sure!"}, {"role": "user", "content": "What is 2+2?"}], - ... "chosen_response": "4", - ... "rejected_response": "5" + ... "context": [{"role": "user", "content": "I have a question."}, {"role": "assistant", "content": "Sure!"}, {"role": "user", "content": "What is 2+2?"}], + ... "completions": [ + ... {"rank": 0, "completion": [{"role": "assistant", "content": "4"}]}, + ... {"rank": 1, "completion": [{"role": "assistant", "content": "5"}]} + ... ] ... 
} >>> processed = dpo_preprocessor(datum, task_spec, tokenizer, max_seq_length=128, idx=0) >>> len(processed["message_log_chosen"]) @@ -102,36 +106,18 @@ def dpo_preprocessor( ``` """ - if isinstance(datum_dict["prompt"], list): - messages_chosen = datum_dict["prompt"].copy() - messages_rejected = datum_dict["prompt"].copy() + assert len(datum_dict["completions"]) == 2 + # Lower rank is preferred + if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: + chosen_completion = datum_dict["completions"][0] + rejected_completion = datum_dict["completions"][1] + elif datum_dict["completions"][0]["rank"] > datum_dict["completions"][1]["rank"]: + chosen_completion = datum_dict["completions"][1] + rejected_completion = datum_dict["completions"][0] else: - messages_chosen = [ - { - "role": "user", - "content": datum_dict["prompt"], - }, - ] - messages_rejected = [ - { - "role": "user", - "content": datum_dict["prompt"], - }, - ] - - messages_chosen.append( - { - "role": "assistant", - "content": datum_dict["chosen_response"], - }, - ) - - messages_rejected.append( - { - "role": "assistant", - "content": datum_dict["rejected_response"], - }, - ) + raise NotImplementedError("Ties are not supported yet.") + messages_chosen = datum_dict["context"] + chosen_completion["completion"] + messages_rejected = datum_dict["context"] + rejected_completion["completion"] message_log_chosen = get_formatted_message_log( messages_chosen, tokenizer, task_data_spec @@ -174,15 +160,33 @@ def dpo_preprocessor( def setup_data(data_config: DataConfig, policy_config: PolicyConfig): print("\nā–¶ Setting up data...") - if data_config["dataset_name"] == "HelpSteer3": + if data_config["dataset_name"] == "PreferenceData": + data_path = data_config["train_data_path"] + data = hf_datasets.PreferenceDataset(data_path, split="train") + train_dataset = data.formatted_ds["train"] + val_dataset = None + print( + f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} 
samples." + ) + elif data_config["dataset_name"] == "HelpSteer3": data = hf_datasets.HelpSteer3Dataset() - else: + train_dataset = data.formatted_ds["train"] + val_dataset = data.formatted_ds["validation"] + print( + f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." + ) + elif data_config["dataset_name"] == "DPODataset": data = hf_datasets.DPODataset( train_data_path=data_config["train_data_path"], val_data_path=data_config["val_data_path"], ) - train_dataset = data.formatted_ds["train"] - val_dataset = data.formatted_ds["validation"] + train_dataset = data.formatted_ds["train"] + val_dataset = data.formatted_ds["validation"] + print( + f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." + ) + else: + raise ValueError(f"Unknown dataset class: {data_cls}") dpo_task_spec = data.task_spec @@ -195,13 +199,39 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): max_seq_length=data_config["max_input_seq_length"], ) - val_dataset = AllTaskProcessedDataset( - val_dataset, - tokenizer, - dpo_task_spec, - dpo_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) + val_dataset = { + "validation": AllTaskProcessedDataset( + val_dataset, + tokenizer, + dpo_task_spec, + dpo_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + } if val_dataset else {} + + if data_config.get("val_datasets") is not None: + # Only supported for custom preference datasets + assert isinstance(data_config["val_datasets"], list), f"Invalid type for val_datasets: {type(data_config['val_datasets'])}" + for val_dataset_config in data_config["val_datasets"]: + assert val_dataset_config["dataset_name"] == "PreferenceData" + val_dataset_name = val_dataset_config["val_data_name"] + val_data_path = val_dataset_config["val_data_path"] + assert 
val_dataset_name not in val_dataset or val_dataset_name == "validation" # Users can override the default "validation" set + if val_dataset_name == "validation" and "validation" in val_dataset: + print(f" āœ“ Overriding the default validation dataset") + val_data = hf_datasets.PreferenceDataset(val_data_path, split="validation") + print( + f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["validation"])} samples." + ) + val_dataset[val_dataset_name] = AllTaskProcessedDataset( + val_data.formatted_ds["validation"], + tokenizer, + val_data.task_spec, + dpo_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + else: + assert len(val_dataset) == 1, f"Expected 1 validation dataset, got {len(val_dataset)}" return train_dataset, val_dataset, tokenizer, dpo_task_spec diff --git a/examples/run_rm.py b/examples/run_rm.py index 968521c986..d0f536f79a 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -56,30 +56,18 @@ def rm_preprocessor( idx: int, ) -> DatumSpec: """Process a datum dictionary for RM training.""" - # Custom preference dataset - if task_data_spec.task_name == "PreferenceData": - assert len(datum_dict["completions"]) == 2 # Currently only supporting 2 completions - # Lower rank is preferred - if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: - chosen_completion = datum_dict["completions"][0] - rejected_completion = datum_dict["completions"][1] - elif datum_dict["completions"][0]["rank"] > datum_dict["completions"][1]["rank"]: - chosen_completion = datum_dict["completions"][1] - rejected_completion = datum_dict["completions"][0] - else: - raise NotImplementedError("Ties are not supported yet.") - messages_chosen = datum_dict["context"] + chosen_completion["completion"] - messages_rejected = datum_dict["context"] + rejected_completion["completion"] - # Legacy dataset - elif task_data_spec.task_name == "HelpSteer3": - messages_chosen = datum_dict["prompt"] + [ - {"role": 
"assistant", "content": datum_dict["chosen_response"]} - ] - messages_rejected = datum_dict["prompt"] + [ - {"role": "assistant", "content": datum_dict["rejected_response"]} - ] + assert len(datum_dict["completions"]) == 2 # Currently only supporting 2 completions + # Lower rank is preferred + if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: + chosen_completion = datum_dict["completions"][0] + rejected_completion = datum_dict["completions"][1] + elif datum_dict["completions"][0]["rank"] > datum_dict["completions"][1]["rank"]: + chosen_completion = datum_dict["completions"][1] + rejected_completion = datum_dict["completions"][0] else: - raise ValueError(f"Unknown task name: {task_data_spec.task_name}") + raise NotImplementedError("Ties are not supported yet.") + messages_chosen = datum_dict["context"] + chosen_completion["completion"] + messages_rejected = datum_dict["context"] + rejected_completion["completion"] message_log_chosen = get_formatted_message_log( messages_chosen, tokenizer, task_data_spec @@ -129,16 +117,14 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): print("\nā–¶ Setting up data...") data_cls = data_config["dataset_name"] - # Custom preference dataset - if data_cls.startswith("PreferenceData:"): - _, _, data_path = data_cls.split(":", 2) + if data_cls == "PreferenceData": + data_path = data_config["train_data_path"] data = hf_datasets.PreferenceDataset(data_path, split="train") train_dataset = data.formatted_ds["train"] val_dataset = None print( f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." 
) - # Legacy dataset elif data_cls == "HelpSteer3": data = hf_datasets.HelpSteer3Dataset() train_dataset = data.formatted_ds["train"] @@ -169,12 +155,13 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): ) } if val_dataset else {} - if data_config.get("val_dataset_name") is not None: + if data_config.get("val_datasets") is not None: # Only supported for custom preference datasets - assert isinstance(data_config["val_dataset_name"], list), f"Invalid type for val_dataset_name: {type(data_config['val_dataset_name'])}" - for val_data_cls in data_config["val_dataset_name"]: - assert val_data_cls.startswith("PreferenceData:") - _, val_dataset_name, val_data_path = val_data_cls.split(":", 2) + assert isinstance(data_config["val_datasets"], list), f"Invalid type for val_datasets: {type(data_config['val_datasets'])}" + for val_dataset_config in data_config["val_datasets"]: + assert val_dataset_config["dataset_name"] == "PreferenceData" + val_dataset_name = val_dataset_config["val_data_name"] + val_data_path = val_dataset_config["val_data_path"] assert val_dataset_name not in val_dataset or val_dataset_name == "validation" # Users can override the default "validation" set if val_dataset_name == "validation" and "validation" in val_dataset: print(f" āœ“ Overriding the default validation dataset") diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 30ba78f6f2..424e8563ed 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -28,7 +28,7 @@ ) from nemo_rl.algorithms.utils import set_seed from nemo_rl.data import DataConfig -from nemo_rl.data.datasets import AllTaskProcessedDataset, dpo_collate_fn +from nemo_rl.data.datasets import AllTaskProcessedDataset, preference_collate_fn from nemo_rl.distributed.virtual_cluster import ClusterConfig, RayVirtualCluster from nemo_rl.models.policy import PolicyConfig from nemo_rl.models.policy.interfaces import PolicyInterface @@ -38,6 +38,12 @@ from nemo_rl.utils.nsys import 
maybe_gpu_profile_step from nemo_rl.utils.timer import Timer +from nemo_rl.data.llm_message_utils import ( + add_loss_mask_to_message_log, + batched_message_log_to_flat_message, +) +from nemo_rl.distributed.batched_data_dict import BatchedDataDict + class DPOSaveState(TypedDict): epoch: int # Track current epoch @@ -153,13 +159,7 @@ def setup( train_dataset, batch_size=policy_config["train_global_batch_size"], shuffle=True, - collate_fn=partial( - dpo_collate_fn, - tokenizer=tokenizer, - make_sequence_length_divisible_by=policy_config[ - "make_sequence_length_divisible_by" - ], - ), + collate_fn=preference_collate_fn, drop_last=True, ) @@ -169,19 +169,18 @@ def setup( ) train_dataloader.load_state_dict(dataloader_state_dict) - val_dataloader = StatefulDataLoader( - val_dataset, - batch_size=dpo_config["val_global_batch_size"], - shuffle=False, - collate_fn=partial( - dpo_collate_fn, - tokenizer=tokenizer, - make_sequence_length_divisible_by=policy_config[ - "make_sequence_length_divisible_by" - ], - ), - drop_last=True, - ) + if not isinstance(val_dataset, dict): + val_dataset = {"validation": val_dataset} + + val_dataloader = { + k: StatefulDataLoader( + v, + batch_size=dpo_config["val_global_batch_size"], + shuffle=False, + collate_fn=preference_collate_fn, + drop_last=True, + ) for k, v in val_dataset.items() + } # ========================== # Cluster @@ -234,12 +233,33 @@ def setup( ) -def add_ref_logprobs_to_data(dataloader, policy, master_config, is_val=False): +def add_ref_logprobs_to_data(dataloader, policy, master_config, tokenizer, is_val=False): dataloader_iter = iter(dataloader) while True: try: batch = next(dataloader_iter) + ## add loss mask based on role to every message + add_loss_mask_to_message_log( + batch["message_log"], + only_unmask_final=True, + ) + + cat_and_padded, input_lengths = batched_message_log_to_flat_message( + batch["message_log"], + pad_value_dict={"token_ids": tokenizer.pad_token_id}, + 
make_sequence_length_divisible_by=master_config["policy"]["make_sequence_length_divisible_by"], + ) + + batch: BatchedDataDict[Any] = BatchedDataDict( + { + "input_ids": cat_and_padded["token_ids"], + "input_lengths": input_lengths, + "token_mask": cat_and_padded["token_loss_mask"], + "sample_mask": batch["loss_multiplier"], + } + ) + micro_batch_size = ( master_config["dpo"]["val_micro_batch_size"] * 2 if is_val @@ -265,6 +285,30 @@ def add_ref_logprobs_to_data(dataloader, policy, master_config, is_val=False): # Training & Validation # ======================================================= def validate( + policy: PolicyInterface, + val_dataloader: StatefulDataLoader | dict[str, StatefulDataLoader], + tokenizer, + loss_fn, + step: int, + master_config: MasterConfig, + val_batches: int, + val_batch_size: int, + val_mbs: int, + logger: Logger, +): + for k, v in val_dataloader.items(): + k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs, logger) + if k == "validation": + prefix = "validation" + else: + prefix = f"validation-{k}" + + logger.log_metrics(k_val_metrics, step, prefix=prefix) + logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") + return None, None + + +def validate_one_dataset( policy: PolicyInterface, val_dataloader: StatefulDataLoader, tokenizer, @@ -274,6 +318,7 @@ def validate( val_batches: int, val_batch_size: int, val_mbs: int, + logger: Logger, ): """Run validation on the validation dataset.""" if val_dataloader is None: @@ -288,7 +333,7 @@ def validate( val_metrics = defaultdict(lambda: 0.0) num_valid_batches = 0 for batch_idx, val_batch in enumerate( - add_ref_logprobs_to_data(val_dataloader, policy, master_config, is_val=True) + add_ref_logprobs_to_data(val_dataloader, policy, master_config, tokenizer, is_val=True) ): ## just run model fwd val_results = policy.train( @@ -394,15 +439,13 @@ def dpo_train( 
val_batches=dpo_config["val_batches"], val_batch_size=dpo_config["val_global_batch_size"], val_mbs=dpo_config["val_micro_batch_size"], + logger=logger, ) if validation_result is not None: val_metrics, validation_timings = validation_result else: val_metrics, validation_timings = None, None - logger.log_metrics(val_metrics, total_steps, prefix="validation") - logger.log_metrics(validation_timings, total_steps, prefix="timing/validation") - policy.prepare_for_training() while ( @@ -411,7 +454,7 @@ def dpo_train( ): print(f"\n{'=' * 25} Epoch {current_epoch + 1}/{max_num_epochs} {'=' * 25}") - for batch in add_ref_logprobs_to_data(train_dataloader, policy, master_config): + for batch in add_ref_logprobs_to_data(train_dataloader, policy, master_config, tokenizer): print( f"\n{'=' * 25} Step {current_step + 1}/{min(len(train_dataloader), master_config['dpo']['max_num_steps'])} {'=' * 25}" ) @@ -449,17 +492,12 @@ def dpo_train( val_batches=dpo_config["val_batches"], val_batch_size=dpo_config["val_global_batch_size"], val_mbs=dpo_config["val_micro_batch_size"], + logger=logger, ) if validation_result is not None: val_metrics, validation_timings = validation_result else: val_metrics, validation_timings = None, None - logger.log_metrics( - validation_timings, total_steps + 1, prefix="timing/validation" - ) - logger.log_metrics( - val_metrics, total_steps + 1, prefix="validation" - ) ## Checkpointing dpo_save_state["consumed_samples"] += master_config["policy"][ diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 19342e0db7..0c1c6f7c86 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -231,14 +231,13 @@ def validate( loss_fn, step: int, master_config: MasterConfig, - rm_task_spec: TaskDataSpec, val_batches: int, val_batch_size: int, val_mbs: int, logger: Logger, ): for k, v in val_dataloader.items(): - k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, rm_task_spec, 
val_batches, val_batch_size, val_mbs) + k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs) if k == "validation": prefix = "validation" else: @@ -256,7 +255,6 @@ def validate_one_dataset( loss_fn, step: int, master_config: MasterConfig, - rm_task_spec: TaskDataSpec, val_batches: int, val_batch_size: int, val_mbs: int, @@ -433,7 +431,6 @@ def rm_train( loss_fn, step=0, master_config=master_config, - rm_task_spec=rm_task_spec, val_batches=rm_config["val_batches"], val_batch_size=rm_config["val_global_batch_size"], val_mbs=rm_config["val_micro_batch_size"], @@ -511,7 +508,6 @@ def rm_train( loss_fn, step=total_steps + 1, master_config=master_config, - rm_task_spec=rm_task_spec, val_batches=rm_config["val_batches"], val_batch_size=rm_config["val_global_batch_size"], val_mbs=rm_config["val_micro_batch_size"], diff --git a/nemo_rl/data/datasets.py b/nemo_rl/data/datasets.py index 2b844fc013..d056be7980 100644 --- a/nemo_rl/data/datasets.py +++ b/nemo_rl/data/datasets.py @@ -23,10 +23,6 @@ TaskDataProcessFnCallable, TaskDataSpec, ) -from nemo_rl.data.llm_message_utils import ( - add_loss_mask_to_message_log, - batched_message_log_to_flat_message, -) from nemo_rl.distributed.batched_data_dict import BatchedDataDict TokenizerType = PreTrainedTokenizerBase @@ -241,44 +237,3 @@ def preference_collate_fn( ) return batch - - -def dpo_collate_fn( - data_batch: list[DPODatumSpec], - tokenizer: TokenizerType, - make_sequence_length_divisible_by: int, -) -> BatchedDataDict[Any]: - """Collate function for DPO training. - - Args: - data_batch: List of data samples with message_log_chosen, message_log_rejected, length_chosen, length_rejected, loss_multiplier, idx, and task_name fields. 
- tokenizer: Tokenizer for text processing - make_sequence_length_divisible_by: Make the sequence length divisible by this value - - Returns: - BatchedDataDict with input_ids, input_lengths, token_mask, and sample_mask fields. - """ - batch = preference_collate_fn(data_batch) - - ## add loss mask based on role to every message - add_loss_mask_to_message_log( - batch["message_log"], - only_unmask_final=True, - ) - - cat_and_padded, input_lengths = batched_message_log_to_flat_message( - batch["message_log"], - pad_value_dict={"token_ids": tokenizer.pad_token_id}, - make_sequence_length_divisible_by=make_sequence_length_divisible_by, - ) - - train_data: BatchedDataDict[Any] = BatchedDataDict( - { - "input_ids": cat_and_padded["token_ids"], - "input_lengths": input_lengths, - "token_mask": cat_and_padded["token_loss_mask"], - "sample_mask": batch["loss_multiplier"], - } - ) - - return train_data diff --git a/nemo_rl/data/hf_datasets/dpo.py b/nemo_rl/data/hf_datasets/dpo.py index 03d5c7e872..d04ac281bd 100644 --- a/nemo_rl/data/hf_datasets/dpo.py +++ b/nemo_rl/data/hf_datasets/dpo.py @@ -12,9 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. from datasets import load_dataset +from typing import Any from nemo_rl.data.interfaces import TaskDataSpec +import warnings + + +def to_preference_data_format(data: dict[str, Any]) -> dict[str, list[dict[str, Any]]]: + return { + "context": [{"role": "user", "content": data.pop("prompt")}], + "completions": [ + {"rank": 0, "completion": [{"role": "assistant", "content": data.pop("chosen_response")}]}, + {"rank": 1, "completion": [{"role": "assistant", "content": data.pop("rejected_response")}]} + ] + } + class DPODataset: """Dataset class for Direct Preference Optimization (DPO) training. 
@@ -34,9 +47,15 @@ class DPODataset: """ def __init__(self, train_data_path: str, val_data_path: str): + warnings.warn( + "DPODataset is deprecated and will be removed in a future version. Use PreferenceDataset instead.", + category=DeprecationWarning, + stacklevel=2 + ) + self.formatted_ds = { - "train": load_dataset("json", data_files=train_data_path, split="train"), - "validation": load_dataset("json", data_files=val_data_path, split="train"), + "train": load_dataset("json", data_files=train_data_path, split="train").map(to_preference_data_format), + "validation": load_dataset("json", data_files=val_data_path, split="train").map(to_preference_data_format), } self.task_spec = TaskDataSpec( diff --git a/nemo_rl/data/hf_datasets/helpsteer3.py b/nemo_rl/data/hf_datasets/helpsteer3.py index 7d694c4c06..05c66cad64 100644 --- a/nemo_rl/data/hf_datasets/helpsteer3.py +++ b/nemo_rl/data/hf_datasets/helpsteer3.py @@ -19,7 +19,7 @@ from nemo_rl.data.interfaces import TaskDataSpec -def format_helpsteer3(data: dict[str, Any]) -> dict[str, str | dict[str, str]]: +def to_preference_data_format(data: dict[str, Any]) -> dict[str, str | dict[str, str]]: response_1 = data["response1"] response_2 = data["response2"] overall_preference = data["overall_preference"] @@ -40,9 +40,11 @@ def format_helpsteer3(data: dict[str, Any]) -> dict[str, str | dict[str, str]]: rejected = response_1 return { - "prompt": data["context"], - "chosen_response": chosen, - "rejected_response": rejected, + "context": [{"role": "user", "content": data["context"]}] if isinstance(data["context"], str) else data["context"], + "completions": [ + {"rank": 0, "completion": [{"role": "assistant", "content": chosen}]}, + {"rank": 1, "completion": [{"role": "assistant", "content": rejected}]}, + ], } @@ -51,7 +53,7 @@ class HelpSteer3Dataset: def __init__(self) -> None: ds = load_dataset("nvidia/HelpSteer3", "preference") - self.formatted_ds = ds.map(format_helpsteer3) + self.formatted_ds = 
ds.map(to_preference_data_format) self.task_spec = TaskDataSpec( task_name="HelpSteer3", diff --git a/tests/unit/data/hf_datasets/test_dpo_dataset.py b/tests/unit/data/hf_datasets/test_dpo_dataset.py index ed13df2c99..2e1fa3e2d2 100644 --- a/tests/unit/data/hf_datasets/test_dpo_dataset.py +++ b/tests/unit/data/hf_datasets/test_dpo_dataset.py @@ -94,11 +94,13 @@ def test_dpo_dataset_data_format(mock_dpo_data): # Verify data format train_sample = dataset.formatted_ds["train"][0] - assert "prompt" in train_sample - assert "chosen_response" in train_sample - assert "rejected_response" in train_sample + assert "context" in train_sample + assert "completions" in train_sample # Verify data content - assert train_sample["prompt"] == "What is 2+2?" - assert train_sample["chosen_response"] == "The answer is 4." - assert train_sample["rejected_response"] == "I don't know." + print(train_sample["completions"]) + assert train_sample["context"] == [{'content': 'What is 2+2?', 'role': 'user'}] + assert train_sample["completions"] == [ + {'completion': [{'content': 'The answer is 4.', 'role': 'assistant'}], 'rank': 0}, + {'completion': [{'content': "I don't know.", 'role': 'assistant'}], 'rank': 1} + ] \ No newline at end of file diff --git a/tests/unit/data/hf_datasets/test_helpsteer.py b/tests/unit/data/hf_datasets/test_helpsteer.py index 036ba75669..94df072fbc 100644 --- a/tests/unit/data/hf_datasets/test_helpsteer.py +++ b/tests/unit/data/hf_datasets/test_helpsteer.py @@ -17,7 +17,7 @@ from nemo_rl.data.hf_datasets.helpsteer3 import ( HelpSteer3Dataset, - format_helpsteer3, + to_preference_data_format, ) @@ -31,7 +31,7 @@ def helpsteer3_dataset(): yield -def test_format_helpsteer3(): +def test_to_preference_data_format(): """Test the format_helpsteer3 function with different preference values.""" # Test case 1: response1 is preferred (overall_preference < 0) data1 = { @@ -40,10 +40,12 @@ def test_format_helpsteer3(): "response2": "I don't know.", "overall_preference": -1, } - 
result1 = format_helpsteer3(data1) - assert result1["prompt"] == "What is 2+2?" - assert result1["chosen_response"] == "The answer is 4." - assert result1["rejected_response"] == "I don't know." + result1 = to_preference_data_format(data1) + assert result1["context"] == [{"content": "What is 2+2?", "role": "user"}] + assert result1["completions"] == [ + {"rank": 0, "completion": [{"role": "assistant", "content": "The answer is 4."}]}, + {"rank": 1, "completion": [{"role": "assistant", "content": "I don't know."}] + }] # Test case 2: response2 is preferred (overall_preference > 0) data2 = { @@ -52,10 +54,12 @@ def test_format_helpsteer3(): "response2": "The capital of France is Paris.", "overall_preference": 1, } - result2 = format_helpsteer3(data2) - assert result2["prompt"] == "What is the capital of France?" - assert result2["chosen_response"] == "The capital of France is Paris." - assert result2["rejected_response"] == "The capital of France is London." + result2 = to_preference_data_format(data2) + assert result2["context"] == [{"content": "What is the capital of France?", "role": "user"}] + assert result2["completions"] == [ + {"rank": 0, "completion": [{"role": "assistant", "content": "The capital of France is Paris."}]}, + {"rank": 1, "completion": [{"role": "assistant", "content": "The capital of France is London."}]} + ] # Test case 3: no preference (overall_preference = 0) data3 = { @@ -64,13 +68,36 @@ def test_format_helpsteer3(): "response2": "The weather is sunny.", "overall_preference": 0, } - result3 = format_helpsteer3(data3) - assert result3["prompt"] == "What is the weather like?" + result3 = to_preference_data_format(data3) + assert result3["context"] == [{"content": "What is the weather like?", "role": "user"}] # When preference is 0, neither response is preferred, so # response 1 is used for both chosen and rejected - assert result3["chosen_response"] == "It's sunny today." - assert result3["rejected_response"] == "It's sunny today." 
+ assert result3["completions"] == [ + {"rank": 0, "completion": [{"role": "assistant", "content": "It's sunny today."}]}, + {"rank": 1, "completion": [{"role": "assistant", "content": "It's sunny today."}]} + ] + # Test case 4: context is a list of dicts + data1 = { + "context": [ + {"role": "user", "content": "Can I ask you a question?"}, + {"role": "assistant", "content": "Sure, what do you want to know?"}, + {"role": "user", "content": "What is 2+2?"} + ], + "response1": "4.", + "response2": "I don't know.", + "overall_preference": -1, + } + result1 = to_preference_data_format(data1) + assert result1["context"] == [ + {"role": "user", "content": "Can I ask you a question?"}, + {"role": "assistant", "content": "Sure, what do you want to know?"}, + {"role": "user", "content": "What is 2+2?"} + ] + assert result1["completions"] == [ + {"rank": 0, "completion": [{"role": "assistant", "content": "4."}]}, + {"rank": 1, "completion": [{"role": "assistant", "content": "I don't know."}] + }] def test_helpsteer3_dataset_initialization(helpsteer3_dataset): """Test that HelpSteer3Dataset initializes correctly.""" @@ -96,6 +123,5 @@ def test_helpsteer3_dataset_data_format(helpsteer3_dataset): # Verify data format sample = dataset.formatted_ds["train"][0] - assert "prompt" in sample - assert "chosen_response" in sample - assert "rejected_response" in sample + assert "context" in sample + assert "completions" in sample diff --git a/tests/unit/data/hf_datasets/test_preference_dataset.py b/tests/unit/data/hf_datasets/test_preference_dataset.py index e15d88dfa1..97fc867b2f 100644 --- a/tests/unit/data/hf_datasets/test_preference_dataset.py +++ b/tests/unit/data/hf_datasets/test_preference_dataset.py @@ -82,23 +82,23 @@ def test_preference_dataset_initialization(mock_preference_data): """Test that PreferenceDataset initializes correctly with valid data files.""" preference_path = mock_preference_data - dataset = PreferenceDataset(dataset_path=preference_path) + dataset = 
PreferenceDataset(dataset_path=preference_path, split="train") # Verify dataset initialization assert dataset.task_spec.task_name == "PreferenceData" # Verify formatted_ds structure - assert "local" in dataset.formatted_ds - assert len(dataset.formatted_ds["local"]) == 2 + assert "train" in dataset.formatted_ds + assert len(dataset.formatted_ds["train"]) == 2 def test_preference_dataset_data_format(mock_preference_data): """Test that PreferenceDataset correctly loads and formats the data.""" preference_path = mock_preference_data - dataset = PreferenceDataset(dataset_path=preference_path) + dataset = PreferenceDataset(dataset_path=preference_path, split="train") # Verify data format - sample = dataset.formatted_ds["local"][0] + sample = dataset.formatted_ds["train"][0] assert "context" in sample assert "completions" in sample diff --git a/tests/unit/data/test_datasets.py b/tests/unit/data/test_datasets.py index d879b09a85..9050815480 100755 --- a/tests/unit/data/test_datasets.py +++ b/tests/unit/data/test_datasets.py @@ -16,17 +16,13 @@ import torch -from nemo_rl.data.datasets import dpo_collate_fn +from nemo_rl.data.datasets import preference_collate_fn from nemo_rl.data.interfaces import DatumSpec from nemo_rl.distributed.batched_data_dict import BatchedDataDict -def test_dpo_collate_fn(): - """Test that dpo_collate_fn correctly processes DPO training data.""" - # Create mock tokenizer - mock_tokenizer = MagicMock() - mock_tokenizer.pad_token_id = 0 - +def test_preference_collate_fn(): + """Test that preference_collate_fn correctly processes preference data.""" # Create test data with varying sequence lengths data_batch = [ DatumSpec( @@ -93,56 +89,32 @@ def test_dpo_collate_fn(): ), ] - # Call dpo_collate_fn - train_data = dpo_collate_fn( - data_batch, mock_tokenizer, make_sequence_length_divisible_by=16 - ) + # Call preference_collate_fn + train_data = preference_collate_fn(data_batch) - # Verify the output structure - assert isinstance(train_data, 
BatchedDataDict) - assert "input_ids" in train_data - assert "input_lengths" in train_data - assert "token_mask" in train_data - assert "sample_mask" in train_data + # Verify the output structure matches the actual format + assert "message_log" in train_data + assert "length" in train_data + assert "loss_multiplier" in train_data + assert "task_name" in train_data + assert "idx" in train_data + assert "batch_max_length" in train_data # Verify batch size is doubled (chosen + rejected for each example) - assert train_data["input_ids"].shape[0] == 4 # 2 examples * 2 (chosen + rejected) - - # Verify input_ids shape and padding - max_length = 16 # max of all sequence lengths, padded to be divisible by 16 - assert train_data["input_ids"].shape == (4, max_length) + assert len(train_data["message_log"]) == 4 # 2 examples * 2 (chosen + rejected) - # Verify input_lengths + # Verify length tensor expected_lengths = [7, 5, 6, 7] # chosen1, rejected1, chosen2, rejected2 - assert torch.equal(train_data["input_lengths"], torch.tensor(expected_lengths)) + assert torch.equal(train_data["length"], torch.tensor(expected_lengths)) - # Verify token_mask - assert train_data["token_mask"].shape == (4, max_length) - # First example chosen (length 7) - assert torch.all(train_data["token_mask"][0][0:3] == 0) - assert torch.all(train_data["token_mask"][0][3:7] == 1) - # First example rejected (length 5) - assert torch.all(train_data["token_mask"][1][0:3] == 0) - assert torch.all(train_data["token_mask"][1][3:5] == 1) - assert torch.all(train_data["token_mask"][1][5:] == 0) + # Verify loss_multiplier tensor + expected_loss_multiplier = [1.0, 1.0, 0.0, 0.0] # loss_multiplier repeated for chosen/rejected + assert torch.equal(train_data["loss_multiplier"], torch.tensor(expected_loss_multiplier)) - # Verify sample_mask - expected_sample_mask = [ - 1.0, - 1.0, - 0.0, - 0.0, - ] # loss_multiplier repeated for chosen/rejected - assert torch.equal(train_data["sample_mask"], 
torch.tensor(expected_sample_mask)) + # Verify idx list + expected_idx = [0, 0, 1, 1] # idx repeated for chosen/rejected + assert train_data["idx"] == expected_idx - # Verify message content is preserved - # First example chosen - assert torch.equal(train_data["input_ids"][0][0:3], torch.tensor([1, 2, 3])) # user - assert torch.equal( - train_data["input_ids"][0][3:7], torch.tensor([4, 5, 6, 7]) - ) # assistant - # First example rejected - assert torch.equal(train_data["input_ids"][1][0:3], torch.tensor([1, 2, 3])) # user - assert torch.equal( - train_data["input_ids"][1][3:5], torch.tensor([8, 9]) - ) # assistant + # Verify batch_max_length tensor + expected_batch_max_length = [7, 7, 7, 7] # max length for each sequence + assert torch.equal(train_data["batch_max_length"], torch.tensor(expected_batch_max_length)) From 6ca42878104978bb9466f3557ff0ccfed4f1508e Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Mon, 4 Aug 2025 12:24:09 -0700 Subject: [PATCH 29/47] nit code and docs Signed-off-by: Julien Veron Vialard --- docs/guides/dpo.md | 31 ++++++---------- docs/guides/rm.md | 29 ++++++--------- examples/configs/dpo.yaml | 7 +++- examples/configs/rm.yaml | 14 +++----- examples/run_dpo.py | 35 +++++++++++-------- examples/run_rm.py | 29 +++++++++------ nemo_rl/algorithms/dpo.py | 2 +- nemo_rl/algorithms/rm.py | 7 ++-- nemo_rl/data/hf_datasets/dpo.py | 8 ++--- .../data/hf_datasets/preference_dataset.py | 2 +- tests/unit/data/hf_datasets/test_helpsteer.py | 2 +- .../hf_datasets/test_preference_dataset.py | 2 +- 12 files changed, 83 insertions(+), 85 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index 179c8f34e5..2906d944b8 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -88,34 +88,25 @@ Currently, DPO training supports only two completions (where the lowest rank is NeMo RL supports the `HelpSteer3` dataset. 
This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. -We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows: +We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows to use such a custom preference dataset: ``` data: - dataset_name: PreferenceData + dataset_name: PreferenceDataset train_data_path: - val_datasets: - - dataset_name: PreferenceData - val_data_name: - val_data_path: - - dataset_name: PreferenceData - val_data_name: - val_data_path: + val_data_path: ``` -Note: -- If you are using a custom preference dataset for training, you must specify a custom preference dataset for validation. -- If you are using a logger, the prefix used for the custom validation preference dataset will be `validation-`. - -When using `HelpSteer3` as the training dataset, the default validation set is also used and logged under the prefix `validation`. You can replace it with a custom preference dataset as follows: +with support for multiple validation sets achieved with: ``` data: - dataset_name: HelpSteer3 - val_datasets: - - dataset_name: PreferenceData - val_data_name: validation - val_data_path: + dataset_name: PreferenceDataset + train_data_path: + val_data_paths: + : + : ``` +If you are using a logger, the prefix used for each validation set will be `validation-`. -[DPODataset](../../nemo_rl/data/hf_datasets/dpo.py) class is deprecated. This class is also compatible with JSONL-formatted preference datsets. It assumes train and validation datasets have been split and processed into the expected format offline. The JSONL files should consist of examples with `prompt`, `chosen_response`, and `rejected_response` keys. 
+The older [DPODataset](../../nemo_rl/data/hf_datasets/dpo.py) class is deprecated. This class is also compatible with JSONL-formatted preference datasets. It assumes train and validation datasets have been split and processed into the expected format offline. The JSONL files should consist of examples with `prompt`, `chosen_response`, and `rejected_response` keys.
 
 ## DPO-Specific Parameters
 
diff --git a/docs/guides/rm.md b/docs/guides/rm.md
index 6a6f460107..c256b891b7 100644
--- a/docs/guides/rm.md
+++ b/docs/guides/rm.md
@@ -77,29 +77,20 @@ Currently, RM training supports only two completions (where the lowest rank is p
 
 NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk.
 
-We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows:
+We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows to use such a custom preference dataset:
```
data:
-  dataset_name: PreferenceData
+  dataset_name: PreferenceDataset
   train_data_path: 
-  val_datasets:
-  - dataset_name: PreferenceData
-    val_data_name: 
-    val_data_path: 
-  - dataset_name: PreferenceData
-    val_data_name: 
-    val_data_path: 
+  val_data_path: 
```
-Note:
-- If you are using a custom preference dataset for training, you must specify a custom preference dataset for validation.
-- If you are using a logger, the prefix used for the custom validation preference dataset will be `validation-`.
-
-When using `HelpSteer3` as the training dataset, the default validation set is also used and logged under the prefix `validation`.
You can replace it with a custom preference dataset as follows: +with support for multiple validation sets achieved with: ``` data: - dataset_name: HelpSteer3 - val_datasets: - - dataset_name: PreferenceData - val_data_name: validation - val_data_path: + dataset_name: PreferenceDataset + train_data_path: + val_data_paths: + : + : ``` +If you are using a logger, the prefix used for each validation set will be `validation-`. \ No newline at end of file diff --git a/examples/configs/dpo.yaml b/examples/configs/dpo.yaml index 4524338e4f..e6a2b7d5f9 100755 --- a/examples/configs/dpo.yaml +++ b/examples/configs/dpo.yaml @@ -148,8 +148,13 @@ policy: data_parallel_sharding_strategy: "optim_grads_params" data: - dataset_name: "HelpSteer3" max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: HelpSteer3 + # You can use the following to configure a custom preference dataset for training and validation + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_path: + logger: log_dir: "logs" # Base directory for all logs wandb_enabled: false # Make sure you do a ``wandb login [Your API key]'' before running diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index 872e92009a..f1c66514ca 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -124,15 +124,11 @@ policy: data: max_input_seq_length: ${policy.max_total_sequence_length} - dataset_name: "HelpSteer3" - # You can optionally configure (multiple) custom validation preference datasets as follows: - # val_datasets: - # - dataset_name: PreferenceData - # val_data_name: - # val_data_path: - # - dataset_name: PreferenceData - # val_data_name: - # val_data_path: + dataset_name: HelpSteer3 + # You can use the following to configure a custom preference dataset for training and validation + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_path: logger: log_dir: "logs" # Base directory for all logs diff --git a/examples/run_dpo.py b/examples/run_dpo.py 
index 66a6b98707..acbb6f6810 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -159,8 +159,9 @@ def dpo_preprocessor( def setup_data(data_config: DataConfig, policy_config: PolicyConfig): print("\nā–¶ Setting up data...") + data_cls = data_config["dataset_name"] - if data_config["dataset_name"] == "PreferenceData": + if data_cls == "PreferenceDataset": data_path = data_config["train_data_path"] data = hf_datasets.PreferenceDataset(data_path, split="train") train_dataset = data.formatted_ds["train"] @@ -168,14 +169,14 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): print( f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." ) - elif data_config["dataset_name"] == "HelpSteer3": + elif data_cls == "HelpSteer3": data = hf_datasets.HelpSteer3Dataset() train_dataset = data.formatted_ds["train"] val_dataset = data.formatted_ds["validation"] print( f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." 
) - elif data_config["dataset_name"] == "DPODataset": + elif data_cls == "DPODataset": data = hf_datasets.DPODataset( train_data_path=data_config["train_data_path"], val_data_path=data_config["val_data_path"], @@ -209,17 +210,26 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): ) } if val_dataset else {} - if data_config.get("val_datasets") is not None: - # Only supported for custom preference datasets - assert isinstance(data_config["val_datasets"], list), f"Invalid type for val_datasets: {type(data_config['val_datasets'])}" - for val_dataset_config in data_config["val_datasets"]: - assert val_dataset_config["dataset_name"] == "PreferenceData" - val_dataset_name = val_dataset_config["val_data_name"] - val_data_path = val_dataset_config["val_data_path"] + if data_cls == "PreferenceDataset": + if data_config.get("val_data_path"): + assert data_config.get("val_data_paths") is None, "val_data_path and val_data_paths cannot be used together" + val_data_paths = [{"validation": data_config.get("val_data_path")}] + + elif data_config.get("val_data_paths"): + assert isinstance(data_config["val_data_paths"], list), f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + val_data_paths = data_config.get("val_data_paths") + + else: + raise ValueError("Either val_data_path or val_data_paths must be provided") + + for d in val_data_paths: + assert len(d) == 1, "val_data_paths must be a list of pairs." 
+ val_dataset_name = list(d.keys())[0] + val_dataset_path = list(d.values())[0] assert val_dataset_name not in val_dataset or val_dataset_name == "validation" # Users can override the default "validation" set if val_dataset_name == "validation" and "validation" in val_dataset: print(f" āœ“ Overriding the default validation dataset") - val_data = hf_datasets.PreferenceDataset(val_data_path, split="validation") + val_data = hf_datasets.PreferenceDataset(val_dataset_path, split="validation") print( f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["validation"])} samples." ) @@ -230,12 +240,9 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): dpo_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) - else: - assert len(val_dataset) == 1, f"Expected 1 validation dataset, got {len(val_dataset)}" return train_dataset, val_dataset, tokenizer, dpo_task_spec - def main(): """Main entry point.""" args, overrides = parse_args() diff --git a/examples/run_rm.py b/examples/run_rm.py index d0f536f79a..c872579a15 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -117,7 +117,7 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): print("\nā–¶ Setting up data...") data_cls = data_config["dataset_name"] - if data_cls == "PreferenceData": + if data_cls == "PreferenceDataset": data_path = data_config["train_data_path"] data = hf_datasets.PreferenceDataset(data_path, split="train") train_dataset = data.formatted_ds["train"] @@ -155,17 +155,26 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): ) } if val_dataset else {} - if data_config.get("val_datasets") is not None: - # Only supported for custom preference datasets - assert isinstance(data_config["val_datasets"], list), f"Invalid type for val_datasets: {type(data_config['val_datasets'])}" - for val_dataset_config in data_config["val_datasets"]: - assert val_dataset_config["dataset_name"] == "PreferenceData" - 
val_dataset_name = val_dataset_config["val_data_name"] - val_data_path = val_dataset_config["val_data_path"] + if data_cls == "PreferenceDataset": + if data_config.get("val_data_path"): + assert data_config.get("val_data_paths") is None, "val_data_path and val_data_paths cannot be used together" + val_data_paths = [{"validation": data_config.get("val_data_path")}] + + elif data_config.get("val_data_paths"): + assert isinstance(data_config["val_data_paths"], list), f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + val_data_paths = data_config.get("val_data_paths") + + else: + raise ValueError("Either val_data_path or val_data_paths must be provided") + + for d in val_data_paths: + assert len(d) == 1, "val_data_paths must be a list of pairs." + val_dataset_name = list(d.keys())[0] + val_dataset_path = list(d.values())[0] assert val_dataset_name not in val_dataset or val_dataset_name == "validation" # Users can override the default "validation" set if val_dataset_name == "validation" and "validation" in val_dataset: print(f" āœ“ Overriding the default validation dataset") - val_data = hf_datasets.PreferenceDataset(val_data_path, split="validation") + val_data = hf_datasets.PreferenceDataset(val_dataset_path, split="validation") print( f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["validation"])} samples." 
) @@ -176,8 +185,6 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): rm_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) - else: - assert len(val_dataset) == 1, f"Expected 1 validation dataset, got {len(val_dataset)}" return train_dataset, val_dataset, rm_task_spec diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 424e8563ed..a0e0eb61b3 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -251,7 +251,7 @@ def add_ref_logprobs_to_data(dataloader, policy, master_config, tokenizer, is_va make_sequence_length_divisible_by=master_config["policy"]["make_sequence_length_divisible_by"], ) - batch: BatchedDataDict[Any] = BatchedDataDict( + batch = BatchedDataDict( { "input_ids": cat_and_padded["token_ids"], "input_lengths": input_lengths, diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 0c1c6f7c86..c5abd4f05a 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -237,7 +237,7 @@ def validate( logger: Logger, ): for k, v in val_dataloader.items(): - k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs) + k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs, k) if k == "validation": prefix = "validation" else: @@ -258,6 +258,7 @@ def validate_one_dataset( val_batches: int, val_batch_size: int, val_mbs: int, + dataset_name: str, ): """Run validation on one validation dataset.""" if val_dataloader is None: @@ -366,7 +367,7 @@ def validate_one_dataset( if num_valid_batches > 0: # Print summary of validation results - print("\nšŸ“Š Validation Results:") + print(f"\nšŸ“Š Validation Results for {dataset_name}:") print(f" • Validation loss: {val_metrics['val_loss']:.4f}") print(f" • Validation accuracy: {val_metrics['accuracy']:.4f}") print( @@ -380,7 +381,7 @@ def 
validate_one_dataset( ) # Print timing information - print("\n ā±ļø Validation Timing:") + print(f"\n ā±ļø Validation Timing for {dataset_name}:") validation_time = timing_metrics.get("total_validation_time", 0) print(f" • Total validation time: {validation_time:.2f}s") diff --git a/nemo_rl/data/hf_datasets/dpo.py b/nemo_rl/data/hf_datasets/dpo.py index d04ac281bd..26154cd779 100644 --- a/nemo_rl/data/hf_datasets/dpo.py +++ b/nemo_rl/data/hf_datasets/dpo.py @@ -21,10 +21,10 @@ def to_preference_data_format(data: dict[str, Any]) -> dict[str, list[dict[str, Any]]]: return { - "context": [{"role": "user", "content": data.pop("prompt")}], + "context": data["prompt"] if isinstance(data["prompt"], list) else [{"role": "user", "content": data["prompt"]}], "completions": [ - {"rank": 0, "completion": [{"role": "assistant", "content": data.pop("chosen_response")}]}, - {"rank": 1, "completion": [{"role": "assistant", "content": data.pop("rejected_response")}]} + {"rank": 0, "completion": [{"role": "assistant", "content": data["chosen_response"]}]}, + {"rank": 1, "completion": [{"role": "assistant", "content": data["rejected_response"]}]} ] } @@ -48,7 +48,7 @@ class DPODataset: def __init__(self, train_data_path: str, val_data_path: str): warnings.warn( - "DPODataset is deprecated and will be removed in a future version. Use PreferenceDataset instead.", + "DPODataset is deprecated and will be removed in a future version. 
Use PreferenceDataset instead (see function `to_preference_data_format()` on how to convert your data to this new format).", category=DeprecationWarning, stacklevel=2 ) diff --git a/nemo_rl/data/hf_datasets/preference_dataset.py b/nemo_rl/data/hf_datasets/preference_dataset.py index d864297488..5d03125105 100644 --- a/nemo_rl/data/hf_datasets/preference_dataset.py +++ b/nemo_rl/data/hf_datasets/preference_dataset.py @@ -40,5 +40,5 @@ def __init__(self, dataset_path: str, split: str) -> None: self.formatted_ds = DatasetDict({split: load_dataset("json", data_files=dataset_path, split="train")}) self.task_spec = TaskDataSpec( - task_name="PreferenceData", + task_name="PreferenceDataset", ) \ No newline at end of file diff --git a/tests/unit/data/hf_datasets/test_helpsteer.py b/tests/unit/data/hf_datasets/test_helpsteer.py index 94df072fbc..9e6afdc67d 100644 --- a/tests/unit/data/hf_datasets/test_helpsteer.py +++ b/tests/unit/data/hf_datasets/test_helpsteer.py @@ -32,7 +32,7 @@ def helpsteer3_dataset(): def test_to_preference_data_format(): - """Test the format_helpsteer3 function with different preference values.""" + """Test the `to_preference_data_format()` function with different preference values.""" # Test case 1: response1 is preferred (overall_preference < 0) data1 = { "context": "What is 2+2?", diff --git a/tests/unit/data/hf_datasets/test_preference_dataset.py b/tests/unit/data/hf_datasets/test_preference_dataset.py index 97fc867b2f..8ca97bcde4 100644 --- a/tests/unit/data/hf_datasets/test_preference_dataset.py +++ b/tests/unit/data/hf_datasets/test_preference_dataset.py @@ -85,7 +85,7 @@ def test_preference_dataset_initialization(mock_preference_data): dataset = PreferenceDataset(dataset_path=preference_path, split="train") # Verify dataset initialization - assert dataset.task_spec.task_name == "PreferenceData" + assert dataset.task_spec.task_name == "PreferenceDataset" # Verify formatted_ds structure assert "train" in dataset.formatted_ds From 
1894caf5485b99ec58e56e09e3814c9462b72829 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 5 Aug 2025 13:34:04 -0700 Subject: [PATCH 30/47] put data processing in collate_fn Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/dpo.py | 43 ++++++++--------------- nemo_rl/algorithms/rm.py | 74 +++++++++------------------------------ nemo_rl/data/datasets.py | 34 ++++++++++++++++-- 3 files changed, 62 insertions(+), 89 deletions(-) diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index a0e0eb61b3..577a071a39 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -38,12 +38,6 @@ from nemo_rl.utils.nsys import maybe_gpu_profile_step from nemo_rl.utils.timer import Timer -from nemo_rl.data.llm_message_utils import ( - add_loss_mask_to_message_log, - batched_message_log_to_flat_message, -) -from nemo_rl.distributed.batched_data_dict import BatchedDataDict - class DPOSaveState(TypedDict): epoch: int # Track current epoch @@ -159,7 +153,13 @@ def setup( train_dataset, batch_size=policy_config["train_global_batch_size"], shuffle=True, - collate_fn=preference_collate_fn, + collate_fn=partial( + preference_collate_fn, + tokenizer=tokenizer, + make_sequence_length_divisible_by=policy_config[ + "make_sequence_length_divisible_by" + ], + ), drop_last=True, ) @@ -177,7 +177,13 @@ def setup( v, batch_size=dpo_config["val_global_batch_size"], shuffle=False, - collate_fn=preference_collate_fn, + collate_fn=partial( + preference_collate_fn, + tokenizer=tokenizer, + make_sequence_length_divisible_by=policy_config[ + "make_sequence_length_divisible_by" + ], + ), drop_last=True, ) for k, v in val_dataset.items() } @@ -239,27 +245,6 @@ def add_ref_logprobs_to_data(dataloader, policy, master_config, tokenizer, is_va try: batch = next(dataloader_iter) - ## add loss mask based on role to every message - add_loss_mask_to_message_log( - batch["message_log"], - only_unmask_final=True, - ) - - cat_and_padded, input_lengths = 
batched_message_log_to_flat_message( - batch["message_log"], - pad_value_dict={"token_ids": tokenizer.pad_token_id}, - make_sequence_length_divisible_by=master_config["policy"]["make_sequence_length_divisible_by"], - ) - - batch = BatchedDataDict( - { - "input_ids": cat_and_padded["token_ids"], - "input_lengths": input_lengths, - "token_mask": cat_and_padded["token_loss_mask"], - "sample_mask": batch["loss_multiplier"], - } - ) - micro_batch_size = ( master_config["dpo"]["val_micro_batch_size"] * 2 if is_val diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index c5abd4f05a..b0696075d1 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -16,6 +16,7 @@ import warnings from pathlib import Path from typing import Optional, TypedDict +from functools import partial import numpy as np import torch @@ -32,11 +33,6 @@ preference_collate_fn, ) from nemo_rl.data.interfaces import TaskDataSpec -from nemo_rl.data.llm_message_utils import ( - add_loss_mask_to_message_log, - batched_message_log_to_flat_message, -) -from nemo_rl.distributed.batched_data_dict import BatchedDataDict from nemo_rl.distributed.virtual_cluster import ClusterConfig, RayVirtualCluster from nemo_rl.models.policy import PolicyConfig from nemo_rl.models.policy.interfaces import PolicyInterface @@ -147,7 +143,13 @@ def setup( train_dataset, batch_size=policy_config["train_global_batch_size"], shuffle=True, - collate_fn=preference_collate_fn, + collate_fn=partial( + preference_collate_fn, + tokenizer=tokenizer, + make_sequence_length_divisible_by=policy_config[ + "make_sequence_length_divisible_by" + ], + ), drop_last=True, ) @@ -165,7 +167,13 @@ def setup( v, batch_size=rm_config["val_global_batch_size"], shuffle=False, - collate_fn=preference_collate_fn, + collate_fn=partial( + preference_collate_fn, + tokenizer=tokenizer, + make_sequence_length_divisible_by=policy_config[ + "make_sequence_length_divisible_by" + ], + ), drop_last=True, ) for k, v in val_dataset.items() } 
@@ -279,32 +287,9 @@ def validate_one_dataset( policy.prepare_for_training() for batch_idx, val_batch in enumerate(val_dataloader): - ## add loss mask based on role to every message - add_loss_mask_to_message_log( - val_batch["message_log"], - roles_to_train_on=["assistant"], - ) - - cat_and_padded, input_lengths = batched_message_log_to_flat_message( - val_batch["message_log"], - pad_value_dict={"token_ids": tokenizer.pad_token_id}, - make_sequence_length_divisible_by=master_config["policy"][ - "make_sequence_length_divisible_by" - ], - ) - - val_data: BatchedDataDict = BatchedDataDict( - { - "input_ids": cat_and_padded["token_ids"], - "input_lengths": input_lengths, - "token_mask": cat_and_padded["token_loss_mask"], - "sample_mask": val_batch["loss_multiplier"], - } - ) - ## just run model fwd val_results = policy.train( - val_data, + val_batch, loss_fn, eval_mode=True, ## NOTE: we double the batch size here because each preference example corresponds to a pair of @@ -455,35 +440,10 @@ def rm_train( with timer.time("total_step_time"): # Prepare batch and generate responses - print("ā–¶ Preparing batch...") - with timer.time("data_processing"): - ## add loss mask based on role to every message - add_loss_mask_to_message_log( - batch["message_log"], - roles_to_train_on=["assistant"], - ) - - cat_and_padded, input_lengths = batched_message_log_to_flat_message( - batch["message_log"], - pad_value_dict={"token_ids": tokenizer.pad_token_id}, - make_sequence_length_divisible_by=master_config["policy"][ - "make_sequence_length_divisible_by" - ], - ) - - train_data: BatchedDataDict = BatchedDataDict( - { - "input_ids": cat_and_padded["token_ids"], - "input_lengths": input_lengths, - "token_mask": cat_and_padded["token_loss_mask"], - "sample_mask": batch["loss_multiplier"], - } - ) - print("ā–¶ Taking a training step...") train_results = policy.train( - train_data, + batch, loss_fn, eval_mode=False, ## NOTE: we double the batch size here because each preference example 
corresponds to a pair of diff --git a/nemo_rl/data/datasets.py b/nemo_rl/data/datasets.py index d056be7980..f751a4eedd 100644 --- a/nemo_rl/data/datasets.py +++ b/nemo_rl/data/datasets.py @@ -23,6 +23,10 @@ TaskDataProcessFnCallable, TaskDataSpec, ) +from nemo_rl.data.llm_message_utils import ( + add_loss_mask_to_message_log, + batched_message_log_to_flat_message, +) from nemo_rl.distributed.batched_data_dict import BatchedDataDict TokenizerType = PreTrainedTokenizerBase @@ -195,6 +199,8 @@ def eval_collate_fn(data_batch: list[DatumSpec]) -> BatchedDataDict[Any]: def preference_collate_fn( data_batch: list[DPODatumSpec], + tokenizer: TokenizerType, + make_sequence_length_divisible_by: int, ) -> BatchedDataDict[Any]: """Collate function for preference data training. @@ -204,9 +210,10 @@ def preference_collate_fn( Args: data_batch: List of data samples with message_log_chosen, message_log_rejected, length_chosen, length_rejected, loss_multiplier, idx, and task_name fields. - + tokenizer: Tokenizer for text processing + make_sequence_length_divisible_by: Make the sequence length divisible by this value Returns: - BatchedDataDict with message_log, length, loss_multiplier, task_name, and idx fields. + BatchedDataDict with input_ids, input_lengths, token_mask, and sample_mask fields. 
""" message_log = [] length = [] @@ -236,4 +243,25 @@ def preference_collate_fn( batch_max_length=batch_max_length, ) - return batch + add_loss_mask_to_message_log( + batch["message_log"], + only_unmask_final=True, + roles_to_train_on=["assistant"], + ) + + cat_and_padded, input_lengths = batched_message_log_to_flat_message( + batch["message_log"], + pad_value_dict={"token_ids": tokenizer.pad_token_id}, + make_sequence_length_divisible_by=make_sequence_length_divisible_by, + ) + + data: BatchedDataDict[Any] = BatchedDataDict( + { + "input_ids": cat_and_padded["token_ids"], + "input_lengths": input_lengths, + "token_mask": cat_and_padded["token_loss_mask"], + "sample_mask": batch["loss_multiplier"], + } + ) + + return data \ No newline at end of file From 74eb553c3748c90ca5ea473419b51c39d010e62c Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Tue, 5 Aug 2025 14:32:24 -0700 Subject: [PATCH 31/47] updates to val metrics and save state Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/dpo.py | 22 ++++++++++++++-------- nemo_rl/algorithms/rm.py | 18 ++++++++++++------ 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 577a071a39..143ad391a5 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -281,16 +281,22 @@ def validate( val_mbs: int, logger: Logger, ): + val_metrics, validation_timings = {}, {} for k, v in val_dataloader.items(): - k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs, logger) + k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs, k) if k == "validation": - prefix = "validation" + prefix = "val" else: - prefix = f"validation-{k}" + prefix = f"{k}-val" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, 
prefix=f"timing/{prefix}") - return None, None + + val_metrics[prefix+"_loss"] = k_val_metrics["val_loss"] + val_metrics[prefix+"_accuracy"] = k_val_metrics["accuracy"] + validation_timings[prefix+"_total_validation_time"] = k_validation_timings["total_validation_time"] + + return val_metrics, validation_timings def validate_one_dataset( @@ -303,7 +309,7 @@ def validate_one_dataset( val_batches: int, val_batch_size: int, val_mbs: int, - logger: Logger, + dataset_name: str, ): """Run validation on the validation dataset.""" if val_dataloader is None: @@ -366,12 +372,12 @@ def validate_one_dataset( else: # Print summary of validation results - print("\nšŸ“Š Validation Results:") + print(f"\nšŸ“Š Validation Results for `{dataset_name}` set:") print(f" • Validation loss: {float(val_metrics['loss']):.4f}") print(f" • Validation accuracy: {float(val_metrics['accuracy']):.4f}") # Print timing information - print("\n ā±ļø Validation Timing:") + print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") validation_time = timing_metrics.get("total_validation_time", 0) print(f" • Total validation time: {validation_time:.2f}s") @@ -497,7 +503,7 @@ def dpo_train( dpo_save_state["total_steps"] = total_steps + 1 dpo_save_state["epoch"] = current_epoch if val_metrics is not None: - dpo_save_state["val_loss"] = val_metrics["loss"] + dpo_save_state.update(val_metrics) elif "val_loss" in dpo_save_state: del dpo_save_state["val_loss"] diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index b0696075d1..05b2dcb1ec 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -244,16 +244,22 @@ def validate( val_mbs: int, logger: Logger, ): + val_metrics, validation_timings = {}, {} for k, v in val_dataloader.items(): k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs, k) if k == "validation": - prefix = "validation" + prefix = "val" else: - prefix = 
f"validation-{k}" + prefix = f"{k}-val" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") - return None, None + + val_metrics[prefix+"_loss"] = k_val_metrics["val_loss"] + val_metrics[prefix+"_accuracy"] = k_val_metrics["accuracy"] + validation_timings[prefix+"_total_validation_time"] = k_validation_timings["total_validation_time"] + + return val_metrics, validation_timings def validate_one_dataset( @@ -352,7 +358,7 @@ def validate_one_dataset( if num_valid_batches > 0: # Print summary of validation results - print(f"\nšŸ“Š Validation Results for {dataset_name}:") + print(f"\nšŸ“Š Validation Results for `{dataset_name}` set:") print(f" • Validation loss: {val_metrics['val_loss']:.4f}") print(f" • Validation accuracy: {val_metrics['accuracy']:.4f}") print( @@ -366,7 +372,7 @@ def validate_one_dataset( ) # Print timing information - print(f"\n ā±ļø Validation Timing for {dataset_name}:") + print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") validation_time = timing_metrics.get("total_validation_time", 0) print(f" • Total validation time: {validation_time:.2f}s") @@ -489,7 +495,7 @@ def rm_train( rm_save_state["total_steps"] = total_steps + 1 rm_save_state["epoch"] = current_epoch if val_metrics is not None: - rm_save_state["val_loss"] = val_metrics["val_loss"] + rm_save_state.update(val_metrics) elif "val_loss" in rm_save_state: del rm_save_state["val_loss"] From 5aba6d6468dd1e79481e3fcdab5a412f807e3c43 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 12:06:30 -0700 Subject: [PATCH 32/47] pull from main Signed-off-by: Julien Veron Vialard --- .dockerignore | 4 +- .github/actions/test-template/action.yml | 5 + .../_automodel_integration_check.yml | 259 ++ .github/workflows/_submodule_check.yml | 5 +- .github/workflows/cicd-main.yml | 17 +- .gitignore | 3 +- .gitmodules | 7 +- .pre-commit-config.yaml | 19 +- 3rdparty/Automodel-workspace/Automodel | 1 + 
3rdparty/NeMo-workspace/NeMo | 2 +- README.md | 76 +- docker/Dockerfile | 22 +- docker/Dockerfile.ngc_pytorch | 128 + docker/README.md | 4 +- docs/adding-new-models.md | 39 + docs/assets/fp8_curves.png | Bin 0 -> 351315 bytes docs/conf.py | 51 + docs/design-docs/generation.md | 2 +- docs/docker.md | 37 +- docs/fp8.md | 39 + docs/guides/eval.md | 2 +- docs/guides/grpo-deepscaler.md | 18 +- docs/guides/grpo.md | 2 +- docs/index.md | 1 + docs/model-quirks.md | 15 +- docs/nsys-profiling.md | 17 +- docs/testing.md | 12 +- examples/configs/dpo.yaml | 7 +- examples/configs/grpo_math_1B.yaml | 71 +- examples/configs/grpo_math_1B_megatron.yaml | 2 + examples/configs/grpo_math_8B_megatron.yaml | 1 + .../configs/grpo_math_8B_megatron_fp8.yaml | 13 + .../grpo_math_qwen30ba3b_megatron.yaml | 2 +- examples/configs/grpo_sliding_puzzle.yaml | 2 + ....1-8b-instruct-4n8g-fsdp2tp2-quick.v2.yaml | 3 +- ...o-llama3.1-8b-instruct-4n8g-fsdp2tp4.yaml} | 11 +- ...lama3.1-8b-instruct-4n8g-megatron.v2.yaml} | 11 +- ...8b-instruct-4n8g-megatrontp2pp2-quick.yaml | 3 +- .../dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.yaml | 51 + ...llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.yaml | 4 +- ...truct-2407-1n8g-fsdp2tp8-actckpt-long.yaml | 106 + .../llm}/grpo-deepscaler-1.5b-16K.yaml | 1 + .../llm}/grpo-deepscaler-1.5b-24K.yaml | 9 +- .../llm}/grpo-deepscaler-1.5b-8K.yaml | 9 +- .../llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.yaml | 3 + ...-27b-it-16n8g-fsdp2tp8sp-actckpt-long.yaml | 3 + .../llm/grpo-gspo-deepscaler-1.5b-8K.yaml | 146 + ...lama3.1-8b-instruct-1n8g-megatron-fp8.yaml | 161 + ...3.1-8b-instruct-4n8g-fsdp2tp1-long.v3.yaml | 3 + ...llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.yaml | 3 + ...po-llama3.2-1b-instruct-1n8g-megatron.yaml | 159 + ...po-math-qwen3-30ba3b-megatron-tp4-32k.yaml | 168 + .../grpo-moonlight-16ba3b-4n8g-megatron.yaml | 169 + ...-32b-32n8g-fsdp2tp8sp-actckpt-long.v3.yaml | 3 + ...en2.5-32b-32n8g-fsdp2tp8sp-actckpt.v3.yaml | 3 + ...wen2.5-7b-instruct-4n8g-fsdp2tp4sp.v3.yaml | 3 + 
...rpo-qwen2.5-7b-instruct-4n8g-megatron.yaml | 181 + ...5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.yaml | 3 + .../llm/grpo-qwen3-30ba3b-8n8g-megatron.yaml | 154 + ...lama3.1-70b-8n8g-tp4pp2-long-megatron.yaml | 133 + ...lama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.yaml | 80 + ...> sft-llama3.1-8b-1n8g-fsdp2tp1-long.yaml} | 45 +- ...l => sft-llama3.1-8b-1n8g-fsdp2tp2sp.yaml} | 34 +- ...sft-llama3.1-8b-1n8g-megatron-seqpack.yaml | 125 + ...aml => sft-llama3.1-8b-1n8g-megatron.yaml} | 44 +- ... => sft-llama3.2-1b-1n8g-fsdp2tp1.v3.yaml} | 17 +- ...en2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.yaml} | 16 +- ...3b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml | 173 + ...2b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml | 173 + examples/configs/rm.yaml | 2 +- examples/configs/sft.yaml | 13 +- examples/configs/sft_openmathinstruct2.yaml | 9 +- .../sft_openmathinstruct2_megatron.yaml | 149 + examples/configs/sft_vlm_3B.yaml | 49 + examples/configs/vlm_grpo_3B.yaml | 174 + examples/prompts/clevr_cogent_cot.txt | 5 + examples/prompts/geo3k.txt | 5 + examples/prompts/refcoco.txt | 5 + examples/run_dpo.py | 7 + examples/run_grpo_math.py | 7 +- examples/run_grpo_sliding_puzzle.py | 4 +- examples/run_sft.py | 36 +- examples/run_vlm_grpo.py | 393 ++ examples/run_vlm_sft.py | 18 + nemo_rl/algorithms/dpo.py | 57 +- nemo_rl/algorithms/grpo.py | 56 +- nemo_rl/algorithms/loss_functions.py | 92 +- nemo_rl/algorithms/rm.py | 2 +- nemo_rl/algorithms/sft.py | 54 +- nemo_rl/algorithms/utils.py | 57 +- nemo_rl/data/__init__.py | 3 + nemo_rl/data/datasets.py | 20 +- nemo_rl/data/hf_datasets/__init__.py | 4 + nemo_rl/data/hf_datasets/clevr.py | 141 + nemo_rl/data/hf_datasets/geometry3k.py | 101 + nemo_rl/data/hf_datasets/oasst.py | 4 +- nemo_rl/data/hf_datasets/refcoco.py | 262 ++ nemo_rl/data/hf_datasets/tulu3.py | 67 + nemo_rl/data/llm_message_utils.py | 182 +- nemo_rl/data/multimodal_utils.py | 163 + nemo_rl/data/processors.py | 4 +- nemo_rl/distributed/batched_data_dict.py | 57 +- nemo_rl/distributed/model_utils.py | 279 
+- .../ray_actor_environment_registry.py | 19 +- nemo_rl/distributed/virtual_cluster.py | 3 + nemo_rl/distributed/worker_group_utils.py | 1 + nemo_rl/environments/rewards.py | 173 + nemo_rl/environments/vlm_environment.py | 252 ++ nemo_rl/evals/eval.py | 5 +- nemo_rl/experience/rollouts.py | 12 + nemo_rl/models/dtensor/parallelize.py | 310 +- nemo_rl/models/generation/fp8.py | 574 +++ nemo_rl/models/generation/vllm.py | 2023 ----------- nemo_rl/models/generation/vllm/__init__.py | 24 + nemo_rl/models/generation/vllm/config.py | 35 + nemo_rl/models/generation/vllm/utils.py | 81 + .../generation/{ => vllm}/vllm_backend.py | 42 +- .../models/generation/vllm/vllm_generation.py | 784 ++++ nemo_rl/models/generation/vllm/vllm_worker.py | 849 +++++ .../generation/vllm/vllm_worker_async.py | 561 +++ nemo_rl/models/huggingface/common.py | 6 - nemo_rl/models/megatron/common.py | 11 + nemo_rl/models/megatron/community_import.py | 7 +- nemo_rl/models/policy/__init__.py | 5 + .../models/policy/dtensor_policy_worker.py | 204 +- .../models/policy/dtensor_policy_worker_v2.py | 1413 ++++++++ nemo_rl/models/policy/lm_policy.py | 58 +- .../models/policy/megatron_policy_worker.py | 95 +- nemo_rl/models/policy/utils.py | 70 +- nemo_rl/package_info.py | 2 +- nemo_rl/utils/checkpoint.py | 1 + nemo_rl/utils/flops_formulas.py | 553 +++ nemo_rl/utils/flops_tracker.py | 142 + nemo_rl/utils/logger.py | 4 +- nemo_rl/utils/native_checkpoint.py | 3 + nemo_rl/utils/nsys.py | 16 + nemo_rl/utils/timer.py | 74 + pyproject.toml | 27 +- pyrefly.toml | 12 +- ray.sub | 53 +- tests/check_metrics.py | 28 +- tests/functional/L1_Functional_Tests_GPU.sh | 1 + tests/functional/dpo_megatron.sh | 45 + tests/functional/eval.sh | 2 +- tests/functional/eval_async.sh | 2 +- tests/functional/grpo.sh | 2 +- tests/functional/grpo_megatron.sh | 2 +- tests/functional/grpo_multiturn.sh | 2 +- tests/functional/grpo_non_colocated.sh | 2 +- tests/functional/sft.sh | 2 +- tests/functional/sft_megatron.sh | 45 + 
tests/functional/vlm_grpo.sh | 42 + tests/json_dump_tb_logs.py | 20 +- tests/test_suites/README.md | 4 + ...a3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.sh | 3 +- ...dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.sh} | 3 +- ...-llama3.1-8b-instruct-4n8g-megatron.v2.sh} | 3 +- ...1-8b-instruct-4n8g-megatrontp2pp2-quick.sh | 3 +- .../dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.sh | 43 + ...o-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.sh | 3 +- ...nstruct-2407-1n8g-fsdp2tp8-actckpt-long.sh | 40 + .../llm/grpo-deepscaler-1.5b-16K.sh | 69 + .../llm/grpo-deepscaler-1.5b-24K.sh | 68 + .../llm/grpo-deepscaler-1.5b-8K.sh | 123 + .../llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.sh | 7 +- .../llm/grpo-gspo-deepscaler-1.5b-8K.sh | 67 + ...-llama3.1-8b-instruct-1n8g-megatron-fp8.sh | 40 + ...o-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.sh | 3 +- ...grpo-llama3.2-1b-instruct-1n8g-megatron.sh | 42 + ...grpo-math-qwen3-30ba3b-megatron-tp4-32k.sh | 39 + .../grpo-moonlight-16ba3b-4n8g-megatron.sh | 41 + .../grpo-qwen2.5-7b-instruct-4n8g-megatron.sh | 41 + ...2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.sh | 3 +- .../llm/grpo-qwen3-30ba3b-8n8g-megatron.sh | 40 + ...-llama3.1-70b-8n8g-tp4pp2-long-megatron.sh | 42 + ...llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.sh} | 13 +- .../llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.sh | 42 + ....sh => sft-llama3.1-8b-1n8g-fsdp2tp2sp.sh} | 12 +- ... 
sft-llama3.1-8b-1n8g-megatron-seqpack.sh} | 8 +- .../llm/sft-llama3.1-8b-1n8g-megatron.sh | 39 + ...sh => sft-llama3.2-1b-1n8g-fsdp2tp1.v3.sh} | 11 +- ...qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.sh} | 2 +- tests/test_suites/nightly.txt | 39 +- tests/test_suites/release.txt | 16 +- tests/test_suites/vlm/common.env | 1 + ...l-3b-instruct-clevr-1n2g-dtensor2tp1.v1.sh | 40 + ...2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.sh | 40 + tests/unit/L0_Unit_Tests_Generation.sh | 2 + tests/unit/L0_Unit_Tests_Other.sh | 2 + tests/unit/L0_Unit_Tests_Policy.sh | 2 + tests/unit/algorithms/test_loss_functions.py | 349 +- tests/unit/algorithms/test_sft.py | 6 +- tests/unit/conftest.py | 52 +- .../data/test_data_shuffle_reproducity.py | 151 + tests/unit/data/test_llm_message_utils.py | 452 ++- tests/unit/data/test_multimodal_dict.py | 318 ++ .../distributed/test_batched_data_dict.py | 77 + tests/unit/distributed/test_model_utils.py | 54 +- .../models/generation/test_vllm_generation.py | 328 +- .../generation/test_vllm_large_model.py | 9 - tests/unit/models/huggingface/test_common.py | 2 - .../converters/test_converters_common.py | 252 ++ .../unit/models/policy/test_dtensor_worker.py | 118 +- .../models/policy/test_dtensor_worker_v2.py | 243 ++ .../models/policy/test_megatron_worker.py | 92 +- tests/unit/models/policy/test_utils.py | 139 - tests/unit/prepare_unit_test_assets.py | 98 + tests/unit/rewards/test_rewards.py | 259 ++ tests/unit/test_config_validation.py | 3 + tests/unit/test_recipes_and_test_suites.py | 14 +- tests/unit/utils/test_native_checkpoint.py | 11 - tests/unit/utils/test_timer.py | 47 +- tools/code_snapshot.sh | 16 +- tools/launch | 1 + .../3.check_hf_model_embeddings_untrained.py | 288 ++ uv.lock | 3157 ++++++++++------- 216 files changed, 17477 insertions(+), 4330 deletions(-) create mode 100644 .github/workflows/_automodel_integration_check.yml create mode 160000 3rdparty/Automodel-workspace/Automodel create mode 100644 docker/Dockerfile.ngc_pytorch create mode 100644 
docs/assets/fp8_curves.png create mode 100644 docs/fp8.md create mode 100644 examples/configs/grpo_math_8B_megatron_fp8.yaml rename examples/configs/recipes/llm/{dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.yaml => dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.yaml} (89%) rename examples/configs/recipes/llm/{dpo-llama3.1-8b-instruct-4n8g-megatron.yaml => dpo-llama3.1-8b-instruct-4n8g-megatron.v2.yaml} (92%) create mode 100644 examples/configs/recipes/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.yaml create mode 100644 examples/configs/recipes/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.yaml rename examples/configs/{ => recipes/llm}/grpo-deepscaler-1.5b-16K.yaml (94%) rename examples/configs/{ => recipes/llm}/grpo-deepscaler-1.5b-24K.yaml (79%) rename examples/configs/{ => recipes/llm}/grpo-deepscaler-1.5b-8K.yaml (91%) create mode 100644 examples/configs/recipes/llm/grpo-gspo-deepscaler-1.5b-8K.yaml create mode 100644 examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.yaml create mode 100755 examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.yaml create mode 100644 examples/configs/recipes/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.yaml create mode 100644 examples/configs/recipes/llm/grpo-moonlight-16ba3b-4n8g-megatron.yaml create mode 100755 examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.yaml create mode 100755 examples/configs/recipes/llm/grpo-qwen3-30ba3b-8n8g-megatron.yaml create mode 100644 examples/configs/recipes/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.yaml create mode 100644 examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.yaml rename examples/configs/recipes/llm/{sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.yaml => sft-llama3.1-8b-1n8g-fsdp2tp1-long.yaml} (61%) rename examples/configs/recipes/llm/{sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.yaml => sft-llama3.1-8b-1n8g-fsdp2tp2sp.yaml} (64%) create mode 100644 
examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.yaml rename examples/configs/recipes/llm/{sft-llama3.1-8b-instruct-1n8g-megatron.yaml => sft-llama3.1-8b-1n8g-megatron.yaml} (71%) rename examples/configs/recipes/llm/{sft-llama3.2-1b-1n8g-fsdp2tp1.v2.yaml => sft-llama3.2-1b-1n8g-fsdp2tp1.v3.yaml} (85%) rename examples/configs/recipes/llm/{sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.yaml => sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.yaml} (85%) create mode 100644 examples/configs/recipes/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml create mode 100644 examples/configs/recipes/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml create mode 100644 examples/configs/sft_openmathinstruct2_megatron.yaml create mode 100644 examples/configs/sft_vlm_3B.yaml create mode 100644 examples/configs/vlm_grpo_3B.yaml create mode 100644 examples/prompts/clevr_cogent_cot.txt create mode 100644 examples/prompts/geo3k.txt create mode 100644 examples/prompts/refcoco.txt create mode 100644 examples/run_vlm_grpo.py create mode 100644 examples/run_vlm_sft.py create mode 100644 nemo_rl/data/hf_datasets/clevr.py create mode 100644 nemo_rl/data/hf_datasets/geometry3k.py create mode 100644 nemo_rl/data/hf_datasets/refcoco.py create mode 100644 nemo_rl/data/hf_datasets/tulu3.py create mode 100644 nemo_rl/data/multimodal_utils.py create mode 100644 nemo_rl/environments/rewards.py create mode 100644 nemo_rl/environments/vlm_environment.py create mode 100644 nemo_rl/models/generation/fp8.py delete mode 100644 nemo_rl/models/generation/vllm.py create mode 100644 nemo_rl/models/generation/vllm/__init__.py create mode 100644 nemo_rl/models/generation/vllm/config.py create mode 100644 nemo_rl/models/generation/vllm/utils.py rename nemo_rl/models/generation/{ => vllm}/vllm_backend.py (80%) create mode 100644 nemo_rl/models/generation/vllm/vllm_generation.py create mode 100644 nemo_rl/models/generation/vllm/vllm_worker.py create mode 100644 
nemo_rl/models/generation/vllm/vllm_worker_async.py create mode 100644 nemo_rl/models/policy/dtensor_policy_worker_v2.py create mode 100644 nemo_rl/utils/flops_formulas.py create mode 100644 nemo_rl/utils/flops_tracker.py create mode 100755 tests/functional/dpo_megatron.sh create mode 100755 tests/functional/sft_megatron.sh create mode 100755 tests/functional/vlm_grpo.sh rename tests/test_suites/llm/{dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.sh => dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.sh} (91%) rename tests/test_suites/llm/{dpo-llama3.1-8b-instruct-4n8g-megatron.sh => dpo-llama3.1-8b-instruct-4n8g-megatron.v2.sh} (91%) create mode 100755 tests/test_suites/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.sh create mode 100755 tests/test_suites/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.sh create mode 100755 tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh create mode 100755 tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh create mode 100755 tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh create mode 100755 tests/test_suites/llm/grpo-gspo-deepscaler-1.5b-8K.sh create mode 100755 tests/test_suites/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.sh create mode 100755 tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.sh create mode 100755 tests/test_suites/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.sh create mode 100755 tests/test_suites/llm/grpo-moonlight-16ba3b-4n8g-megatron.sh create mode 100755 tests/test_suites/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.sh create mode 100755 tests/test_suites/llm/grpo-qwen3-30ba3b-8n8g-megatron.sh create mode 100755 tests/test_suites/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.sh rename tests/test_suites/llm/{sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.sh => sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.sh} (81%) create mode 100755 tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.sh rename tests/test_suites/llm/{sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.sh => 
sft-llama3.1-8b-1n8g-fsdp2tp2sp.sh} (83%) rename tests/test_suites/llm/{sft-llama3.1-8b-instruct-1n8g-megatron.sh => sft-llama3.1-8b-1n8g-megatron-seqpack.sh} (87%) create mode 100755 tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron.sh rename tests/test_suites/llm/{sft-llama3.2-1b-1n8g-fsdp2tp1.v2.sh => sft-llama3.2-1b-1n8g-fsdp2tp1.v3.sh} (82%) rename tests/test_suites/llm/{sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.sh => sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.sh} (96%) create mode 120000 tests/test_suites/vlm/common.env create mode 100755 tests/test_suites/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.sh create mode 100755 tests/test_suites/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.sh create mode 100644 tests/unit/data/test_data_shuffle_reproducity.py create mode 100644 tests/unit/data/test_multimodal_dict.py create mode 100755 tests/unit/models/megatron/converters/test_converters_common.py create mode 100644 tests/unit/models/policy/test_dtensor_worker_v2.py create mode 100644 tests/unit/prepare_unit_test_assets.py create mode 100644 tests/unit/rewards/test_rewards.py create mode 100755 tools/model_diagnostics/3.check_hf_model_embeddings_untrained.py diff --git a/.dockerignore b/.dockerignore index a5aa48cb04..8e4e560ff5 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,8 @@ # Adding to .gitignore helps reduce the size of your working_dir -.git +# Note: removing .git from .dockerignore since it is valuable to have the git history to +# know where this container was built +# .git *.out *.log *.tar diff --git a/.github/actions/test-template/action.yml b/.github/actions/test-template/action.yml index 3e16304fcf..54ed323466 100644 --- a/.github/actions/test-template/action.yml +++ b/.github/actions/test-template/action.yml @@ -132,6 +132,11 @@ runs: ls -al $MNT_PATH/TestData + - name: Docker system cleanup + shell: bash + run: | + docker system prune -af --filter "until=48h" --force || true + - name: Docker pull 
image shell: bash run: | diff --git a/.github/workflows/_automodel_integration_check.yml b/.github/workflows/_automodel_integration_check.yml new file mode 100644 index 0000000000..757f2fbb81 --- /dev/null +++ b/.github/workflows/_automodel_integration_check.yml @@ -0,0 +1,259 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: "Automodel Integration Files Consistency Check" + +on: + workflow_call: + inputs: + base_ref: + required: true + type: string + description: "Target branch to check against" + head_ref: + required: true + type: string + description: "Feature branch name" + pr_number: + required: true + type: string + description: "Pull request number" + head_sha: + required: true + type: string + description: "Head commit SHA of the feature branch" + +jobs: + check: + name: Related FilesSynchronization Check + runs-on: ubuntu-latest + outputs: + needs_attention: ${{ steps.check.outputs.needs_attention }} + comment_body: ${{ steps.check.outputs.comment_body }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Fetch target branch reference + run: | + git fetch origin ${{ inputs.base_ref }} + + - name: Check parallel plans sync status + id: check + shell: bash -x -e {0} + run: | + echo "Checking if parallel plan files are synchronized..." 
+ + # Define the file paths + PARALLELIZE_FILE="nemo_rl/models/dtensor/parallelize.py" + OPTIMIZED_TP_PLANS_FILE="3rdparty/Automodel-workspace/Automodel/nemo_automodel/components/distributed/optimized_tp_plans.py" + PARALLELIZER_FILE="3rdparty/Automodel-workspace/Automodel/nemo_automodel/components/distributed/parallelizer.py" + + needs_attention=0 + comment_body="" + + # Check if parallelize.py was modified in this PR + if git diff --name-only origin/${{ inputs.base_ref }}..HEAD | grep -q "^${PARALLELIZE_FILE}$"; then + echo "āœ… Found changes in ${PARALLELIZE_FILE}" + parallelize_changed=1 + else + echo "ā„¹ļø No changes found in ${PARALLELIZE_FILE}" + parallelize_changed=0 + fi + + # Check if optimized_tp_plans.py was modified in this PR + if git diff --name-only origin/${{ inputs.base_ref }}..HEAD | grep -q "^${OPTIMIZED_TP_PLANS_FILE}$"; then + echo "āœ… Found changes in ${OPTIMIZED_TP_PLANS_FILE}" + optimized_tp_plans_changed=1 + else + echo "ā„¹ļø No changes found in ${OPTIMIZED_TP_PLANS_FILE}" + optimized_tp_plans_changed=0 + fi + + # Check if parallelizer.py was modified in this PR + if git diff --name-only origin/${{ inputs.base_ref }}..HEAD | grep -q "^${PARALLELIZER_FILE}$"; then + echo "āœ… Found changes in ${PARALLELIZER_FILE}" + parallelizer_changed=1 + else + echo "ā„¹ļø No changes found in ${PARALLELIZER_FILE}" + parallelizer_changed=0 + fi + + # Check if all files exist + if [[ ! -f "$PARALLELIZE_FILE" ]]; then + echo "āš ļø Warning: ${PARALLELIZE_FILE} does not exist" + fi + + if [[ ! -f "$OPTIMIZED_TP_PLANS_FILE" ]]; then + echo "āš ļø Warning: ${OPTIMIZED_TP_PLANS_FILE} does not exist" + fi + + if [[ ! 
-f "$PARALLELIZER_FILE" ]]; then + echo "āš ļø Warning: ${PARALLELIZER_FILE} does not exist" + fi + + # Analyze the relationship between the changes + # Success case: If parallelize.py is changed, either optimized_tp_plans.py OR parallelizer.py should also be changed + if [[ $parallelize_changed -eq 1 && $optimized_tp_plans_changed -eq 0 && $parallelizer_changed -eq 0 ]]; then + echo "āŒ parallelize.py was modified but neither optimized_tp_plans.py nor parallelizer.py was updated" + needs_attention=1 + comment_body+="### āš ļø Parallel Plans Synchronization Warning"$'\n\n' + comment_body+="The file \`${PARALLELIZE_FILE}\` was modified in this PR, but neither \`${OPTIMIZED_TP_PLANS_FILE}\` nor \`${PARALLELIZER_FILE}\` was updated."$'\n\n' + comment_body+="**Why this matters:**"$'\n' + comment_body+="These files contain similar parallel plan implementations that should be kept synchronized to ensure consistency across the codebase."$'\n\n' + comment_body+="**Action required:**"$'\n' + comment_body+="- Please review if the changes in \`${PARALLELIZE_FILE}\` should also be applied to \`${OPTIMIZED_TP_PLANS_FILE}\` or \`${PARALLELIZER_FILE}\`"$'\n' + comment_body+="- Update the appropriate related file(s) if necessary to maintain functional consistency"$'\n' + comment_body+="- Request access to the [NVIDIA-NeMo/Automodel](https://github.com/NVIDIA-NeMo/Automodel/) repository, create a PR against the \`nemo-rl-submodule\` branch, and update the Automodel submodule in the nemo-rl index"$'\n' + comment_body+="- Add @ffrujeri as a reviewer of this PR if you have any questions about the consistency requirements"$'\n' + comment_body+="- If the files are intentionally different, please add a comment in the PR explaining why"$'\n\n' + comment_body+="**Files to check:**"$'\n' + comment_body+="- Modified: \`${PARALLELIZE_FILE}\`"$'\n' + comment_body+="- Not modified: \`${OPTIMIZED_TP_PLANS_FILE}\`"$'\n' + comment_body+="- Not modified: \`${PARALLELIZER_FILE}\`"$'\n\n' + + else 
+ echo "ā„¹ļø No consistency issues detected for parallel plan files" + # Don't set comment_body in this case to avoid unnecessary comments + fi + + echo "" + echo "Checking if dtensor policy worker files are synchronized..." + + # Define the dtensor policy worker file paths + DTENSOR_POLICY_WORKER_FILE="nemo_rl/models/policy/dtensor_policy_worker.py" + DTENSOR_POLICY_WORKER_V2_FILE="nemo_rl/models/policy/dtensor_policy_worker_v2.py" + + # Check if dtensor_policy_worker.py was modified in this PR + if git diff --name-only origin/${{ inputs.base_ref }}..HEAD | grep -q "^${DTENSOR_POLICY_WORKER_FILE}$"; then + echo "āœ… Found changes in ${DTENSOR_POLICY_WORKER_FILE}" + dtensor_worker_changed=1 + else + echo "ā„¹ļø No changes found in ${DTENSOR_POLICY_WORKER_FILE}" + dtensor_worker_changed=0 + fi + + # Check if dtensor_policy_worker_v2.py was modified in this PR + if git diff --name-only origin/${{ inputs.base_ref }}..HEAD | grep -q "^${DTENSOR_POLICY_WORKER_V2_FILE}$"; then + echo "āœ… Found changes in ${DTENSOR_POLICY_WORKER_V2_FILE}" + dtensor_worker_v2_changed=1 + else + echo "ā„¹ļø No changes found in ${DTENSOR_POLICY_WORKER_V2_FILE}" + dtensor_worker_v2_changed=0 + fi + + # Check if both dtensor policy worker files exist + if [[ ! -f "$DTENSOR_POLICY_WORKER_FILE" ]]; then + echo "āš ļø Warning: ${DTENSOR_POLICY_WORKER_FILE} does not exist" + fi + + if [[ ! 
-f "$DTENSOR_POLICY_WORKER_V2_FILE" ]]; then + echo "āš ļø Warning: ${DTENSOR_POLICY_WORKER_V2_FILE} does not exist" + fi + + # Analyze the relationship between the dtensor policy worker changes + if [[ $dtensor_worker_changed -eq 1 && $dtensor_worker_v2_changed -eq 0 ]]; then + echo "āŒ dtensor_policy_worker.py was modified but dtensor_policy_worker_v2.py was not updated" + needs_attention=1 + comment_body+="### āš ļø DTensor Policy Worker Synchronization Warning"$'\n\n' + comment_body+="The file \`${DTENSOR_POLICY_WORKER_FILE}\` was modified in this PR, but \`${DTENSOR_POLICY_WORKER_V2_FILE}\` was not updated."$'\n\n' + comment_body+="**Why this matters:**"$'\n' + comment_body+="These files contain related DTensor policy worker implementations that should be kept synchronized to ensure consistency across different versions."$'\n\n' + comment_body+="**Action required:**"$'\n' + comment_body+="- Please review if the changes in \`${DTENSOR_POLICY_WORKER_FILE}\` should also be applied to \`${DTENSOR_POLICY_WORKER_V2_FILE}\`"$'\n' + comment_body+="- Update \`${DTENSOR_POLICY_WORKER_V2_FILE}\` if necessary to maintain consistency"$'\n' + comment_body+="- If the files are intentionally different, please add a comment in the PR explaining why"$'\n\n' + comment_body+="**Files to check:**"$'\n' + comment_body+="- Modified: \`${DTENSOR_POLICY_WORKER_FILE}\`"$'\n' + comment_body+="- Not modified: \`${DTENSOR_POLICY_WORKER_V2_FILE}\`"$'\n\n' + + elif [[ $dtensor_worker_changed -eq 0 && $dtensor_worker_v2_changed -eq 1 ]]; then + echo "āŒ dtensor_policy_worker_v2.py was modified but dtensor_policy_worker.py was not updated" + needs_attention=1 + comment_body+="### āš ļø DTensor Policy Worker Synchronization Warning"$'\n\n' + comment_body+="The file \`${DTENSOR_POLICY_WORKER_V2_FILE}\` was modified in this PR, but \`${DTENSOR_POLICY_WORKER_FILE}\` was not updated."$'\n\n' + comment_body+="**Why this matters:**"$'\n' + comment_body+="These files contain related DTensor 
policy worker implementations that should be kept synchronized to ensure consistency across different versions."$'\n\n' + comment_body+="**Action required:**"$'\n' + comment_body+="- Please review if the changes in \`${DTENSOR_POLICY_WORKER_V2_FILE}\` should also be applied to \`${DTENSOR_POLICY_WORKER_FILE}\`"$'\n' + comment_body+="- Update \`${DTENSOR_POLICY_WORKER_FILE}\` if necessary to maintain consistency"$'\n' + comment_body+="- If the files are intentionally different, please add a comment in the PR explaining why"$'\n\n' + comment_body+="**Files to check:**"$'\n' + comment_body+="- Modified: \`${DTENSOR_POLICY_WORKER_V2_FILE}\`"$'\n' + comment_body+="- Not modified: \`${DTENSOR_POLICY_WORKER_FILE}\`"$'\n\n' + + elif [[ $dtensor_worker_changed -eq 1 && $dtensor_worker_v2_changed -eq 1 ]]; then + echo "āœ… Both DTensor policy worker files were modified" + comment_body+="### āœ… DTensor Policy Worker Synchronization Check"$'\n\n' + comment_body+="Both DTensor policy worker files were modified in this PR:"$'\n' + comment_body+="- \`${DTENSOR_POLICY_WORKER_FILE}\`"$'\n' + comment_body+="- \`${DTENSOR_POLICY_WORKER_V2_FILE}\`"$'\n\n' + comment_body+="Please ensure that the changes are consistent between both files where applicable."$'\n\n' + + else + echo "ā„¹ļø No DTensor policy worker files were modified in this PR" + # Don't set comment_body in this case to avoid unnecessary comments + fi + + # Set outputs + echo "needs_attention=$needs_attention" >> $GITHUB_OUTPUT + if [[ -n "$comment_body" ]]; then + echo "comment_body<> $GITHUB_OUTPUT + echo "$comment_body" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + fi + + if [[ $needs_attention -eq 1 ]]; then + echo "" + echo "āš ļø Files consistency needs attention" + echo "Please review the changes and ensure related files are properly synchronized" + else + echo "" + echo "āœ… File consistency check completed" + fi + + comment: + name: Comment on PR + needs: [check] + runs-on: ubuntu-latest + if: always() && 
needs.check.outputs.comment_body != '' + steps: + - name: Comment on PR + uses: actions/github-script@v7 + with: + script: | + const needsAttention = ${{ toJSON(needs.check.outputs.needs_attention) }} === '1'; + const title = needsAttention ? + '## āš ļø File Consistency Check' : + '## ā„¹ļø File Consistency Check'; + + const headSha = ${{ toJSON(inputs.head_sha) }}; + const prNumber = ${{ toJSON(inputs.pr_number) }}; + const headRef = ${{ toJSON(inputs.head_ref) }}; + const checkOutputs = ${{ toJSON(needs.check.outputs.comment_body) }}; + + const commentBody = title + '\n\n' + + '**Check based on commit:** ' + headSha + ' (PR #' + prNumber + ' from `' + headRef + '`)\n\n' + + checkOutputs + '\n\n' + + '---\n' + + 'This check ensures that related file implementations remain synchronized across the codebase. If you believe this warning is incorrect or the files should intentionally differ, please add a comment explaining the reasoning.'; + + await github.rest.issues.createComment({ + issue_number: parseInt(prNumber), + owner: context.repo.owner, + repo: context.repo.repo, + body: commentBody + }); \ No newline at end of file diff --git a/.github/workflows/_submodule_check.yml b/.github/workflows/_submodule_check.yml index 6930432c2c..bc6c580538 100644 --- a/.github/workflows/_submodule_check.yml +++ b/.github/workflows/_submodule_check.yml @@ -86,8 +86,9 @@ jobs: target_commit=$(git ls-tree origin/${{ inputs.base_ref }} "$submodule_path" | awk '{print $3}') if [[ -z "$target_commit" ]]; then - echo "āŒ Could not find $submodule_name in ${{ inputs.base_ref }} branch" - failed=1 + echo "āœ… $submodule_name: New submodule being added (not present in ${{ inputs.base_ref }} branch)" + changed=1 + success_body+="$submodule_name: āœ… New submodule being added"$'\n' continue fi diff --git a/.github/workflows/cicd-main.yml b/.github/workflows/cicd-main.yml index 12fedda72b..e3f546c3f4 100644 --- a/.github/workflows/cicd-main.yml +++ b/.github/workflows/cicd-main.yml @@ 
-114,6 +114,17 @@ jobs: pr_number: ${{ github.event.number }} head_sha: ${{ github.event.pull_request.head.sha }} + automodel-integration-check: + name: Check if changes in nemo-automodel are in sync with nemo-rl and vice versa + needs: [pre-flight] + if: github.event_name == 'pull_request' + uses: ./.github/workflows/_automodel_integration_check.yml + with: + base_ref: ${{ github.base_ref }} + head_ref: ${{ github.head_ref }} + pr_number: ${{ github.event.number }} + head_sha: ${{ github.event.pull_request.head.sha }} + lint-check: name: Lint check needs: [pre-flight] @@ -162,13 +173,15 @@ jobs: build-container: if: ${{ needs.pre-flight.outputs.test_level != 'none' }} needs: [pre-flight] - uses: NVIDIA-NeMo/FW-CI-templates/.github/workflows/_build_container.yml@v0.30.0 + uses: NVIDIA-NeMo/FW-CI-templates/.github/workflows/_build_container.yml@v0.52.0 with: build-ref: ${{ github.sha }} image-name: nemo_rl_container dockerfile: docker/Dockerfile image-label: nemo-rl target: hermetic + build-contexts: | + nemo-rl=${{ github.run_id }}/ build-args: | MAX_JOBS=32 NEMO_RL_COMMIT=${{ github.sha }} @@ -254,6 +267,7 @@ jobs: - pre-flight - lint-check - sphinx-build + - build-container - cicd-doc-tests - cicd-unit-tests - cicd-functional-tests @@ -270,6 +284,7 @@ jobs: ( needs.pre-flight.outputs.test_level != 'none' && needs.sphinx-build.result == 'success' && + needs.build-container.result == 'success' && ( ( (needs.cicd-doc-tests.result == 'skipped' || needs.cicd-doc-tests.result == 'success') && diff --git a/.gitignore b/.gitignore index 55a992fece..acb3116f06 100644 --- a/.gitignore +++ b/.gitignore @@ -34,11 +34,12 @@ hf_datasets_cache/ datasets/ docker/* !docker/Dockerfile +!docker/Dockerfile.ngc_pytorch !docker/README.md wandb/ checkpoints/ results/ -code_snapshots/ +code_snapshots*/ # Runtime env *runtime_env.yaml diff --git a/.gitmodules b/.gitmodules index 09342d3495..307a59361d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,10 +1,15 @@ [submodule 
"3rdparty/NeMo"] path = 3rdparty/NeMo-workspace/NeMo url = https://github.com/NVIDIA/NeMo.git - branch = zhiyul/yukih/prepare-refit-info + branch = pjin/ashors/rl-qwen3-export shallow = true [submodule "3rdparty/Megatron-LM"] path = 3rdparty/Megatron-LM-workspace/Megatron-LM url = https://github.com/terrykong/Megatron-LM.git branch = sahilj/megatron-external-loss-norm shallow = true +[submodule "3rdparty/Automodel-workspace/Automodel"] + path = 3rdparty/Automodel-workspace/Automodel + url = https://github.com/NVIDIA-NeMo/Automodel.git + branch = nemo-rl-submodule + shallow = true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d22fdd475a..7d1a05182d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,11 +3,9 @@ repos: rev: v4.4.0 hooks: - id: end-of-file-fixer - # only include python files - files: \.py$ + types_or: [python, pyi] # Only include Python files. - id: trailing-whitespace - # only include python files - files: \.py$ + types_or: [python, pyi] # Only include Python files. - repo: https://github.com/astral-sh/ruff-pre-commit rev: "v0.9.9" # Use the appropriate version @@ -36,8 +34,15 @@ repos: exclude: '^\.github/' types: [file] - - repo: https://github.com/facebook/pyrefly - rev: 0.24.2 + - repo: local hooks: - id: pyrefly-typecheck - files: \.py$ \ No newline at end of file + name: pyrefly check + entry: uv run --group dev pyrefly check + types_or: [python, pyi] + language: system + pass_filenames: false # Pyrefly reads config & project roots itself. 
+ args: [] + require_serial: true + additional_dependencies: [] + minimum_pre_commit_version: "2.9.2" diff --git a/3rdparty/Automodel-workspace/Automodel b/3rdparty/Automodel-workspace/Automodel new file mode 160000 index 0000000000..256f74c8d3 --- /dev/null +++ b/3rdparty/Automodel-workspace/Automodel @@ -0,0 +1 @@ +Subproject commit 256f74c8d3bc12cc789488e72cf3b1d05601a955 diff --git a/3rdparty/NeMo-workspace/NeMo b/3rdparty/NeMo-workspace/NeMo index 8ddf438734..5c42641e34 160000 --- a/3rdparty/NeMo-workspace/NeMo +++ b/3rdparty/NeMo-workspace/NeMo @@ -1 +1 @@ -Subproject commit 8ddf4387344c6423763ec9ee0c9a755cbb5d8d35 +Subproject commit 5c42641e344a487c7ca5b253a7483f0af8ef40e6 diff --git a/README.md b/README.md index dab00ba6a8..77ec8274eb 100644 --- a/README.md +++ b/README.md @@ -105,41 +105,37 @@ sudo apt-get update sudo apt-get install cudnn-cuda-12 ``` -Install `uv`. -```sh -# For faster setup and environment isolation, we use `uv` -pip install uv +For faster setup and environment isolation, we use [uv](https://docs.astral.sh/uv/). +Follow [these instructions](https://docs.astral.sh/uv/getting-started/installation/) to install uv. -# Initialize NeMo RL project virtual environment -# NOTE: Please do not use -p/--python and instead allow uv venv to read it from .python-version -# This ensures that the version of python used is always what we prescribe. +Then, initialize NeMo RL project virtual environment via: +```sh uv venv +``` +> [!NOTE] +> Please do not use `-p/--python` and instead allow `uv venv` to read it from `.python-version`. +> This ensures that the version of python used is always what we prescribe. -# If working outside a container, it can help to build flash-attn and warm the -# uv cache before your first run. The NeMo RL Dockerfile will warm the uv cache -# with flash-attn. See https://docs.nvidia.com/nemo/rl/latest/docker.html for -# instructions if you are looking for the NeMo RL container. 
+If working outside a container, it can help to build [flash-attn](https://github.com/Dao-AILab/flash-attention) and warm the uv cache before your first run. +```sh bash tools/build-flash-attn-in-uv-cache.sh -# If sucessful, you should see "āœ… flash-attn successfully added to uv cache" - -# If you cannot install at the system level, you can install for your user with -# pip install --user uv - -# Use `uv run` to launch all commands. It handles pip installing implicitly and -# ensures your environment is up to date with our lock file. - -# Note that it is not recommended to activate the venv and instead use `uv run` since -# it ensures consistent environment usage across different shells and sessions. -# Example: uv run python examples/run_grpo_math.py ``` +> [!NOTE] +> On the first install, `flash-attn` can take a while to install (~45min with 48 CPU hyperthreads). After it is built once, it is cached in your uv's cache dir making subsequent installs much quicker. + +> [!TIP] +> The NeMo RL Dockerfile will warm the uv cache with flash-attn. +> See https://docs.nvidia.com/nemo/rl/latest/docker.html for instructions if you are looking for the NeMo RL container. -**Important Notes:** +If sucessful, you should see `āœ… flash-attn successfully added to uv cache`. -- Use the `uv run ` to execute scripts within the managed environment. This helps maintain consistency across different shells and sessions. -- Ensure you have the necessary CUDA drivers and PyTorch installed compatible with your hardware. -- On the first install, `flash-attn` can take a while to install (~45min with 48 CPU hyperthreads). After it is built once, it is cached in your `uv`'s cache dir making subsequent installs much quicker. -- If you update your environment in `pyproject.toml`, it is necessary to force a rebuild of the virtual environments by setting `NRL_FORCE_REBUILD_VENVS=true` next time you launch a run. 
-- **Reminder**: Don't forget to set your `HF_HOME`, `WANDB_API_KEY`, and `HF_DATASETS_CACHE` (if needed). You'll need to do a `huggingface-cli login` as well for Llama models. +Use `uv run` to launch all commands. It handles pip installing implicitly and ensures your environment is up to date with our lock file. +> [!NOTE] +> - It is not recommended to activate the `venv`, and you should use `uv run ` instead to execute scripts within the managed environment. +> This ensures consistent environment usage across different shells and sessions. Example: `uv run python examples/run_grpo_math.py` +> - Ensure you have the necessary CUDA drivers and PyTorch installed compatible with your hardware. +> - If you update your environment in `pyproject.toml`, it is necessary to force a rebuild of the virtual environments by setting `NRL_FORCE_REBUILD_VENVS=true` next time you launch a run. +> - **Reminder**: Don't forget to set your `HF_HOME`, `WANDB_API_KEY`, and `HF_DATASETS_CACHE` (if needed). You'll need to do a `huggingface-cli login` as well for Llama models. ## Training Backends @@ -413,13 +409,13 @@ uv run python examples/converters/convert_dcp_to_hf.py \ --hf-ckpt-path results/grpo/hf ``` -If you have a model saved in Megatron format, you can use the following command to convert it to Hugging Face format prior to running evaluation: +If you have a model saved in Megatron format, you can use the following command to convert it to Hugging Face format prior to running evaluation. 
This script requires mcore, so make sure to launch with the mcore extra: ```sh # Example for a GRPO checkpoint at step 170 -uv run python examples/converters/convert_megatron_to_hf.py \ +uv run --extra mcore python examples/converters/convert_megatron_to_hf.py \ --config results/grpo/step_170/config.yaml \ - --dcp-ckpt-path results/grpo/step_170/policy/weights/iter_0000000 \ + --megatron-ckpt-path results/grpo/step_170/policy/weights/iter_0000000 \ --hf-ckpt-path results/grpo/hf ``` @@ -477,6 +473,24 @@ For detailed instructions on how to set up and launch NeMo RL on Slurm or Kubern NRL_FORCE_REBUILD_VENVS=true uv run examples/run_grpo.py ... ``` +- Large amounts of memory fragmentation might occur when running models without support for FlashAttention2. + If OOM occurs after a few iterations of training, it may help to tweak the allocator settings to reduce memory fragmentation. + To do so, specify [`max_split_size_mb`](https://docs.pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf) + at **either** one of the following places: + 1. Launch training with: + ```sh + # This will globally apply to all ray actors + PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:64 uv run python examples/run_dpo.py ... + ``` + 2. Make the change more permanently by adding this flag in the training configuration: + ```yaml + policy: + # ... + dtensor_cfg: + env_vars: + PYTORCH_CUDA_ALLOC_CONF: "max_split_size_mb:64" + ``` + ## Citation If you use NeMo RL in your research, please cite it using the following BibTeX entry: diff --git a/docker/Dockerfile b/docker/Dockerfile index 828156d039..b12e1b929f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,14 @@ +# Usage: +# Self-contained build (default: builds from main): docker buildx build -f docker/Dockerfile --tag /nemo-rl:latest --push . +# Self-contained build (specific git ref): docker buildx build -f docker/Dockerfile --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push . 
+# Self-contained build (remote NeMo RL source; no need for a local clone of NeMo RL): docker buildx build -f docker/Dockerfile --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push https://github.com/NVIDIA-NeMo/RL.git +# Local NeMo RL source override: docker buildx build --build-context nemo-rl=. -f docker/Dockerfile --tag /nemo-rl:latest --push . + ARG BASE_IMAGE=nvcr.io/nvidia/cuda-dl-base:25.05-cuda12.9-devel-ubuntu24.04 +FROM scratch AS nemo-rl +ARG NRL_GIT_REF=main +ADD --keep-git-dir=true https://github.com/NVIDIA-NeMo/RL.git#${NRL_GIT_REF} / + FROM ${BASE_IMAGE} AS base # It is more convenient for users to run as root @@ -65,8 +75,8 @@ VIRTUAL_ENV=$UV_PROJECT_ENVIRONMENT uv pip install --link-mode symlink flash-att EOF # First copy only the dependency files -COPY pyproject.toml uv.lock ./ -COPY --link 3rdparty/ ./3rdparty/ +COPY --from=nemo-rl pyproject.toml uv.lock ./ +COPY --from=nemo-rl --link 3rdparty/ ./3rdparty/ RUN <<"EOF" bash -exu # uv sync has a more reliable resolver than simple uv pip install which can fail @@ -100,7 +110,11 @@ LABEL com.nvidia.build.ref="${NVIDIA_BUILD_REF}" ENV NEMO_RL_VENV_DIR=/opt/ray_venvs -# Copy in source and prefetch all virtual environments -COPY . /opt/nemo-rl +# Copy in source from build context (defaults to cloned repo, can be overridden) +COPY --from=nemo-rl . /opt/nemo-rl +# Unshallow the repo to get the full history (in the case it was from the scratch layer). +# Potentially not necessary if the repo is passed in as a complete repository (w/ full git history), +# so do a quick check before trying to unshallow. 
+RUN git rev-parse --is-shallow-repository | grep -q true && git fetch --unshallow || true RUN UV_LINK_MODE=symlink uv run nemo_rl/utils/prefetch_venvs.py diff --git a/docker/Dockerfile.ngc_pytorch b/docker/Dockerfile.ngc_pytorch new file mode 100644 index 0000000000..e61aca65e1 --- /dev/null +++ b/docker/Dockerfile.ngc_pytorch @@ -0,0 +1,128 @@ +# This Dockerfile is used to build a Docker image for NeMo RL with the NGC PyTorch base image. +# However, it is still a work in progress and is not yet ready for production use. +# +# Usage: +# Self-contained build (default: builds from main): docker buildx build -f docker/Dockerfile.ngc_pytorch --tag /nemo-rl:latest --push . +# Self-contained build (specific git ref): docker buildx build -f docker/Dockerfile.ngc_pytorch --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push . +# Self-contained build (remote NeMo RL source; no need for a local clone of NeMo RL): docker buildx build -f docker/Dockerfile.ngc_pytorch --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push https://github.com/NVIDIA-NeMo/RL.git +# Local NeMo RL source override: docker buildx build --build-context nemo-rl=. -f docker/Dockerfile.ngc_pytorch --tag /nemo-rl:latest --push . 
+# +# If installing new dependencies in the container, then use "uv pip install new-dependency" +ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:25.06-py3 +FROM scratch AS nemo-rl +ARG NRL_GIT_REF=main +ADD --keep-git-dir=true https://github.com/NVIDIA-NeMo/RL.git#${NRL_GIT_REF} / + +FROM ${BASE_IMAGE} AS base + +# It is more convenient for users to run as root +USER root + +RUN <<"EOF" bash -exu -o pipefail +export DEBIAN_FRONTEND=noninteractive +export TZ=America/Los_Angeles + +apt-get update +apt-get install -y --no-install-recommends \ + jq \ + curl \ + git \ + rsync \ + wget \ + less \ + vim \ + + +apt-get clean +rm -rf /var/lib/apt/lists/* +EOF + +# Install uv at /usr/local/bin in case the root home directory is bind mounted +ARG UV_VERSION=0.7.2 +RUN curl -LsSf https://astral.sh/uv/${UV_VERSION}/install.sh | XDG_BIN_HOME=/usr/local/bin sh + +# Disable usage stats by default for users who are sensitive to sharing usage. +# Users are encouraged to enable if they wish. +ENV RAY_USAGE_STATS_ENABLED=0 +ENV NEMO_RL_VENV_DIR=/opt/ray_venvs + +# Build vLLM from source to use with the NVIDIA PyTorch base image +FROM base AS build_vllm + +ARG MAX_JOBS=32 +WORKDIR /opt +COPY --from=nemo-rl uv.lock /tmp/uv.lock + +RUN <<"EOF" bash -exu +echo "Building vLLM from source for PyTorch base image" +VLLM_VERSION=$(grep -A 1 'name = "vllm"' /tmp/uv.lock | grep 'version =' | sed 's/version = "\(.*\)"/\1/') && \ +echo "Building vLLM version: $VLLM_VERSION" +git clone https://github.com/vllm-project/vllm.git +cd vllm +git checkout v$VLLM_VERSION +python use_existing_torch.py +pip install -r requirements/build.txt +pip wheel --no-deps --no-build-isolation -v . +EOF + +FROM base AS hermetic + +WORKDIR /opt/nemo-rl + +# Variables to control the build of TE. If there are issues with parallelization, consider +# setting these to 1. 
+ARG MAX_JOBS +ARG NVTE_BUILD_THREADS_PER_JOB + +ENV UV_PROJECT_ENVIRONMENT=/opt/nemo_rl_venv +ENV UV_CACHE_DIR=/opt/uv_cache +ENV UV_LINK_MODE=copy + +# Define the no-install-package arguments for PyTorch base images +ARG BASE_IMAGE +ARG UV_NO_INSTALL_PACKAGES="--no-install-package torch --no-install-package torchvision --no-install-package triton --no-install-package nvidia-cublas-cu12 --no-install-package nvidia-cuda-cupti-cu12 --no-install-package nvidia-cuda-nvrtc-cu12 --no-install-package nvidia-cuda-runtime-cu12 --no-install-package nvidia-cudnn-cu12 --no-install-package nvidia-cufft-cu12 --no-install-package nvidia-cufile-cu12 --no-install-package nvidia-curand-cu12 --no-install-package nvidia-cusolver-cu12 --no-install-package nvidia-cusparse-cu12 --no-install-package nvidia-cusparselt-cu12 --no-install-package nvidia-nccl-cu12 --no-install-package vllm --no-install-package flash-attn --no-install-package transformer-engine --no-install-package transformer-engine-cu12 --no-install-package transformer-engine-torch --no-install-package numpy" +ENV UV_NO_INSTALL_PACKAGES=${UV_NO_INSTALL_PACKAGES} +ENV PATH="/opt/nemo_rl_venv/bin:$PATH" + +# First copy only the dependency files +COPY --from=nemo-rl pyproject.toml uv.lock ./ +COPY --from=nemo-rl --link 3rdparty/ ./3rdparty/ + + +RUN --mount=type=bind,from=build_vllm,source=/opt/,target=/tmp/build_vllm/ <<"EOF" bash -exu + +# uv sync has a more reliable resolver than simple uv pip install which can fail +# The venv is symlinked to avoid bloating the layer size +uv venv --system-site-packages ${UV_PROJECT_ENVIRONMENT} +uv pip install --no-cache-dir --no-deps /tmp/build_vllm/vllm/vllm*.whl +uv sync --link-mode symlink --locked --inexact --extra vllm --extra mcore --extra automodel --all-groups --no-install-project $UV_NO_INSTALL_PACKAGES +EOF + +ENV NEMO_RL_VENV_DIR=/opt/ray_venvs + +WORKDIR /opt/nemo-rl + +FROM hermetic AS release + +ARG NEMO_RL_COMMIT +ARG NVIDIA_BUILD_ID +ARG NVIDIA_BUILD_REF +ENV UV_NO_SYNC=1 
+ENV NEMO_RL_COMMIT=${NEMO_RL_COMMIT:-} +ENV NVIDIA_BUILD_ID=${NVIDIA_BUILD_ID:-} +ENV NVIDIA_BUILD_REF=${NVIDIA_BUILD_REF:-} +ENV NEMO_RL_PY_EXECUTABLES_SYSTEM=1 +# The 25.06 Pytorch container is not compatible with vllm standalone compile so we disable it +ENV VLLM_USE_STANDALONE_COMPILE=0 +LABEL com.nvidia.build.id="${NVIDIA_BUILD_ID}" +LABEL com.nvidia.build.ref="${NVIDIA_BUILD_REF}" + +ENV NEMO_RL_VENV_DIR=/opt/ray_venvs + +# Copy in source from build context (defaults to cloned repo, can be overridden) +COPY --from=nemo-rl . /opt/nemo-rl +# Unshallow the repo to get the full history (in the case it was from the scratch layer). +# Potentially not necessary if the repo is passed in as a complete repository (w/ full git history), +# so do a quick check before trying to unshallow. +RUN git rev-parse --is-shallow-repository | grep -q true && git fetch --unshallow || true +RUN UV_LINK_MODE=symlink uv sync --locked --inexact $UV_NO_INSTALL_PACKAGES diff --git a/docker/README.md b/docker/README.md index b21c3e7401..66b1da6855 100644 --- a/docker/README.md +++ b/docker/README.md @@ -3,8 +3,8 @@ NOTE: *We use `docker buildx` instead of `docker build` for these containers* This directory contains the `Dockerfile` for NeMo-RL Docker images. You can build two types of images: -- A **base image**: A minimal image where Python dependencies can be specified at runtime. -- A **hermetic image**: An image that includes default dependencies for offline use. +- A **release image** (recommended): Contains everything from the hermetic image, plus the nemo-rl source code and pre-fetched virtual environments for isolated workers. +- A **hermetic image**: Includes the base image plus pre-fetched NeMo RL python packages in the `uv` cache. For detailed instructions on building these images, please see [docs/docker.md](../docs/docker.md). 
\ No newline at end of file diff --git a/docs/adding-new-models.md b/docs/adding-new-models.md index e0de97ae40..30fab20a3e 100644 --- a/docs/adding-new-models.md +++ b/docs/adding-new-models.md @@ -152,3 +152,42 @@ uv run --extra vllm tools/model_diagnostics/2.long_generation_decode_vs_prefill. # ... # [Qwen/Qwen2.5-1.5B] ALL GOOD! ``` + +## [3.check_hf_model_embeddings_untrained.py](https://github.com/NVIDIA-NeMo/RL/blob/main/tools/model_diagnostics/3.check_hf_model_embeddings_untrained.py) + +Detects untrained or improperly initialized Hugging Face model embeddings by scanning for near-zero rows and rows with near-identical values in both input and output embeddings. The script also reports whether word embeddings are tied and summarizes basic statistics. + +```sh +# Example run +uv run --extra mcore tools/model_diagnostics/3.check_hf_model_embeddings_untrained.py --model nvidia/Nemotron-H-8B-Base-8K + +# .... +#================================================================================ +#EMBEDDING SUMMARIES +#================================================================================ +# +#--- Input Embeddings Summary --- +#Shape: torch.Size([131072, 4096]), Dtype: torch.bfloat16 +#Near-zero embeddings (abs < 1.00e-10): 1039/131072 (0.8%) +# Indices: 0-1,3-999,1192-1193,1245-1255,55014,77579,81772,81819,82312,82500,82725,82737,82977,84020,84121,84521,84794,85015,86409,87411,89412,90320,91368,94485,96385,104097,108262,112147,112327,112497,114755 +#Identical embeddings (std < 1.00e-08): 1041/131072 (0.8%) +# Indices: 0-1,3-999,1192-1193,1245-1255,55014,77579,81772,81819,82312,82500,82725,82737,82977,83855,84020,84121,84521,84794,85015,86409,87411,89412,90320,91368,94485,96385,101707,104097,108262,112147,112327,112497,114755 +#Statistics: mean_abs=0.007874, max_abs=0.196289, std_range=[0.000000, 0.015442] +#āš ļø POTENTIAL ISSUES: 1039 near-zero embeddings, 1041 identical embeddings +# +#--- Output Embeddings Summary (Tied: False) --- +#Shape: 
torch.Size([131072, 4096]), Dtype: torch.bfloat16 +#Near-zero embeddings (abs < 1.00e-10): 0/131072 (0.0%) +#Identical embeddings (std < 1.00e-08): 0/131072 (0.0%) +#Statistics: mean_abs=0.006775, max_abs=0.200195, std_range=[0.004089, 0.021240] +#āœ… No obvious untrained patterns detected +# +#=== Final Summary === +#Model: nvidia/Nemotron-H-8B-Base-8K +#Analysis complete. +``` + +- Thresholds can be adjusted via flags: + - `--near-zero-threshold` (default: `1e-10`) + - `--identical-threshold` (default: `1e-8`) +- If any near-zero or identical rows are reported, the model may have issues of numerical instability (e.g., inf grad norms) during post-training if any of these problematic tokens are encountered. We have observed this happening when special tokens are reserved in the tokenizer and embedding, but none are encountered during pre-training. It may help to initialize these embeddings similar to how they were initialize during pre-training. \ No newline at end of file diff --git a/docs/assets/fp8_curves.png b/docs/assets/fp8_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..1825877a9e774f4771bbda7602eae01d9adbdecd GIT binary patch literal 351315 zcmeFYWmud`(m#y51c#u31eXB8JrE>V2*KR~39d7^y9I|ZxCSS41 z$+s9Be&_UIZ{?AK2+dx|hIgzU#0+h49G_u(w&t`Z#18ObB15uf2585S}i7vE0Ju$;P9W#RffpU5#QY z5$5gW%!K_c6T2F>*5CS9(#=6FtqadXVH$t&AjA^KRD<0+n2Ii0(HS-gA9J}oicilY z*Y0_$d7-2uAI0-AwSj3qp(jzBdILYcHcURO4pGvvx@ycYJS{Ea3ouFSGs72WlTies z4enErwxI+q!zOx=lnsVs^K&Np*44t722}aA{@2n&Kipdd!!GD52nUhPZf#Wt82fR` zJb4ZJw$9JSuD^@ZCf=+P&!12wi5F(DY+US{#h|7+hCfk%hBrXQBqLdd=olm_!e6$^ zSU{IR$!*M_{)5@9+E1G?9_6<5y!w}m&O4=!`%(RC$%DCYC2p=K-qLc#Z@=mNcK2hb29~47>U-QqN97=eV2p@b_Q{xM0BK{U;)WBZ z%14L-lSJe3!w@{@Mdl^Em3>|njf%Ra8YUwtE;!a<{{iLn8Qc}4bn{0bsKgav_ulMn zlFc#khW-;Yr#0U`wk99jo;bKo9=zsYPz51-Zx?neh^m6fe83F0#WGK<#FPJ^64qUP}Jyn94QXRF-Guy4bN9MML* zsn_uEy1goI)*QbB`x?cX}_$t7t~DH{fH#>+4A&%NWt2GfZZHWE(d-{iaI z8{~uYOY`x5p?`lmA9SFEN6(b-V)L~Vi#yXb`8De`#WnAn1XG2Re3|z_iMxpwiMENd 
zKP}#(Z}DnGepbFH>{F{LJ}b7E(%qxj6WgQY_aAt6tErMWSr?2_D;b$^7+s?r*cGL5~Ef^rI23D>$9qDju9G;kQwH3`Evd87rG*K zoK|A9jGy#Ap?}7%BeSxf7B9F@_2(X@&=2wB&Z>@f44KH0o~oW%-!xo|Jf2agm*e@NAtRi-JXQ1~o->8B#2Mq+qcYX$?<)UFRu*lA&wc2 zCAl4+4fg>z4?h-v{L^w?VT&n?;a8WuqdbPTUc-%(K_(k!^!z%uPD`8k$()frKTVRV zRce}*ah0W&NDB3o^pqvib<%s&NxmL(x7e%jS*91ISMuBQAJxCCCb#CE%gu7 z$TR6wz9lqtr8%P+u8Cc~SDvZ2uX9$0U1nW&G(T?RWm`D+VL`v)#lm;n8e4%m!)ob6 z{X?EZ)&tJPyi2=F(_7q2TLugUQCWG}4>65f+0ahM0YI@MaA&{;2f*%92JjO4Womvp zK*BHF$X+U*OD7$lq}a#r0sEUENV zNG-`A1sw?;!|*R~&k800>6Huu=@Y7VRO3&JsmW=zX<&2%?92?sjKv~zd_dXb@F%b3 zwFe@;=!r69`e}{qg zx2fdD&mWhL4E-Zd0W5=b3ZSVkN^e!X71X*umCn0Byo^D6)3*G!Qsc4I(Y0NHDa_0J|n1A4XZ&)Jtnye)3d zaE_uhC^FL8(+biIOe-4o?^xNMTdU999t<2%&$kJM3wb&XHhpUfeX$U4Gq^k0mr$f^ zm$0B1nSZD*T2z+yivv5sHih97^E5=18kVz~TcviF4@^BuFp7hleD0n?o~$iYD6CUz zNN)$;)C6P)7=2vhMdAHw`E?W|aJJ4qTE?^YqR4Ts`bIQajxsE<&!)2BID(&AqD;!M$XPpto-o z+t~@&n_y?fSwlWhgRkwiu4#+kqldzWUJTh+92=f`Ur-X3%1z1AlUo#v!QKHvRGTg=7L`|gJVkCv{>_(LOryj!hS?}v;Ex5|o@PmRtx zxVm}8yGu#m_kTkDmT~Lw>hvyOUL_u&dXjj;=Te%FwjZkQN6_ccKjZ7t==+8_7B}L3 zOE;V~$kJI|`7$-`CV1}PoTmlOVTh4)dzHqoK{YMbd_aCN7hu-bj~#Ob`jWXLoaT7y zc$UB2Qq-5x>?3^RvY%GP3mHSp5Oa-jhQi#B&XmbSxv1I)+O>TL58oR^S`XzI8Tg7# z?ak-{=I@<)NMhNdZOs;93@+L{PIWu$YG&PMxt7BkC){`+wh#4K8W!xa?Ik)AoFUWp z*B##NB3Cr)h9}-5w*~k(v>zVqZf`T)8ZPtjk!k8hBfMR%s@F~;nNFFEgh@pU#5lY^ zo|fG-ot+hqW{1h2YvXj?X9kkyNA!aG_%VmZ!b%(F5(4l?A=;j z-4~aLrq9+4peFK&bjgk!4nq&ogh9+t0UkCwg-Yo^4HSZSUKoRtE+}_lJ6KL!3 zn-790Q24QEYvE!-2eh@Za~1}Q(f_T7@MHORH5WbI-@3S1i_vQ$$^4`6dl}h(kUxGCqqlN#aS-O>0ssJ<0A5aeCrd7FAt52ImpoiNJRFZb zIGo+>Tugu*cFqicXYvOh84G7Kr%w(ppX}}Ee)BakwRd$9qo@Bp(4U{b%V`1p^w&ss z&i|O!;{>^Ww{UTDzU2Co_amw3?^@w^pMVxNIx?SZA9eOPhPVK)py=QF|358%jrdQL z+J8}U^9k|%GwDAy{X40KvxSqiz3t1Gf zEK#mM7fl??a*$8>aUH2X$*6vKd_P**?+>Em;~(a~zaPuUW&H3fj|2n+NdyHMsSiNJ zgJo1NEsI-;<1t}l$~-fdK)+Y=4aM5A!QlZJ#bKxs_~&IMsig*q^+z`!Jci)WDo+Xzc$*50BjmslForN%bG)pAvjj!sn$I zlYTG$n@$Pe1xP#pM+Sc`ga1>eY!tcZ7W1c7|9ySlI#}WTN%bG)e+NcMTq|}=Q9@4s z?>fcA8~Kk6{+h~X(4!Lgpz)tN|GOTTyK;p7qWVuZAsJ2dE+DVZfCpFm|1i9NjPPhw 
z=>LhqUsL&?QT=_@{%2JGGpc{=h5y^B{!PKtkNG( zHb+9`U%MI#H6CHSmOtfKRWH1G;ZcOA@n_#JQ(%rCq7Sck9${X>UJYJhpTszuV-+H!D{}8obBoU!sDGNOK*0eKUvZ7XB;#74x1bKuLCIs&{ z2FVvI6ZEdv6Svnh-0?RiH73ZN`bZkru9{tfA}61X|8wYPTviG`EgS*%-7=|sOl^9b zQ4ne#&~rvkM5Tf$4&2mjJ#~J{xa!8mNj~XmWX*}Ia|Mt$ja6Vhn9w@fZk14lK%i{FH zZ{dAAQKu#0Vn-CBB8F~u-pM(43}%A40)&OnsROz17c|Sdy3A!%G+BLgE{SIE=CJxZ zI8_dKF&I?#5PcDq4R1~kQ zO;aECk^RA)g#I!=TW6n>T*U3wLdZ14;{}b~@H!CblIio$p|^3Ft>}s= z>^(CXYR9-0vCr~U-yFq6SDW>`Ez@qmE^p2VI+1R!@dz&90qG5Tx@~|fle1N7Mg`68 z-qju6NIbwk1O6r0@05`KZODDDGdh15vcoVE9=*VwUmCsyq&~#MbS1%RvY0<`(n9-s zS`ToA3oOVVyduL8+s#D?8B1qV}*PDo^27Y9t3o z{*KLp=I-jgp{@1p9(ResQWU<|z_^W;mT0hKSKSUg_wxriF(ZeUwPtMiHj1OZ_VBbrEL@bizD4KDf#->-a zKPjgS%`Ial8xsXGk(R6e2<)>aa!qCl4`<_xmWH@Dfa4~`W4Y+`BydjXfO)ab9!S7jOvDQ_AojT zKoyQJnY$W}I>P#m;ApO2>wDZG8Ikq4UR7Qv0WS@~`lD~WoFexVG-Cu@(ib(~LiIJszi*f=?68R~CE zhjv0r-JUf{k+U)Ot)p;yv>z<0o=jGX5X_t*iCztuPgNccy$}-!-ElLhYbY= z$G*lK=8+h=4`1;|MG|huBD$6*q1~4*v`b>1c(Ru^;Txbf;C3AP@!<{1($9V8&v3Wn z(;I9uscR;0E7yvD+nW?SCm8<%G2RtO>L~c1tt-X~_Q+)%EWx|UR7^fKC8U!cEs0*g zhD>2sxVpW7G#|0CaJpb&VP)jz(lb4}p!wPZUd!&mJP@~~{!SUbs>|Hx!TIwZw$bBf zJs^dKQ2@_b1oy9%Sc09B_s#-B}QD=8`p?oH~N zU)Gi!wpm8Uu!Z17N=M( zL&K9+?;5~WzqNBU9s#8HB%%RZ0J&xC9YIoFnh+Zg}5YmsI=_VURwWaSi zX~PsRUOzARR!DR4EpXTZ+fqcj5sq$n*UiGr?q*tF=E9gE&_&@p`hkftu)kC^`xft> z{x6&PJ8mV%mW>tTt0}H%$@WKl#EmI9cfUx#MN;#vsE^?(QS$vMTfI%JaVe{6LqVdE1Ci({EMaG|9kHInn@B+H7*F>?jO^^A z5vH}8D-cyq^8js02uvW8pJdj@Fm`MQCq3TOC4$m}Cm~f%*(VQtf@LEU6&#eu_o|B3 zZue-d$LO#?S7oYxFMVd+dNU4=IG`riFYN>nZOagIggm8-tl0br;? 
zV72>ThvQ-o_PY*S2~oj)P8X}EI3as+PaoJgn;*o1Q=ei(X*0v2BS=eFFoH>_~swy{@xuq%@h8Dh~WGl~qvlV6?YfwJm6}Wu{aVC*qW++Wp=h_GuTZ z?An|6`>EnP{nW7;T|`01Wsw88kyINtrPO+J;ejLWj0J6m>Q4!`P7~wSBmmZLtP69C zO=w#MuT*q>{e-x@q7329B=|}csFl*8?Qq!5>wiB0#jt0i^v8Lhm;C|1!X5mP>c4ro zEPP;B#^_{F9hF-6WqCYo+2S}cMPbu4gkxW_7(&BS>O|&a~Nsp3`gyE7I3M630 zxB++8Z^zAi0~jJOW8^NG1l=kg&>BWSfJdyA6zjYCN$2@Dhi3h-G_B}2*;xX!ivxY7 zs!Or4$#!q@W|kc;)4=0`*-xCaQe)X)xe-xOFQGi9eJ#X=SRc6)L-3BgfQJ_|yGP9Q)~Fc;4R1e+g=Fxd0t`Jlk?>R%${-Z_9Zz50ggQZAxnFEJ5np+B=W_ zwzp|bsXkG7OVskBzOhI5gP5LeWJ%eN!_g5f{WTmK?os!%A)}1=Upx&Q>aX-ZWbpee zJ&Z-!M=$4d3Dzd2QwH-w(v_#=MQ>Ownjj>K3t{WAC57%VG`i5`rmszMc zB_vn%G==dKaW+qchpB!XDx_!PcFJ(>V*!Jnfw?TOE6f|-Oi$WKS(N906z#89v6vo?Bd3@+AcOEcEZ#+)A7%o7r{I^A?_GWf1y zO6N}Zi6MiP04LFcTip-QZWWETF+?fJTHJUv9f@xIc#CCk=C`vXocdyEz)I=xBwD`8nEyEHw&rRJ<8W-+;cm_i=qVBk{HecoJ=IHPh}KxSD$cx&^H1*VhkY_X|h5 zbr_m1gNg!k%LJDs4)N|tGcb+d2=&aTL0Xc`tr5SzXy{_s&}Kxg;EWc@l=pHpEXTGr zb#`LTu0c!tO~&@F9ESpra?ZxjxUVV{*TIM3KL%F0&oXbTM4Fx$u81ESzo%=oLxvIO zGYq9$dklM{Eb9dsA6^}qjGi4mtLc<-kbm+v;_Y~=0oxDgQgDnkSO3y3q%#z}Snu)- z3Tk;G^kxeK1MBtCI%ETCJ@%u6wvE8PQmOM88`^1*5YnBX#b$P-B6_nHsNEuhx+Y9t z{|bd{NcQ%j$fS=W;fZF}{%7K?jrmY$_iTqOI9HGM#|twL2izE*gBw?Z!m&(zrecOn zA1J}AQjMrtnr38cMGV-kKtli3M^P+EgLg+7?;VTKuaYNqpT) zMcdhOLmtediajYd=odf9*wprb-!;f4QMT}2r`Ps_RmCE<{d-{!T{Xb7QVr&HaHbdH z(S_7u$RYR9t`J74LCn|+5RJ>LG3e;ZFRK#2)%$*;!j<=6i6VTRY&k}RizehWmufiE zhs_6btcOEYG*(?WxcRKUwHCvu^-MI=^GYu>gDfD>I;XLbSUIgKEPSjlNN;y<)CzS0Zi}aV>lL820o>x+N&2?-6lQ<=ojQyt9 zr%7`p=>gk^B6&tfL~%`)Oz}=b%d`B&>ev=9g6CS@UL@arCn#^h@0sgbeg~q7p6-y# zRY~CAnQdEcUjtG+f*+>65nFSu*7#p*cOwwJn{!{Xpn0CPjW-z4PL&_Ve%}y1n7b3v zygQ|gXp{PIt6@0Q-e;DBFy=X)!P-WN`+%8b26TCWOC*$Nsaf4487{`?1mpW2P z5SKW150H+POQQs@?q+czYBRzv&=j;c?vNy?l0d?2uZ^A_==3(XN344kBXY@{%*SZFcBfY-d4( zFYp=lPOBrwD)l-r*Q-x3Y!_(qWO1a6_?4G`hEZ{TZ2!C)t6k%YuM2>Nh_`v>kUCXm zV5bXMEt7~X2n6?>U3(u^*0Q`wX-A%*pYnyDa~hjkpM~(jEJi-ULil?+r2?PqB?TI& zg=lno#k@m03@&F*~F9-6Y#ItrqMy9 
z+CwqC8fZI>@@qUQYq-=!@E?KVA67wC_Zym4E+G2@COcLot=dBR>SeDoxX&mjHOEpW#JQR)S1FLvj;ezra^P3$fA>d%qB4QT99UFb;B>2*Z^ z(3OEeDbhD!mWWDwDlMtcuqxw=f?i z*nL+Uwm}5+Jz>;$7|BTWx_rrKf|oicUi&9L|HpBv`Z4Rm(Z3;y-$4rk&4=^I2N9jMY_r;oPCRST)Fji$8uQbAkIUB_LerA`5#-3UgvF3b#_VdY1 zp-7=VFDmudkYW$8L{T(N+`nGaJR+TM9jJKIR{lA=!9S0v%2g&lAz^)sHb}UuZhaqI z)1fglaVS+8|F7h9{(HZ3F;rw@|9k$+aRbStpR)J-^y=lm_^*GvQ}9D6`JF`}Od-bk z@8{Ql(MRX1<|Aa=aab_X`DbAKZ-V`uzW8@i&7S#r<6lbqPyKxLeqUA9r^q7z_w1o; z;m3X@2@~Ldk>)Rs#r{Y!;Le_&JhT{n>EBqOgUxUCrl%Nk{9PW{B$7%Ka#22Urud3q z(Zj?^kyITrrhF#}vn5?#qJ)u`s2IiglZZd0g`4}Io;m?fk5CqV&-1Plo4Mh4uaT8sm8IjKA9_e=33_M#KZe!qcB+TYTnAgO7V34 z{6zcVSjT9a%5CA#^@%3$+2hp8>2~jhHPINpv?}{iDjtBXuf-M#y92FNA=lUza+U_AL{)6wpH5G98q4MA^ zV)FYM$r}1mcISO89DdUsP`~)=wA#%ckJ0TD2G>^zaY|Pt?V_;}V|a{NjjGRkLvEj_ z(aw2lO1jQl*u#nCZS@H^$m`MJRyaM_oYY=BQh{$pItrqkKdYGwf4=oFk=Mqk?3u_i zadTu@Sxt_L)MDsH>UC4x5OJlkMB$EO?|fx{RCl;`)#Mz~ z@j=}3>`uE+U0XLM)-9h-xCWu9zA`p5`m+|*CPqhd|92z7FM<6|K@AS=i#5|(4PxG+ znNY*)ZBsO}19GQuphi0xC&eiavo7gL>m8yuJ#$Qs_(<&18ju(&ylFu)+fxze&53_+ zzTlj+xIU&KZn%8((c##&66&fUX4fwU175b>&uahZ`#F2cF%E zeZAV}BmB}i@o0dK=b9b`D9_*3pg#Wc=Vw*(QaV>v=uRdr1`J!MZI7C8EKqyHfIG^s z#4QD1b9DN!$9)YPwHezt%CoY6y0GMiD0=UO@3Fnp!ERaQdxRdjuF*pJG?V8^=50VO zMTd=4SLX9qLe_!_Omgtpl{|@4c!yRpSrXl8IDlY|A4hqN*7Y2Dy(2^MyHmCE%^60z zD5C3)TfMIdydYg&H>7phap2B^_wBEmb=sLHxWEkdM!T-d?w}9(Tsjc6W$*pq><5^C z)v6eKx509sFOaMVLT7d@ODxmd(seVQBc|4(J|t20mD%#i456z|hn*`C;j{Z@x?*c< zfv#lQB)=b5zJ5g#5c&iQ!@K3EeVjeKBgqbiQD>2{Dcad*TSHq`E~D}&_4v|~lQ+{H zp=#~FKwjo$D-i)|xQ5;u*^wIpZwyLrOL?rx{cS0#o z9oVJyf^zYYuYAwtcJS49wW;D_5*DYW@fij7PH(6_4OzT5_BO?H%iqqyGeG0RmolRn zBgCGzB^}pKNsYAfaZ@?cru9L;$Eke-L*a?%+}}4BzZF2Tiz8eRe+^(}s2t z@<=dp^y-YLxYCMjXFQCo9z!#^Qaf3C-A7bIWBVtVx*~=YkB6aJ)g9 zAlCCX86-kj){{rK?z_RW_x$-WT+I*8;|9Yg!7f}dI8eyqkRv6cI)t`?1~F(i55nFU zpP?P&RWvS+8(IPq7p<4qYEj?A@UJ5FuzW@!%LDOsvvXtQDcp_+Zj`Ddgr|1NW@HqIm^R?7&bj6DD5AQ&?V?C^@tai zy>}lr6*O+^mo+s#8;y8jrr-wdW-~zli1YUrFq?n~|5)9xkMPGW)M)wWQhbrtw*ld% zOX}|4>?L4jOZS8-)*7<^(7nnwmIgSKAukVrWurKB7E(R#|7=eWh&?*&+dL 
zd7dCuDUvCD#!^(%ogFFi{tD=(d?4tCBO*YF+yPkCx`wrTYh-5bJI|RVjcGg>l4nJv zUc*0{<`J}}P7OTRvVG>x84u{^vV8LLX5P0Y9ObCFmB`Imuyr3LXBrp2AW_MBYe34I25F4`962o z0!;9$2KoEUkqmJle56vOUzw1&kMWt=uCRc~#M&m}ZN=caNI7hU)ojBM-iNobpvkzv zx-CIl=k7Xg*;C?RYIH~ZrYz_?%G;HCD*?o*k2k$?6=iFD>`RL3&F(N*HuTs~2?LeE z()#&N?1(3B*WaKQ6&{KFF+7_*EE_$0Z1BR163=;~hep&3!4a5QSCcL zmcepn*eI*{bTE2%04oy1<OJtv@dNB+TUIWv3a^UOR$Ry491M{4E^w2 z@Sr!T-{s0|w%dC+^ILIWRi#aha61ODgm%^?U)zMsz4SN*@2K{iVv7A_qgZ0iz7NyUk?X09G zu6Jr#;N|$l$U>Y(qlvt4*2kCI&jlvlbtL0=>%1`czn*7U;8`>B?jDL~q;=-(vmoQ4 z*a+1g?X?;$5DBe)&BiEHvUu=bc)Jf^m@8ZamuV@UxSv|`kk<(4uHk9F^Wb`|rx zF24;5T6)bMKG3P)br`Jl++7Pv!x5n-b0pvB9>GZIUicTd!)5veV$udpmILUIGNRG92(8r{E3p!a>dvClp-RZUVL3scJZc&-9sBh+4c_ zy*e|7C%}8TShqcUvx?NNJs)pWUkDav>qs*E*ko#yTH8icURpgoQYPIjhuNg#N*rC% zKK_TI=cst1n`$d+EXTMFW(**exv8mVx)?&amzgGJSWS!n5Ve-IetT(VCe*l)ZMu5N zJ4{q5(-C_}q38IyCV{&Z(H^#j2ykAtcWP~LZFclM{c&jvpJy*2m+K{|y*4+RudA!H zt?BgLx?a3WGOMn%6{+I#tl02k%l!7BMd{poJKW`Crv)Ns1esIBnDHVg8EXsR|8`Q7Gp!ciJ4l z?6P6}Kq|u_gA7N*z_tomBekT%TD~NHqL?p7Er+V)L0!POgg2GpM`E%-3ZT07G z=wng@ygt;-yGC;%S;BQ{Uz0-5N$%vRkdJfjTeAHSp|x2v zEs#|!q(v*8%`bWs{kzaevkUuq@{7xi$~Q0Hes^9c`mir)YP?!kmGAGK&tay=smX(K zVH5j>=5X0OPAsKHC_w67lDNS9S^8mT+0PtimO%e2HxkUJ(3UUBZ#*Z}N)32rB<`Qx z>Ao`k(u{Zt=%vs0cAlb~8Y%y{rl|HcBeb@F^4$-t@HInfwITqzfP#8_nkQXX7ZzSo z8BSVjpQerp;*pakB)@WD*c&P(nO;~dP)1Mi4g?BEg0kg%E=HK++*GOFl*MJXx*j|Z4l$~3b5l^-G%n|?P`dO-wUVDGH!nZOg!XRKN)YG5N~4j0bi%p zXxsyDZeA)Rhd3}ceKqvJdaWvK0(TX@%OI^C)NaDu9pcX&DJODQQay0LPy{-N2OGE+ zrmuy$?}@p1@2UWul`K$|bXzZ|LEW!wTh~DmXX0K(mu)&S&^HG|S(B?|7qS;`1#)k_JrX_D3%1@C~$^@W?yQUO3c$);3DotG-9 z8~1<>AR8Z?v6F*SwN0G#OuS(LH~Ha*NDsEVA@bLdY>vQXQqXs-n0rjThhgR|z6B5{ zvJU5FW{dg|OaZK)(NM!NE;=1OZ*QGkS?SMt7LRDsV%Ca#<+>U#U;B-0K+;{PP}?Ex zNXP44Ko230o4B6fcAaO~uBNYJ`_>iwA%U)k3+ANBrW6@v5xMGZ2R$b;n2=h^yTb%r+2hL!))qQ zb0L7d>^ohg26CNtTfc#+=Vx&mv-q|8uJZBdB$8=mRXhsP;fBxDrLFSAY~91O3=R|F z%G?1Y=+m~B?IezAKznt18tP>{L{aF|(NL1=wA6);U{6l2vdDhidqIAWIWJg{hT@{S z6Su9gV#E9V5^v`Qs28ZgBR2gN#d0A{#%ErscbhSlUkXcs%N&l}AaKoQ^hgeqW^c9# z6bR(~ 
z3oCx>Xl;ttioFZcO%>zLYpoRn`>utTXkHTG_f)goT_#3H!_nqjeq6{uP3(RdE4m5t zf3%aLUfC7*1GEYpmq&W7Sak3TU}b!6=wpR41p|(+unzkW925mQeH{XNSOtI1tuWEs z)+>dah-PDX{hO$DA;P91ur+?+H zJWSlC+OLWOsITC-k zdBa|RCR{>^R=lE~s*K!dWZ5r$nCDH?G-Z&Q*KrcLmI$_5gx$ml@^@|*< z03LciDdwjaTLD(h6M4t$Z&$R&@Ae*Dl(ch0GsAsR9R~F;WcPJPjNL`l3%RQ|3#Y3D z@Sm2)*w#^E-uaDsE00a(Dd>cM|85}jN}N%tnPNV?5pATR>SxfX{GC;}Uf0BB zh2rv>=Ye8YUMisd_5=?eFed|h*}1q~p}5rP0E`LnRp3)Y3D9$zn> zYO~oNx*qdQ0xdu)A}ZP{F*Q*=X1-wiym#i+(`xq#=((7Bnn^D|J@HvZ65s}F46U}g zT?iG*7U4T32quwGNwq#G7*&}5vLw@IVstRgyd@9zBvflrw66DEz{W=#)?0ST)Z=?1 zc17(1@+88Q7W#;$ka;~WcD1dQ31zH|c?n)BV&_h3d)xzwk2Zp|pu+F!dalhC^3Jma z8V^erlsbZ(qszZO7}sRCzrZ?m>#fdRm(J|5z%VzBn(!-G9o#n#Z)%+Fcs)cp7VHc5 z2=m_MMX9w3lC{KhCZd`EDQ09fqi$=0zV^=gM2{na#=s?&YgH&o8UlwVZ$dQeP%`ES zqDN`^HV4U=qZCYUi{AlOKGwW`-Uwg{Qchb#^C=tALm&!>Zmdwoj3p5}@~&+I5PIwA zsEx2_cQb>|x(Tg0D#cp%&P2QOW^!3Kas&kX@T{DmVGOOOo$91A_rk2f`?|!`z^bp6 z?7;bNb{sg^rFzRO#?tTc3=N5n>hHf%?u-PS^LJ*{wqGx9+#R`kV*-IrNbHkhAAUyIXsCMswBf-hUA z7jlqCJs4&}cP$&UNJ!-d@HRu+NUypvk@FC*)cF4xd&_{hf@NJaI0Sch3mPD}y95gw z+y=Mc1b4Rp!Civ8I}94!-5r93$?GmX z{qZp<@Jeea843otX2U6M>)wTQv{LfHaZYR#)EZ z);F~SpQ$^C6NpJY7N4|H;J-He2GO!@M7;7~qX1F7c}#na&{tDwcNdg+a*<%RtEE{@ z^Na9UtsU{24o-WEy}F!&&K}Jwobtx_jEgBtKfq0%>GVvBkp@5VIAQAv5b}x?52PKb z6|Yj_k^ncvv=W|}7C3U(sB$584_UmL@0i_uh30S{a<(;drL8dO54_L0o_M2Z65wwf zmSr}AlVRG3=}s9!-btq@<{=9~)XY@Zbbw{wzOtL)ur8~G!LUuEL6vrF)@F`IPK6$OSQGwOQ$A{maJ1mqb4=lpQkHED1D=tA|E@ z8nTy;OY$&)P=|~)knOZXj3R=BD&|_-*3A?$VU2wspivH6;O9@qnRd3oh5I%AV*{Bm z$v^nhSt8`BEZjSzqja+3l*FFfdEe?y)AeVy$WFL4zphRB)_G;UeULL6#%`DP;`_K6 z^GL8u6emeeHx=8U&2a)Mxo1nBXQH3#S75PnF~0ef8qM09sNj$}P;F;Bpm4{0&>_dF zH1Jas|6EyGqtQe` zwDYD^A5zeoR29H*mxv^ZrHnwfxu@f+KU@(3+eQtsqk^@|_gnaT)|YjZB_2LMPHc32 z?W`YhXU$_0VF=Zs%Ig%8f=_7TPwtJYWAU}NFe{TCB*S{a0v28_Qc;YChVGp~k?pG< zL#uZL_s?w|yz7-ah&hi~g3mA0<||V695kMk&lb|ff1@5gEre+R=Oif=>2PP9>+y+)|QJy3k!$`OaEEgq#3m zD&0@L3tN|!Lq2+Qe+QsE;HR24^`i04UXh$ap-yTtc zMdy6lq06T7B}1;NV0q;FJtlbM@LXh1*TAnyHwJ(dsjfL}>QMEw&?1j7=+0j1H;@Hb 
zw(PNR(cXG6@Y$U-#S`%4Xppt1qkiLbZ>)Q7G8ydAY)vmS>&NMr1xMwUX+cd>_I$R z!JJc;Y21i5geoT=XKh++&}GJ@*-Uvy3xO+qra#>wJG)!(?oW24Cg&$_P9xPqsKoFl zc*Vgo7TkH_%yQQ!HcF3RkZmkIFn&Pvww^fO1OJ=0MzQa2Kz2tPOCStetIZOn*uw~Z z5}T#>>dtxv72@DB%AAf=Z=3=Fv$;)X*i!zVOW2{Clby98=4BOaJ0thoZvc*6*Smx6 zJoLppqAa!xg7^*J<2crE&EnK+H$SzLlQ9<>alF78wAK)fG(j@i0L%?*rG(KC8-+cli&$3AbmTvgd_WpFsR`8&`_<`)aXwuCH z78d!1tQk|0xZc1`m7bO_Lpg%)!9!iDH($M)g!{Hj28_%Kb14S!J+&^IP{c5`%_pwx z#jPzahBD7?TJw#4kimFlxCHy8Pn8S~1+C9a(4Jo;X(ZsH+0Mc1WB?`q+VBSHe6~C(&2+?@Jb8-&|Q%t_Tiq zo-3b<+s-VpNHjYeGK&}#==A&h$NkNLG9>M^$F_Pwynd6W`9J&`0G|@6k2}aR#qGsM z8OzGr>4p-wY6LEFRzOr#%S}Vj-7+f`R0Y~+crEq2G0%@*RHR%lPwetck)cx@w^AAe zhdSGR1@6I0bKsV)m!!9N$lxuO-+l}i-spG6zcAkF0g^YhDSewgdrc|1W};HRhII9u z3x4jlcg(|8dXyYI4?y_H-1au;j6sq*r?^HE8f{j#WCbfV=wssiu7!{*y(H~p5`_}b~t}W~t zU|y=XOVZ9*b#fX}4F~+91eU8t?*OoW}*gQ;zGcyDf924p809G)Q(-T}F!(o)D@Fp&O ziEAh7^TLee^{`Qma^UoNT%=43A#GG=&%|lJRLk1dPAXT_u4u#HCdlhom{PEJrcAU} zTuWAutC!MXG1T<*griCJlf(zah^^&ODlLREf-i1IYtut4HrsRa9M{%8{^TEyWK{}pCz{JeUbU6Rt&CtJNVIRi*kylTH{;F ztKDDV#VuZ5ph`PHzA0%X;sGwFz7j{5=t}8+3tOjM4o%*Hrn8$sKf8S#_0Q`1q1#YL zV_IGNT5W4RE&;8+=Itno#6%5|+|L(3g=a~qA< z88;8cHx_s1OM}Bg`x2g8$tuOGY_F6K7E3z~th4h2B9gUcxF!~>gq!v=f%tj!+M~;U z7Q{k{hq09H#g5wcZ}m}rHkh9Y*{p>hIeE6@FE>o~>|#X0%U^4E0;0bAK4as@+XMG7 zR;Ml>9@|Z{_&A_IG9br9c-#|X5=5G$eO#lVhR2f81*w=cikh3|Mto+msmnPrJ-s&k zUW@MYCSoUDO7@c9e)Uv|XPLH8pu2m0pqq>wN~~YBQrzb6ek9!!!3i$jC8`QSCQ;k^ zKk-?!+51nJZ%&JeH81VRm9rEg9NF_&uL?zaRzePIwOX`d3)LAOol4@fH_|QOaPMcK z@7GH#+O@_>h?a_$9aMKP2+4DZ~~G7xs4S&(bMg*Eyznr2jZYoeM}tOVl;-uG}T zmSxUza|qWqaA-cqLZ6$n*h^sA59aiBE6J;b=ap>Y}7Ui zuhf&O@3#?YWc?an7t&K3RCLheoUkjv1JQu(oX;E0RY#i=#soEmJHw?9KCdm?n(sb3|C=JkpCh zJA255%j-U9Lui5@+D3}as-UU)jCukV31<23SZ@<$M!CSXbb z9?!oKcE|&TQP*hmyye@_z$AIyQ6-*&v(@R?NWj;yAoQIS0Cwu z#wMYG2nc;F#izT-zSwByqGa9n&?+ZiResNe zOlHv-K&xh(gS@nI?4)VHiE`sj5!++?@?(#12y88sFU(M-JhABo!BYF>u8g`jnO z+$N2p{-$(dK&M!!XD_vp9sD&VGWmPX)hCQ$>pX{vobpFsnU6xJ3*`PEv6qYLgK;%c z;n{$-7|owbz9Ge`!k1MtnyDSQH8p=0Hxa`cbXK{hw1qQ6ie*_AaCFUx;FrjasR6fWn6BvIo=YehlX6 
zI9+9ayxrd**WyYDlh;|wKefFY5(R%%AfR!6K`KXijGxD-C7B(lu&+&4cSB_vz8O1* zYl%h*6Rm#NWbBEI-^{;QNm2_N2DdqRNVS{%o2`9_ugbW?;NaUlB4o0MwS>h~z>Dhc z(zRTvl{WM~QdIMRX6hcc>4m@jv9!ZYxT=6d*>XVuA9~Lx{9m1b$q*w8&bflIt{>j zu=$&VZS)$yLQ0x0Aie2^HZRgGC9xhx;VR%T-YT8dGumIJxx_+j>7eq-yoM^X5fOJu zUA*;OA#sMTW~;F>F}~IWdy!})cwJP+N4o zEw91r)7b+c8mx`iWcTf631e@pl?{(l8?3%El+`~DG4LNnM0f&r&ofwJdM~^N6df8m z&Y9syN&0VI%pfR#M_n3QD82`7)OmTZ6lO8B#yWiNLbLrW7Y(*!`8ukygV|Vp-uxuR zw);i9iSJLGG>XDkI$kPsNP-nE8{*c5Okla#m}S4=HMO3FYqhGnyL3m=z`HRV1qp_6oSUowrl`6h*N|vgbM#9-3qgDQaPdG*sTfM7EMWt?UyGSkt z9nkKZnFcFDHPHC*o7~T{#c`>yR-U$DHewqM-9{udO*-uiuGP;o&NWeIQ|WsBZXW_( zW%u~mlRN5yQ)$0bk^5v+j!UzvEEZ5D=nn*}-P!6Pt|!_}>)h~rT%OzMRtSlBe#}2) zl^hzXvH&;cGHz&JO+ixYDdY0t&IiAS7UQ@w=w!EHW}xH8po`f!4xlm66Ir!7cCy#A z3Op5w6d|vrVhlXIPZfl;yM`SC>n0M)nolJmp~-VXf=Hqez$$r^K~36wT! z9e8XTF&8o%%n1+9G1~9f=uEF|RECyj6SX}d{K?_*C2i|ya6t}lu9;gV=)qesXx8n% z6E0+P_X#@rOD$Z19n`)>8gEN*fSdfFR$xJN62aF?FZopOMkdrKD%Gpo7Bu@4T3u|@ z>2uCzE_HSDE?qfCYlNoSRs1|285{RGO3vJoQi~IUF>S(_SV44~b+hW`!gyNVr`ngf z;5*PI36p9C*K`O%qj=pru(zi5t@+k+bOC`wR|NvT+`)W(B)YEYGgedY^>!{xp0Q25 zUeA`NOzb|KCOoGpNDe*QkZKYPUN3EllY4&-fA63V-)_+b2ojZK+ww!#C0I6DkD{8n z6iDP-z^mW@Ucu<{vY1Kr1XY;Vg7{8}GAA_k@dTTH91SS^R_9kA+jx7PFC(_?Oyc38 z%ST_t;|oE`8u^Tz@r|4C>^`~|YpszNy4bBkCh@6i*6)1$rC_J0&pSsWam>^S=w0cV z-*%O{{Yk;-*5a2qdD~L)fQ~$5)sJifbo;E@@f$X}5{M~{POOt%83Xj-ja~fB=n~tY zkpyur?IK8%7cHz2#`w_snL_f!FAL^pM80UY>qOSpv?`^e*)^cJ}_@zz9$W3C;TyfVi!SYqpUHiPu)-Il+?X686rt` z0TbJbbT(-|J+UU{a@&W+ z*J9W=3Pbp(#Atx$h+fUSaIbe3b+2iT&m>m&ig>P z!jdJ>aeZ?!mDHCX<`QtYIJNjVIPtNfjKMT>qVEeAU;H-pxS3Nj_KGNIEjDM{-QCUI z4tVPE`79UPD+Zqp=4lAGMP@JBrqYlTe{@KO9TyUhy>V7zVO!K<_J&r`9U4aS+c>DuR^M%=H$_WO47pH2$ant=R{ zYqlNqH~Kcv&Iy+@UU)ObX;<+UgN7%Su6+hPdb5|=4il|+@=;OaZQ)*SWsPT}A-KX} zHYP(~qb_}gzny8f*H)ZWq?-1{-snyP4+nNyGf(`$TO-mmA+C!Zpap#PQcbd6^js;U z@t9Ss^*8;BBsDGDE)lR-e#jBZtkA}!3~BO!y{Aoob1hzU!NN+`^1^O6jTT0J(|hBz zkbp%ZkWVJ)_=)sDU{v^7t)r2+TR^uSxkAQIspf~ zBA3-Z-Ym~4E6?P_5J<)^x;gpVWZ<4c=`jq-5T2^7cJ*7D=}KguGuZ>>#LA$rk^NFm 
z-c5d`6hm6kb9;G{)&=vb?Ripe%twHmj>Mr%Mw>Tca7pDMo587zO!sxg_U51#@%>#r z;#sZY!mh}ab4?x4a`R)b*%O!RyygklZP@+qz}bKz{A8em`_;@MAspKK4b8H==9XJ+NMrfG40Ho-*>pP=3TK(!H~ zJJ*Ir=&|LTNVJLJsx|Qw_OUaBH``B>rha_f6Aa+dMu|{n_xrCW<4+|e`mt8ne;hk& zNqwEN6rs`SMecpC$PO_v3C+EqCmJ0-F3>dCF!TPXknxRe<~EL9CcksG(d=@4yRThH zU};~^5ZD6mcH7rrm;wIOy{c!%86-rr@46G&#b2y%h00RZliUBE905nc%&62`WC z9%AEhD+Y094?G$RS==C)NKx?BxCnNeHdfg6@@CC1HlSt^-W`k?_Dig0EAn9kHNh zW6y;S=23a|;0T>9P!TIZ|BN5Qx%{WB-D`iC8kCEP&nD)QPM1n2wG?*KQZUvwK-w(fDEY>!4Uy*~DAc+4TIz zV-M)|pscCV^oZ~V(p(T}dR(2Lg^2uSamFyh!Mqgq6+ma?{cr)(a5cV=Q{j#+!tV2- zJUr30eOJXkALMwgcM`?2nPGAH8aC`)VdXV?^Efq9L#F@If!l6T&`1Ty0*$RKR-74) zcugCD9{%80*m;y5IR}8OZ@(iS|Eh@YU0e^`t*dTuNExPBqv&GdwJj$R=5WieHnHM9vFih|$YJVK1fgjk1oHlV{1DJ4 zDoV|ZCQ+Q|CWl1A0Q>p+z1ip(IgK-r1L@e0s=xXsScNo3Hc zUw|i;@H|0cbrd$oBD*RnW`9ocjpx=$&agu>VKb8firzKFe4W~C^4jwB;A4`~Qlrv% zkX_E3Ip-&SqcDN)YU1nnGrS;nE1x<|Dt$~&3Vw-d|Ko-hpET%cV#8F{S9%wg+0hRJ ztj~3bXh&?9xtGmy0`qih;oP4Bsm$jHkc3ZYu+w^OH5Jt62^m^Ge9pQY!zl!cN0#NK zq^)=9iUcMjHK47aNprq)^Xnssc2T&hLPR{R0t6$hBJzPvk4QNa)=)Ekon@t@a;gv; zrU$TR!a^O^E2gvFA!w>gBH7Boj&K(Eh+`fLG3Ec#o|I+l=$HJ+rjH9ZS1M*K<50uu zem6u(s{-?8KJyY2Pv?r|e)a8P6G8O{H0AUzKe54aH)~>e#ze}&_qz%56iTiobS$^W zchSxWw{%#AzV$^$5jYx0@}~vZF;lKva_bntfV;OFG%As*1asLG$tx%8OxU|)tNtPR z+MCv0YT2)tP?)Bt1WH+N#kSxF1SH;HDGl9VRqDZUUr)iF$iQ=)*`{1}po>B@%JyKl zf7cK}C1MdbpaE5?zSXTnV&*D7W9(+crv8J#VxiS_jO8|#gv>4UqUPT2()4nH#3Mu5 z)SJEtOX8ddvLX)LY?PhM`^MZZp_qx^*&0X*4;&2^6C&f5<~X*S41DavSkZG9bR5Ax z9}n3uNv3|*a`EyW5@V_WISry^tqJx%9wR5qe{iwZ5%=$ajo!ZP^_mt7#Hj48z0ht` zq9R{q`WXu{*Z|IHN7HIVqOjfK_OG^7O}{fJx0jUmDI)_!)C~_Rk2^ zKCVE?qMvwMKA$YTD+wq02mTJ|cue63(@4g1e_z!0aRF-Fxp~>v+nruTj03w5@egQ0 zwabq$UpDWgKX{{zNEwBB@`Q^FQLdw^DGLRJLlsd3QWCZ=s6{WiJ0FqtKv7gu3a(?7 z`fE~KQ(=~dk;!OFJM=u$jbd_La=UdlhlxH?7dOeQB*4>^ZD}W_m;QpB8yveul?HhD ziseh@iQH4}D-&(RExsuv--+3-%4kx5ri2Tel=$&JLVT5+vJJ-Xjq^B%0LV^0O|(gw zzCA9Mr3j0V116=SAV3xGjEY--R(>jQ6}STF(++Q~>He`^}DeMg)%N%C%VQ zhR#x`BdFxPpX7}s747ux^IWN_WQt{fxI~LV_9%uPk 
zE3X>OD-beJYL^hnXQE3C^kZ7_gk>mSImd;4!&c!Z7yXjn?e{Hnb-0j<>5Zl5f^CHe z_3k(|=sb03P)nj>6-oTh9GM49JSWbTeb}7Rw5&2-6sM*#wN4?-OiW(tP|&paMQ)BF zu$}3t18JHvT97dI(O6Q+1LQC28<0Fka^$ABhT1;H?!E~y=&KPfJ?xcD;Q_|jJq1|t z=SP^-nzwI*?ZSHzMOW%0(X1ULsUpLidx8SbqT*L7=@-lp57K>3{lWL5K|v=TC@r6! z4!kAp*!EY%B}W8ZbiC@7MhV*@q|@UXA5mv?*{#Dtj{wE2^u)YIcBImYwm`cR|3NI| zY+bgZAxyY=EF0vbQK6QimV@rtb+aep@SkC;Z1;{BW`|&+h)0HM;Y>b`B&uDEQj3uW z3CHIcPSqTGKL-80d5mD$koj=y-@R2YD(j*0ql0-IERIE(gxzSE2+q06@d2C?q?mj) z#AZA7*rr-@XR?Ps;o`4`u|i$6vY_)#5mb?W?b!#^+<|7`s3sIx`dyTpDVzeWs*3>h zo#grWD`LR{p6=1Mp@Y4mN2Yp+SC;2CC5$owGBfFQktE`j5TT-l=O+}nncaI150F)vm+_T;U*zhw zvuA%B)c%%YCh81BgsOb+Jtaw$ilZ_0&Z7bR0V&w%5jx!IExGNBCq<{)DBX5Kpf5<` zGqY0w)VidfDo@aH)hUb;3HHlYkY$w8YZeXKB)|}Xngx%G?*myE945(P1|r&Gixv;E zRV5vb!l%H^IXvS(bY!0}^Dt4x9^Jw_5C@hPDJnIi=!~TiLUztCOrGg^MAF`tp|&3h zBny90r|%0VR@vwYa`Co}glxmmRIK9HKXi$Yx8lO%>qZ^+j<38wmZsqABsF- z;0zfbGeT&-@53+|wUwwXd8oRb#VOo?-PMg=cy( zcZasw^u-kX4A4Y0$nLDtI_;{hs8+R+d3+p1CU>Xu&N&!tlN>MRl2`+)XW^vR?jpYd zcr&4PeIy%Ehy)gq9QSok`jDKQjyZ)Gn|?LB@;6!Y!Oj64_r{X$)R9PhVT~&Cj;>;> zDYl|lb-|AW zy`yd;r2K-ZBc-&N2*Bbyjx|dcWw0yUt%yzp=G~yi-AB}yfGmVI0{V0uQLoo!%Nb4; zY|6`@WK&y9wkV6j4ucNN3MV)y5 z?2a~Ze0o|){OEsedt=Wqaz#Y{2)CI83KdfM8wmXL6FgsVMnCjv7bWyI*4xD1o(?l8)1%Bf|>TbwM zQ%eke@9N&G zah$(qrm-m2(ut>1RU-9o@`cX1$n^<2>htuhg=PBWbN2DGE=F35;JB8y2FZTl&BKa@ z8K)PN6yM&miN3GRe^RhWx*K)63bEu5*+m}!HgI**nNhK*RyopSz60>)qnTpNoH!h- zB?gsd$A;{hAVlwV)0oQ=q4d78IiZPqmbkHn6Qjm9uI}R$8h;kkFH$FZSK#&iQ~0n+ z0Q<}2qAdCxCR!AB?y$csvUc=OF-}RfQEGT1l477GG5jtwYZ{KU@^c3;oRb_M4Hcy! 
zAm&>9EPVG%H|~hz`i$17i#9>vtwbBs9S@ZBBagMbjhJ$^V zBq+N=Ej1tSfb730xE9fgXH<$Mb$}7hD#c%@Ka8`pX(WiC9G&YRcrTndKwse&y&`6P z;Eb*J=FOC3+FDvt`(ly_`!Xf+-n|~}qB<-<03-ApKUPVx1KL3dN%v-|J?vg71F#9N zPrL7=SRy6JO*pU)16c?^_5fv5Ko?gV8vU0og+G%?;?l`jF1&knAc# ze`4mKRXLbR9VoHV8}Ed6%Tu!4%V`+y#x8zZG4vQME)o|;(SqLcb&fvy4Nb3<%b9BA z(*QOX?~xwheVJYbs3|~?K41prh4x#~-V?4;jGgTKL6l7~HJ1iH@BWCq@_PO8*Ixwx=qa%QW5Yt(n`xO&a!kgMskR%L8D1uMUiAYuO5vWo_|6_@J)Z$_iy;DAdd?x z6fR8JnHLj}26faOZ(sVwQ{p*;2uWdbZhR7y(MgP!$bOZ$FIkw}hT)0a1eM9~t7Kpr zR|)%dV~Vg23LXS%-=+Ayj(+hSP>?7_X>&S~0+_ZS^Tm+7SZau&F{K}NBPasrZ{qMO zi~Xr^^)fNTh<@c%FYe{5rLp^kf+Et#tAT;QfTK@OT*MaXKf8@WFh(3Sn{cb~kX824 zCooQG-RCW$eJKH(h zEhj6d)q&dwLB-KJwq-%kqJ->;*q+k}L!UZoHbr7f00xcn7PoYODx-nhNUVJHtW5Nd;V{szpea^#UFE33-89t>J1bHO=!B zwgP_w39P>X6s`=^@^q7{%0%1#4sLGkxc2;;@%~_cL68A0TPf1!!p1T~|JmZLJCOG( zheZ>Cei)@ey?72If;zbPRAYMiu|lvI03>KOE|lnKV{e-kH$j<2ts%yhMxo|FLybKL zob%{FRb8_koUT_5domB*{mDaf@2uC?xlkay5!@(L=~m}R88RAri3=YgKtUKW$b!1V ze!FxaTGifgwKMc_jxij+xfg~(KXg{qj>VSI<`cyKrGHis9R*W$m3Eh^4ej%h7&|BG zF3ZO_Lr`zG&u5NDd@qzKr}jSWfpe1WtJX$f(=b}zr7IX*6$De&Dp5w^x#*nuBL~0j!=;6OZo5i@YO0RW7tv5DPbP75 z4%k9_rU6e%8@A}Xt1y*uE|OMLz?#hfx4x|ktA)H@rGN#(_h0&Y1ycZ=K4KXi6C)j= zeoNd#t#%95stw4YwrYWJdNQ*UcC@FxYWfqn>gFESeJzrM#Gk|Bzh*clS7JAf_W6=M zUXa4%D1_t0a()?#ZO=KtO%yYG*WIR??c?L6ab}zYj%%&77T$m!DlI? z-;2JjI#_u;rhOQ5+P%`pK-UZsYy^XngAM!q|6r@NO1eks4o4Zm^_Nk#}(@H@Aci&?cZF);B zljQ*GI%KHX!P~>Kc2E9A55wTQrKPrY1Ph_MWu)S1CO?{C7@t+5c$+??{bl$Zi<_Ni z2m9|aXY3|@18x^<+e90}&&2m%0xfK#5Js=I>OBV8YeeokR`<0+rL=Rx)*)?))MDUA zH%1T0-sVZrDv=AsF&^Jl=GK*0VOOCq3>D~|pwObo=p8x}T8SZ`UI?JSFM}NXha+JY zuHs`Osi~?l(E5mpmROCMRO>ElroksNXM2o<>2$XVl8HM>YvP8>k*b=INnl$2)pwyL z>HFn}S2xUk(3WgVlNWDvB=ZTQaww$i=d4qg5Fvu_W`oa!_>_7%!=pFa0Ywl`bdI!i zSr7ZT&GeNN)&e)=G$Wq^{O6RO%x0lc8=#d!LhZ3~vHH>BoYHI!O7G|=FS^obti$ak z+l$z)?J5^}mAi*RDeuA@6FN^+ifD!VJyPp zCv>nt%qu6^4Qm!OCmQ`h5kdSF<+71lxC7y`ovQf_yyPbTew>mro{ItleXl*tV~i&rtqiG&`7K{Zv|8O!z}EF5^zfii zn2M;@=4!IRSWlNZ?|MLh)i*;HRgB~FP3!g6IluE?Vr$erY8G6s^Mp))$1KZ2_K@Pqbg? 
z5vMLQyX{i;Xem^8q5=BwOMhZX{0er^JkHyA+W%16Dgmxke2m6iI83QO7s4_`KvNJ* zxM+o~0)Izm_S>mO3ZH|^+1{{Xy3u&9FuSYHEEP+1n%VCH=TL?>+fMvFIrn&iBm&|; zX2I{@vi$W z{gdBQiRi)WSlkmR_NuFCWO=>%q>uN2K8j#V^Vwbg=Ann zbGCNh_XhxHaKb&eOy>Rfq5%3;ce2y30)_P^u(DDJY87UIm0m9+2L<`u=*?bU7^N7< zn1^Hm>(N)ubT3Tel5$khqaheS&m+UpEPhHTt_WqG%pm4O{>CZ-{ULl8imJb}8>W_X zjADcXU!Vt3D2RyY%1Iie4w97^sV8;NT$(U!7k*Pfmzk5Y+%*ELF4Kddh0vE_YaNRd zds#68)wL9;4_tzm?-{|SUKyAnu&7BeD+jy+R3RzoRC2dppg(VIVQ+0>+a6v5{nCZS zkUl`H=?JkTXm_f1k)8<(lA(Y4V?oFutH=NI#XJqf4y^-ct^aqDfB&T?j~YTOF=UWa z@qc3r8Ka))&(B+iq*M;E@c*kAVX+WmG*U00y8olJ|0c>8MF{99q2sMA`hO6Egdale z0QjbzJXij8@~jN8`sUK*!oOE$IOJN3gtl+icwxk&0CJS+0zknX&lP!nW4=L{UH`_jBF2dh+7piOom`}M+-5!oR%$rYMQQAq{ zJHS5e**9Iq{O9*spwsIK8$oxnJKj?p4F;RVZ3~Yx{?+KMT&ntFx_tITY7T*vzx&Y76~+h`+Lx z{}o#h1oRkRuDV&%9-Q^~huFo!yw!XV5d|O`_$v$8*CS`54kF-w)_v9?LiuT)XXs7q ze%300^`h&hO@HC>xWjOxRllnPoL%MBHs~g(fA3P+QRimJ*mhw%$$HVDxdGu^m;K`I z4!(Gpuvj{~p6#;t`AX1_lO9KR=OD z!?&I}&GWM(_y5_GwvH%_B4`;rR!yg?%o5ZR=t1$?`5A^kGdG_53jqH7k<`il^a2Z* z0t9C*fBh>L`mY1MQ!;CQ2$Ry$DZ;_Wmy5#7o5>Y>87VvR`Co4Gf%1z<(un*tfVR@8 z*yT3r=WU?NmEDrMv>OrgI1zWe=R?jvwyFN%?mfW2v4W_^m5-OBu%tw*w|5Sx0`pTx z<9`Z7J(yDd)5LrOt=Ew4%tEg(%B{8QW_5yqm@&mi9cedG#(2fm^zc|}>n|(X{}N7y z0Ls;Ymo6F&8C)}vae9$04xR%V&rU1p|@}ChXz%LCFlh*vboJ?DA8Ng*|u^c#_`Tr#}{3E#L zDq$LVmf@zI>~+SFkC3V~|E>*Kr)}qzp|ee8?E?9j!Op02m2K82u}=dT{}~J)nvqI^ z3gG+aAzJzVziCAwcZo4RNA>MTHP2)UWHFd#yDLvsU59zo){sKP4lI`~aNvk^honv# zOZ`2z@#Np0*x>;(KMPwWnYIE-sHwLGOjrLWP5iwG2omi9@vzCq^dJOOa(g%+;UwV= zAL@1`{jwU}J2r@u6_pTSkW!ZsSE}p4715rs_ej;RuqE0U8J%*m zNJmt{AYuk=T8Dm&-%P_P%~(b9Ftw&y#RC1!Vu0CulX5L#nU%6%R!n;vRT72ziQ&}d z3M(rAl3Ru-fd6bOH>g*Qkx?nX_~eeOJ)565wqd{OMdQCD&@BflBOqI*N{gR($m;GI}mdJ*uu|Ca(S!%-<&wlZz|M_-sE$s$mYUJ?$OR)tT%uOr zDB$$Oi~V)SkoM#e{9)()Uw|B}?odVaspZs`ZJ%ZR2AT6vG#O{t4N^kv@=o=CSWE!* zUsyw;CLIQcey^_HQG`iSm>c$=Ex3dq>fCgKiNW9BWfum;U*nsi4T&P`(bfYW2t5rQ zH8x&9>@$vAyO-C`o*q5)xAJZtmLJWahD@vyROpajQrE`^k62NNh4h|lOo~F1mfqdF zJla3nzDT>Y$?Nc_9O~OSEOP$~1GfBCHZi7hEJ`z#nITtT0ZK^rzm)QCAThuR$)Wg@ 
zTI(&!kvd(kuDx-mN-^RT@M?eIVn5{!*;MurIKz}%{&lx_mYKnw$pw{2&SsOX`i~_g z?UxML>+J^89JcWz`{*N=_J_H9zTC&gc7Un$j*OGVoZZa0$RB^Orx?;4T|n7Vz4cA& zO7dS_Y<>B=poD4;$$wn_#Tx%ep8s^j>OGqaOlvXn&|>GJUM@ zl4yf-OCnx1L^`w!f?3&2&47-HEp>7{c}w(dQ$pV=!2g%N=HUN^gv-7411_?2tFlb9 zGiTqTUuLo@Dz(8OA)kZbK{f!ThJQHjf1qp!bcVFec?@qNrfcymoB$%k#7$)g^{C{k{V|<+3`ac|_anh!- zZQHilSdALnjcr>K+qP}nNu$P2{?onBKF{x*=X}TI<3W*Ui13=A1mnN&Dha zFc+tmLP%fXGsnS}b#h06gWFkL6pZ@#03%{bIt+L;x7S>gMi!kP%J(NO;}GAB$fUr{ zp>Q-EH=&`9c0 zGRNwkJHJ-qVlPkZJsSaL*0493Cz}k!it=M!A}uNZ3Gq_hmcW~DD{NAfZQn+dRaxfW zml@}+{(t`{1u$q-jkFpuKG`(dKCUfTVaFG6xZ^}F$)7?yCxLAT{AJl+#OyOrSJkaW zTT8_5u+@h1Ki_Qz8$3;A`e1JOQ?Ke$0U_0kL?b|N&|M+w?>La93wdtuJOVS)|J;Ya zPiD8^&yR^$qhCa+T_&A&wvJbq#?H*#VaMjUqza#Wua3_mnfSi@O`FerY1Tkr4^DO%B z$M`F#;0XK_JDbvnd9@m{u{Rnz-Jd@scV~gXQ&6PGxq~nxdG}gUdLnYGWU-jm0Kg;EfD}l7EZ*5RFcbddDrOu9t|bi$8cW!uzUMpUZ{wC3eoELGn94AbH>cojD)KF#VB?O8;+|#4f4M_UyI~F#hmM``OZks&d5_PA|ku%^M4hi z%_tONVM&;w*~8bjU+e|@A?@w@1)OY~8B5FbTG$Q=2BuIsIKi-kCDaTGc{Xkg%onT8 z10~Q4XoGtmrCaJJ2I92hsBg9fus5HqPY&@*h$g?p&7}i>=PK47TA>skAN~awyj;uR z0UK_7gJL1MH%So@df9SQFlm<(sHOL_9M={k8`5w6jyDCrOjWlP?a25NtZHH zm*xJi5|jru$IeW9y{(pzT2Ri~E)4#{_T0dR3O zEC`>l!c!~Zg+|OvM*bPz#Ax3jfwnqqKheo%bcJ>*DO@X4xx`z%1>qEdQ~?s~PfgyH z($qgiE`S%{P~qM2e6Y9~7?RIe6HhYEE+gjLspIo8O6uggxttPDlTGGBncUDK!1d(B z(8_8FpSg0sDkFLBQFVm~anoov0tm}okJM0qsNs0a;5>|tR z#*glkP0&(&_hfG|d}Rw@<05*;sk2i5z+&)<`75a*qZH2}BFc}KT|)y;9uLaAnrI?+XvWb^m@|76(o2Y!+|phVl)z^~DK4+uutJdX9bV>u@5&IIi{jN>hzpe1V-m znZ2j24a|%W^-_JC&~pIJDGD4yPx?*c*y+q(b^7y%vB2_)lF~OKe6ooogq8IDO*$9p z@WBb*MJHWAzQ}`KTSyFLbf1+~v%AaVJ2xG-Vxje;!Xym$gb1SQyfPR4ru6fC`Al$Y zR19)<_uC?Vew?@5n{u3ka!jvHjAq~61GS>!sr#iqDuv(Ts5qsA$x5XOs^3=9tsl!t z@K$DctVKDcchkSFoNu2$@Tn~rkp2`}JQe>`n}1zKAjDZdtWA!u|3E!ajPBgmX}9h$ zTkYM)#dzOm5W{T{laFSganvL6_Ssnjv$hT;^2%b|)qJf!^47{5b#a882_~q|!9d{F z(U$#gHiDH2ZFf?bAq($4vjAf0T|&!3B;)eJkvoq+j|zv_;be|uqtjfZ!0<5Z*|uIg z|6P(TBT6Q_pVeMlKbmVIzx3vreR1b(pML+BbFy@{{uCl-n{4Hpo0d-uWJ$QON|V8C zqh#6JA_XqNTuq_6U=c;JU9tB1GfyTG_9o%GasRRgK!YX;wFP>{ggL%5rov6@AZj>$ 
z!e)Z-B8yM=??`wLOoYK}N)+^deREF0fYLMfUp-lOWOq?|SJIagYJ+CDSS!x-1I<4X z?Rsmf`$$c+YY`RnVlsMSmEe*@CSI@tYSeA#tw68?3P>$Zq^~F^kWN*3=tRMB$~`x6 zs16d{&6gN>9fXD4@UqDXUp1}y#OgV}l0`dq$@dz-A7Z4^9UnN^3~SO(#f4K%@94h) z)+>-ZJ~XUPTq{J_EVYgn3Ed9RkH&xvVQl0IHsM((HrQx^J@KqZ=4a0c@eB7W0Wkhv z&8v;1%Y#nq2Z=T^6liqYwpz2LE!&Xs*dB(s2eDE<(|&kmjhPwskg3ARS7g#^X?G}G z;-#jJdZ8=(Ch_AXe*pqQa@bfdda4iF#B)A?d$L&h-{oKgbZB$Nk^W@30S2S-zQ8SNpn z?9el)5Riz2r``h)C!7A%txiXJ*XK!>00#l`_V9}RXdP)lxX)Ki_@b$ueGqOA2#Zas zrP+*%t+8y^rmTy*kVp30vih0ha)?wMepxyThgeOQ`L*qyMz*8#7_ z!Rowm86K5kog?0Z#F*_-V=g+DAmAp_=E7< zzy^hOi1u?=PY>Hy9@nZ&J!nj*d=s6~eEGxmSUygJwEBnegtmvU`mRiY+=m}6U1P`? zr|B*7iQX>63K33^ir3nb!(quoD3bdVSgV>bh|rs6C*b6R3)9gxJ|E46$)=Twn6~Y_ z0gYTNROsAfE3547d=4VJlH<4E#Z4~afnMoLygJg-V=;MohOlsO ziBJf)^yX~zZ-4iN{{}D2hc7?6j<06FP-?nEI{FXx%CwMPlW{T@7h#>f2ObE%+t|3` zbA1`8MJP$NKDMn`LAe-l`%Uj?+9f_+-G)&z#<#)(Cs#!{ixRZKXcrH_RJw&1^nMw{ zBa+oHm3jSKuRMiulsWjwJDm7|elbJwB(RkGf|+>&vXamS8T5vU8g?AzJh3{W7h;Hk zXo@|jvXDqz#DR2b5CrHEWcln)dY&WPYlPUp@%8dmD`@*0dDp!YqOi{zHO;>Np&UP?pUAV17d-b98QRjE&yzdu__y;y3Gwt&)l5^-zhDtd%zu9mU zBR*rGI@N`?+Yfj)Hw-KT_QKf`Oy3)-;2$wLr%PmxSsjvd zXu$r4&$3%3w3@x6a}j6AAz0Ww?IZUH(vq{VVse1!x~=HwJ4qO?W`1PYf`I}7jx4Ev z6FRX9b2R11*|wl*Pv|Ifb}Bye+tP``%~q`)@<>=2qdiz;|=hx2gs>){vLuo%Qd zO0x?f(MB2*$IWZSqo#~Nc(dm+nBF@d@2CG*)L_Ybe0|~H_{4m|p9C(T@IRWjC<`!0 z+wwIua%I#_VY(-2($T8S_8_w;L6jq*vu7Zz@(Ncs^?K!?PtXC*-TkqG>#ssRb^wZ@ zPC4mSjD+QB+S5|wI)u-37?2zhYx~Jr`jSQ_`DIixQ3QGj?W3Kf!_R$$wSt~zP0=cv z6dGz|Dc58g12JQbOP5#>0X$~XBH1(Twcy9t8WFgD`v*SeP>ri$?Lar3=trnHTAMa9G=XWU(Pmk&D+SxWjMa4)s7|CM5*BrDBx6liO zbtNxG_qpFE#%4lG|NQz5pNqdioZw5WT)q+6FlWVNtV5q1y=BT|5fZR2xNBP?bXxEV zmR;?tMf?@|=ReLt{HrVgB!8VfI%x%VC)+3L|L#=(s+6jeK@c3*eP8bmZT6i3IC+Yh zyX9U&FT)2FQ+?LPOL8WV&bBZN6B9tENo>Ih^}aWZjm^Xs;aqIxFKYndFJCh(n}}NI zBGXDWhizk5{u75YGx@k9gWm^q#}8=yu>ohY*9uK2!&a9*-FxA&OozUS@;!=-0(5>1 zEp0}AMEhL;y0|KoiXIhp!ALODdcpk{Yl_Prkj#@k->WGnvl$@h&~44 z{x0v^0UP2*LZS3Z!>6ynYAT+QySnY&5X$#t|Db3AMFDLbW*w3onn4OFd_vh>jJ|UP 
zAvz}v_VmI*d>uROqXT$jhnv;!`9;T46JlLV^vs(xh9soI46Sy!;&T(qVl8K;vyg7rWYNOCKk+L;VU#6 z&psKDUzWAL^+ad1{PyE!sTRx}a3KmIF1>{gJw4VJb+0&vA3qkG^(zCt$W3OYFM)rI zd2w{`EHW;wOB!p@288%_U>4mSP3}BawJrZZ7$zy(|Fh~*@B=>$Lc+YME40ILbmj}Y z;rL{6IQX`TJs~U>4H`<%4E-5P3k=cOr2pjbo$bAhuh_^PLw)w=bq{{wwz{)eZ#i+{ z(1f%G1)Y3HsZF-NYnt+O!4R>VLxR18^>Ze+3#+FtAc*C|;a);|BfhHigyVOR{~LP+ z&vJf+d@*!PDgoB^r}pZ-A?Eb1iioR<-a~vEDgxkLk-KSgTv&Bfrn@aUiXw_ov8nrH z$+N3ea{3lC7&tW2Ro&XcMKtozq$}ty{O^FH5)KSF?fpQERM&6T@IML?DG-Qe>etLj zd4gPQL5Dzh@YuXk;+$M8FwRZA; zJd1DMuvf5L0GO0_6EH`7t{2f!gmXG(v879Ani$A6A9h8ui`^}j`$BZUSlvf^#Ky(a z$VrGrvBXed(2p>|If8LBLeqXQ=&W#_wW*qx+x5!j5iRUfpAW=T-63+mo8~f ze~djFmuLQl2pYm=&&8n;Dp&bY>Gfp>8#kBR!Kq=1atJ@YL6RuFfs^2R

y1*Uu4| zzx5gOB*Ls|?aD(Ky5nY;o-$^}RWGcZ$PF2B<$FVn*yKuQ*56_Qfdyc(-4~fmE)Es` zn=9IV*}xbdtH&UI`oCzk>S&OUu~ex08sui{*=ow`=Pe5yoV}RE0cf{(aUqda-j3$ie*5%-5Owap@oyVO$;u-4>akkiGqu z=w8+!bPh<=jH?sVvmiXoLyEs7O&dslT(Zbk^V-uwuPNOzOc3@tNk!iXe$ELIa{I0VZsNA zd878qy*+2e6Kum{eJOhb=wukI#Ff3yOPRhI-y{ZfDw#P~2Gde~my(>=udhcvcXd1t z6_V7ffH{2*vpgQGfJ`SU?hj0N+%4-jP%a!{K&F;^&gd_^vlqwi@+qkp7k%Uhe>_PP z=WAv9fZjz_-bcPke}j*y`GM(WjDwkOS@1^}ZuHUi1p0$SqwGK!XMu)@57l$*Ax@tm z8`Kco-$eviyZWzmfF8x$FNbcb&r;^#=i!bbU{A!~b}x$4ca}euSR|X+t=gx8i{s>5 zd}B91aq~p)OcT-AR2(*Aa?aM9$K{{x61aw zq5?5Ig*l+gvW3Pgu>ZjoO9lQ=`WjhzEN5$3JUhR(t51&q$~gbRB!NKCviMIZ)r)B# zgtqud7;1P6v1PY8n*H1@c$5C?{UD$C|P*3>~BC9&<%JAbVcQFqSttxwlpv zsgveFUiPOC;z18ge2K)3gXMnYEOslwy5!WYpv|CMjP$BD&D%DH1D-ZNTy?k?p2+EC zUy9jr)k)L`>GVrVU~yp2aLpDt3hlzm#O0ZhRTlHWnqQ`L&eU(tJOt~QskozI%;~9S zzyiT>7)$wVzFBovR1xzT)baYiDJMwC=$gH zSmZl>xsP&fnDf}lv}#N9LJL{1uca#zk@_n#sM<#H7rwYyV|*bBE-r@-ct2v_Qz}Ht zLcYLmFMyADOT}A*s`ZCM$a0nMBwT}4DXOAkz;JBUw%IpeDIgqz^^Ea08bIs{fuml* zV>Y=wY>#rn$Y&77Qn~~()n4~aE+^9Up{Uz!87@|youDBJ?*MJv24QJDEyCxw7V|s@ zKX2mUGY)XuC^)q8v9d`w{J=>1n_`Rdg3r=`Dha~V2tJKJO`R?BUlhIV|Dfo%i%A8H z%!TB_B1FTY#XZ)Me2*3fdl*emA8Ki=0(s`0ngiGn`O4u(#pjFxmlJneplu5_=eAa< zIWLvH@K5+GxN<_bg9}!4T*j}dOlFXIjC_8>51E

@ygSbjH2IZ`aR^Oaz`FI1DeB zDy9KF?Nhd_D`F(M|B}CV3y&HOpGHFF@+B;Ai!PiVIN@bu$^y=hn_eTr1d4XS9%tdI>jcCaG z02Jw&x2g4FEK9_X zZxxei<&!u#=1z}mBIoH#C|<)Z4+a28ODOX6BLBuxBax#muu9E<1aDMfDh!HZJvRh}vf3QmlLk`0}FeH0Z@H+g{u0PCBXBmCh= zByLswV|nCs+9UR9EocUVED0JSCs+1!;C+cyT_MfNN7{3;Cpftxy-17>5}}9bb6KW@ zGAk72Y+9cp8$1e6=iS5C*vezysd69iSZvyTwc84wy{4jP9Qu6m!J!WDJ-1N1R_(>- z_P6JQ61=K6_&4OJz>}3$0o!&e=zL#35r}C$TSR+n{8Mke#AlsE(**#{QA8~FylLsV z%gma0qL7CZS|0q6kyS?E!-3H2Ty@q_NC^>#4sbBK;2Hp!SM8OTqGo9Wgs%Q!7rJEs z#Mxa(jpbmE=DMP3J}2Ad`v1fC|HT5G5$hMBNA+O^r)^YQ*6kYG_d2%Vw;2+r zH&QdVUzU}avXTlA?_h-xObKwMQOAT0n&TA4+#aXEqRuwA%;1};MD0}8^8Ln4-@%a> zM0C9HF3JZ`K}jxtSRENYS=@MH2AV~%E!Rvv%aF66h`kmrGkPHf^k=q>n?GjwZp@~~ zSR9q$AxXtUv2IaLge0-=z;J;YHG`qRsbUJ7M7VANT7A(+5hPkv7tCO=w)KLUf9)TV zj~*b0(NoaIs3UvLeVW0J!1=w%DZ03J?J_!MtF0Yq7RVfvSJK`ps{e}kXo`Lwi;{T} zqn|Bc6Qp_0#L_Om@`$1oZ^eF+1%~x3>9`2Br$JaHIgNjU?;WJQY@wfc7-UrFf4tIA zl_C*}%>aaR;joI11lv6rai~}xgZ0Zd$N6cY^3{0g$l3TME3G^v6Z@BS-bUk07vHBi z)PnRQJ<@l7T}CW0!I?Ky{s6v+;+yCjj=D%Mgye`^5`oAh@3K|ElPq5b{FSlr%pHDg zrzLX0bUN^z?%(-J+a4czlAnT2{Y-c8Is8sCKH?SGB}gKUe%E(JTf9O2B{;4j@heB4758vg)th#%X|%;mQSqGPSo~ zt_ds@ zVBDVexO&>KIy}`ePF_9$`P--IJ`T!Tk`@R>BEm?&cDA#cXnyMtozAs;a3W8<1ayzN zM8<;Y2!JNC?_|PI&RlQi>%AcON1N>`K(KCGkwcWHK(ltt{beL1sVUZ)0@*w$sMmbE zb7W%8d6ZhbXgnTB!=p@F=zlA@hA+tarOa%=lcz~%<5H3_^J+|4A7(6bMsj67jlqT0 zU`%j4Xvmb(J*)1zQw$`0mZ_8uIPYWSA6 z-H*H55y(d+Uh$l)7vFy}T}&Bd@``d^XR}U?bSE7Ri=u#+;q-_UEu~NQv_O@Vtm?~7 zvmQ^N_#Y>gXq3GXig}~t*_PU${ls9yO%Dw5L*?;tRTq^ zG^+9A7DILC5`HK)bSs7(hzYqw(H|agVJ@@bST3}2-W6tl6Z;o33lt(EUUIn66d)o- zYux7j-EIC&ObXzVZ%dK0h_jiHk9g?>3n{E46%sJIou*P9shw%_aW<L!=lmnjmxU}Zk@!&7P7fq%Ip1K zM0l2D7dkOHXtmHuw~uFG_q^ia-43 zMYen&7&sb;s2P4ZJUru1m!*Jr1as$ECuU8Da%fzlWj9znxTiMmfVXt^vDykTpqgb1 zheI7Vv3Jswrd-BmUKY=KI*jw5KgxD)-G{IHGrIW2_iyZvZ{;VfSCD;~akEv)2z0cb zJn}Fc;gl81WJi{Cz|3QZs^?fJySw-iZpDyChNEb;*-q`OQ>cs`KCjjRgnhJ-V|UMi zqn8BQ0#7DdyMN-xx5!oc_Jt@CKwDZhv55qV{gOgJTUzUAv%&l#a*Ii&D0R4@2y+*RO)4h#fHkS=cUk 
zSE@boZ;@J5D?`o#9@f1YIJ3)-{>B|_8DQqkp~ch3)6!P9`TyyHVgn9ISOIQI~<(rz*{-pUp2)z#Cl>dsqFtg zn38#`I@7wHQsl3yH2EEKFd^6sY2git~UOOG~2c$3Pr#IG!q$^suFS*|3hrVfd#8j z{wNz#4@FS*W_4ag8UA?~I7jE}Qdg+XaL4{=$PM)d?kR1$;n}q}Hg^Je?59&I7%}`G zcE&a@cQ(m;m5yAAx{-MFnc!T45oxPPFX9NEEqBnGVEIgQ>J7mLO{LJ0{x>@T8W0GP zgtjczk(kp^A^sObJ|2AgiLQ*qo_qpP9$0huel?rUKWB>H`l14_5dx0&I4?ArqZ>!K z;Yy6fh&$Ba&OE!&N|I8f3JaklGYwO~3Btf<%4Hu<$qkHDC1Zk415+=Fe8d3>kBeG!&_fo^E z;_HzEPiC7V%jV$A88MP$%i5{ijwfHoJ*$~FOS0vaM>jacq?b8h*+-YsT9^i*>HG3v*vKV|EsX6sQ9Z)%dBf^`xRqInxRJzN3g_*Z5 z^J&1kuYlBBIc3mNY`3Tu6S3c20*4K3vYRgCwPg6jJP$QI>>$(0M_OnHC)C0|X?^Ai zr}|#Ey0eg{q4V$j)cgmBt3f@k-d5Cv2-XqB#bJZ}Z}#v7MuC3E?TqaLeOb2B`o_nk z?fJ&;4FLdY@EpCytsQyVU<6~>KkRTSJ_dB5&e?8Fbj=B_xpmqk1aD!wSURXb&waGh zEIBqy$-!~%fDj)V9_NjV8h1}fbj`w1hLpNbyN>HzvYAJ&o7wVB8co!EYd+U--@bBo6s8v!%Xfe)jNBuPXRJ27rj z+g{;puscQf(OY2VdUiLg9Y1^~ZhB*&;GA=bYq3J0^k7JPrlIV&e$5!w5$Jjk26GR%G4D{hd&yzbQWg?g-*nVXcd)iZJ`6hJ; zm${$m*31r9)#%!}-P={wd?KT9OT`m6@&=RSv(W9UAw~o52`$aU;}DFc-s&MyNPf04 ze_^kDqpa`!*DDVJLR+q`(?65%BL!JB=tL$?mahZnLS{HEb4P?Wk_28aBK-V`n^AY%D(`4HpO~z?{C>nf z07UcgnCfp>X9uU+ZtWV_^RBgKL!z!%j&XtfQWV`^dV0^&|G%ETTT%Mw(+$4f4NruH z=}4dizhRMgZjBR>vlVl%k#Q0CC=s{CC%<#v>T62MfSppOx<)cuHH-`jr}LaxI&^b2 zGDDYJ1rGdHqoT&Pn$(sf%5qPWwy^?wN2F4Bz`iweTQ?=0x9sT$Gt>*d($|p(tkL;7NNX3`;N?Q&NB0Q#=xffEA4;(lIKOhRO)L2O6 zr2z@qobIWkhSQ(E#cVha^z2u#4q*&E;^nF28MNQ|&al;LLIc)YCd)D(WOi<5pVf1DR0K zAPfmBJ>Etagu4+l75u(~hep}!Rz{(UdF0009P0V`4SESK(zV0Fo&OXH%>j`) zWz5olq~Aw@RrtcS3?yqz*A$`4m_GzX#%uw*a8z1fC+#P1n4ooMY0&Iu7wDxr*J`>& z^mx~u00C^3?FgnG4R)Qlh{^0Ip$k}+0p@G=;Wr0xB0ugBe;FK(>Ym;-eniaM`Ha2U zk_0}_Tc?%M+E&VT+vsZ@&%j5vm;8{mMW`BF)%2Zr^S_ExP0AX5^#&XbmoHb$Qv8M< z8K;hEcznq52i%@gx+Hk~*{AYQM%DD`8 zlM%8K;C}0mKn+rpnv$el@^uu@!T_I#F4t}S+sq8%7J*l^QlPYSb@q`%=`ewN+$Ruy z#6?1w71jL5Dxp_`6+`_{OrS)_(Nc$XKoxXgwt?RLulRCP!t9UQ`T~1w95mh?ft{Mq zPjCFP&oY%va`AiAR^cVSx}q#%+#-bQpL&eeVkG=`K4IAM2gtz;Gw)Om67;*U57?pSZ#ANNj?0ZiS|SU|!{=#^z9sVUKYVo<}A9{zU(gPlv|%DHVO-Fm}3 zho$bXH@z#HezO4ZJ(S|M-r`JwGeA*wE4w>^BZOyU@lX8RitEuD`Fty%l_X^X2llfV 
z>^4z?xB`Qf_~VQfWe6)cj(DOsVuSh4MxmjIr#B(2eAyOCd-`VwhK%9@2{)BAb&3MW zz}-2+Y=_XvFuiSwSE$Q-VTqM5GbH20%;}8$D_biP1NB5eSaTg05h42!6+C#d|8eA+ zAg^MT-73;p2HM`GafingpQ9-2a8$Fh-Jw!^lAE}`GT?sTd`F2dY|&`BAc5Mli08=Aati=6~o=(@Agdq9%wCHXJi%{A?FGBJwN@ zP`ZWjJ1D`$u_Lg|#`iwxpEro;qatSxYlktB<0}g4b_vND(MT8YAmJE6LXv;c4;01FQgSEUoO#Q+95kFdI z*FeI>feF4P`$rnBX^*cx=zGbqz%=k0GSaw6I{i;A9}XkBAU;78 zXoR6M3`*aY7VsI?A%h4xVtco{e8PJktG*X|*uz3Sv+V1z)Kke8Eq#V;FlfQgL|{S( zE-?MdIqS;qENz5Xn@2V;s^}N5!a!BTvG+G>XH0J<+PQ#$lU`Qt(i>!vR~Ea?d)LX@ zOJQ{*?8WekGL-aK;_QCw__Ege1iT*a$CvlcQpXU6w2)Xj;+W?$qAO?hUx1Q0{|9{% zB_20wtgcJ*gNz;aIy09j;~_F>U8&N~1>Yt5y1%2^opUPLROz!W4N!?J$ZY)1jQ|dn z@3Q(*LChD5;$RbtpON-JtP~`4o}TM*BQ_4vf;V{tq$92@c&lmOo#!}FV1w1y$Y{%M z+j^dPFbJ=5XHG){qsD=wwMAxZ`(^x7Ubc(VQ9>6-76fQt({WOh&OA869HhS%|Pw*I!QJ$5DjHoRG6l22Fp^CR&o-_qE<}7BE8pfev43Dn#-Gwx9VhsGz#S9F_3GV%7=n|DBI7xF=_V=j{SonLJWDg{qUm z8By3BU49k%!s#(ntP4THh7|=ds;+k`0gh0>3h0D!{?3wtL{2;~rOFVdKA6Ep2sFdU zP6pI`DAgO`dqU-Z-t;xxp? z8}lulOU7Vg^T$xf2$fpJt1;o7%*X{Q@8^a19#qm+&dC#;;YGK~6U2=5oL)&knHklXm#aihakTtMCuuP zl_kP`7X#SLYTek6yP_QkTEQHxCjfP9r|9te=5Xp`Sp|$rU3SEjv~)>9-m(REv`5n5 zdfByPi+y$@xAqa%Tu_!dwNFRx1`zPLj1SunKZ$;EPqcbsJ2-}EPyhN+MdZE=%9T#m zbJBn|W1n2#0&?0~RB}4uUJD%))WFMVkOo1(x?#%^PxX8*DUz^d3}EJ=-_^$a*{OE4 zeFvS~S(eF9)I&yCpj8Zi?r5}P zW90O#6VG+QR+d&@;Pkf}8TAiVs^9p>h5KKA)9qrCZku-?9Dhy4p6X%ryJIz<1xP^9ZtA+B$-0+F=Ka<(h;C}?TL-H5Wv7A(C3tha_vzaCM zfICb>a#v7-=%(%@w!IG#)6PGk9E}Nx7;!`!ujl9566AfJ4D=6+g6B(jyV{a|g;5l2 zlC+_ZXq#lC=9(I`_7sD)&1678jqC>E92&CYl8mtk{>bKb-_7*m(`ZRu>g>L`u0Blx zFL2;A@uON&-}1YU{#=*n)*F1mQ_9xW)m-k$4sl)geN1IVrQ~3veQ=dj6(yvQuJ{&txJNOb=WAY>BwFIQH zX{Z#?XO07n`FzIZnU`T4lLZFZ?A z`qYI<8^0vO71(;T*0(hO2S1AT;@6I>s?>6<{OX0;{Oh$fsn|G49)E1)rg-g@W04Cq4we#Oh;o zw)HT&s*#FG%N&dK%fhS&D9O|@unWsq_9$3*LQi3AV;MBvNN{&C{{=b`U+o7TLtzGd%LFS-Bm}ySGAhj^&;48?t?y2a&kP zvGsoy4K4CO+dY8VOOE;CMJBJ;ylal~JYjRe(cgJ1dGc}y@*va=Qwh13mWUC&>%ydP z=L@H_hB-`wwH;piv5C7u*|lx3dKm8eN8*o}jQ8Q&`+2Q+T2IKc&VYNNAlOS?p>JSUp?vR1L`8uXy^4t2GUY|TPA|;hu>`J+)k&Z 
zgX@fGaH45t-Akcr(ObEb>pL_igTx}(XH-0cw|vQDp>AnYHmQ+UDZb!hKI)i~=wc9+ z0qHE2gYD(iT?_QZch+ghyrgI^W1?PD?N^fH==ylA*ut__Xj<_RO<<>O6}G;f-%v09 zd1xrzTS%%|^x8H01pt}h|8H7S{4Xnam?jYZBQ3uB4_|8(8yHJzXVAtg%QD}>-uqj` z#$X7X|A97O920a8D6>Xn#p9bJX7t;`*pKh3+#ofp%9?r7K=gkMA9`<6BCak%oEDhq zbZ={0QO5qbXT9g-BbOZX9dJUH=BvnHk;7R6x;_{=kF)*#iD7~U!y*Q8pb>(&ujgdG zQ6?+aU3%Y)Ql!-^Ysw7`!f^1fBivljpkI6=yJ;L}AN0oh$b_CFjvPX~v1X9ch2A4s zV<|~;rirUEc=dTsA=l@DudJW?6MT#2wuz6M%7fJcE1(q>-E1ymuMCfSJ>yuU@h+c` zoyVGfF0)5WFrppY{`R)#6q(Jm6OAy_2!u&Yl^7BcyzY^x_`EB~ABb}(oW17)$js}@ z;~Dbb5Em5Fm`lj89+D-FYeb;f;hpkw)j-h(+(FZ((F-qBXaGvl?Ozb8c|pH*2_ z)?dIA%25_i&{4td^k)|ey?AsYqU(o^V86U*?@^7dhKy7S*;gUt=Zxw(stgzV4>U_l z=h3g%DpTS)u&6Xo14%OP%Lx@CePh$rd>UFVG*Pc)O0@e`L`)R9`O$-l$pk|X#(VtJ|e3hpM3LgjA+ z-T|rpCj}9c;7UJr!dO*ejeNVH?IIq3D*KgywLLqrpx_5;SUPOSe9LN-$vVADSwfWK z!rKyZZ+*vjII-pH40ZO$dKP{NiKT%S3*#I*e|}pgtrCxp=gTEeuQ&ZN!Figd;{J%c zJ&phDb6*M07iRJ1Js%?=+cs=qPan6$CXW z>L3$vzwAJM%w{&|{a)8*PI3MI(3x%HVrqJ^{&bPKVmtYe34UD;Fir60%L@1AEe9Ke z2%Yi?|H_B~1AnPLiesUej9)x6u0rwDq}s;k zB%9|(!Y?1JMh!w7(ys_n6-D7c)?;=A$V4K4U_vM_$SNiXFM=;{ORo&}S1JOw2!M`3i}ESVqyTq{RI zT?%Q%<%hiMf&>W2mKLET3%EC=_pkz+>OnW>GPa8XacUMcsA%7I+^(?lv21MHK1dYt z_PNoX=03)E)47MAi8%Bi67(fh#&CzhWUG6xVp3kjanyL*kL-fs{Tri@TS#*-h-qja z0XLUXtCZP|ZeIb3CISsKH$9eX5m#rC?_U-^G56y#Ns|ub1KwATqK>x+jv9pdtlyHL zK$sog!5`T-I>^mCMjw@sk`c*SpYlQ zvp2+$Q~qDK=M1B0n9uxA87bLS5Bt!_@cLa$BUxIt}-&#GbSbQ zT`Ca41}yU)UqJt3`Euiej(#^5xB#HD9A2szi9*VzgA3?J_(FsjBryv1?tyGjY%pVz z)ybY?shOpmMQ_4O6Na+lfLL;?*fDIl@e6Dtd2881o zNWoYnHmE7W9@gkcYWw?K7;@qT<5h;a&M)G*ZOsR%ME8 z%8z|XM9^`CVg75}BIReA(}Cocp9RLA*SJ@qPhT*|oLB4I>?0+ z-r}JJT1c8+-h;&#vIIh=hDMFh-#sl;225L`czz@8C0@67cIRFYU~`@BRqIbo;DX7~9zH@H zC|H;|*?K+)T%7P-u>~4ORfcjMR#Ie8cSf`wn*yW)hYJ#+-`d1^g6~l+ZPX=sF)3i% zqC<8w2I&f0~U-8YMEHT+l?SOw+CXH|+Da|3OH4@OifZXB6^p=M_yn+*0 z75y&SD{R6tj1(h&@WZ@iiRA-yG zOz~eUCFCa&hYUnPefsAf#Z`%tw|_$XKcDYWFbZsY(aS34mx!A2-1B}r9fFkc_ruM2 zu5-eI_@6*m9YDN}wqGPc8N!2dZA8EMI?nL@bk*7c{SDuaNl*Y@exWi7sR<8a!vT}z 
zf%7d5G9q%S;-u8_n#>{R?SgD-Jz;}!^HR=-Y}<#BRiP}-4pc(?w-YqT(3>4JAYU39 z6M_?8*l@(Q9Rg4@NA`?uM?fiCEZu zKC{MUJOo;*1XXO}o-vYqP(OL75ouT_2Q9DWvR>twA|X)F|CZ9Mayx^Vzh&j*Q`7PC zQYI@d(h{%RZuXA46jTe!$qTDhXPtQTwbh$&*zIMX#blC--gbv=#gC9m>ezt=$>kkb zTm0sngcv@H}3Afad&rj4=zDB7F-hCgWJY6xCNJ>!CkX)2?Te7yFbo7=X>|v`)XBn zSM`rxe|q&CbIdWvq!LcK>PORtF3#;@aW%qF>NJopRzhP|6eg(h|KeYsJogEZSD1}Q zO5=hb8mSEBdgIss9iQ1vAkbdY$+)NS+!U~KgN#_ye`S|$zR@Wm7d){^%%n;?2H||F z3%DDd3#tbjyUX&9(`F*{KhgUkNH$bBCN>T3AABB&i91uL$(vO;kQ7T$qF2x+(!H@0 z&b$}L=CtRert4|)^7WytWei%|-D|N5rq8|{WX{^{ ze~P1>|1fgkqb{{o9dym<`iRX>3)&xho;9E8sZT?_hUpDm{}vg`Vu0IfWKnaHTn|3A zD2XX~Y8*%|C;R7{S0aOwbx9G@9`w|PmA}X+XJvx^pUaaCdM@1*hmf@hu$DT9h*+{~ zqu_W@zW(bA=S%d2B;R!?NAn4Kzln5EgqfR}T%8{rrW*?vcqPcMT)`6lndzR3G~8q6 ziFF-T!dRopMUWa7uqJi5IYZ?0@Fym=cjlo1%=(HK{}UQ>8O>mn8;UB8Ze{619^Om* z+g>Yrh6VzqqBZ7@H6Jh3aX5!bm*f<7DvKk49Q}egKM^|3t$`@77D&xEd5)hXVIa0MZQi5RkgDGt-JJ zl*L|G9K4|bcUTYGcCwZUWvn#1Nna9lf`D*x$_5~~9mE)dml%}&-OpEgu zM*UDW^oHNelR26#JHQ8Gv6eRK^)?-;3G+b2^-(wx?ydL?X9itHOEJ!pR*-|_PEXv6 z!-KhZLlAdfga+G3+WH6z)6|Mmp!FADxN|CXRN$EVxr9rYLz;P70MrE@`&sUbK6lL7 zo?qUrXz$f>IWf`JlR(fJA05OSp6E74LIVBks2_dMcK8eNaM1QWo_$Q0sBwBDmAvzaFX&k% zzQj6suJk^7&$yjNx4kHPMQLx0O@f}ANE01x@zFmA{OM^={^ETXwgPKQ=_n5EB1S!I z^1GgXu3h52W2O;MFszDMVzbcXzSqkLgqH4SJ zr901K+0ZuYFz+Z-_vs~bW7g;x%L#q2CL(4KA$gqo-9B%m7*Vy<j;V4>hk*^L@Hf%{>xH_OTaQ`~2pLkw?GnegFF;K{IpFuCx#@JnZx zxT&h+e)HF_G^70#v^6!qKESodRQwZl{vCV%bzS|ANV6*u`s(mi#rXuqZ1B=_==;ba)*y}HH^El71$Uj!Q0ryX^H10xFjU66=z1ieCLP2&} z4D!%kzO{puVg9nynEWn@_w$#j(}WRf=ozOaX3W8N+vn){t2$_)8E&6hUL@M_o~#lv z=0~z&T=@LF%=G+Uag=yb9TO!D|Uh2xSX&cbjq{@id7UsxQW& zjDycYh+AyI%Y57r@gPN!V6Av_^1U$7$Sk;(lV)-vw;a{fMTI@r4+eHx^s-eiFqxdf z_@_gOL2cDUz8-W`n8^s;VuJbSSpp8?KvV!I~5q>Ze!IB-N8| zD_FX}mKRDiVSox_SIj3PY(7fjMN7|8y%3*``);5vKyzP} z!L*`FG`-J0+rM#&`c3PrVaI_c#(4uQ8i;Pbk(|cP&Sb{P_ErA;>?=y#b4}$%e|_zm z=Bs&FDo(4uMY>+pV2dk4r65Mcj}1?w#cuiSjb!K|JgDoU%j&_8EEO=F9hiP>u)GBq z#e_|2faQwa^r=O_!-*07gxF31;b}+cPZU%22@qS{>85m$6a3@-TL?#p4CXA?ol+DY 
zigLe(LhUx%79-ob$sbh0d@)=LXB1(M9SZLG5!yk#|5l{z&chYjpye}<@3(bsU zS=T3;9pT$fHnLdY#g{_s{@i8&d?KO->+f z9cbUe=Ml=|ZcDi@0nOj!9PTRefoYUEnZCV^lLk%4YM;6!v@F!)%59E=_R)={n~-XF zCIE9kYf;0S%Ro3G;LYgh?2Eq*a!dP1|H^>>$IaaVttcg}C2|!Hd2Jh{e(JI;ClO~yxwNOw(3yws zRmisf_5x3vEQQYf8(^;mu)F2HRl?nnW%noo#bt=51X#FsPrFG?;-Bkk@pz$OTcEb| zr6_(UBR!>?2K_IYCAveW7AQ{mp~>lPnp8d|DV;#+8p5csOp0(h@m$_kZKAs$nB#Bb zAdx+>Q!;+3wX}}`Dfl@Rb_AYsA4Ykf94`zy2t#D1kh8ZkKC)e8sR!ztf~@Tu|uDUAne0($-dBw+pdABJ5c?{txBQxvD=iOHO~;vw(n1w z+YjV>)9D(FJ!8z!r?5HcA0y{t^Yf&1jONEuWvq{rk)@e&fS|MP`n1k{^eRq($Nh4egF3p=L!OD@5t)2PVuDY0y}Aq_qI&ci^yXxpJv#6Vd%@#D@i& zoT=1%(<9x-yqEDOkz@S1dINb7Bin!Ti5ZFYb!wx$GN&-cA_$Ve^m*6ag@S9YSZ;`6 zYs2hC8SOdBq%{)VDRRa4h!%locV&re|D@t^m5zD}_%*IgAa zwR=T(*H+4e{8gB!PrJYgkJhR_GW>!1<}%xF6OE(?8$nKXQ}Le-{=H2;)wz14gsovS zizo%A%1i`8!bDT;b|oeCV)Yb(2}a~V6Eg$2H=E1Bb#CVEHYu!=_rES z`|(yvg9V%DAnybHdQ+$f;>l~Yg4b7xkbNUeBGJ|MV3tE|fhp1qkw9Oowy#h>{77=q zcq{bPLJHTOC=|;i@2lpDKd(|cUpj6`d81}sqy;tg;iPS%S>{_=Nar4i1tqA>L&axi zGLH55g-(1Xj&NuDM)@rn6;RJuAsGIII>&^zl=te4=l(*7kpMeIO4w{kA zFLdvKl`3?%5zw(cE_@)U*tT0l0 zob`7&IOv8p{#D!C#9_CDP60xIBs5oDp>`mr_f=opJA@xLv1a650i5- zR)!?iJ%}q-Tfc72T;)oIgv!9|cHTSqkh`h#JS`v*Z_mo+E5&eW|2cQ~i{ds{=Mw?R zI-*kjFECT6-GLKu(vM7^@N}?a_P2kV2#Xu4#y}$~J|6<#M13$r@N`-8P(}W?671ACL z7HG;7&9!{f*BMJM^7R~XF53d_>`ZSRYdj-lwg0%F+}~XZBN=E>B{*EHj$7*0X5nX6 zwZLu|7$yi@%_qfu6Jkipw6NR4?KNiGuZtvDZ2*P|i{Aml@H#9%Yc+QU{TSUHO$7VEYzS}Y-JO^TtIXLKD8cV&Kq)w8MVrw4M5t&V93uDM7J&(hOoK2aUT2zP?G1p63q3_Nh9=Hj z;|}lnoQn_oxmX~~rDIwokNdv$!V?u~-dEpSJTOi80CcSt(MVRMzBKod`ivjZ62@qL z8xXRkQGLn7#N%;D^+IZ_CEmAUDelL6a46OIx!>1t9dgc#_myQ7?&l|7$*fo&#Ut?j z1MKk_(XlgeE19@O0*hB%$Xf@P?X4h z#BjCL8CJy(2Rx>#Jnm0~4Xj(96WJLQbM>Eh1ebR`z~52tjtWVkPr7s_x!e=ZOVfnd zSe*W?z@sm??h-JqYJY;2(KB5Q^w;=SqBms=xkND?^Dmz}IDQS%&1@vI&!bz15%JNd zQM-Pf>@-oB0IwIakB8-;l}p*hk0tifdG6lphC3$B^5>OZKXW*AUBOINzofjQJ#$OWwf1ac=v*HyQGa`{19$u(BQ%8!TRE_fK2XRL%kJi zfr6A$q1G3KWj6+uAyqQ^fVq{bjrGCk4UX74pMSsZpFo*UY0Eq6J%}a4p%u`nHDffBH>vr(d3jI 
zG6&oX1Fx2~|JuP%p!z*Ri-c7Kr*T}NNuie8uQzU%3AI{UAkPBq&q7t3bcZSQu59n%{U`8rjZpnWA}Od#T9f< zqHt7GB0W_UmEByw-i-_WsV-XgG%a+NJw9U(w^KjrE#o!Hui4%v>3;sO(3&`~6sA;E z-8#G$MuB`L+KEw9;4C2o3WCiH_X1CVw!H%-tcEo+lEp)hI4t{}hRi-6$YI*-V^UIh-=+>-DO2jmBp>T#i`a8fzhcu2;1^JpigwtN-yBrme1_wavWKxOb{4gkofetNfd5#d$TGMrQfAhG1q{9C;wByDx;|JA^_9`eU^s~M)f7!=8*r9 zGbEHv@B^01%(7TeL>BdoJ1No~_z@rFu$(A^+7f%r<=z-VlSE)PnWTLfgiO%MUK4!B z?ibdiqkOa-u!fcd=F&*kY?E1L_OB+q)yulm@JZqg*2K<918O#yJb&^X=Wa#cekH`v zCF=CwqqakRsKNqDe#&U2Xr&0GQ2kR|LX>bJM#B{^S2luPRgef4yJ~72T0$nSWFW{z zO50I-qOH6naxCIj=oq~jw#EL)@2SQ94=363j%UewU17uvefgZhox?@%9EaAxc)NFK^LfLz=%IN6US#OA-6EGJuR8-7@s|@hsD|3$IrCBQd2`$QLti6r(#3CeS?0~ z7%A`jNjPjo42p<|lXW1jXyaq?|H8HJ$aYaaD09@eAIHjBzwo+ppXwxWs^~4QGVhxN zr1$G~1DgYNgN4y}=782r{uk+>M|E77>uXD;E2VP=Hh#%8suf}4%w19Np@ibtZ|NP-I@eRAUo z31CTB^jB%mm_D%)U*+c$hPqVBQ=ovXjcA;rg7Ja853-w4skC_*`x@G6-`ZF~i! zWIw`!W<*Cy16H5RPqMDXuHE57fJH=iE>#J)O9{&dDSbroV_?3f?9}$HwK`Mfv$g<8 zDbxTQP{rUoeiI2E(V~-=9Olj$D$x&d95vgk8`NTd#J|MrmR0as2Jf+TjBDn;3tXX8&>E07#Ox81~)l$Jn8Eu_8nKuWK@OdH< z%4uG-0cu6g{-Fyk^jO$q742HW@nE9U%5Hk_E6i!wdSPHPhT#jsr54=&O!q!>WdNTh zKB}c1v@>zDWpHAHD5usC&ka5#TL!+xRCN^g0*m#|TR=gN9l@zxajF{3qy0%^YwW%a zHx~~sWZUQv#q!PxsOf~#8~i!_3!lD43werA(PLC9bFMj!01$%vvK>H}uphaP^QWXi zhu1OISiH*m0ic8PS+Xxqt|aPvQ~wKkk&?4u?l5F)`5=OcL7K=ZNn>1s@S_4+hoThT zNQ@ml=cD+q{WGLMTe&~mFY?kgRpy&M+n&r4Ddf>FA6FKj?~Z^Xvkco-USdUTtN}Y$ zabk?7d8obiX&^&K?8PkJUD=7KdeUsMW?tY#c+hIAX{Gk}gW2h=^B?SrJ*1IJNv0P@ z!tLJ)6u5jS@P2&#EAntVrxP{#Ce8KUZasAT`%E5T#5eA1dgP~nPI90s?5R-5_G6R7 za=UfOvn#DpDI^D|#*3mPeZ_@ib>hjqV-cO)alV?&(KHiiLD))?l%2~Ev&WL+k$yWf zB&w>G+s+V0v^dZiU+Zx^tx7!qIhBcnjC)bR+W6-e)taltzt;V9imIypw|=fJb;)D= z%JMg7mAYZ|+p1eK>!l_-{}FinQwh+Z5$NVD=^92Z;^Ni=uCm;e#I5 zZPt=&rkn$(I)5%7kEiw?K{<#UkJum)}Y z-y=67n2mPYV0)YwZa#4;lDE5i=}X;4Jyq!O$F`;Yh4POL1Ot=Z*=JJ zWO;aBJh`ANAym5!K@|Ua1-#hA0pe{Kt+mA?Nh# zYF7OS2qH&#;NO0VZjd;WJf#<8H>hM|8U?M~&mgK%NPHe39+93FFT*NcR+%J2iiD$N z-jq%((3vBIxgC1RdR2ks=%4ufE^yn^gc!XNbR?_`JW( 
zeXL>C-8iV<5j>dXz?c_QEZ-^kkxB^#@k-dm>?zO+vDb(~QroFu(XQwB$`(T<^A7d`Id?CkosmUijiSR#wF3h)a&QbIj zJcxN`ar9@&5A;u)9RV&E%&`#URBoK})t@)t0?18J{MBxy+~Ra(PIYCidTK1uUND=; zXht+gB2^`9YAumW+g6 z6df9%SF-?TQX?YIx99QeyBQKhko{1{(^6S%q^{yz${PTAL~!2ch`!Mw@=E(YuxcS( zf%fln{}zhX`h*zY;jsL}CyW$hG}T7JMc_AXiv6Ay-FUdwc!9SqJw#kw%At}3jCYTq z^u@4h;>|kvtBo#>>3aK2jU!5ZbrS1S;ESr%`=Gm{A6Q#MGg6`bsQcOCxQBKQAo10g9*hEF%5amAe^ z_P8IjOnm6h@Rm&rXHS2v@h`6a$ZSB|sfG>K6rZ%of#)H9X7oLlKslQ#0ey_1mw@$n ztfd{%fD8`|k13Bk@y!$TGF!U@5;bUy!{&Y1XKef+_h{!mk+$^b(tfKJ5K}p_BviC9 zzENqlS{t_v>@@aOr2pv~O^ZJ*zA^q@wLd!=Gi!MTK%)TkJ567BNr1njGbj=@17uCE zm6jPY~Uc><$21fp&dBJ>IXv!LSb|I97$syh3 zL~IZk-UlyQ8H@`*`uwD(ej7S`E)=juVPLftt9RSXq*-MR2$`)unHf z<4i2ujdkDU@%fal^PAB4bP7&`s8d<&6?xdru3f{r1zD;&%%ZECmj*rxrfftr8-J2b(7P0ddm#Mu7&L#YAz4&6%M9I?x zg&4ysSjOM_oAo?yaRd9bBid*6#J2Vw>eJNG0b7DKpop=MDcaVACuM{uzTf_u9_-N7 zf?V{^!V2=s;|dAA=Q51r^??jh$i$({H_nR0tTC*bOc9)Gqe$(`D0$Tx24u_Jh3zzTs)k(Zs`dtev~kELb7le}vMGYRqXyd5=c=QElJ zgW%!{_U*`KasOr287`(?OYGjmpWO*FgRj0{JiaAjo1GZ#WC?^{{x~kzR#@{LNfLhYe1jcic?BFYkwOF0M?sVNkAoOO%`UR28$# z(rZA;HIHym;TSFv=1uz784@6Ejd!A&*C-cVRvT&Z< z73j2GORE@^ijixhaLyyrN_v*5-pt~V`@?+8e<{{_#A*JOjJvYRe{s-fjbJMoo8>-_ zXmpgMlXLRvs4{8zP_ySVegzSU7B@qsilyi#_3Jjfl~QbWVz3Oq!`1UWh}en{-P+3I z3S34%$C}k}!yD!c8m)b^mj@c0QXhGhQ<#vnTtjFaC1<=c+$| znF9{hLpiwy3uY18fT%ST|Jz=-T4@wOLU>>#!CA!e<(YIL6A9qYcwA|^t&H;35xaSs zGE&v^@xu%}7efLi&h2r&Y?l1c+@s}SlaDLen*(Sp5T_NBMx9p`XU*7}6V2Af)Pw~c z@V)FI0lr)p@Wn)_n`(XQKBD7MSx=ipTf5u;^8o$`37Pc5tL;Tt@2#7{z}H@*t;|Op zRbAm{PHj~Jx~r3*NXPls__n3vjQKa`@j2&D3&2-Lfv#KUg6Fq0(2E#t@{ zg<077atku82*eAwj2J?1k(k&;KDCZAXuBTFh?|3rpNQeN$tabtfkPjN^ad};!od7t>Zm9Vh0kke z&&FQZ$aXG-UQ~afoMpe|KIv!W|289JQWZG8P3Y za6uzt|66!UzBEW;qdS99`WC%Hm|U*pgQr3lQEvA3BOy69ANVv23W?pa7RB9CW!T4` zY;kphJbboXY<555>FQ?!&=()@7EvG;TEcWR?54%FvyIa`(=(_>{^@{G7v60Kb~-Rb zZ`uklXZfCandzKQ{EqQ;;@-1X>BK6>tuPh9tvTUlBrwbE(l_}eQfUks>-&YewqvuW zuv9o{v3tlu4)YUJg5e$?Mn#x_#Z?$R2J$sRb6Imkc!2%teBvXbY3|4bqIRRoeDN`A z``lWR%+?hr3e6(Q`7cfr%akudh!CQ+%54~+u+_>`YfK-c;Uz}i=e|jM 
zPt%s)uKrfTvd;qPN(o){pjBh*D-ZajOyNcNhYI8252f~(zsNCZYF!3iU)C^9MNmFg z=S=GIrrDsD##6q^#42rSlol2>|4S_KTr0m(LoMJ0B-!kV&jSq?RC1jjj(%8d%8%t1 zWz$b%GoTkF|7P|3{OMdicXO^_hD!LOE~_z(IY05p4_DE_U+u)?uZy&-dkua!lWulX zPmJC_q(#4%a+8KfWP3v>65a(G)zSU)c;fnUuo0R4bJqWh9Dk?BSIC2xRJ5s#V^au& zti_raxVj9;0M#uGX_-wwk+7w@Lt-KIz3p_Q?&zq>q!R z>21L=#cV-?_1CjO4oY?srRp&I%2a;rCJsDkA9T{t(xd@9{%9dZVV25(vJ31?EGVLa zVUX1bu^pP(XT%^kEpq(16I0?2ViVHoER~q=DmEtjvI-cJFfPmMMF}DszY93vQ$A4D z&)}QTh903Z@M79CeTjY92ZSa|ly)FOP<#*_!k_0gG1a&M64=jN4y%D!J4IBSUx>?R z1Gm5MTi?IEkBFba{h+K)6<$gc%T;SNDhzh~1hsYKiYc15fFQYn17uI@v-KmD(1IQ) zU4Y*0;R}CZpsq79>}C0*U4#lbE?ZBR1OM_t1&GBRNtl`U zsP;y|KfTF)-WGuLUaIn&T8+e@l)X2o1_|fu)}=fbnP%D7SXaIU3&I>MbeM~?V*ZId zUrFLg?GAyK;Ux9`V@k3CgD1CY=J*@lpqCqEL5-uM5IHaH&QR9wrxskRLP8ws{?~<%>HzIIqC*S9MfO~ z<8sk(&TNWalB2Htss1Ag9mm%ur-By0?L~m?%%@`?T=&%i#$*{8G1co~=UER7b%dH( zO#XYQ)l8c~ws=HzIdi5~+VcA^rpWBL;lD8oDKA)xTpjgg_gmhhU$AM0?ttz-!RN*nfJ?cd-QgFFp`si^XVgRJUNsY+IJ%bfAO4C#CEw9d3Ro^y$&buWwu#Y zb-tdn*Z}qE@k12M1oU$g;xqofovX^iZBEY)T_{}KI8X?Scj@Pk)zAv-={p8qK4}vO zirnAZTD*eFH#=R07h(d?|8MnIL*!p{9OXSH`s0BQlYP$pE=G}8YI7v$0WIG4Mo;6za|ECS+M1Soe7*% zov7)Q(udOg1!DWl5Ye|I%OU@82LIeWnV<{omK&a>z7`iH^)HvU$41mUvBDh2LR&81 zxX~T*pcdtYw$+Np#y9S^;nY&2_(pmJ4Rujs8q9ny-YKC>_DcO~ZJb(GP;zEE`xDcT z>2OI9u+YaPWktvJf`p%C51X+Fr7eOAv#D7QtgHAyCfD5vwc4hD{9a~*iwy~s!3mNd zBnj@xqw*ZpJebgmSX+i=7*)A`Iq#11}ZLyvAtj&xvz6jAu;_~aYDYpoeyDkj=~NQC$Q=t7&=_{=eL z-ukWn$qE5*r8qTFYdXGoUfFjCd%hl~9>!e3mcE&bV> zu9QS~2z#l;sr*Cs74;C%sxYjGedsDP#<}9D|m0A|8OAFt#L+pyxs%cf-0Zd3i zy1o2%t@2A-5Ll3UeKVIV%>6wu8a%BO2Buc)cjx^GASs5K!$S2b{=4$?{P7~32f~2K z5q_LLW1naK7j2`23;($IM_ejwjAmO;MVa0C*mSV}5I5$rlhcNU_k<^qrqS>!@4@Z% z!`G@9)#N9(h%p%dmFOn<={DhnzfWj@8Q%wi@y=5?H*IquZ@1h8acCVe8n%ve&OX}t zy+D-BJ_^w41ncZ_B6}_GHOo~{%7XumE&h+8@<|%hqxA+Zcqu+IIdWILKf9<_-`C=A z7%ajI@9l`w^BpPE#!(Ohmy>3e^O$j*7k3A`zxdE!Dhs}+EfJ!ft8x$5uEm|su@}X( z4);@c*CBTcA;j-TVBDul;;7K-^ThUyj2a!e^Q*s&feK!)@I)l%xC&o8d-e3k(Z%*V<*fy?8)jw3JNq%eey|y`YhZs+7iYWVrgpHej 
zG{=(5|NKL6jo}9WOxWnzwJI4HE7f7%wWz2T5xmyCsLEuv*R4tYs&vW~a4k_^t0|jQ z#5R68zf#=n%uRG{Mo=%OC~vVO`|!9jzcrKJB(hXX5>V{E8_{%3%TW-n^6j!%V8zg` zm-}5*UNNVRMQUYe|Hs(&M2M>x$nfT~%E{4O%gw8+lWOckc#vo`yTU=S1Pon#| zL#0w&twROiArdZu)!U@b7El>{2FuGBdi78uIAwzo&Z9xbI@^U`?4-gA-AL~eKiIlA z4MZ}8A)U4IxPDQ5tU^%mg{2wp&g#^LW%w0z*OLwqb~gt-G4-5CnSB&*O{s;&DQei3 zaPg!4W|r|1sYiiEDvA)5=MCo0T5zEXdNmqA{u%dm^Tq}!!!XS=QhDYGr4<)3eshji z;k?1Xp!Fi3T@`DVb{D_@L@tLes;NjjhJndTS5p%NFK~pqVS8L!y4^Fvh`y8c|$h?G^0j zDgp_eQYLv0oxFE$7(})CPLu(z`Cq$!tC79{; zVa@r%r`|X8QnHbqJ(U!*9Af6jtFc_t!c1h&#cx~x!`+EKAy|OfaohNBKB>usGZ)(4 zQ5CVd<-(g2;fga17B3v#(oN!S9)_9U*`e9J3P|y8mY~ES8{qnPsbl0jq7+2zUCL$b zu-n^SLmb#s0nil_pvvW*PVWkM$T1H3lv{%B@<&FjrbM4 z4<@&;Qs@D1M5?7IV3Il|e8DFX$IRydTW`Cc02FwX&}OcG7F3(oH+hgl(guOW?FG^0 z-M;)M0{w5>IpH>gBl7zSCbzOpN(9NU!MA_MqGv}IntwTR-o2>kjuYK}OuQrF&@=Vp zGd4c|$;;4#_E9J4v)(Ur^6vEEBQaR@+@~L|NiK$DT?W7C@>2IDOoR=3vx`!=`_}QA zqbE$!MLwjNMyBw1l}|{(MYt-DkEcJB^%rv%Q@#I-HU)JP8;R~}#47*kQ;F{RsOX|$ z$Rx_Cv{3t^7$UW;(XOv(-0=+R>Gv-w;BO)@_xWg$Eo_%Km2Q2)kpLiygG|WJP1$wTpD84p^8O1gr%AH-x=*`E+PjE2a?cUJa~oS zL_GE^w^EDxZ>o;&7VXsG^biIb5i4Pz32Ul~Yw~hNVrb_3FU;uVmv}5M@ zl6(r~L=`DQ%D7$0!e1$4C#e*SKL&rDXFd<8t98;VwV$hv+HMUJ>ypfo&xhU=D{H=w zE|%}u(~?MtTL5BsaHI~CDTx#uZex7)l%fZ#2g=UUb}$U`;#{(S;inBmg~$e``h)3+ z&)#sEJp53*xgNyeu`SkNSwm7{yHR24oV-*Yp(=s3?xgOMd?_ZfNWSlxv|>exyO}P#0Y0aZ?oi9#*8==-SS*zR~5Yx$_203}L%=cp^#UK>=P2lWlkv z`CgvOKCq27Z0LY5pg9f0$FOg`c@Ak5vYR3=6zrfLxcGFFUhe)5Z^8A2a(P#EQ1;dz z-JxK^^B?)%*7YzwTX)H1pLNQb=} zV+S8pgpI|Oy~w9VAuiOe+^5QI8FyFBofAV5u~%*Pg(G}X^J|cE5s+*jYbHXR%GQV5 zb`iQsVgV@f#6obqu!(AU#l#oFoaTz3PbEKxx^6$$M<1-0tpy{f^DAWB@ELRtyMKhv z6d_3pNq9g{ijQ}k5Oa=$Xf8mWWW58NUmb;2LS@;Mtgyq{IXV2O}BJla8TKBkJt6?a^Q~deyxEutzv_tJ93Wy zA5hB#?P&>%=x{34%x0zMfw}F}fzG=Zs0)jjzZzw%ntQi+E+dGaOa*iNbDZiRFv01A z{IkZSq&F(G;l(+=7e#~kAJKMY3Q)fmU6jP71&EFw;E;!2i{LJXH>62--h|a4BEOw5 zcE#cKIXvZq!EfFBps7_qR2paEmB(z66yRxExheRh8{^%Fj2D7wA`jhAP5=cYL*Pi} zGD<^KichxwBi?gm4s19=E7520c8B8);x2{p8^Bd;IDeqYDD|Yc6z+f14#93->08_$VCLo7vDtoCNlgiSh_;3R>b*^wudQ%h!6{!yLEAXR& 
z*9RthtvRRV(EU+c5oN=W>ka)&fJC4>gGSZH&*@{OI&EbYyfe+0Vz)S>OQj778sY%i zaVQyKXnDeA#3YND zA-1@TE8>O9j0r=N4_S1YIzJt$>zx-{!8zun`fXQ~&kB#Bk(M`M^*=TsUe-!LM{Bqmc`@A^~k5PUI}wyI~j~kF@0*x=i#XW=~@jtz7xZ< zto}_*=xe_cGoxDRZTgwZu@JZ9U8{9C#VPdz-JACLm@(pz$hG=6oIyeg)9mVBZFR(< zZh4zPfZA+wqSAQI7FHy=17^+MO=!lE;R|2;)f#hG#|~2JX&_`M2=Lv?OvR53U7kwx@D2x7kiB9)WJuQFNY%U^x#3Y}} z|Eu=%@naE{Geha3642`mx)MyTihGvB+Gh0rQt`^JD9>TIT|6}9|MDu^@t{;? z9CpYU+zyFH7zYB_Clld}-~#_%o-Mc#>UMjc;&{t(+gFzbOTi}bLn8`DP1rtagE<&f zi(n@`_4Jel0K@5;f6N#pwX(Jqg&g2rISNmci5cvU2y;WZL^KKcnNb~_l{B#&I-y@~ z+5tZ+q5d|^A;Qy1ne1!b8`LRTVK*Ez+NI{CQwdw%Znmr2`Nsy^)#s^BD`EfIpP%4( zAS|A;C0$&|6*{v*;6uW83Msf3%Six%5twWROfw%0I7YwKT7{`grFA-8PI3mqc;{(= z)Vh&163re=rwNDuj9l7$9^e5RiSd!kF0c%%iic?5M{p?T&5J%nj$X8C+x^v0{*(nc zIUtjQ-gYwt`%P_`4hEfxjQCG>Hb{M-JRXQ9c0Ue3zLeK~cn=#uIg{*!K} zY$OOaO!>N9^!_Nk^}5^iui-s`bz3^W$vahW(6{&j!QP$*)V7DfmK=FtMwGlF4K37z z*+KWqd5a<;hg9b)2^#m-)ejsNgkD^{hh)j{Myz8GELDJ)~|39vj2Diuxe2YeBwLDDyylE z#w0xi8k>k+P#hv^-3)ia<9^TIYz)x4c6uPYyNv+{H(sj*zV49BV18v@^g$cxCE{O; zcn4$SK47D)#b_d_nfumDzrDs?Gmi%AXECC^)u!qSyYBqMs=l9f`o(VK3T2FjO+x{g z$5k4X-{+&rRp?F?w>k5DkyL&KdCC=g6&TX-F(W`;$7X7CV|ZL;q#PI@r=DVEOOPr9 ziGyekjTQ~ojqk_omqVG9YO14J@rQ}$k-MKN*IizfaT2N0$?kpyZ0Ce3;UP8_3zs7+i5_~U=4ib9@rOQ^CWkmpSJ! 
zu;tCzzb>>Bm6)MhOXAq?SUE!07*8nOWi9uqk%kM(@_?U#2{)J`oEe)=IdGH51w4~g z+qI)XLKsTU+MR^T7b$yLNZ4%fM7YrBPgaY7QV!x_ zZGvYtndihD^3W-Jt7*5JSpw*arB-_B;~>HS{veFVO?U9(t6)QMWxsgvE4>c0PBQcN z%m(tI34gzj#UsI$31m}}dY#+epvV3NXFEK}@T-(+MF8u}2q|&PadG^GKpR_& z{2P^U(Jf%93iWc(h(x-;fOo<7nLSWxjAFm!L(?aV-8~?=y2m2waNHBqxXnverBv66 z=3>|7{I2&5@RzKQu?vo{x+h?=x}+VYBK)*&2X)bdr=n&}^a-o@C%lr)WcCLBgPhxu zFgUa~v%v6kyfmC5S8f9QWn(>?FO}shpvT==A#FN+V3pKl!1*uL{rg<(GjzrZwXoT9 z>X9zfdNGrsEKgo8NA}gx2@;b<<7*<=s8W1%R!wtr_Me5G!7@ap-UD?$+d>L4D3tXL zOd{_t$u+gI^Geuv{mvgM+wxj_CRSXglX%RVU$T@niSIw+p$HYt2r5?kX~->*(Vyfz zH~DU1XbV;c8UHC-!?4#evkuoQojgqMqj6fH^m6dPB3a+Uwye^K*00z4q+`eVQ?8o{#n=O8hZ9 zg00=~K0%&WK?sjtQsu@d*O4ZhFSr;(9kQ^4cT*8T%HUqr+O8f?Q4y!#pF5~8_8(jyApi_L3Q(*CQ2>WNeb`Fi1?*!u&n zILY<{RKwvPGpW}UNIo*uOI4Z~uatsM`Ip+oMl^6d+no}(7mfzqDCxZn^=Zp{nIPai zg%H_gww)9}QM>I=7%LpAO}wp~mkbRC-%TIYO@beUss`8Q$B2xOnbgGOn!R9RUB5y$ z+!C1viwyiwAhKHL80I`83xOV->xjBee#k}c>Llv-ji{-yB0+oLgs91p)bJ56^50X^ zvl;wAh;PoWXt;X?&6==;!@} zQA>vqyGH_<>KECX5-UYC&~Dkcb{3O6Cf$DR z$CQ2ljX&6F^rHxB%BjSckkZQUoy9?tf4 zu(Ed`_m-D=*6fC`>yw`+KDRMji-W%3n!&}~OK&+D_7GMLhOj)?>yse73+499X)>oP zY6NtvSu}yj#h>slLNYzAwBZT)qySNZZD8E!uwez>HeFiM;xpol>NTxWUdeg+RHg

e5JWe7RQ-920JSPD=Qp9!@ZOZxJH#s~nTB4TYfLJ%Z{_BPWS!gNX5WzXD4V z6KFC~D?9Ex{tsL47+wd|ZH?~Ows+jvb{n&?y^}O)(5SKP9ouMZHnx+-wr%U?ocEmX ze$R97@3sEUx#qweD;I47rF`$`j#kIh+cSzQ4j+qJl_(0sB4nEi386F5SR~ivpme{` zBzlaU)Wc2qz+@da>oaRn-JdqxB3ER9tGuDtKDYMv*z@RgN`tgdCmSw~0Y=^E!}9rS ztOa~n5Om65or(V>N$&x_7aMK{!a`P!DLIjZPvqHUIe&HvxvGw9eq5zh>>HE*H&%M; z1z1TLpOhIMiNpf?-pLwCSk?DaWgj&a{snqZRKg40{mH`@eyT<3TC2$%%q*9pU*+uC zswDr}2>693E{Z{nlR_o&OAY9H{Qdv%rhWqW5XXI;D5}#=ZTI)5P;V~=J}m`9ByFoJ z?pBcc34>Y)!{N%Tyl)H;F)RF+ow*#x^*-K~sDSo;LU_|sip?s*VAOTjZovjSeLRK1 zRYKo&Q*cRMEIq^|4j>|c!#@Ia=OFxzopP@ZX(K1i1Y92yW^EaoT1H@ejS^`QBD!3_ z$p8F&7xH!a_$I<}S8S-=DdaSwBq=_d!4CMXom##Kt_Qk_;%C%?8?Lw_g=kGM5H)2= zQFIm^z`+F@m+L6j2ZB6I$7C(l6J=vyHweU*H=OK9Q*K}`>t{QQT&uF&i$TL@Qeb7a zjTj1#jR8j77IoQTmx2x znBDdNK#&{7r9H8ViqwyMlcLtHEW!TGl*;=6V>rYvfPt#U`c=8m8BPWj za_Pb-)Agr=PC&#eS%U2|CQf);*3sP{L;(wGPBNFI-XPiLXTI-J<6o{5f8JF1;p)5_ zyG0Awp>i~cp4MDhQ^eh3c%GkgFqfPyalf?5rJ75|e-APdwz2Z5+Vj^rQc1=N zvE<=eVMtS5dCa4XVs7{+Vm7PbXNwGb&w|!p%wtj+A68r?J^hkchG$r}U>_s#QLSXV zdk`_WgNCiWkg;&FiGzt)(hE-X9c5RVe6KY1Rv8`4xk`Tcs1XlQJI!nK0>}Jb?$+D37o}^kVu_F(f&?ZKdA1^;a%61>X1%}j_FtV% z{ajSPz)$yE^qR3wNU1tzMsl6b%)aC#$ZLm`W*f8qd8lFXw%c;TL?bBsY&`EJY+e=0^+@_E)xAp2f4B>syp(DFcM1A^uwZ=u%zQJ~`K(U(EPl2B z;~d;XmE5EPY44MpW@`7?5bF-(`vTnW4@ijT_3e)#OY(1Og|019RFnXlx`G~8mCfKz zBd|SR*pTaoq^Hfv2rsBDS!$xVt(StA^F^(=-l2pk6Jos_DLkou)zb~pL4AJ6XhN*< zp-C(Pf)y8#HJ2I`C7Htrk@G_Yk0 zc>#1D{A4Oi22QtW@Z{p!@P-#o>^B18b$}K&hlE?gFSG(l-x>Cz!&_Po|5s; zH3b%iw?qC)&68P(z8~4c2OvH`pTBI`ARaiB#g&Ba zz@^zIxn)VfjRLqGCXrVtqAz9V9d@UAqp)sK-;U|U*hJ}9b&aaR)45*YzTG7_n_*~6 zmlI<_6{Nwqe}B`{G8ZQ7t@)kpy5+k-CrYTOqc6q5F;!@s8*qwO^H#J4FY&kgXn1JA z$ACeJmz}Boj}?5;OS?EERdb~wHTAlxMUJq6`*fh+w^|E!Pm;8reBaHdsFQ`D4uqP| zK!u-lN88WR7Sc#98*DA~@807rwaAHF1VFX{T-gjZNh(TVf3`Y(bzL`|GTSexIVUxY zXFnnMYQ&;@KXS;%DqeCO`lh`S6nv1Sb<`zJf5=Z*k;Jhh0Y{UdudOu#=z`|t3rPaD zys5gIy=6QJH6YD=@gLto@CoD9wmJ(l>~}LPfzo42Fa0FqV&m?tYus9O!Cu!wGM!Dz za|y9WROgAUqoFhgU`rj!D_3(NIRE^-Io4(rDi*G9J^LS24HrKtqDj)zZ=-V&+!(3K 
zKR3M2x!RO4FasZ^S&Bv1f=1$o-%b1Q)HU2A#-d@K+5qZGXJ&xiZpMaeWrVt&ZCtl& z2S>{{eB?Hp2uzJg7>11>jlzxoKD1K|w6x4J>?~5DUg^OS$QyhE;%WwC!eJY%uDQ1% zRYC98Ky5HMlhQDIg$98KRYvZQhJ)}Cqh|n(tdN>N0XJqfl9+8xUaYu`$nB1;>UTC5 z-ZTRS`%n2%sHR=!i`lk|BSNWUxfYlI#t30yLY6>s3bP1w|4>4b@V1c}MnO!744k1c z$$X`OLKeW!B*ali>-V!Ut|38hc{TocD~yhHv1BPTXKvNn-ww4JI>maeQ+cnF|19H) z;C|*5;VXQRLE<(|rB^NV`h<;$S05{T-~*maAp&vR;ppQ?yz%tz zZHOo!Hk<*TO5Lw6=Dp$R3+s)3Z&3Uu>OHhWt}Z28!He?rYyzpkL~7q^#^i*oWEMk; zmj*mQ=KRF(S}y$H9Xjo_B))E9S=kzsp0#CF$px=A_*^N9Emi5^@6ktlk~4s7Em+yd zh3h{<=o3Se(60O%8M2o0wyr|7VlFW2MPr|HgHP zDE$%GI%1WagGd3VityZwpG9m-A3)&~e9?sr_!}&_9%;(@%F}_G!GPwwORzbm8h`mD zw524?Q6V+|;;NSZn0(}Eww4OKr7h-n=&AdKT*`}_9dJvcg;T5X_b)Pdw8j>EQ4$>v zw3^eL?`jW?v?hSL)P#G4ryn5y`6TL;a4G}Yb?hD^2yua#$vQJMHG5Ngd+m@4|F6A9 z-*?#UfD4R|I0En2a;d4~yvA@DmCM(Td@qYZB~X#1qUD#H&wTk_qk zKNd_`%lW&axmzlP^b05V!onX(2dVj2qN>J?2-btmA;cWV_F${)a<|5sAg>t$i(% zUM6rXsB+RA*&#?kF8nlH2JR)O0@8wae}{#in38pu$p09sN7#!fUQBE(48r1yQ9K%C zfh#o53eh3#;#aLPG8(@MTw}b@p{0~9D;$9UJcD=Az9W1~Eu;mfI(<>+F$#1;%41;! 
z#|O!0^Z2Sk?e%Mq#vxDo?#k>O>NVhMBYTjagzzV%fqPRKL@TRK`Q|QZOs+j6Uh2{=t+^&;n+H8-VAq$y>TAKDZe@00a$I4;+A=R4F*r>^J z5}=Tx$;OpJd;eUK@8d#8{iV;~=1vWfH);H4l`_!*>)fNKAp+$!5I?;&ccVzLF)g+o zg`Qz_MeC{8!tnS?ZhQ8PboehJH368 z@3Sf4>$~1II!Xm;V4ON2Qov(*q%79> zj$n_9DQ&2qfdm}GEm-2%HrdmHk0cxn6`x8?5dE>pL!YE&+Gb}dY#2nti=s;Ar)~8u zB&`;2>Hbr{gsd;2MEytkhjZoyAh^-_4`7*?aa2a6C{3R`u>z7 z_b-KT#;_qO=at|wXbequ@zT!0SA zw~j>gaJ{8Jr!5|Ut!Z7994R~$0jUl@P{`rp6TB1@X$ug<`fuCh!>ZcEba)rO%|9>^ z2&`$EO6@AT5%&3^4RPnh(J%mcKQLSe+K~7dJfw-=kN_RbbxtH8QU>mnrE`>Fa&xu= z%3cBMD-H4`v8OMpaUz9LQ-!@DAcI1M>4iUJuT1>7sVw*1jK^bLtYPa;qI&KPSh|4y zMA7Hfutqp_HC>})VJP$I_4#7?=A}Gko6Dy{yFvo*9ZCG*$6t7Q{{L=rU`ty^rv0bI zsP<@n%yGJ#MYrsv%2#uOqVyD7{ToE|t#(?Z2|8K6C+?`Ni4FAh=J$a7riT6O<+N1( zGPa}B39X$L=nL_bK>p*QW!9!hvc-cNN?mIDX&np5f1OiQUsnk%621Vbpm5&eEz=q=})T16N-p6l5Y}Z_q(SxP|EXg*{rU zX$m5k)K+}E=@Ir0`ZGBnO0s-20@Wdsv>Y@$%6{<*d2}O1VyQKZCCuH9_^FN-!X}iw z?5-|Mvo31~KM>Q-l_o_4z6jFN>=K6l>)TND4Ma{XZ7APLkEdgXI>%d)>Zl-fX-4|E4xlC&UJav}GB;>=d>{(B}a`u_h@+(69ddq-v#TusnYli%(z z`;WHH^-~xWX-;U;i+`8zQ-7%O(NR@RAZMNceg=S0!>Xc55%)2lg-!VNcT5-2`Cx0& z-wtM}L5t8p?tRgxgf^#Clz*NS1un$oAIeh=O(OK#s1Q!M2f;j?-5;SciTb%$gn-`i zSuH@8UH9ZRMvA+Oi>+Y+p})~}Y#&xFV%m9t7o3#=a?$`65?!Sdvu6oLU@ssm3EC+w z4eEK*Gteq|A7GTaXGctniDWV3Z{*zpP@#%16s!2#f6q2|akf6Nco-+wyUF8FWqQIM z6%e?|6%d8l-Qx$d!j1c*?0O#A=I`uam-{M1!X zJd7kR50Krqv0y53TXNubWTG-T1rgScI-B^Tx|`;)eirYKZ3QEy zN*k?WF24ygh>`3UG3B6%33dLmf!pk#rtkDsc~L(Z9qw$0r5fS~P1nNJ*$>|wJE_zJ zsos989FM!Ur4*=&|K{|h+y9DKnJl>J;Im~n^c5^SmCA}L6@=U#%-AqGtZ9bY#zY!w zM-?@pfop;O`}bw-RGZnQ2N_IDOU#59U4;_g2v&>&CWEe;{wkFS<6W6o$j%M=Lo4gnN1L5-z6=Mw`#Tu;Bm+5{PH3={$31Yw%>^)aR1`V0l8*c0u-JWP!_qny{ znS3HozvrJnH7}?j9=0*14_dyJT59D(*2aJa2XeH+q$ep{kZ-Q}RW{cDUh)bb)QVAg zIa7%10nNAy{8|&#jQw(lCLe9u6B2UOrRSlSj7sc%G;&T+<^IK?!ooFMibog+g%|E1 zb$*jtV6UZeI#=3WtcaSm?RWF<)R0XJWLfjG*mN#2(Nwigf3{SYRkxLO#38L(|4%aE zYmC(92UoYT;_nfytnkkv%J8k7g7E=B^f)=4KbK~Su@mVT}({Ldfa8ci~Kf+U=p##wd z-cT(1-Y!Rpgcc zEy`?_@<1tZ`!09PLB|3UxVeA{;46w14nW*XyWRO=qF4ppXrU}7#sXp9tlDNBzM+~X 
z7}vpKh;W+-1LhW-pY?$3(am!iHCSC>@%?V!nR+#TCr_6PQv2;xOGE=l!M=Nx)RETB z>t_?a1HWtJjT#;Q7brx?H;+vQCb|dz>Fthm8Yopdy zWn-p0sa1<=gyY~13`|A}m?gb?JqzIrJ#@9t7H%5!xYgxjQfk6u1`%}rJ|lL zM=;u88u`^;_%VbW4v(HkZfb0ra#N=QZA)4TliOVMkfXw%@Xr8(ppcB1@Zj%{c3Qni zeGP^$Cey0V2-3g);R~S(6FdJAp;7-#dE?g5{C88*e$#%9E{*!e_iCpm zSLIo4xBCyx`9Xf1Gc~5K=g*qXKe-xj*bUw+`;@w69n(WH$nZ@K#F4UbnYjo$cQ7*U z6Y1-Z8T?4wR(pk_GG&e0l8++DwJl~_T#h2%AC#bC_ZNOST{V;HE4z*#U}6#9-JFeq z<{|?is0`;Q{pHMe6&oRmmdHJg6XihIeMifu4KqLXZ^B_6bK$NUVkuuzHy~5;e&@Tf zN_&DmqaX_c%g{A7nJ|L^<#T8vMyJv& z^oY*zwmx5wtciSBLlcYmWuU$hvw!zvMK@vzBRBo^ob5y1bC$oD^H7^>=Aw$TYO#|rp&(v}X-AnK1Px*hB*Y=9YsEH!waHg zdQv()OK+7_mGqY*>_~O^c_=UKXSe&b>CVo~n76>Y%u|mVBPtGXLLGq$IARu-5V{qN2OG>-&q%+}m-Ged^lYd%Z zIvpEEt$^;n@%Y^RVy$(dwe_QYrTnL6L75uSbZqpk0VGwy!@5V$hnwPc&@jk64scxQ zJ(xWP+GjZM=yaSI6P$bzRw7%l#jgXa_BeKk=FIYked5Oba*$E}@nBh9KJbd1WWPE3#Q*ZM%WGgsdqAu0wO!@Gr-m? zVaLHjIBq@7AUEsz9_MzNuIp@ECcf5ko0!EjRyp~U_?5TLc{8|is4*4hKMy*ZWRivQ z)dqR9^lQdbK{;6aMO5QH=Ftxu2|A#>34q{QzjoL$(;rEzOf?imH%>7IkCrK+RW<&50(Y&({{Y82%oqqn`a$<(HEki0??cd z7yt#C7a+dHMZS|0YKr=8e~; z3U~#21vv*J#TlMQ^jzuh~KLLXUVn_}~)0p`Uqq0@#6`nMiIojA6 zaLxpsiz&KD^k;{B+?WzA$G;b`7H3RH|2G{1Y?;CI%&4NU!n151BJ-$^^Bc}ka#5Vt z>5LT^cj_O=hZtHahrkZURx1AKLb>D``I*E!0@Dn7dtDSJLICtLNi<>uKh)H$*8z}! 
z#hO;+62x%(;zREru+T-$wO;nW%T>SbVSy))sQ*T`X#%Ec2^8t z;f;J{aN*GBs!7gns9mg%iYDH)G&9bth8?o2g&F@LzpY4TJ!+YJSX~My7Zez9pk^#+}vADQN1Ta~@`ZE0T?(Z5!249l&-N2+wM_DZobiqqcns5wp)Khf%U9aHE zInJ7{BZ(^Ya=RMk4r`<2TLPK%42tetkcQ2eY%veHs`%3%DPtPN+Emb<$AYHJT$HRE zsrv6mNf0%J&NHefYBr)lZ^Atf6_87{P4X7=r^{%ZWW_NtH%<4$WEt7euk5Ynm>ud| zM)mcekQrz&kts0S`416wm@t3xmv-yrk%X+==|X|fZUi{uB zV2q}gGK~Zd1w@MDq!eeAncTCIQF@$_4C~Ycg!CFYvwC=^2upX2g59o*u_t33xfw$7 z7!<)ahgTr4I5c-DW*#1?Vu30w6;|#d@SOaNw~ie^KsL)#=*v#St%w$?sE4C)(6Sv zTqC~}xL22HLPiSAV(p!$=i*(zCZ|kxIc$6M`%#N>sg|rC^z&2ObdDUQb%%-Ipzn@6 z(4}sC8ok25ax95!A^CRYb!IMs#NeXUp8~bSf=N8E&aU^zss`mvOV;Sp76yT_wVu=;T43P3L$xzLxc%k4cNtTwj_9=A_*Ajl%V`3o(L zKQ_5!^{+`#Ehd&K&$4A7BX7O@BgDa*80oy%L+m_4nB93JmEIAN+HFsxM44R=$ZK8E z47z|!JA^WQC?i-ijEzj^|X4lS=OrR#I0JM=C5A;!PY!%f;yQT_Wv4|)$L ziUPl4>JHd0Gwj>O>?qe$IG)?L|1^d<76Ee8I?6KvZAjB2oJ(eeZ;cQ6#%QY!dq{q> zqs2w`))p+}sMz|*<1dSIvdn>)>TpK?UIhyh{eKi#&q^N?4++zsU~7z0jGcP#QR`!(g9en&s%thp!r5`QJstax_357$ond`_4Inl4liUz59!Fd$6c$q=Ql!t(k0a4vW}uN!*KzQ#Uw4}3B?4<;Hp*>k{n zQdJz&Aa#l20e3m~?v4G997_dhcF1#D@~vC0A@;#(c6(9h-oukCOw@RjkZ7K|ZM!DB z)okdo?9I0s94~EbPZm$-JhqO7FO%qTmM*pSOa)grsM2Z*wRS8ncL;U>SIfmrpX1Wq z?s|dKprvN1BrmLH;R^Xhpz@O@_{|XEsAHX74vm76$=WU`HUOF@jn8b5TpV`Bfjx2Bt^q`S+IZ<|K4aJ znAnZir9dlNL356!(GON#^>F9m(uY6#Lt99?leV$8Kwc(5FN4REn^*vbDy!|;OLLsVuwqx%7hu@Df~n}ofNV>3Am4%HV;IrASOrd=321yz&H3F! 
z;JI`c=S*lvN+XHa2|17q{#m_~6kPE1_*JF%o$Bit*F-kj1a0Zfq?m|K9jc>zf$y$P zxFgE7V`2@fVAVjxhW~9SOv^WaO3dsfDs5zj&$d50B^T%N=np*>Fo-_SQE@@M5`<@x z7%NaVa%yiw#eZJrt8N4eg6H!K-4HAST`%4|6k+TQrqRvoIlk|Kpy?Fbo`p#FX z&$A9wZ~QKO4&_6fh9^(~Fd-DVvkG0-%rf4xAs!^9+xAXjV*2XsIM_LJVMyNwJF~CO zj*~j&;=#o|$mLSGI^y|+zT+F%?PD^LQo6p`5qUC|!4ZCm7@#TTsa#GmDE&+j4fjmL z7+?qKx7z>jmqpUe8F!Wd&KzNtR67HuSMZRU8u*f{GD2YH_bBH^kOQv_NToO#@=4c0 zI1WL(h+AGmA zM|YsB4hv|2K4_WfZqzkf;Eo63GlKPms^|;swPmMDqT;aGQFFFkFWistNdLf`E&Oda z1e%M$gRayQOLlYzlMMmB;cA8&hXvn>ZCLa-MQ%*iD*quTXpCp<`3NT9P)xV(!P7)c zL}$fhreNZA(9^zqk)F&%5}dvbb-{yz4=(jX;4$%tP*6Ls=JL{#mLL!HHFtiMfT-0x z#Q@am;7AW@xrX(V1-9Si8|#2DIHRoe1U?UseAIh8s0?%SAUHhR7~d2(I^^xBN>rPI z)^*&qXVBj?`6l}&?BV^Upo+YOpsaNDOHB(=-}CRWl|K}u5_77>Pe1#_q+um}nw96B zV{n^L{fepd@`hL^$S3MfIg-sB|8dDw3?~e(D8ygeDJ?v$-*!DvjWNhGgfTHDIs<8b z%nBjL$=!AueWtT!_j&qmJ+$W^w^1OwtH3yfcz?M!erF{lE031&LDrC9HJ|J`8PpR% zrr49V%DPTNbZVnJ89K(-po&7<;y253I^dOd{a~;^g%lyz;GNZcylg86(3KbCj_~Wm zMVs{R!WCm8c0E~iv9^(zOr-^@4#VML;)t*jLCf&N$0$`5kY~9i;eUeY7LibZl(SUUEIRIwn~c9@*7J7P4K00S^vchM*tNjOL{P!Iq@y)Md*tJ9=@zh%pIH_WrSCFMV!eQEU50quD zU=PDYu?M?fbRzC}EO-M$h@fE9dtycor~w9qgt2Qzx7wYrS2ZQ0m9{p2@IO%KbT>tQ z>)n;7F z6i9_RYcNgo0FAA;+_Qy7BBq3j_|fMKK*QDj+wk?wU>KNJW&P5~?!K{V?^C=mBMq4& z0!2P4sRZu`3fg+2o4m2TrEP8s5G9VRYi|7K^uCs27N?#Ck#FnbJ2y$W9}PgV^S9u; zW2e9ENWnO#6cQSTatnU@an2W3!2@*MwLPPcnHwdp2w)jf2dzB(PGMMZgz?Q|X$wEz zRZ9~f-KZ+BNWxS4FnIop7QzGl$_i+#2w~~F%GkMn8QKW_SP54hMbQDxcpy_U|3I!Rv+B+IW#M0n6Rj6icY?&wj zvcQs@2b{pxhQvW$AxiC@(BS>gIOWPpI1#9_2svsDF9$z@+7~dXN(AHiMVv#mf!JhnK_!pn{B z6du1#uN0Pa^LFnxC|eE{C?&nP2M!%PnVBSK&~dDt_2cdLeNGw*+OSV`$dPukf^RUw zBDvt5d^jYAG)6AS4(O={$+mgS6g>ldB*R#$lA*C=m~Pwcbv~^X^;kW#gvZZ0vtCgWXy@{ed|j z(g^L{_=3FZ)iYk4VWuEAsL_Q|0W%^nC4f2VJ3evkVp?}-B+ctb7oOY^fETQvu{;eO zg}qRl!{w4VS?2HJo)JkL3^DH!#)-4tm4en~B+I*Xwue9uIUckT-1!EmPalF&7Bhg) z_^>ng?2nK}09fYnlEIAoLclSwzW2Fn!1y;S7oKCgXqLY?u2=aOm%E*3C}L!Ez#VS#hA;S`h)a%=ZDYUuJ{X~yxcCr6Q3`^+04#!sNmpc1B< zYutn&_8vq_6Xz2~d?-(zD^d#oZA?IE*W<@&cvm1oOnMR4QjJzssflK!M-lBBnv3Cn 
zQ4y9hx4~o=31~Cf1)Ynz;cCz1Hfsjwv}_H#mfIY8n7KTY5(KpTqJO9_p!}Ry)brb5 zcODyt@ZT}}NIDLS&=EJ}h#y3UbP><1{UFp|!A-}5#*CTTYMT6Zl>CBB%l zrk54Rl&TvO9+fRnJD@yg^xBHjc`~9>mq-joV?QBj4esjA!~Od@{F&Bgf=y*A3FrRB zNSkE#FdMcr(3w>sn^QU?cCXTMm&nF_@MEle$dCMWWxBtxj1j&?6V7UezcJW^l_-^@CCCqdZZu0>c3FjCmijYEf!Ybk)H)A&7s~ClRc=VRDn*yzzaQwfxx)MJHZdmw!;9|AMuM+>{k=)v zZ6_hEX%QNR?ohF%>8z{anVp6zRi1x7@|^er>^)cuoPp+ig*WMrb@o;qyp++V?REZr z;LJ`d|FPv+5FS2uB~|gH$AbEvGZNpsV8a&wpM9UF_1B8V59Z!W1-VY4`6V#$SerhFap#M{2@)dehh4P@`Y5 zC+%?v2e6r&y>`4e{SP5p`5!{G?*E4nb%2J~YD36;Vh2$R2ba4k0lKXd@*dBYK5(9a zg7};_vHWG&ae^m~$~K}j7-R>yXqypSt1MWkbM=TAmFq%M?6|qMHWOh$ zBtSpIJ4itQMGuV~2yqz8V)q200GsfW?-hT&*VCDRRE>X5B(Pa9g7@zc;@QD2O2XZNjT`M?Wy;xGe?Z82SDaAcQ@Z=6p4-_cv*5%^!lToy3vN@e6w^#tO4G03j zleW-QAB*fk<|x3=0;ggV(Z4Z;=u-v${dfW6DuGSh{G}hywEtjTIqvKS3t46u9s|f8 z$@i2Fe0lY|7OM&YDv;nb0mq@G3<*@kFMEAVGLoVdi(LUow*#K=J094hlcZwS!{hJ* zRgu#nJ*Toy*J%5EnAz{%gs4K+xh=3heGzx>WVVvHk>aw~r6k}GOO_4}qf6xN{lOnp zgTnsUI=G?HhQ{qnp|Yu*YLNC5)4t2ig#dX2SkY!f^G5j;VJ*MWlYEr|C56IBqS=0m zOFPf<{rCNYb2jv?GS-FGzrW*MAIAjYfbD#I!qE>mfUzSilMuGtn}}+|2B&_f(T+SrP`;4S^srr*FR**|jyFsQ0j$aXN}a?-RW`;IJ(uAH zb`T(=9F*#kZ(HMfQUafqZ%-2r&KQc#KKN!Fd_}itxn!a1z^4^{h>Fwm(3N;cxpIle z08%j@V2|!EBB#QKl`0Y}#Ratt@QGGM&`_cJt0pComt>!*NbdY5?*P{`sMG9}kA((m zx3mnfRWk|eR(8$4#-95X|E`OxTMDWrm4(0IF zenB_264erws93j4JbT=M;r!2)?b%&OJTp~fvH_8&sFYbU6ZfSId#Rq=`9U5-F?5X?neaOW3B?S5pArLsm8zwcG^W zc02S$4*$dkm!De*2+?QJ_?q@Qf_dMF7hGWGm=n_3X?`I$&1KG6aTMhmkH^mrg?v#8}?a8Fr++zpqTxGL!Z3>6U z-Xwjeg-czMt#HG%BLdWuNsG{7wUfrSdY&pb)MM`|gbHH2R)M_ir7Z_uzFHlNYSa)% z%Onmxs)ZpUCFCm3vr|9@hI5GOgGoz@!-4o`{KW#E0$O(vj@!bSI?j^_hqxJSh~FRT zp+(rfl>-gU;M=a>PS^evFfyyZo|949{2C8|NV!lPOY#GU^TlfeA0!@SHIX(M;7lNp zM;!07FtZ6L`*Od{9EZ)X#wXd1YF&)-Gj34X#s6XDPBzX6HW^Qi8DW46nB+TiO@=_V z6D{m@O_4@}{AZwr&`IhF3BAo*Ck6!Pw8Tdh9KnID>L54Ip_U3P#5(wU<9k(hGTsq= z-5?7*;SUe|t)F3iFELa8!TIvCE2jNn>2X&BfclDtg@#Ko@4N7G#Z!&TV7kGfBaph% zxlgN59}(v8>~T(2U#(c%o24^K2S>+HNea=%_I0PfRN0(JAN*Q@@33g$4m+n$wgOtn9y)(c6Q+SJl8 zE`{%9LKTW 
z2rv$r1B|h4-5DyHipW~B;=5Le)9KcMf4@vg^g58G)Yr)lRm;*@d}yoN>RZ#vz!I&Jk z$%}sSC%e-iUr3E@G)F_oQY!8NKwy#fLB(6>-lW#%kEZ&E7_9W9j6%evSF*hA9AwFz z&g0jDa1w|gX`)KsJ5Uv(taTk$+0miw0HO|%&HAvvnxNwpCg(H->p9J#<1k}mPlHZ1 z%~y0df39Pv^~IB^=6Y&H#lk%;%Qr@|~%7+>13S|>2;K%qqs8vR>eq9ZoI zOB}K5LG7Eu0-{AyS0F!dc^g1-oHuQn#YW=`VqG-{if&6CRR{3m?+R*0oAt>S-YL5= zn)r$HJ9DnL4{x+BxTyz?l&3fr5Nod`#Eq1VeQQuSs z-nje^lXN|sMT{~CwP&B)P7m5HWhy4C%)r-W+BxVo56o|NXdDbh=i);vxEz6Tw*T?2q_Q$5SQFGh;|{MA;`W^7sffi#L*&(r*i{0r&`WP za;rDR0(jtBn)2;PhC&?8ur@zUsBUyhbTAMYuZKdi^8?dQ>3w*ExzS!UwM}pPt4#OZ ztj*kqA5m$9GCDO1<*ONeM3|~(XDg$@KQBuFhkRH70Y9raqWIh>JJiQrK889ZD(5rX z8nh6+E>|_vhMu}ynQYyw9-MNXi-2FzPK1mWi1(wQ$3A(;AsaJjha0T0EDC|V8BnDS z(a~xrM}a+1`M0pxDMrBOu1hZ9DOX^r8Mh;9q(HWun$k;(i~K@9Gos+`Q|PA#`0^Eo zI*^aoBht)bB`&pN38xdGJ%0tmkwF+egefL$?Sc0^awb-~FBcoYiRLg;0@8pf@%85L z!C$(<2l(%ZKWF=2L6*AQ$T#D1>L&Ph7RPqacEk`6)kt%&ENF+mceR-bT|~R_9QNCh z4OXPmXmXPC&Ici#n9^|##gznpr3u1V&%=LDF~=er0XHZ2lchs2Egjfb!~Iv~iDOfB znPiF4pI=ocq~w~^crxrpTT}ha>-0=&lo_CGF(RHPshF~}lsL6YnNtcXSOk^?2dW+~ z>r-HK*AB1gwPu-Tl+mV-sygeBx<_&*JZ zokG;c=-r6zo$YJ^k$_JY0_1D`1fe75p z%*Xd(M$ADAQgtk|0RPGjXYer6%J&`I?RqleAzM_!=k>dHM;$U@F*~lT%fRZ_2X>S& zrcaRDxtecy?+JC$kJ1;yffe`^_n8%m8~Yutm1HmZMv9eZ7_h|qZ)bwkGAte$WM9#= zUT#g29DT}|LB0<6&XV3#t%-1QXdw#J#iJx?2xdLa&fnNA=>KHny19_Cw3>?C+6!Rj)dv z@U2R)ts6l%sg<~NVG4;OV}kZ1=$5#P?9S)9ZcKQl?<~n12^av&KifWXj{KdaqVJwZ z&IG}qeDTH_uwDLPqnfNO8(MwG=z6^yJ_i)cFQiTnkerOZjIFR=ii1`?#d`@9n+r== zKI6*rfRm2Ov~2Ciz_H!~690J-q zd-radQ2-Ngva~!(Em~~~`=ZB1b~bP_hA)}ClEL9?m+RMTwYgrgbBe(9K9ss{JV_Pr zvgqpa5hlo|YIQ5Q(f;KH{ZCcEL5`itj7~eQ(Kg04@|l87L60d?XqJAqO}`L8th1Sh4K=dqx9v{oJ!MupLxewY_Ut z9f73IP14jr{5X(RH=838PrPa)x37lEd0Bcy*4{k(0uf7&xTqh67|q^iWMB2Y#?H@4 z<>zy%RucWEeikspEP5XQ8cFk1YI-tkC@-G{ZS1ST<4IEB~6-`^l(Oe=h(>6 z*;QG!#tKyf84>KVlO!W0tssoa-;^sC;;1MVe2tWfM$j(|#%uL+Oa1#hrf)~f zn2hLs^%x3zn!j9s~fyl6|7h=W^~saZg8h(8J> zsZT2gB5k4HXT-f7_5&jS8D-@)LqT8Bw+hsw9?vhe;kww;b%jCRh+{%@C&v@9!%@id z1li`cdr6ee*MZNV8Gm#%KE#LAhoWF|;)wUCl~n2av5}gJP?FMvt$< ziK;4|B*AsC!7!3=w!1yOAZ?R8Itfee*X 
zb)aQQ?syf)Pjq?R-mpGyC*_**j3o?)!1s#j&;JViKZ|mU*ed6nv&QOvF`G+ZLiR*TU%PeRQIU(8Zoh+Ph~CBS{ge!W%esyjz^o!0 z=+BP+)J+t(UZK z_$j#lIr$eg@kfYAG6ms%@GBuMuD1&K=8JZ`RQ~wOgd*O&4ygI-2n7+$oiEU`*Czq0 zXW^8dKF{YLN|r7IDRkp8D?(&JJfWl~X+8a$vnGTcxU)?vM&Pf%W#V%wOLjS7zyFn+ zLl}DDy5Zl4$zcp`KFu3?%jh-!Ec?9R#&_p8=I-YsaBY9MjUC*|F9sXaFj06{fCy^~ z!xXaG@6VYp%V(;bZcQl19Vm&4j#hFsC} z9Vh?dB*l#B-qF%gD{U(=cbql(!AGS=tY&czA=Ldu1O0?B{+XsVT467L#*K>3RPFkR zh5aucof=2GlfM@lMsOLh7gHmHjn8zCT$s5JB(Sw=VuPDlPq@uRxSi6Hd9kISnGUZx z(jdU4kgZ>3oYeBUs;ZQDXnj7n&b!{Sv+hl;vrMv*c~#piF3@~g`s0O-Kb^f>aEC@n zGoI~@dOtV)DkULPN_lW^=U;OTHcqmzT}x=h6pe8Qf*y@OFew@QPMljENz{j>Wwg~# zprrikvV)+x4?161KfHg(2oN5D2Wsy*2xA=1UaiE)eq&p%8N_cS`g7V+?o8IL3rsZumA;pxP? zkSNW23@vuf4RzofYtXqyl)8C=x-ykMj5Zsa%|O#l^jFwmV7Tf8ah@8`bu%hA;?UBN7JQx*XEo!K>O+;eB-K2^w7B z9&OxJRft!S*b^&dTCCLAykufRTyJZF~k>!hOvh#-yb>+dkC zBOoe(njxm}i$5+fg3(z8Gus^J`@U>YS(lNA)qer!^(@EZ$kvsF_TzrL!lZ`NgG*5O z8Sw3g;+FE3q>Bh^BADZ&GVAYf`9Atl9Y3{=4>v~cZyM9lG=1OiS+`7u_45z$Dy}<` z!39!5dJ=l^D=O%%^55?h9AFE1y!=lBYHiZyK6=ccf>YaZcMy^SKFHme(>fy*-9(3n z@>p)S{d|{JdM#U4G>Fknc=%{h0e+H%iGQ|o$Z_Au{a_xMhF&TMxPJjB978i1jduyH zA+Ow*3tI?R_e0>>KaK1WNs1z!dih899F<=|QC~?Cjx}MTJA?970m=KUU6vfV$YkEO zO?OwW_9;_=Rb``+v^ZVXTFNOqYk>AU z*+9n1EPgyr&0^ox>JzLkYQDS^3rcICp3?HBQu3$x2Fe5w&w#RIU|(doG$|Wk7i0tc z^wi5E0Uu1|=&o){_`6u+dwVlX+&vWAAVC30P}gSQ7|8;NLRLJqvxI>5?Z5`vL_l#F zfb>edbXSeyjgz7}J1_yWct0{|jqF~ZY53TsGFB^1T`uqIg$a>toSI^Ad!*pv7X^MNyTa%IEI72_47 zTH^xt)8A#e2a4@j*1zvG5%e_c?Rk)>~7kgE$;=F)c^R?JqBSJsJNn zzm!P7d+VePxl1*|j0Wt&pwCK)@gfAYlwnkZjusHl$4L26u+5nu^`NFJPLsD1Zc<_- zI(j~Mm9?5F6)qtc;mjx$9 zL(t%1#)Y#FNrD^p6x%b>HB0tR2(u<0qA_O{a93@x1d?Bl1!x^lnq>?H(N&ZlUb$hYsHTiDB4Nt9TGbV1DqVqdu6pTts^Z(n0&g`!hVgQNDWs(p!Gi9(|Jz56|92nN z@&DUL^|FItpErN0bfDfRkN=zPpsboyFuxCQVa8bo>ikP#d)_VY^2&JKxhd-2eIhRGZT_T06_N9b<{;*TTV~(CE`8oOq!Z(}9lZ(=ietXsMP|A3cJ2l7 zbpg4)yk1#%23g}`mGV*+sSx#j#)K05tKfNa-M8lfX3@7 zF|)dpjRWgv)%txyr)7P?NDp@Wa zf{f^e7b_dT&)w70W$G|}TDC{pv2l)?kf8p6G9tI3z5Y?sWv$)l(qrp+@A*?)EjLCp 
zse&%d0clfsHP(;W^}Bu*Dig_t^52z^PY?ah-49?4Yh%Ehlx;(czv1+HQX-7N?!ssqbJeo#tUFq?(2(p?Dt+G=G`(RsFi$P{mdeQ5e zYPT0wuPFwWkO;TFZNkwS7%vbBj}*~OX=e??Y?>$#P-7qeW$NL$@^Wx-F=v=k@duY> z30g>Q(b8v@Fx*Su`C_9I0MQF-^3+*4=g{ZKfRVOx$VhtTlZOT*^(U(0R8Vi{3OBWU zq)A+=h2G}ZPu~U#qpPhlY2KdVj>KB5NnM|}W*gAww4QSf-|H{m=Qc(K3Tji)k7m%3 zsgBavIXIGArJ|N72V@M5OEs^)o<5u`^_Usea<#F+MGC)-@cY$(Oob#D!`N-l;YHG< z`ihf@o8V(=VaT}5T;xNNv#|b_Q z23*SwF8hO7hmM){_h6v*c~EF(TRBZ}{2b;KLw0855xNpAf*2CAMy>AN;x$Dv+#~0I z^SbBGgzp!dPn)VUw3SAL3<2E#yYPI91|TPvLhn_EFimZH)FQgQ)emmA_|w(`caX4< zUIDh**R$R*IB0BobTnx3k2}nPt$jOU0G{%22^tg;Kvl7@hH13H1NT}_(q;U*|Es`Kg#yAPz*8J0 z0d=#!D9g0#6?mD7ypYCvP{pEfny*Il1pstSGnj|RA|b5cS|K)e>7hZA;1tI5)$>Ol z=QKzt@r4t&rr8n6ltq)8g_HB1X0T?IP44~+7vlhrqJ?+oUj9|qpPPy{E5dGyg!(JZ zF)l+KwZ7X2qgf*%>8B zo`g{sM}YAtko)*S4ZKgonq=|Eh@eN)7G4Y_k4t79T$d9s#Tx;uHI*cyBe?TVl-7|F zeOvfT+F}%s_&!p1ZCgq_+8dJ(j|h;9Q`iFKjP-<)zZt{u0r-O=mC+;4Jc@FRBDgTa0bB9NdKo)Hjr$n=}CdxlOlU?RE8)U7Q(*)4~G5 z8|RH0ntdL7Ql7~UeT&g9Eg7Hg=kFIEW0g;^V#ZmcD7%ur!fw-BHUj32}5#Vv*{FNjx7|DC@X>9C{#y~~w@mkZW; z8FQM*2S73WkEtxchYc;$QkulkRPIvrAx{fYWRtKX#1uH1@UAHeQkg{S0`P>SlxWya zDAKS-`FngM#++I4obAbR!90Y=9dPJ6)3X~0ZY3KEcH#VlcfPUMb<;`;A97_5*H4nH zfsj*CrbNe#j3-!dhBLh6_1o?NZyFEs2MFf-6DTX^Z#X6EwqiGgw2KH4LsdYsU~?aD zj(ECKn9ClpA z>v~5BLd(u7|NF7I1Q2Pq)05aR0uZWnu^oFtX+76AXP58_D|OE99)ow!`HBfh)B~gz zAQG-m#~*Eng%_E%fQByOhTAGBY+F6!1+y;W%_o&Jqum^3_x~|AL`>p#m6crhcgzn-hSE5f)b?AF6^b( z0V&klvnU5K^X4Lh*m!r$9U^_S=Oqxh3sxoHC%=a<|Sh_NA5 zg^2Xb)$yban4<|NA8cKS4vx%o+C<*}>SjpnFOV_%yfJM3k}d`Paj(qpt>!|50~%ir zREe#lsodu|x|PxMai3;l`2phb5R&IjRu?jllM z$R`c5gI`k5!_E-|Ryk15^OdZeyo=HaVqO^Q(;IBdyyK7Q*}AB7)~irrY2mk_Vj>bF z_a?#dlk);l*rK|z?uF%xYgnk7ZT}q&;8H3>^7NNG=jcEdpgN$u&NJGU!Y77@Y%TOE z)tUko%b!8z9T4F<6Muh!Q~fzRV(w`0R02yE$}GzM3KvwBVyK8Xo}^jxq|nMNJq5?Yjwd~B-}9KLW9B;bkwh0B$F zS9bBt569}|vjo*z$8OEg2EcXFf5*TbuBBu(V;?WZ$+Ws8r>XqWFs!xh`-5nO4gf6M z$;3ta_D{47vSXCuZJA(%ms9%8udrr7MW?vY$V3eU`7YGm#(tS_aV7h!e;f9^P3lwc zTwdu*wbX{v%|yz6T_cDtp5g=6bnhl*ekeC^{YPBM8P3Q%pr;^9N(>Z{XG} 
z?zDWXJ^^aosJy}ZUHMQ9P@tMt2wB&lv_5`C4HrHmS;*xTP?vRxRT&*go`lDB9uHWJ znw}*Ue7w19C^MEfoTeXZ022IUpBO{z`=RZ09hb)L@XrPb+DwT*)Q<07E8P|~t_-u& zz@YjG?!BH%&Fwa?HkBEeY_;p_zUC$#s0Rsf1xQ~<-mjr931AVVoqQyHR|@UGq=X$4 zE>yx9RH=Qe6_5AZhw*k>(6~6qf>g|xNLi|#vk)Dub;v%wF*{uk|E{Y-38y6^*UHW{ zqDb1yhX=itKFjaa5sx)A-x*!rQ;P6&jZW`!Vv(AG(S>vUl;#I(^oLQUcH>Hi>KuiO zX4ER8Z{I6TVbnz6Se=xK>N66(n9p1tXfwW#KYhs(k(a z5-I?|q{FsT{T?+g5WfJHFuuu=J$VHqhT!WiyLUSyu1yNxD>QcBbo=K*Rsx1#c{aeF zT9V__hR9m`iQHl4{aw$8VW@`e_#}OaijDFaUXo_OV4Ih`V45DF@k*qPLYMX$ev0^v z`%i>jp*f8hO9I!oj56nCL@FD=^;Lh9w1NLlGeJnX0SadDt$}=OG9h;C;zXoOmLTX^ zD87#$q}SD%Ybkcdra8DUNU`%nTiEX5Lt&6$CBOjwSN(|J3w$TWM_g_jvz*V!;YqRc zv70&PFc6L?dBA=#*5&w^iqI5J)bX)r7O&(Nj!Iq zu%7ZK(@n3+1xi`G#MrcKkYnq-{mwdeHObuoT`VHXmO+FxphL00o|zIaz4dFOcYL?9 z38mxv&%BH*jOdBxLlm_b^@g)$Mg8TLC^VR}&4!#g)rDe4gK`!@7ycAHu?)Pd1l(os zAXaIQ@#=loh^!T_jF$RIhA-^Qf04_hP>Zk6fL!+;o~cwSW(IjPLk4H)Q6V)LF;I$preNo0(Yjl-b-ZNns_f%nwWbU)7Wlr@Zg2f&C`_a#{wKbIGOh-%I_TKN(6x)T)( zXe7o})`pSlVb_ME|IIKF!kQ7U`MqOGSu<~=cr4v`dRWTZyQk*XHNy|C?3aLV_Vrb( zvaQ7UJLZ5r8St>mJUvINg&#StnVq}rVZs!jp$lTf2OT%&aHB;TFTF+|!TD;7V7NQ& z-zK_!2cbj`{ND9}T7M0-PNuwf_CHVpFj2rcY`)yQcVmg7VJtj<4r# z?*ja{l@yg&QvJg0pe_?b;j$l|Wim|7k?rf`AthGPLVrCJ0{x>^mV7`xzHMLaXCq9k ze|LMH1SpiZL6yrNCwHod9d+IinZq!4b5okZxNcS1RM={pC@%G%(*y(HzGxH+tTgE_ zeczMN(jI=}G*gbUUR{}@_{~tD$%O&I==+1oPQxhG@XMARFy@If@A*CD$da#yGHc*f zckhG3WvnAgE3>)^!I?dD^ptdUtNNdDn;Z@yjo4evjWz!115#4R`E8PwEN#)8p)Zbw&G%?67wH5u8(y>Uh0Cj}m%nv~4dG350pJrNI zCuOUatRm_r*J**!+1*~#suhV;5ECiE9zdmMqLwTPYDS{Vfmh5C{W61oKpbQceC8s>k z*_c5wUiBH%PalOU1z4J`{qbvB(~5D74mN)gmIf1kg{{~_GyHi{UfdS{Dz+dzP#upe zZqB7KB4zc&SY)=;O_FULz;A$-&qi;s@j-vNl^CoCVPZc?BXE z#Tl%4S8g}E)hr+s|gkk7Gr5s_&_Q z6ax?G+nawZn4Xv!8CjnCVT6idMcz4{;J@FJ&UA0WrQm7G7z__484Wsz?(_rZUyJtD zr|H*YSD<3$tD#;vBWStecsPb>D_1#!QX}53emKg`ph#4#2^Xw|1{>;9i&<_ZZ*q&<-$w?Ilf`+H2@Dya^F6Qv5b{i{N-k~3=@po@GO z5bnF!3X9faf8#8A8dT3n1c0+jMw 
zQV>py5rQ8^3L|*SUUj?PTEW{&w?bmuuWBK3)zFYlTnej&$_djQo%Tv%x08X5s+v%KlQ{jq8a0qW3RV{uV~)t6cL2w==@00i!o%!R&j{V~j9h=u5aqr`b|VgjF1rRs4`4>U_mLJXuq=4Fy==J|cnys%zqCJN~F4H{GSa=d;QWK+zPlJ8SrTEJ9BmsZ2Ax z6ifYTqQdh&fF@vWR|0C8%A9%0S_;xWQZ|Wzwpd4~gRS+qnd=rb9Bb!~Xu?g6QFr6N zgt_(<_PSi@$h9;%@DWgBh25x#Ab)&Q5d(!*?9s63);t7y<3!}nxHP*6 zCUKcR^Fnt*Yw}bgoYgQDEQt7gkE-RQ05>Oge<|TSPVTFvMuKy-WNq_-A{=_lA*6o< zze=L0n^W<(zkDXaUS1A%aCV${WiBd}b zI!bgU%zV*Q9AyfhzzS|Qxa(f>ko(xVx_c-_*?Ky(2Dj`PnKar& za+S#S-aY@0D5izZOabiF;}9EmOdop}3e(+_Wft2Hc+d?x`hmeeMq9c?&a_E)+mQr#0vwF|`t3alcJN z3J+*Bk9;azX`rCNL*LpjyrhF&f6?KcAb1)?-ETZ5rCBfA`|SjdVqe3fW21{^CQll+ z^q9X?R%}2DB)+9*Lsm3-6SRVi&dZ6lgONg&KRn1^Myzw%X}_{ktFMLs%OObQAmGwe zBdp5JsZ>H1V`KwIGWQyIg^T3!D+WIv7Z%JM>x%=D_t2!Qx*=&cz48vtO5+>LW1bjX zrpDXcHhO+K0Vg5|QaNPVxz5ioAFw4x6qMIJ1PKlU+&i0;YQ2)c zh#cc^-!Awq#l2aH)=9M%q@*Oj(9|8U{+cL|P|)Yqxo|ld9o7Gm`PO-HZgw$bBV8Sv z4q8c6!i&BPMjU1|#~_c}yRDwi0#^K7GgCtcGM^#$^!LOKlqyskYX9my7ISv>-`&4B zRFf@il|qQeqiUHJC~L3`H*@2agvD2>Of73qhdX@wzdIShe}?EB?T33z0xDg#xTJD@ zGyjK$yz{1!Nas-kygB^CwhWW$bVb6+&GNk;kpU$c!HDRHPZ8pqm}k1@!Gyb_I#i@m zW1(w%zhtiV!T|C7QcOn_U2hDxCoEseNXVmvW~*FJzrZi-@S7HVIoEvefjAeG+)s}o z0g%%{>%bvr4!aB#(p!~izAqCVzW)+~^CI`3iV9N0TQSZOTI#y#Q{m{|l?L7=_jWov zfL?P(F;B5e0i@h{p0$O{cO->Nj^I~xwiyC4*v}VrpLpA^Mdey{&%O($<-%34Coybt zF+%0bXciXI))jp6J2Jo(o@MFd;@&rqc9de)E1)nwjc{31m1LM556&rON{MJJvAeW!D@(NEzmEfSFB^2+?aR zO9$AHUVdg!9p&@oPThM@(+`$TI@G~JnlHe8nCDJxm}GcKTx|%p{5FO1W)03?lq5x$ zN5A%J#DxR3otQl5sq|eC0^^pb8|lEKhQSThvDJI_4IU#k=Hc)HF8+C{M4~19%6+v$ zyv=$Bl|9&yS3FPUhKMpz5mTNYLHXY{eVEg}Vm=E`yF;eO^+ZJT@Gfk>q`hk?oB&s8 zk3OVxs6)Lke@cG7W+RLeB9w4PqZt|LOW+!ND8e`(&kJZ+@5?FSxHFp6kGPP9Fz8xK&x4^s7^ml_ zH~1y1Gth+ABNXb_doX#V4m-eD0H-nJ^9(|rBfm--P#bEAD#Zb&Q0!45|Cqnu`tdoW z8}~&58jpPWiRZiG5m|&kq3VI+V#@XAvRn%NhxEt*j1n#+Je*oD4^ClxrKCO8moO#4{+H~{H=-eRF$E#mXBz0Nh@_s_wk3T3gYq?6 z-*a)MvV66Dn-XNnV-U4as6`(sYGA1Gu*`86Cpc z@4si2`d3OU$*< z0g+Liopzj}X2Zu2UQBZqq%V8F7H`g}++Vgm`W5 z!VNP&^;hH6)*IHl-X?eMra~~uBbLT{<2mNz2?iz}4k=+v+q!vLG3o;Cr;DA)hu)jp 
zmn%gjGf#*leD+VzhRR4YN@}>{%HOSgd*q!UpQwpVZYcRHoAdzXSRLh0bTWrN+**=D zb25#)xPZ@LOmba^{};%V_)Ya56K9P}9jW;m+HY-eP4GW05C9tl@B>)D@6wqakW>cl zaFrxw`^_qsS;y15$Y6dn1sVik>wlv;`*rOZKEaWV5^yjNJ6rv05lQq}`GH_3-KobO zZ>0CiS#5AIlF<;+sg2Sn&Sb2Ifn9odxuUC| ztgV@mgR`tGtZ2QTOz_8)BvL~zb1RS{g_jFcVi<5wLueL+9i8(ouu`ybY7psJ~73!Q0bKGsqTWZX^z5e5>0lYcGq$p7u0<4+3P2Q=$EmDa)&S{4*+B5?{ zJGe;m;~kc}!B(7Ox!?bxPV9TFGmMD(OO9@7!T73vVAkRO)cY6HwW~A9uQ3tY!nk7H zekQdTzINEX!4@RKYA`_uqzSdLIWfxz#TSKYJ>Cz$tg-ts9-Pyk;Ss~_gW=Y^7!;p0aH zxiNkBxb)-(%8_12EF4! z&IObSroWP25A`1)S!v^;wA&J&J#)i;t^RikriRe&3n13wg@^oc);!f1mhk`&)Y+&_`FAo=KAx99r!`|Lsrzeu&{Q4VHovq;{QJ z50Dk0NF~tn&pqj7_YMNb*cNMWY%z=2nNXRA!y9NOY)dkzl+4=eL3hnA5CXFyLVf4)md@XXE@6$K{{rApgnZxu23)AOK;7zhhdvY zBUxEZ<`4D<__x4U3^k5k{e)` zmfXh9M2V3$2R$5vy;&OL9WbH30K-Xrl(nyEh4-@xs7fW{ZV@%#zb_uZUnl|*{^aLM zm8Uo~mwaIQ4^#=GWlyufP?LGN~O#JRyV`?^kj^R69as(q`bd*!(|M4jPKO@N_()ln)2o|g-d*}wppzrdv zuCDAiH+0BdUeCBHB-`BLYxeFNC18sI3uwBje{MY-Wh|?~VO4*sPA8NG z-sPfKOf$GYZRQES+CvXXVV!e-nP&2OUkfC#;05k~TSd1BDl&#PQc!XW0s)I30PC|N z^q+)3STH82HVeyu9$m)mA3lgtIs#;`UCxkGAD+@I+kYlhyk`3)09FsHrIr%fRJGUZ zDQhp~uI0g1qd0FQ?wjt|%K(N8!uW~H)@B@SZ`If3C(Kdftv9yr?~k0~S+;k3Rf0n=bbr=`VgjvY^V>bod+k?t~o``{~?WXn-!5;s@ zs{gIn1No6#xiJPD5AG~U<&{DpFsomK3FL(Dc|MRP;jCCtq_h475oxU>6W0dC`)QkL zGrmnd5Yeu%{D8PY;b} z?YP$=Q`HPz!@;fKV{cN4cE%X;+kJ*1n5qWyK3+trv;2klOi77ZC43skk0?skg-Oi* zH7w?ho=*M4B;{z73P#?y-xejdyJ2ky%{Yi`<@^L6p-fb^Ycug zn2gN5*YCoB7O?Prp2S5Y@BEC(05emaK}|v$e@AZey9RuS>0>XZ?LwrkE-u-vJo|P9ZRa{>lR~optb$?ks|IO^0d~_7Ye@QFwmz&$PRQ%EwEhf$1i`z zOh{5UcBbUt0XQHr_a%|^qX2Nb`w#EAD~AY0kF*nl;GfTuN@8(x+b>~cJ+)wjPBr7y?jteZ6mW#RiCdP>#dC+?LtgWAp+>+kmP9|s};eFNks-+p< ziRxUmDMr0xJ+7Ed#-)tV7Sj4_-xuQ)eS3UjK~uo#cAi=*-MAc*S4d6w9(&gadJ6k0 zplwh)PgKiD7GHUaR+kKve$D>m(+JUPw_r;h;HLS2{tn!yMk}v36~x6<5_4sQ7PD)Q ze#_em<=l_Va*x+9F1YS@4bH_^oo|k%N}RdeQQ3vVx!kFa9K{&#;LVD=9?OY)D*q!K zcxx(mgKo3F7si3M-&{mdTF(JLysRKjg_9%Lp>AP%5gHQ!nGEkDn`d7ma18vk@bM`5 zMX~>8N`8nE_RiG#fBvV*t@j*BlxDet(GShcsUH^}+I71-JBoPS=YD>EN*%U=I)vov 
zLH{*Y{zqqhhJ*!rL=jihrp&vdX*jztVMSRgPa^@9_gDWMD&P@6^IjdD!4UE9Wl*MBYMI1* zxFE3Ey2W@cvT*ou#~H>mH^mNAkN25k^1z^4#Q-H(sE9%Db#l80ppZQ=&$F7m;0piL z>2)HVO~hj==Yu{Z>VyiY*L!t&;Kd^0yQCVovy)PbU!OgglL(GBOq+#%*TMYbPQ3~? zMWH}B!O-;FF5vtPKFOsoO6+rk*|sP4mpQ#9UamYM$WL8|pRgl<`wIWwxSRnn*-gMH zHFjFTfGCY95bbO;mH<$`m{TiiRjWSAsll;42Ha)R5Vp16ws^xMH^&Rz7LH0(= zGSX=pHW0qIKNdFB3)sW^OH$a05~k(FE}b+E-8z06Fsci&RyA)4 zY?ZNuk_k}qm8BCKEkw6lUXVhG9EWuFqt$6zHHHDqMxyO5@YMSZqonfO5sHuakAWy>@V={ay>! znda?CRRd$Qgzt46SdvhD2v7c>FdJ(2gH7d@g-~srd#;WVcAz^MY_LChec^{m+ z+SVYLiy2Ehhvu-=;fRO{NxT+w-o`leWt{3Q8^Zn6fC*lysJeo_=D@H@x)kH(ht^k3 zgqSmSn0v>VMI}T^Sqj9_z;}HKG|L_Qfdt9Ei1IIqN0@~(Rm0W-1b_60+A9i_&%>E* z({IcMjG`ToM~j#aR}ASI;>~s@Wv@;v9~*S*SY$f#&{HarM&Ji%%H6l#&q*B5 z=gtkfsRZ6#VcIm>$HI`FsT#!j6#E?o4yA zv=;*8umMfnB+dyxpDMVu;}5t#e(%~s;P%Sw5UL9J;`;Bp4pRvot?7k=v`#_3yU(u+ z_RDvYp_`XzTD1xzqQ~SACJaBQK9N4+Id??9rn89;0bUf|G?{MN)h6{`JPpdYd|=RD zBr1C$e=*;7VX1lK6rC(_$|TiHp=nF!zsmq^ci*0BS-jt(VA*4IqV~6mH9vr!?swyU zOd86F>or^)q%JQ>iP#U=Sa^7-iz@s_Nq)WfO6wBB!aD30mI551xmEmZ)cVZVgJfz(m zf1R<=TfbNDj6D0N0w?kc{S5`=h@R#M-LJR2No6AWx)%`>OE4Xz@Rqp=lhOjWc^MXc z-@V7OBCeUsd@Co0&H968{_YrM+itM^6_ zni;mE`+QDfrp0ZhebvSSHhbOivrxmAE6dy$#@-4A==qql(LLQ8?WRjTm7K0};eZZICQ^6NawoFrC@EbFHf` zxUAw%>;QbH@0=JYtI~Xe+v-FV-%iAe;DTm7FMQY}CVYckM|+9ZNFtubl?1ORq$$3^ z;(qMW>)6jS%)iJZ4doeietgEivML&l^|(_={sUHe@R_iaa0zExVj6|jw2kVeWB(Yy z$>q_rTD;@>JcOxLE>Id-yS;@doga>e@HRFi)=6m>fZ zFFs@TX*|kKg0fRY)z(f`^DzJgeij|qHIw{9U>sia{sA9i+8Kq;#n$Ai%3_-pfC6>S zDgum*MQ-F74Vyrj{S~5T8I*2xcap}RYD;3~!35T?M#xae|$93Nvolwq%@=$Ee(HXv&8f{4M5TC#^&lc%PXXzvwZT#cKZhJ*t1sIWXr1y(R*G7F0uj zlH=cXXL)IqT7ErYX8T^rl-S!bthBJG;q>b>^N`02><&p^;NzE1oIgxLIE$XA^uUxR zKd~w9;l{|j_FG$}KC`+!e$o}RZOeZ>Lokun3-ZB;##_HA*O(*(yh!1nz3Z|E&g0^i zF1HF5croqoh}cHY6h{LRpC%`p9AcFJNX%hWo=%Dr=70tyblWTh$BSrK*$H$j*2DU8 zXIO}z-d4)9EW~pY0=?1klzD3G^m@e6R+kv;WPYrZ4aB@bELr7lX~Y=4^iYCd6|pRg z(o-+Jf(2XH{gqgl^}WwnBk8Bj*?Z!iOhxzBNK$e|%=FMS*^W7}im*?|M|Ffz!^_SH zq3xTCm9-E?Tn{g3m}_-lu%Hd>f}F6Y;M6qINS)Qb##Y3?OJ6ak=lfodiKKI*_!V4h 
z-U-WPKJPtKgo>+=?x8qIVJ`*Kjfj#BV(3&{SUZ)Md?971iX{Lrq0)Es?d)MgRM@PB znDor)0YukJqPeoaOf4)XRw*ATK_&(;_I%v1|2-prh{MwLSq4=vv)JR1I!b*Ed_PX( zgNb0cVgFzk0%0iT86S#9Odzmn_c#TA$zjApeJs6To`0~Nl&lj=LQ4kAtnH87bU-5Xu9E{T@Ct_ub88D!zn?KPUtu@~ zl>-qhR!bW>U5w@o65>w>y^);2L1F8W(v6aQjkD`ArBXGQ72dMBLVqVk7j7-jm_ttT z&TS3gxF;n8rFZI~PycGa{qUlA+*Po6#?B{9D4)H*TVsd$;fM~#EquiX+%Q5VUfR#9 zM%Se<9}ikegC>p}dJeLxRg76DBs{AN2!c^~%FIh0hQunZRJqkldxr$Q&p)& zC;4wyZ8s`l&Smn&Uqmi;?ULu5ke%a{;FB)*T{xZpkEgc`h%(;3wukQS9$@H3x|^Xx zK#_)_q(f;KIwXcJ>6GpeB&3HF1f)BZ?uLi+p7(#g-=Fure%D@W?HLJ<1#44x{#s*- z%6nnk)ENPPvt9zjoO*-YC?ECD#MIDd--yT3lY`Bp-^k2MVyLRdKccs6*ZjnGEiQ9P z11As8nW9S6%5i-`o7RQ_CE9@qR^~-jmRZ-_sQZIHWh|Z>OAg5YZmH61DR~tc*GPaW z=slWfb!t5Hs{cB%?<6(mqLxZfMFsEdAK{!qh<+sLGbMmrY6Ji+n^m#P;bgYE*NB0h zq~z?;UV~T{<^r5PY1NfdVT$$+WzeziOE=tkEyQ8s-`7{z%Rp}Q#Sx;aOOOxZEvY~< z7cj+1K)84P<3L*_og3fDvhP*EAS)FnJrxH1JH6smR|WnK8i?nRm0AtO%+aB_#^PU; zj%!K%E6BfTCb!!r)t3of`b?=D3iHP8X(M~otP-R>(Yc(Yd7qmkTrj?Pd)Z-rHKT9r zO4%-s_{bPFu8MN6=;pr>tn+cB5*8jq(LFL860wI^>%<7%JR}j{s9)!%zkxy>%05Q$ zgHc`=x*9h0O{W%Z*oA8!80#;dGd(eohlPm(;#aZ9a=XNt0hxbIv{w=l@V?5V-Z@Qv zwQ8gLO2!ck2PSVx7Pc}BuwwN4-AvhhV;#MEZ=vwh0w2AQEm0#ZbccU&EpE7~Oe+?v z6Ej61KJ^yTtN!=~vP^_c8(Bg6bT_q}RZOf=O*e2w-}?z)AFBgssN{m0#qZ!<-D38{ z(Yr`fzAlCcyn8;RcH^l`;e|vV;0(^5M=4r-=;D-4XU-{-)8->wt*CXQxE{dMDd>r5 zhkh4pzZg@*$dka{5-SY{P3_{X+8%s?XUq7&s1RU$4xcQ?(MAFd!|zg252`FPBYi5+ z{9mg@>`a?>!fycZ~xQMdV7!%>d*h7>L}7PLi^2vkhfDgLng z(QD~WmZ{4eQ}JK>9loYvd-a0VV9nya(f&8-Oo%s_zW&-tsySF@+3$KbD!104F*$eT z6TA$6$+r?@ns6eie?#z5=5+#!#;bd81V+pZES<+Akv~jTEu~7Ne!g6m!r7SwD&6}8 zdqAxHQHM);`#g4CQo7BawS8q`&U3ZB17v6u<1Vlo{$c#ww_wXm-jq^UKM=z%+zw4{q?_O-Ewh!ErM!IqQDeTJfYrRK^g9VDWwulgM% z(ML**E?-I^Wv{W&09N#&Em9IrAJ|NV#%O9(*|D>S+4`-maq8YcPTwr`;Im@11RSwT zs>9?0alYHaL*)|aBvZ}|t&+}$`m%~-`|q<>!X8pbKwmf^C96||-&cMR&np3b zs>xzfqmB@u_g(B+k;(1R?RLF{DMe*VAq9m)6hReQxiNv-ce49licnvtYf{mSAZ=6m zSO**YeHK`Ioi@%K+S1LQ(C>(@2}P| zM03scoG+12cjfXiO``T(tG25+!xiRVB0N9nG&fesUSB|PcRK$r1VWlU`gz4Skhrmp 
z&zbePS62q9)yGH_)C^f%{7Sb**7XP4vVdQLek+Uk-K1eijIR&V--Xklq(dG0(NwUU?iw5{l;-K81J`uAx*~ zGFwv5Z1pyzZWyOSc_B={0svc1M-LzMtIxUU_bKjsKz+BTlP+p*(l`N(ZR+lk z<_E+X4L89O_?5_=iJ3J_TKnm({*y;U8Peg{kHJQ`QNkRq+GoEp^m(f(?`1`cx94cv z87K&P&(#1OApIt#9v~S&xFi)^oW|e~^xiUSna?SsEuxSTw|!{%d?a@ABp2$vSp`09U{oHQD?&03PTEAu_>B45fdWd1K**>pc2#Lr7hPzVW^kP0LnqQlzaNGp<8?OZxg#?wa&E&Nf=Z)+()RHQ3e3%-?#gSa{_F z-0;)Kl-?p=HCql)ehckR{>N#L)abUx36RM{u6Wsr%I-TO?Zlme3X}7$rP_Nx5lH8U zdR+d%pI5Iu|JJB=i%Uy2bSLn8)-l$&w4 zH^o%N;j~Jrh)We?api*|7=1e|8HzcHg zO{{(Uj`i<1*LS`WO-hX3<9I!`7Bm$Urs1(^tHv3?;B zU&>>jIq1}Ut=XZ1%K9X{!`U+Dcx&FPdnxw!d*|RwdH%67Gd2B~7UA3_8_4<$%ciYJ zt4x`>^+Y9FV3YM#E%NcZQ(sbAod&BEdP~OMdMXHp)s^=cPuu~0AddSVkrdtj3l~uXWb`tGzN$;|7cGvot^7(y~P2P)`38K-Wf}|C7XPI(%3RQ8s(DVQ1^keyXO7sfRLZxRid+IE7BvU z?!vOthhyX>`z#62@LL}n@+0MkSc*mD0lQ0$JOX&YuYE6??U;r{Yt1}VJirG5ZfQWz zB>`abm)JsO;SXE$<*z{5#-WNaL+5uY-kb}ZHNSeD8vghMmdQZ;F)5mUNj2`M=1O-e z{IvF1{WwxRg4BEVMh{1{5}v3C6tSeAmxE>}a!3Xm#?(uviemNTWOnFBXC_5D8ir_K zhRthM-K9dQG01$CnjniGIIT|dkbC3h+4m}c^Yj;}CH8xJCaaZ9X?kSvJCZZuo*GRvP}PJ4j!z{msn_EOfB<|&uZ3!+NczLEU5 zA`F5qu64m~HKYvHyZ+wt59G9mB@N9cZYw?kY$OW2jjqD#?OKCsmtSs#2o(1BkkK%f zXLpEIKuxzxqhWiWS$n4!MpX1~|0J1jWUOLjlB*UGXxm=l2gozP1Z3ncmN?o0GY7;Y zyNkH26$y%dCt8BV*gr+c&VB$fKy64%W8 z>M`GLWUYjM6fO%W+Y!TV=%Ys{Mf+^%)XGRSFHEL1+g1t+XB$$O#8?qhqr(-3@HwQE77LX4(+KzsPT7DOUP*LvD5bMOYK0 zh75`>?aKp2MQj?}Go`b*-Aw$|qafJ~J!-xx6ivWnC*1W|mz}}u$Kz%gZun82QLf`2 zBrsv4m?~U6=|6Po(#Vr#kxbl4G%u5!J2TO@$gyX|9PTN*6LC$2)~7vEeAaqkZd}AB zazm<{VD^!w-?E6~mo(;ScDmIZSwL^O{q_nsr#KZY`x6WHM+S^o(m`9$hnZJ){gHqU z9N4Rj6&9njB_aPpM)~>Sb%A$GB;|cYxm+~9X)J^Wa$-amC)`?Qe(-gF)2k4|TG9R? 
z@&b2!$)SEi?r~xtnQidUr6t<0n>?cvPo6c_uZAINlDXIavCX^hVUTRTN@KZh67H?H z?I?2&ALCQ~Ja!yBwOciY_g`3gIui?3ab5oTNf;yE2tBvG;=htLu%BSPkAsP&OQv*f zdpA-oB!>wgZf7c%0w9Ww5(Hc-Q^#aW2~vj6ESa+wf7=byuxjr9%GLo?OX^O#TX_vE zNFf=~_Bb7FTPia2%#^rOZCmbIiVM%&PHB1UcgvLi8#%_ZhT2XFtqUMLOeWgrnIxA~ zNGx*}f7ML;nmh?}>jqxyP;Ab&j^AHXlO+@Zdal>D)*=JrY>Np7Jz z__a7VQ6SP(m5QK5*RILwj?hr#q{R(2AwhcYW8+}{z6$Pe3Hl9UXsiyzV^M^8@L(pz zB@tmwZ0@1#fZdZ~53^R!GQd|!~FqnRCw6;7@0Mp&}=BKMMl6#ZK)-98)O5>kkAZwWg zHdUNHT;Z3D1V+H&C)fw>oD~QbL{<~wGVr@uF+6Nff24dw)+~@NAB-d8B7A3p)~V{P zewNA>1IVCuk5so$n^leA8GnGu`|*Cv)iVemcC*`*MziPkC3-Jpqx3c;N}=Bbh*l+N z^H(=;ixVU-Cvyk^P9(!5B>+-LdFHFS3m3yeyPqUYw3(@ruu?FZlVX1=*|(@_VGr`e zE>r=*e^}L0p8ukvQ2zpClvdu8nq|haM`?iut*i9(E@49t;lmAMJO~YBVsn@MMS?FLXds`3DCTAfB#(y42xdqz6!VIWr*O6 z8*tlC7|Fds?qyYJeDjc!BI>NOc(&3JpLX65SRRJ{F5om0++&S0k$#$ufp#i1OOa%z z%)`pYYYU0=!njT~mUg-9BV_tS6p6`LG-xIHC3d?RKuC8?Qi}ccYAab^=OF1kbx0;C z2rruFo1Q6a3NmBHh-%#939f2@is;zpiE|=gIw~QiF&eYS>$3mZNnO!c3TYM1B*Z_} zHj*oknD)Z{azL=W7_p}Oe&fgR*ZCfOLbpiSj}8CAiihxK?JOr^$l>_Skgfxv3AYB+ z9yij4J2Sc2l2M|S;?HKORd|D)MEA5z1!AG-O=W@%Sb6JBNbEKu zJk2Z)15OQMJ8#Kb@RS7268eDebR>7N?V0zb!OShKx5nSG$Hx*?w(g^i@Q>|LOY zs?}1$5^G7$C8(eO{m?-_7IO2N_akcmQ%|{D4Uc#xP*Gf~hl)|1faYr}_4>T^LXBDY zFof{s!#c$9til3|w*jVPq!% z)KwEgJsM_)$<(>8RZ?rCJ8Dt(3Xl3mK|f5`IG#R_-~6A*bY*-MnQn}v|7cXO91c4M z3ft~73H8(X_+Etmvyu9b;!In?QioY6#56oap1P7%(xQuhiwsmX^1N^lEpU**ay1{U zazf@u!0h(3PE^Op1SKO50c3h4$=vn3rC-Q00n*_e^av$B#!V&+(uYK+lom$wJP9cQpl?FB!1fSz*4t^Jf0soA&uxBvQ>}Nsf;vyd8O)qlvU#^G@rY>YQI@o z=nn~z8%vWkYS$3AgCzud^MVx;vk^p8WiB1S)H+G~!6JHqCWXyGs@#jukWsoYyaM%P8j-t(3DnuFRV_XSRes>)FTBO(N*6%ph8K^taYLj7EKHIUFv3N%IUe`=Z5I ze*kQYo@VyLH49C^IvqKcNl8bWP%NS>3FwQil(yI@p5oX?ioHufK^}S~c-ifjy^U#l zYAxVqVhXy)Hzkqb`wt$KK9oP2Us~WfSVpDuX$ok8C;t<(XS-U!=7KvkOvS<}95Skk z;dYVkvw;VHD0APT1)QZI&Gc*-Eojl+Kkuc8kFmiDFxXU!a)HzujwpRtk!EeZfx z^5KIUO7rPG)V9Ykd)^N6 zB0O$}JTaH2)qw+*u#1Gv*_K{DEVl1l)V0 zF>T6SE-;<@=1x2q0_XM~?Pu;&=p8yPci2$sk$z5kAZB34#&3I^gB^t z5po%hwiFTWWrZz^cj+^Q2O4H6vT!cZk9K$^2kQj$6QxovN>$V!uW1*^AUd+6$Mx5? 
zjHKC(iJp=b=+Q)%VP`ldtC!q~R^p;hl|ef*12;PraO03zCBaAWPi_>u#g?D|{2trp zAq~O)VxbOjgPZf^rbR?o;A(xd#vsURCGeXvX%}LPid235h49nNu9&v21K%+pJzIH{ z8aP?z(!wCrWT-tawEs^s|G6W7bKn8G{@bvW~EJYl;WuvKAy9{Z#Vp#UYGCb zM`YPR@Pe~Q>y7v=T{mWe{?b>_hUgZJon(%$Oqp5blwC_bNiCidZ5iuiesVv%*l8Oy z5AHU@`C`w<>C%`#fJQMMw*0hTwsMv|U+%WVA@jX-%8X4?-gk_$2%WLWT0h)WH*LFm zp@^MY0;pzpRs4rXJ5He?+iHrUi_8L6#$S=IyS)|&3o4humQN*kD*A)seNr^93EKTC zI?nH%g0*e?Pgo+5N{>3&M-)ULdc!8l??8mmqRbH9V7J2}tGe-AwB77U7Ct147*-oW zE2<=0e{QIJ%E^(7SJbby6I}iPjQeet zL)Q5Gf&~h3A#?E|1O)j3`O;qqSIIDj{{+$K3QK8L2kWc0;MW1Y&9~b1jygIE6Txux2(Wcf)z_!m5$RSF7O}MTu;o(%m}Q_ZLdHnljeFB^%?J=nSu1$<^S4g& zk5`3>ZoIK>I)SChn|J>y^QSX^r0I}B^}W(v0-g@PSlqIQ>QG)xAc+v6Ef5ze=^o&a z$3BRo4TtPM3{W8BS2m$T`^|F4=?tZ@E%uiblE&}Z z+_6g!yZ=6LV7OlE%<>qPllltnVu+@oSh)at6C!+zVv30XS#u47V}xb0j*`a3_VAhk zwQin&3q0+x671PL1|7t>PX+}#F1Zc*RnOk!s6)tuK5^v(lQA(%GGXfZZ2TbxK1@jG z+J31*-~EggI8MJ2X4ZrQe2}yaWkZbu7=3PS9+h?9p(QU)yxj@T?K;=oG~(xj6slu| zsJL7Z%s6b~11z%6QAIz8v|n+fz-Ci76v(qO6>U0@RNE<)% zJe{)g+B^gINsh)_ekBszAxNDTU9@{#Si5sWQXNI;AG=vpnw6HoJI;Pz44K*05eojr zCH+VGfUW)ALO^3-z#fPOxvx|<0D@iod(h|`Hl!P}>m~yJ**;7vEXIB9$K8t9MOH=H zh8NH+9VM5ViN`BQ)g!#MyC7YvIFvgnC&;Tm@k|-f2`V!E1@xn0g5p#aq+J&llNa# z)AaDId9b78N!dAH6apGVyZMbB+UdT*fDGmNboDa3RMnu+WjQjCRR2RwY;y8_KO8|zYe$6-FmES~J}tjc zqPob5n2IqHwH2dgI4w?Z5+Q1e*3K`B3_9fNPq4Y!k_)iLmi)wt*FVvXftrq(!M*qy zd)nS38@u>Ze*O6PIwkQ*B;L@O8XoNR|i#Pyli}F+og?%x7#b2 zc(u(Fa{l1^Q_ZH+6H&B*tbPH@usqKq_p-=KM|`OFIe@%)n0DC~EOx)o4*w$!-ek#l z-(-U+{}Zp$?tG~tmOuYuN|qby8?|;+Whp}^_=k+C=e>r!Wz4GN<#=1>pp^2rD%~-h z6w@ZAkcOHDmB1*Db$DsuH>|8 zKw|KRKOaGLy3)oRaU~?1e07n?f1?bad^Kii-xZ^i1gD62DCtvEouofX_SS2d@v!t~ zx(9>eSUH4HF|ZZe^7Q2Yac+Lg6nYB#jVel3_tSNR83UPZ_G3-I*Hw9zRS49N=dmUe zxt#-v@PpIPcgzq@>=a;<_JV+a>t_E7#z(S5SKCZQAaL z2sl4sv`meV>;{o{QsTuR50*Q*AoDjnBD-YN2UQ#vaw>m*G})N320=2>3!jM>+Z)sX zW!;H;-=z_zBtGQu%if%rs|LJwp=k|p_wOZgt(WPjqZf7uSQB)8XYPg;2vpKE4T?)u zB9^mE#0J^4E+HCL)XvEq?>pE7%AW`;5I(qILCHYnX{CW23Ble4ED@;ij}JvnzssF0#VU!aDsWvn~_$t`ln7gu(`NsNlx 
zu~lru9pbU-ytlawX?y)ge6LlSkMtuyy7Z5g2L`}v7J%OKluo2^MJs^R2O-lTcRig) zoG@74m+nq{d5S`+C$ZWZ?Nw;~9qX`*23{`1QsRg&dCzq*de9DD9VSIyN~F)qPvmrp z^gQ59&McL{{i>0tLRTchefL$MVWQ=to9l}Y&}CIdQ4=7+#b5zh1)P`X+48_>BxPMa zLlW(3K&g5hD!5tPRLzu^(JvkwF8tWrlZP-iK;YMuuz@|mC-fK?tiT_qq>HqKGx-8` z?@OGd#u$L=xqt6LD}sraQCk;>CR2<(bnn9Oc_O~1eEmI;ittMbe$PmgTJ&ZDwvAd4 zMhG~TLM7S^>e_s`M~#J?B#qKb02mK270$x)_;Er{=S~b<_SE~TQ!0;?$hM)_xDhzK zolA?ez&_?PfHt8f+9Q{W;-Et#+qi>0tjG4_!9vmq!CgT8MSOc7vqGC|Q$TA_$t2bK zubOPtc&bhpl961kH8N3t!Vw}O>Ozf4;y@+TU84e-B^gUADyzvDBgj!~TQduk{r*QH z2L7|yJ=hjMOCopJ)~4CPZmGfJi*PK3Mw@-HI9{qkVs6f>leF0(XIsNxsVCcLL=!6p%4x7c!zfsX`J1P%j=vxQ}Tp_g>BnR;8wM1dT+R$Q?82Jr&aYe*HB`CKKHg$@%SR0wG?s`>eRC)%SUpjyI~Dh2bOmLWCWjpoK&B`_~6|jT?S=o;2|!USL#LvXz5pb;=OAy2CaA>njzlu zAd3o!^676LQ!?S6I*u%P0|L1+z_Sb| zo;nR4X+GYyx8QU&5Qag7N}Be4W>-%PoJ`;;Za<{DX!)7oldg1+8lC&br6O|o&gb8- z2&EjVD!OJ`3WBUr6lm|60|Zm%RWFun%m&W`2(~xgTWoy!D3wR>1IKCr4ug-{!`8*~ zAHOcIlMG~>dp(ksE3tp9@wEzKvcI4Q>)#h%DoGe>$X(o=hQq%9*3wKvn|^FPFOt(& zna?dZ@33-J3vxP7fXk&WJ1Zi7K5{l5u(od89r{10@M>30UT+JpF(;&M#h38V|F2`< zF^aJ#axjN?Qo=}^o(D<$H-IICnd&WON^IXGfUxN%9~}2ms~s%EXhUXqvqmFkGI)#U zf>QJHzOOBHcl_?bg7q1>X)ilA)u-n2&HI$ga0cI8#P`(uhc374W7Zciif8Aa41aeh zST1zJ^MhiCm~{EY$FyxruZX>I{(Pqesm(!L!s!N7?k2msQQJJ_-(~?eH-B3v#j9r9 z>Ja2ho6@3D?YtWJ!n?s*wX5gk6}rw5>-kO7(%K97(3oHrnc@^dgJD06JVaynG_xyz zge6C;LBXRRTQ8(?tADk~Ug&jz>AB$xt+`XbS|ae_e9$$Aw^|%{Asu~X@SNc`9wZsn zNdO{y6|@eSR{s-v%{W!MG^{r;bjQ>?YM-eEuFWOioh)^_mO&wb1hi?{)}Q6LV}gT@ z*|-@#A8psg$akK;^FE`Y zW(gjinzIceEHCJmBfK0nL%l5>S6=M$rcrs%i7G09-k?fEf7e$Joh8;8_?Hmr8&T2r z@j&}JX5r#eO1ldkXmKtiN0+-kv-M^%qg$7h{UG~D^_kK2zTBXvg@NH$Lb7TB*~xi% z*lo$^*`|C0AfPi{IkrVDRZ>kmqZm$Zw{FY53PgD0f@F?Wm>jF?!6CYwvO^UH$8BXD zqY+7@grM*FcgRZ+(81e@Pb91PcUFLBmN>xQE%X0UWqV(0H~X*vxbsL;CENk5O)JqZ zZpXHBJQ~MrCsqqLMoy#g(zjJ^6tk+M8(#cG5!0a?(FKX>#yM;0CEM`5HYH7%fe+#h z2^rQ@@=eb8sZSeVX46dO^T-NxtU7X8AD7)SLCuB;*zq)Ly0b0rD^C1|Qo^2& zp$oNNxVV<;PB?M2FRc`?=9Ip!fqx|9Yqhd99r(-kapJj1#dgRqJaV#o(Yh1KISvKY z7HWj(3>JFWqNuSBiywXVK>Luk&tfj-6AwuM2Ee6u0AWf?XR>yG%)}w2f>(v 
zgK1|*qwJJ=JKD^#N!gczckAlkC8T+E{W&U`f{skxPL?$Er<)un2dM~0+8xY^6AsiU zDl|pNgeC}>fR?HWCP`5!z+D{*$p5Uc``)imqG*fH!LSb`@NQjs7n_mb~t7#k`xh90|%#6DmNxR@p56!#%vOTybxwY0Q-7F^l z+?-AnD33Cl+)k^3%k0_VhhePa)z?J{=s^PFluHu!It8l_J@{(sw^)-yn*_yEzPDGw zvHucLY45G=DcF4Offu@Sue9zcT`bU2yo;pZ3jGZ(hvT>SqtDOL1G;MW+c>pM=z}P4 z#S3haZ$=Ijz7?*0eJD~0#rDQDB~h9-ME?Eadd7v}iIHS$Nk>|yDO{n_6wq{&Wn=_& zk=gyI-s9v>2C2zdcu`zbZ4diMX25nU|9zMh_sD7f+%I>=&7e3x#O8DV%DzjHQE%etPe!F^rs;TJaI7*JBk$y{!B`9kQ$7HFWCY`5rF>!v_xro6+}r zdHWc2h^s-Jrx9WDxQ-1kjxbVW=yK|r8+ijbQ&gG)u*eJOx-V66uX~yNf(1)RmKylu zfoxx2H|}k5=&r-2ix~7prvJ$G%pM=#Zzr5qE82F?T+vk<)rOycvH7fWjclG};hF{P zEg2b;`waXgqUqzjhu~kUSTg5<1QZO8i8^?n7T`vMy<~QB_o$(7QNPCW&UP5>9VwDO zjtnzftYhdg3}b?EodnU9%J6ba(~QsyaGetT%OUPU?AP$E1i+cFa;$1j*q=*uSe?0- zkBH(~!UacR&x;LKU4Wes=xB`MdUyR-;0(F^krm=djR3U?>Y&*U<`CEzY5@o@2I^fO zp=RjGBQjI#1MasN6FVdviVjCbF)vo{bay<}RSzy?ou#;YMc!5GuzxYP7u}s5LRw~` za;LROC@h5PGKr-$iHV|}i~4Px@#;w+`16P6Ebdtki{NFVC3`?s4Pi#-)Z|#&DTYng z1fj?Ul&Ty$f1#p`{O?-|ezFj(`w=D1>LDnFgsyU3Vs+uATKJm0b|{IR`{F!E1d7TF zrOv_S|Cr(p6WY%!EluddG)859m-b3;=)~sqbOC37WZq4x;@LbBDU2u4cA4xr8?Ony zVd8ej0hPPs6PdEG+&bQBQF7N@evxrRhg%2yz%lQzO&;IZ%mZ)n>_$>%A9mwYq`vL4fsm-{6Z=P9?OF$AQp{?pwvdtOJIvuP0MmZFV z0Wb}%7n*>)c^7EUQ#Q+6Cd&KA*l%1iM%GbgVR&fZ;PrN*ew1P(#7^I?V2j)RW&ccG z`#KT7{Arwkj8Vej>TxbzeQ@gV91A^J*w{5UvgD^H;~gv91-R8LE|W*#T@w2vgY_xGGd;EDcr)tLJ{n}(&C*A`<+(<49EGp-Eu!r* zGdx*V*>mxO)p>2#rV?jJU38zkgnE{BcV}}(G5bz`sN4rHZw76L*I%1fe&7>1I)lNo zD|bM6G@|{fS;$}UA?O(J2M+vVLDScVu`}J)hP=n-> zQ2PJI@uNB-cyf)_MjrXHppNs+wwF>K2}vp#DG(bKbyK-62M=sh_g|uHuCzWxgJ2^* zZI_(c;2$+FLm0yF;Gqf;=l>TdToI`e5(@a&9>dYVY7xDRRJ8JZit#?PU2~d!CK2U1 z1o8QUBy3Y^fOA<4JX_ak!sBpOOMdjyd0Xlo+~zHDk8sv>nhpeXp_vj>H3*I143{cT&>WH>U3sTsoGzqHbFl#d&hOd z!iQv)X^tH?W1t&7zb(`%6C_*(D|qfCZFIxVNloc{2Uul$Qam5lK^x<?A!2Rt$edmhGSn_L9}ph5m~0}f}?t+iC4&I6%39H zyfEi6(>w78x&div`)}(n73w8E!7T$~;lPli3%R_+oih}8EdwD%=XZG*!?@x(r_5}?BQ>g8kitKF4?^miTduEXBVMq%zfPQzVOFe*NR0EZEk|N-9WOPvCLqyNoWdEglioUb`29%du-S9(f3mt_Ge 
zzqvZJJ{!8VG~i>>M%Gn$UAnj)@_rE;96lrq&OgM7tCK_>P{EPesw7DzM8BgKhE&WW zINE-SJPD&eg+)1ebRzG$XEi3t?@pbPlOUpMB&)*FevyP6TEBz2rA->3RxUh7CGmX$ zk_ZyGn=^)V^0hc}J71{9n}l4Z&}>~hhq|?+iWt>^Lr&BHLvc}8NT4N*_f(xCLKV-?e>hfCO}y0| zMszug;ph0l;jcg?2Zetyc{7DxrwuTIAEEr>hFJC{giE;uQP)VN2n`-wnX}!>D4QEn zH*VS`59bSVrk#Gb2_zE6(l<~lZtKm}4p zYJ0E9$0ep6@$-$`!DQ9v;&d!YDVrifYmq39Bd>};v#*n%i^L*TL_szFu9MAPQEn1$ z#|trmd~_ZV;bp9=xErR`$?l#r$+(Z|XfTz_e4SZ}@l>3}^RMDybe}F8Az3ybUUd_)`S3ago9Mp zZ;&9t*XN(oDSnr^1TuhRybyP@4qz)!YI?5T^bZS*eGCY{Qvs)1`5#|D4Md8jArp7fVYh)5c{v z{E_}>zT8PkafY@r#5e2(8$*gZ+Sx8>8R$yYP5q|+L%HaFef5;W36#c~clw+uplFBR zQ-D3mE3y6dgFF()722Q^I~|VR?@OfiGhV2_FLzzT*2D5j1Ia3|`|v6$tRtM#^zA*b zfBeTU)R5I~Olf>CaQvrY0_4@SR;?CuX`UiXpzcvwaX zts0^6om)UjV8#Es0RD1aU_hc*$shfoVop|Pc=#&eg1|TbnAH^CX+ifb;P|)jq zg=XlW;fDUDk+&l$1Z^8qsRV-Hel$Nn=(@L7X@o|iO1bwhFn-a+x@~9tYx)a*V`yXz z8Z=AC)e%hY;s-VhVdQUxVwoxRpS_W`V{}K9zPBCiB zvk@Bn);l zl02|VN5pDfY=;EL0D+42coKT3R`akT(dXBdme*&6at<`Zjv-z zxfW^F6$?Y{9=q%!C1KA#s8N&Z+4ak*QKJTFdHmf7Di<2K1jceD!Ibqcu zlkmUJ1m_+QoQLm}NU@hH^lnmpeJY3(OkgJ^TFW;Y)a&65h&S1Y#(6^z0xG3{HLL$} z#kX%yp|Hs^i!~qJ`6qX{3qR6HfXI{Nm_%F%fw6fWFs-Fog1nzOEGY}lJn%FnpX6)5%XMhGVUK6Hz=oReB+2{ith6&?Pe_h}Ve>8BA#P+F(a2$TFS}j6XFau-O*MGQD1I$*Pn$HFYl64AI zF$|*^ZpCcbqhrB_5Yiq5iGF`y@!IE_tTume`xa4^eh$&r=cQQ;8xJ=ePeS9b!JQxR z^X6fbCCPA@WK~f|Y@*({#TR;d zp}|daf~E(SV3E{5ZK;2TAf2AF!Q(-qxxg?ZBaG4F&Q{8Yt2BHt)ieuQWZCvvnyl|3 z?X6UO#TQ6iQl5gMGR5GVXccB+JKeW)LTl%uEh=>JRkf|`6zs>9MC?c4(vqM~O++1w zlEG$@gs*8w87}{8Lg4*RnwOmPK}rAv_hA#lZT;U-U4)KbC;9fXAwm!%0J7$2(DeKsTv=fq%SGtQmN5R0Cm9y!SJab!n z`S8cyqmlPqs?V4tcq8y) z2~u6U9At$vg>J zj!sJd&ZB2VVM1v7rWuaxtrvyYGsNr~zGT7lGd@HfNYpY+C%f7X?QC?+lf^uQ1QIrc z+YVPb>tN>7lQ&8+DQ}MQ4~$?Scxl_RPH^U92O$W4}jM9Do?jn58FpBLF?= z=J`K){1qN4?d<{);HDV?faai0JxE>%YPXo#_%`?GtoZUQ45OO+A98pa;Lz$?O2adA zeC$ZL5D!M&F{@-)U8~dVk@o;%dp>TeijPmRK%Ohs!JI#qFTJ+A)eUPsjF7>JMnG&x zD9lpI6m0cZTM+)(oIa|FuRGZ@HAmO*;ks$-y{&L(#4!^2VRmo@3S^x>d2yOYX2}4j z=iV3}r)vu}dft4$>OkzH;44{?I-{GO_ro3@nc;P&i-l*4a`h*+;1IH_@f45d-x5>& z{`)=b?H8KG{d*knO?RzeFy 
zd)RYU!r|pdaE*dgV0sKn2E}YfW%MTz-t07H!3CY9VP$d{50NysU3TG2rgkk@1TFaTIvOLh4==}QB zLTJJF5gH8jNTwRME1|8YChqAA-z!+28n{l{f$1%BkEcdVIT33T)GzcG$92a~Kf_%L z>&=q?jG6_>AZ&B*%N!mS+V`P}V5e4f3 zRmw(U{?1R@Ug5A`u^NdG=>vthBLkb$ZaT@Pt%`|LDR`g~UMTeWf(FLNliW&ITHhYs z?S$J6v@0Ijj2V!a$djz@dxtR5l0H^}v3|V7e~y-PqNDK{SD2-%r%A#EjW3|QIis>U z3&-C1rIci6Ek{d8PI;Q$r-iwJwi)3UQqi^#pzl2!ixD2LjVZReyMwSH8Y*wTQ?dUf zFhwJ(<6-!}P2FiSvxqlQBWIk@eeAs1OZ#GA`34TFCR^7VPOOefAfmYq=E*CCwlo_wBkaOGC;_0 zb_`OKe}BDBrEevzQV?r{TLFucAuoOX0ILy}T*NKCxD4u$P8<%_5c5?Dq*ut$GtHCK zo#w{d*+peaGLd71#BH4Zeaind7qA|H4Y=hPZZnM=bIr0Gq8w>6r%7@LX--%2>nNU8 zoujh3l&5qLb@LE7Z^T6L^Kb1wqsN?ZNuc@^#GNm7)qY7JS^Ix2>cC?v(%`upoD!g!NH=Pm+MvH|{`EeHExaN})ZaP4gy*GTSkvHct?4<$^=~0Kwh9MM?=XM@&R8G-BKhg)dM{0 zu0x-{PgX*aos2kYdHpIKCk3e_+?$0LYLYJN&31+k$3E5*q_cbzf*CvB40Bu{))Cl3 zpg?O~Rj8lZV>H;>@i0~-=GwL+oR<3?`6@ZNB-2>ixhX8`BY z2pqfF8;#wM6tX8Ci&J~I=PDWlw3lnrF_^TQ7M*M=r>-UDA$d2cA1(MiiX&fP>VNBb z5fKnD6?YiZJk2C*hE>_S5I6Cv3B%e?P-)=v&Bol zpz|c6#1TTnPSPhl{xkdg!5Hy1ASB)*X_h5B#+)qvp<0EPO_ks4Mf68R9lwME=vUSD zd4Vtnz=c;Pl;vObJQ_Tbg+fYsL<{Ne{GZedO^`s@i&Qi8vRTCun0l z5XHT&*S>RD)X_~ujpCgNFuuOS%W^dgad4=w{ekxCg~=~-_yT+dWyaUH8R`(3Du4xT zp4g-P^y-Wzk~lBODobjCWSjd5JZxMn`W3z4o(r&(P=4**q7~MkaIheo5%;piBwt~8 z-vgO)3-eDh`dqv5cRhIlh&efU?|O#w0myE_M=S+z`{_+C8g57e9x6Gq%rQ|7sZ?}$ z4e;WNUI}?ECmwJ!h23Z+4QJ~h5~SMd57Y1<-kq){Y`db2NJ9sCinMdw8?-$y}#O4j3yJpn|67OemU# zSrb(r^+~05c2ZjiA`dGN3>F)ZO8g9cn%V&eTnPQicwD6PSSV#-@y=A@i)9{B;{LL|L49WA^~+vzZxjFg^^?(i?&339&nbJ)HLgaC=(S44s;_XN^L6!+??Fo) zA4>pt7@jpUR2n(mi+@{X5ebSn1-&QuY3DK<-1&r^4k;&Z`tDsm>8B_3v3GI)_dBmU zi4!Dd7rLceQ~nMg+ry6E{O44B&nQ=b3EnWeh$|`JmCKXGzcypDqB2bGeb9@;L8HJ0X8i)QeU1V?wOyF7hkWF%p&w{IrBd|%2b>v}c$%D7)Jv?oMDJ`is5Z^D?Uqt5}2-2w; zJP)+wCQ;%9&xz+G-*@@uKT+{#wRZ8n1-D1r$9#eiGeV+^3~s3p^>h3Mu8%&Q`Izi& zAY4+)SH7*Btw|()9>aV#_GxDUwsJChhbm*_{dJe6@rgZDO1B-rg$Sg=3{`x@-{Kni z+FG3S_g~A;mGY{4R*zJwd0FwmVTLdM=WL*JsZh({uCisU1Ijn~dqM}()CF)?X?7J3 ztaA9^tsXd~+?)&{I6d5i%ql}PVoXy2nx?m&^k*PAcuC7!Tes9XC2!&`bluEYh|!1c 
zH0sY66s$AJ;OBe^MF-XU6;4sJw69{`#`(EwnN_N$FyXhUdm0+OZWd}=XTO;MCtZR! zeni_`%J`q#P+;X8+~YX}5ha~G2LSf{(=wKY{Ch`TVS0slRpkkWOZb0Q8W1ov?(!i!(OshsK6dLCx!?y}Li_*ReHZHA5r^LPUbQ>E3-1!ytWo?tf; zB?P?T-Mdr)kt9RI@`xhQ0ullh;L3dq?O16C5H*h~PrL(Wfc|9hsHsKH=9h<%PnFxW zLo`S%q0dRT$oNA_b}2{--+sRM#Rzr(h65A+93I7NSBZLUC$RGNm%Y_+qe9dR6x1NH zydr&(SoX$&a@pVK*-W+q&cYaB;A1#XejgHa!E@4|wZWWUGh~$8+WbH!dAVzTBwry6 zAPUFz=ho+N1qC%~5^vhXE8o8z z{uBY$>ti}L^w`jCeS%_jyGp_?i@mh=A_F%h@ju|;l2Z}trQpu0tl@!Y^?cJ%>8M}t z=n=`#uXN*yO|%qp99F+jP$`y&i`dvz8lk(if9rVV93jU3krPy=FM$_0kiQwY0Z)-= z=gEW3>t(F9qi}UennYXf$`}^D=&7xD;d1tVWA=LP=G`kjDFPLEQ4ZF=X{TgrEDebr zGjH~cMsi$+3k1;%H^y!+oIDe>gQ_%;BT(qkEo)|B&DXRhBqS_V@j&@PDc*&b8 z@NR#n)>XeA;g#fCZ=;@uUT~=vDQ2kC=d6{)YRe2lbMI3@93bA=Q+Lh>49F#Hx}4Di zTY5|Sw0%xM+L(oj=`kMeJ2M)QW27KXvDe>=axNWWW$wE#SOk+%fo6Aoo^#fZ2JTg4 z;FfhUIje$mjg4q&-y^~{=6QIlETckGsY2w$u;}mQ9+u}#Y3W-%F`6q!DlCP#GXJlY zLyh@AMmUs(e>D8gl-|1)|6$plOQ;LTeVJ0XJMNRtgXh2<0Blj1Ei1*>XTl}Mdq+q4 zVL4utqcPPyFQV?uyGd~ixX7eyzbP?w`#lp)Q`VpmTpj%dSD)|yo-8V(#Fx<99P9OF}z->;?c7Gt(+H|PZ-GrL6P|c9$W{&I2{|@ zg%Puk5dIC0!(MEYP3Ku2;=p7yg~>I=yehwr3c2|e8dlHam-Ae`RDM1M)+*`bOA`F z$@Er^>hX^)i1kiEo!1zxFf^01-EI}3n`e6bQM03LzQPF=gf2fo(Vh3bc&)~75vVAH zuo#(sMuSPeoE&P}WfgYtE|Zj$Xr?;I@i-=%DWms8E+jT>0ZT{Qd;9rmunmsO6GVX0 zBXiykfWhQ6H2BrFk?GSUy}y^bE;*gL4DG?7zV8rZ!1JLG6gk>}|K0@kSblshG&~ew&5`n-AS`X0B20pZ_^s`8h$BAt$_9NQ!hko6^{jtX7i^DdsI6>5X1 zhDfDy^@d*x-41_JA2ovf&m0oes6IDjStAF*OBx8tT9Ji4T}ila-aBepgMVnS8_N6Nv)Sr|vx($NJ8t$k-4j!agZq)P?R^^F={qu zD_Spfy}wioYZ-DZW1_s*-!bVvEFhlzMI!RV@1( zXm3RR@6*P&j$xRY*>dGF7qQ9(M*Y}lvq%~@U6Fr~^uAFrrOE!JSp3cPcZt9nycHW>1Q6ZK&RfN*I*p?fb^PD*|;Jm$Zfn(x{4} zM_`W92ihjcH+p*0#2MIW02Im<9fN3~sdnYHQ`Ca7XFpER( zi!cJsZBw?4LIby=+u8~v%SlY=UfPa!33m}Z`bl{Blh;Y@c4CwxA(wnEV#jCBV=BtJ z>-UAvb7l8V?5H}*U&Cn?)>t6d-l>^10}E-7%kAxp(2E zW`5Sx#>rRa{|U8Srzig}42=H&!oY!lu)yldn0~aMw=D}z^L4&O#NX4?4t6tdm~yC( zc~i?v`VfcG_{zs(Jh}jJ58z)e{2*9%*MK&Wpf1OWRjMnhv()7O@c81GUqwk>U}njp zlwa%szGT&fhL(4X!4BS2`C@p3KfV8MO7YXsWRV?n20&Pj+Ko2bH?4?ia=0G~G7?@f 
z{G=GmZusxQ;N;S>zwld?3I54K%2UqTup)QUTpBl7U!v!dAJwBeYrGZH4X| zcG~RM?nym;txjMSETwWn!<l9X^a4mx@ezi7(& zTktdWeufD%Xir-O{~dAs>?ff&VV%E4x&oMp7j)J1PkuS$`kfEjZ+bE8WWq$c~h-$ab#iR)>Ee1*RZo5M@n)3DO_Lx?@Y zGGvr2wWA)JsN9gk22sHA>>_>%bF|`wAMP}}_f^-e{&Yt4LpnnCdUBWe7AH@rGUPCB z#Ee}OFW@R)i`MqD{SE)Bmul&TmlOdHotyp0{wmHNzbp12V%ndCPI7NVPbS1x?FvL!zxhR~)e>&VB^gVFf zj=!faghsupd`aP$e zA7P7wvy{f(WfTNH!?yD-6G2-6V)1g?CG9!_qj$R@3MwB{qP7|3130Xw4DiInGu#@kkI@B zNSap&AN9tl@9;vwC^#~ zc@padW`c0VJok%^-KeNAlb%GYtAV~=0F4*_g&%OgG>f@H_( zCY|fO^_!3<&yu4;UmiRy-5ytCTRE`5C0#%8)2Md*sA4kpSL~-a3YFNNSTQWnYm(XOfZY#8a$jxbuy_r>m$4;E*S0j|k`kP=^jERq zM#`iQ1Rie|@;`iv#WD*)dnVfXH?vqZ+(c!<^a@6=WjraHqm)gGH7!3hoPsbujW*ql z)XB3Y=@7c>ExXDz)-4eL0^uD%JI;YSc2B6_Bi!^krKZ{#Jc^Q0$2~ zwH>7LlT(6`OLC} zdxJ{G&~liRW^|Q=pOH*r{bVISqH@}uET6uG59lM+q8oG`cw_p4#Iwp+$ z9lI<}Y_qmuWQN73{gC{+`eH?1|L;S?Bx7;|cK{D!FwMiqHB-e8^`Dg>CxqgzSai*X zl`M?OY+qPXP&Rxvw^7;Pq4XWFYti7noFMhNU*me9TiDEiZ|yFJA$b_}JVZX5r28`^ z11qRz>h;Ip-aoeF-%#$k^ivi7P$ zbM?vwrO^qNj+P$Kz(^w8q1eKp>cNMs;?{6JkaXYwGs_i)ndAFeVRw#Gf}Q;bo%Lfs z59#Bipg28+)dB*f0NE=JLjPN2s*&Ou{)g&ls=rkoFO}cYPPN44#7z@eV#JygF=RNz za@*=aMqA;h?)CmF|Ee6TA{U0AIEnV$2wpuL>7rWBTsRkoOaF{kAjb#tC~5J%`^CcU zpA6m|fu-My#utbIXbu2p6UHBDdkB%E6~}lfAjz%0{V;P@39u!z8(PNt@63^g@_9Q> zk7iX^OlTm1^ml@KUy1`fszg}Mb(9UE?_DbflfTfjw#x4JWdz*mWD?PiY11-dFI|(7 zA>_1B2)dMRgUPRm@KH@nQVDjGP6`0Q4sYWi>OVfS5=DcwIzS6|t-{ml5;Jh)ChMr0 zW(Qi;kV#8CxKVwcGIjTH)Ke#tRs7Te@`%`X$$hu`1wBOHIJ@4cg7i*NzPM{l4Euw* zmW$l3PYD%Oy?CrQYP1efLpFQS$CE==U|pem7f_+ndKlUil_}%flOpM ztwhfjfus1UW#s}OrPu}9N)A0%HO?$&8p&LYueg@?&3=g5=T)Brv} zr1`_p3Eb}Ao*|%Z_bZ2$G#Nz%w@)3 zwLjiS(J)L{+&7W(;~5wDl9I7Z@Fb0nI3Spnoln9cE-SIg3<)QnXeRDtZyohg1XsIh z$#x{Dn!B#kSZl5seQ@R|#Adf9lxGb-g<7yl(;8x0-Q1Hz>QK^zc}(f~D}UIk4pwiN<3mH=$`GZ(y1*-0*T0aXicd$+&mGmilg5U9Y;B?z+PwcD0oV7% z+h$C5C*S%~N$ywahtX-Hi4keYNt!kZsMk}Ld;9itK>E<6qOgi=%2xPZ3s#DSo?>NM zklOkO$or4Iv-z+(<@o}cJ*p_8CP(OK2IB=t*!sY5M_esWKPSx*(a&P=E{MQeZHw(* ztCz#sB>$^L!T$-Q4T*D9aLx~puSGJb5cbu zJO%erC=7Op;NI0ZWNlLFHovg&3@y0HVWxTH0HlZ}!uARc=W|YT 
zU4lzJLW96Qdfw>`d~vDQF56~E_^eVCI&?g?%x0wdOP;Td6?-3t<;eqM7zND#+i>5` z-2Y==zCcvOxr071rY?frkJonF$vg4lF72prxQ3du{+nMoZ$R)*;MD6H7?act{gufd zkGs!)>&j)%_Un|rCM$RIEOEp2y@VL94t&~I_66(_Ew{;E!z295@8elhW`?|~=OVm!(=&h7`(gsq)ZYkN%HgV6mj z7knq1A!@B-`u3;+wE5FwDKSIO@6<}#h4h+12N9DrhM_5&5t4%jl2Zt8>E#O**Mq z7D65BwO(V<22QEGU@QC%y|2kyhLHHqdJ8IwZuEsLlz5K@N*LVx{e2sc%5Oyp5%Nj+ zdM94rJF(j*;p%)a0Y^KlALnvmLBbWmC7Kh1Vds_NJh^Wl#M>o08Kjbt*r?*>*+;X& z*McaLG8qA!PsN-4`mxeja5PBq(IA^$?06v}gte%@{4qczGnL@>bP{m4fwYH}wruT4 zIMHv{)Wrx$mKYmLIoX#_^?W*Mu>xYdE|t#!@+a`}(vf8pan(`nSL@00$EC-iZ)1 zZTatWnfV9ztZ7ZA*ugPgcyh|I*!R=$BvCpGp7B7rjW;RSjsg0^7y8vYRb63p*QGyb zp6~}eupJTn;%%jl!7cThcvYyOPE2Y?mdN4@Ml^IaY9gAtLh{-C&@uM&x~MWE3p0YB z)#TbjwQ^A3(IuD0>HL;{&;fR+K4-&f3s%~$uqJDa;N5pbf-Mi{J=^Ld9HHfSst zSwp?sq|e1j5m;Q(vy33a*N zv?}gfw8ojWka}Ib5U*g+{L5F4VsXDskXlX0M1Ny({CPKcS$aTc%6R0EM_y&RU)b}a zxq=Fswt!IiyS{Gnq1$Y^)$fy$UPp1)o^f?w=cw(2f^eIU_giVqE%$MLK>K>1C&|^w z9S|EG{oHz=LBqnofu>eWB2)tD^^_8eJopZ&j|02CEcsT3@S9q2<3|c@p;8G}2ijNF zld~f_OR2A^je{>nmt2;zp}}+G$)|~$@pl8n?XruCQfo=yJK4Y6ggHJAth_|_gDc^j z)Q9l_YE(|B%WJV zb3d(Z-w4B`)g#;ixA(z{pgw+*uN2n|NoiIk$7AxWB=INJ2iIgQtp6C! z-tApK{Hf!CJBCo4y2a1-AK$;B!-E)X8^zAT{j;yrzF2~{cyIzpWTdj46JPTb<9 ztH0Ky(Eb>raWJI60q@CxNfBj0y5`C?d;-oFNhZJwt0T9G}0ga3$_{o z9&JRw47--}zJ_!m3Gh;A`w63A_e5UBO*Y*Cx;&c6GXGp%Fe$AdERIK-ZQmZl)C)Nz zrLb5=fPm%xgjE?P@dT4k$tMBF^`9eWrCy|_H;;P9)Pkjnubrknrwi>G zXlFHH$Ki8a)|&5*+WOpMmz~t&3{c3@(WOSA&QI@Lt*WwHbQS7FbqUqt&Ju%D5Fn0r zUnK+l#w$qKVS`NnM!Kv(FCjrnm_ENE#5hA!f})uuYns4{Q2-5g5vXE4lv@e6SbWGw zo4oK+1jlTgm}hc_QW>C<^H&b!zEl#spyxyM^I^!d*%!OjJT7+slYgK_Iyw);yXHH?-^!|!X`O1_ zdW9dl5$Lg`i+*@^Q(N|9acX&HQ7VRgn@~!_&Z)5;doUxi_$L)gAtM8168`J{D<5+C zm*#?$NBq-18NoJv8R!KJL&zjruE@bNblA`PIxO~CBKG2$+XqBQT4R0nI7Ud@Rx2|o zHJZ1bhfqNsPttIp1iq41Uqq?bJgPV>JseaarI=;Gd4iP?J$?&I)f~Y0Xz5UD5nd8e z_my<%!r3&){tA;HD^l)w1sg3aTVG3;SyW+WSd!l_&h-7K?500YUK2N94Mlzx1lt)S zQ1XLIB_)BRp)^&@uqOUnHj+!r86m~L`N@c(>TbCW;Xx7f#mVMdos;&%TuGVlEfT$z zA8PPVa{oOjuHT~OIoK45g)mhx$S7x@pY*>z&? 
zDDzU`<4Vz5cEGKJ+l(RSPh;hJZR^%v%V;0G1C1zfwD!0lBQj~nySYHa`!NDcKsGCE zyx^9l8SlW?)fvg`@&W-g8k)&HVtNjPct!3MtY53mG<7Ve6m<*sYz}#EF7qXJG{h&k@@P;}IK*wDc&+yi7s~lr>Xq@lD^*Nui!iXs~p& zXxaU}UGy!3yIW@+u2mc}%*W`)Eny$eJ90Fgs(*c>d;v$?l+Vde5u!))FLZc zy{x^(sTD)$c^0nnL&U}ll&9{(;VQW+{GYmTiXhm)Kkr%b$Xgf|-%Yl-Njj?oCTo66 z&n3N4PM!U?QT&V-Bm=bwLIKx^m6Yd2(8GqzBHt6YN1-|zRZd(=&W6$@g`9n{^4hf@ zOMAt9JPKh~$yL$PFLr(~PZb=UMF=7`BN7q(D{q|3AY55LxYsA`1CU1#yyu(yRPur# z|2(Us7iSvTaeZ~jSR5%*0tI}Psge8j0MvF>9zLf2=>BcsWqRZCsq(F?B-sX5%~95h z9pwqvpMXKa9i!S#fw__yL7~aGx>tK?d>%$q9A!W~qx`36mcU0}qU?er3)rYCg%^hW87|m*QZfWp z6|_=#6Ji(BC)kT-NuF^n{k<=TeIAQ*xWkM7`XO4pd9##J=8naT>~Hhle1S;YjZ8ZO ztSJ4Z4aFb{F5OdHK5bub)C1Z9uxxD7M_BK~R!=W@hBbwdn=1UvFSQvW0&-R!kNDa6 zoN%iYVPAoNN$D3>@t_K71qY>?#P$KJ5sbmxPI*S_PVDm>d30KH5N7KXus>Y?YRe5?l$(`Dcn0x*j zqR%qw1%YWE3xx%So3y{8y~dm--Ulwp_W*YAlrk{As_~IHcml5qP85a)FH*uJJQxKS zSUoUMzRs`b3ctG|vLjJ7z%K97bgsg|Go4UFEAib~#__i@;zdSuKQ|yOGrh2htm!fV zlm*k9YS5D=R&=drlpwAw$RznoC-nFbn`)9eYui0Y-M6+De;D5rfzvF<0qJHz6n@*2Ia;21icw+!B4R! zcjl9v9oBYz>ZS)t$(IKk@~LL%~0429HoZR(Xt&XUeSFP zV3U-7w?)F4L?Id3K-ubb6b|oBg>xz5+~+&2ilL?;8F}aMB2GsH?Qw*~^d+BD*kUuq zp^$eD2o*77;_O25zarQFnSwB5AS9;d7QsE~dGA>TN7<{0`B>IC*An#&%9@M{iJ_7C zBxw3AyK?>`N&yLu{Ckyj(gI%%;L{P6O`vI{{)MjzSkc(cWt_NLCp!aXJ)qe;B+c_A ziiss`B*SKCwUDAkG#l;hPAi5JkiEaPPGqHe8|6lAP$$44x7!^rFp@AS@h?sQDE&fw zj~oBKFRIJ86Y+gUDXKK_nS+qpz+nQne);j94v$T?0MJ?$5)rieC?;K0OzJL&T`7<; zNS+sl<$cGM)l}H{bPPhg5xK#Y_nqH5E@}_+OJF**1^-lt_iCCP!b7b=&|fCbk_KE3 z{xweE<+U_n(&vCs@d>-Ls6*esnVc8RFqMQbHYv}a{JM^z{NvyHn(8S`n4-=pm@Ds_ zjhPT-h-cOgx`@US3Pox>+HGsqQl*>tvvixeSX>%t9X?(t{xU9FhNezG`rP132w{#TTD)P~xqaL%)(?K`eILSM- z{v-ab5%;k^>HTK_%C%8hZ<>8uXUD>Qq5YqyMywEzfx3EOw^HG9e~|je;6c=5wR6KS zIvLNzI%HxD)W^K1L-z}?OpI86a5Ujy`{GH&98Y@@+Gik-8weI?V^tM3x{EKKH+%pq zvC?4EEsv&rBt?@@+u@LwkM2#J8i!<6+a_z2oG9*s*G%^ah@s+b03ZJ5F74b{txM)N z`8Q)AuUHsI>wEYU2V$Abg;EH!{;&fi+`iHxv_L~qM(`O^G!5}y&6x2wO|dzy2;S9? 
zY*U9V?&py!ZuLEQG|6O7!*AJT+$~ZBsv2N9k+BLk3DtwX7PWU&-4cg^?-tB)+)%tm z5u$NRKGrk((rf5l(vP1;JtHDzv(?AP<<3*vT)e<3oOEeaWm05tKN`(biT&w!E|7>6 zB$7BqVf&@((QJi*O?on2%=lT1)`2H;PNjf6))louk5Q^0nb7+A@!&hW0QRSx1+xH0 zl}K1b8r;(xuT=M{7_UuHUFlv2NCaobi-pX@L~dU_9=3YW{>|B@{nm2m=YqiJ6s3bd z7@PvdD&D}_&5*6!2oCg{_poTZ3t7UKu z0cZwe+{F1<`>Y~;W+tnjWvF*iGyK-R@4VY!!!%I&`OA>8DarG4$Ou zPV7ws)+w(Kx~1D1rUta+kco%y8S57+@j~*$7};G#4{Nnw!(Yj(1Aa#@ln1ITP*01` zoU1}~8osh?uMXX3m8w19dsrc#(Ob^3)}fuJXhG0~GEG_8ZqRfB2}yx>PS^sAc0Qap zBc4dPzWLAZMAKlfcJW;dJk8}&Riy>*YxXksVsdVca#bI|qe?1wV4R)qg|Y5b z9~xP-2_BR~O>wOv;`2j(J)S|&T00p<gtXDD>m9`S0u zlQbVh2Sl+G4!;h|p55i5jF5A-<>wivQu(LD=JHO`{XUvfvYtAdTzQ8+=}@uCswJ{K zv}q;a6Te^F>c=f{S=sHy{%ucz9p;cXHvj0_WkEqBs+rl1N{UCB7JH-|lZVcIs}*-A zqKi^z5%y-V2%hdUyISX{I%WVdDu;%J=g%M3b!v$%mir6qdw%8{KWuU!4&3n=i|x7r zmk3zNL0;3`H=l|GK;!4nXIp-v2)nl*oP^tJth9@(2G_){jm<+7>-sCc9H02pZe+qS zc3?s;n|QweYgLt-{29tkQW-j|#+ie+-c9?7yoDGO;H!f1RG4R0i;Dx*>4s^Oz=#f;Bmw{JE=Us<&KL{ zcO5;z$Cs**2s^wyL%K}ZD%7#H`+#3HcY9kd!ge)AewXYp7jPhoQ&+O0@8qZO44YDp z23ExLQew>(<&$`(z%!=Cesve%D3Mjg&rd-*XRkxRY{Oc7^yno|Jr$@1)SxTwqs&-) z?Jz!XqRqqne;a(RQk6mU8sCL2s1N+Wyzkf z5n(*1fQk2TgxQ>-O^2u&yCgxT3h8iq+gq@{#b;6uymsw5BG`&S&d0SrZLV+LGmZk<&qSp`S*6HpND~df&hL@EO-i#W9f=pr54dPA z3-JF&^pw*#WRh+>H2n6CxX2-d!|iXZvt~Zz;Nw(s4hOmb?9F zf*RE3TMI$(-^Xh}X|(#%mte0w1x679`i+_~dl041;@9F zJODquvXV?(&KUB78!XK%N>n-a+v%l)1~U(HyuNh$psD5a15<!msU6}x)MI;3!_K&HQ(a+%0O^`_WmwY4Z`#% zSDl|7&1(b&zX5G_LK=^*UR6}c6mlD_jerHd3{$!=N_d1A>uXNZrmd&~RUIu8AWZQd zFqO(p-x1_<5jW=eFwP)Gav$`o2|)dupD;;G)PQ&7A!QIJhmg^*MTdt7!O}E7*X#UN|1jV;9FVoGOA#6tvRhwKtqzsPWf~Gmf(mE|U(m8z)z;`vUPAqxscy|f z*Zl_>&}MOH8(EtZFO)nMBmn~Ji(b&?5iRjbX7!G_VGycq9Ct)p-bw954n%BM}& zFr75GXn4ZGT)wV!dUR($3GToy4B0B^waX<)-XscfKAv&AS-U%*UdH*eS?HLsLlIoG z2^@|dR84**{OC%}JXANWVpo1~jGGP7rmx$GzN2go{HTpRvQjO0G!=Z4`hN$RTmd6U zuxA=KcD{=;+gx!%F|*uW#QD+X=D%~!j4-5vga`7^psHTL6bM%Ju>a`zZ z=mIEb!pcV2Yk>P8aUY%P_9%O#k<&B)ZXiI15C>S&{N==09_xinQ2=I zuRZ(ED?`Lb&f0Q`?!jnH3`}x4|Hz(bDHlZdSNbg>cFDIi+sOg3w@^zlz78~mv{>p< 
z@Xz@Z=JK?I{a!NZBd1e?aVX8-8sBy*=S@V$^yTKg^l4g z{6pGzOZWiOnxan3R^t_d~$FE^Uu$mv_97*Ew({3iWLU(pi|% zk&jf6zIgWrGZ||R-lH=|BYM&{TV&omGgOGQ8WlV^A%aOtmjf0w!g|_`jgLfyKx4^p zHe-I1TcH=HP>oSeZC@<$8^pK+R@o8UG2(2=t{Vna$>nGhg-UWzb4KRj%Y*PPwI{nv zz8b9j6m~*hFC*{xmJ|i*9j983B(QrHCHg~a3Nl8ERY{XzwA(~i;Bb+@{vk=*v6iW)#rV$A|>`IkjKE z6fO$geSZbawX!1avjSUtYbGi2X~Ga$HD&3JDn&+edXRNcOEa;-)LcctEN4K3 z5be-{W%p^ZA`jCXh5M9%tamax(_w%b72rZVTcszbxbdIAi}!>!v1LJZ-*)g0iD0yQV>!-jW}5AQa=M3o zB`3TzGx5mvn&wVBGg9|;-Ump{jjCqt7*b~Z&8dyDPjINXmIV2KO6=h?r^=(f`|kOJ z%%iz8#kdqfoflfw3dyG;GGE@8x?;@hG1DF%gFUcg)F1C;Tp9A5TW_7K&A;&MOmeN+ z@Q}dq5zYU3!Zm1Fe{CyMRft?G*amotP4^Wm{9LG16TM55T1vKQPzfaZikwe+h5#?Y zgci&Jb$L`xJ&mga38~|cloyN@O>WZwy6DN=;96r zb(W-3X=(1xiG0j9d(KCuJZ006is-f`Wp>hUrb6U&(6D2CDu}-9N+ivXiaDZ1t?suj z7Ku0f_4of(Xky2R`>;b!V@W3e?+*k4@I&8YQsygi!zvahqgn3d+7!sG{k*+O!GM{I z80dZCsnk{f)&@hNFBlrR^@+tF(V!Ux41)uR!Q2|Gh_Up!sHb|sIoO*vr9((a-;U)r zNNYZUz0HV{eCcv4Z$0c4T4jAC%T}GBaAh@F!5D%f5iSumR zUYaqz#7eK6ryS4u^%Fm?009_3_)LKbmc-}_ev6|7?D?vUN*QZ|&&xDZtHPKkZ!z6C zAuPn4VxX5mn^|IBZ1B)VPaRqJW*j8*0)E}@bM(f>$zvYiS+;)%ym?76Ek@AHB$OU= zh)E6z-Rq_P+VJ?>558typFqLle00W=#G|j4`XMNp%URcSS9)uCu#a%q+`4YKaKW5 z1h{w!>Gv-#BDCQ)MUcBQL`esK;vSvV=&h#aor+l7V77}_Ifvvd)y=lN31R_U_kMD+ zx|S%Tnb9g--Cya4PUiz#z9Sv52YE$I8eRHd5h^yGj0&;p&P{{aD#!KjbBlaxrf zx-RFhb|5EIUx%%^&U)YAKZ3lcteYaf;y40@FP%*_X@we5VNpG+lJ^niJN8Wpe>$jmJff> zE)WAAT~9Vg|63<4J*dH=gN0gZ)SZ8Lrfa7L;UcarWw|7fubFG450xpPS8Gl4X!AZi z%6sV{xG4xSZDjLebJ!M9ouN(^>Y?qVITkEZJRal9jIh8*$73?PF3v?mH(L$;@4Ahm z(wWB^c?0tdE;NBZ=z{K%{}#HW2&L`1hd8Exh=+(p4TW+&<{{Kq@vIwRy|3qd2Aw+Y zQMc(IZxb&u+qvZFE{KX)Q1XjK^>B7X<&#_c^nL6cBmWGP4Z#oNYz-<_;XFRsO-~Z?(Xb+-ot9=T8^N`=oE&02J~G(CUXofjm>86w z^ikcJg6`~){M^2C4mf*;klgwqbka|^5Kro3sM-1;*6}Uh3Gi{p?&fvxZHj82HZ{?I zlAjJb^M9y%%cwY@ZCSTz8g~fpp>cN!(m-$s?i$?P-CY`&UxZ7i&eb2b>{$Bs*Saa4_RnHp6qPcUb7JRu@8qQhE`wcAiPmD5cYoxU2>(FXM znI(h7=v_+<(fO$g$7wlVm-iv{ab{ZrVD=TFrjhqbc36hdfA~Ioyd| zI8OwJiDB78Lv~7Zm{b_m9SZuM{Y6qezEW<3K{0$<#e89+WkRbJ)_h^s_&v^fUGW|j 
z!Yd@4n>IeOYltkiYmi6D!Rh$Xzl&xZTVpJSK-}X1F))ys^O%210-7*ZF%>c}hq4WG z8O)4t0Qc6>_1PL`vpfEZ@u+-r_5uc6W^Ag{BMxZX*_A&eYF%<^ksn1#t`kpB;X>oy z7~hnG!V39s&;zcigmzC`5>lu8RJZD3XqrHyzu+8p$f*NdDJ6cuxGDtNMzSxDCNzS2 zWP)9tJW|S_p4M_KBXe6y(YCbZC_F{>X&blCMJ>2GqVT5aE!St|6KVX_m6qRG0XOq8 zMPje4x1<-fcc}+kIok`P2C^)o#C}(lQJyc#1of~j2tVX0BAw*JX#1Ob{l2~)#U7y{ zRQ4yq8^WpSVS2KBPMr!cd__P+2KeHd6xJ=ut^w{Py;H|ugK=fa>{9NG1f?bqatmh+ zztBjd`kEC>K4!&6T>8VvU>^}MBt%TH$v75LQQgf~L{J~>qk9SZ7Nf#uT68QDzL`q5 zW7@t)OMm3EWw(rXk#Tm+p}}1O8chW8*LS-P3R=g#a8mh`2JRsAcN+ae1xh>>WmbcN z59c8PG5KB?(N!fCgDVGXcT>U3D**i~9}}EHTDBu-D>PnI9YhQ>M|vKz#qL5O?urSr zLhjLc1T+)}7{E|%Iouj1f&YezZ17f&a%KBz^k$^LVv&^sog=V1Y5rr$mH;dt;-6DQ zG6&yGh`lFjQp=c&8b|clQ`c$IQ%WTpC-@W;k*OY-S>^@K&s$jKvWA5*(05ia>o zUzTb$o&q8Z>_ucWr?`Wpi=v77+d{#c36Zs5)nT(NqubeW2)IEz!ujnV$l93(hgoC* z<>k?xYi9Hedfwtg6MCtUJi`8FxZ;;m$JEoSs!i~4BU+gF{QyT30%uYVqjA=vv60XZ ztq!h+wdmK0+@(x(uo=&w5q#{#@HiVcYD6fI>U`}yqp6D)DK)&&=HQsJ3nzk$>OeYa z_Ij0a0RI&&=?NVRh}Vk(%9~a;VBQ9Rn*b*tB9GR7uT=pqsUU2YpOwP}CqER|&jw#I^!1Zb?Wq zat}FK{bLWVleR1^&-?@$qj@!nYnf3rQ!_mAIQI{(mg)_1!M?mUJqc>2wJ=u}#2h|h zmP*aTOs2G5%Sm-LIGSVjJgXlhB1Xcl5`CTg@p>Kdzl+`fh>rn>{~1EF*I7wFI0On0 zrE)eck;7i&{yT|&#{wAf-494o>;Sih@Q72kuKkix-`+Yen_S&a85Bj^h6wJt1Q!?X zMsW#|GtivRZSGo&A8pQTCZ+;-?W0mOdonJ}J-lFg0?T6ch!p0}`D;rdKeC@>%}=e$ z!avWLU?q&PDVjz!2maNG4>eLI65hqJ+>P+HIndp&-{WQ2`W~3%{apRCK}?2>pVDT{ z&sF5%qhDt>Js;!Osqaa8xgXtgY`5AkIFSH>x%#QJXL`Ee&cq-QMpIdoC`xQP1m)9O zO1lEo8?sN_*B5`MKEW%fKkeuv`qDgS}J(@^CbS+RPW{Npr=fkAzA>2gV%`; zZ!IIdk|6%d)685oGeemCJdIPtcgGum$b;1TcdpN)c+5FsufhsIzuM>fjP8dV9p;R}G0b$OE)*bG)?m28) zWF5!dq)Q=0Z}T4{rF^s-??q(%m?~UtLp{(kA3YdhO2sd>z$`B^Y6*8c5o#&e7c)Vy zhL%?ycK)NF>7JBWd2}G@Xrn%VgsJ|Rxl>urM=D^O4Esp-)gXRhpLx0<#?BRi+C4#% zZij<;ZUD&07tHY{m3cv3*3;&5_j`ET>X;IRqNGSBm2gNj)825E8tIqMZrD#nIj&r{ zotvp+ZcI$=v9plRYEndId3Zr1Kdg9Vu&bQVW$0>Dg0(M-cdZ*t8mqJRKT!{4`$#eb zzz{)ne{&?&n~mO(IF*&AAdOmK!@VI+=BLjorL<70O2{-^QAp7{-AVHXMBN6iw3op} z7hG{%Zbc>02t!M(=a`OLCAZNKsLj;vTb=NC`-YS{R~V<0*NwsS(Bp)KP<*K@`r 
zG&jM9lk=W$~0J+5I6fAsA9YQ6A&qS#6q8+t;k<9oU?1SfYlv_)<_+VYR)H@ca#c0-2zin?$0-~%Vz@cTyLqX?wg%i}_ua%IV-dp8eDo8zeyNiR2 z^kMqw#{M9!f9_KF#g~%!J4$HlvwPz6Q$C$EdZyRk3DkHHQ3cN-oH*6JaV6~|Us#A~ z@o>VH4(I$J#^v6^;6`*@Gh;a|#6L)Ibk$9E%s6@B*==OCpwlCYd%Tm5eA*m<&3}?O zqO7aNLZ4^#GDJ_L<;VmW^@XqwsNu}m%Pxo4b6`E&7j1r?~Xv!EV2{2>RGY@>%=GtPo(fKP9+ z*}#4l(RWo&%Wn$`%*rE`&SFueo=7Y%Xs+AJ)XzmwjDCYjRMo_#+j%aQ);!bU&-VCC zhLnrSPircnGm}A`ef>|9?E@NKx6H^G_ly9q;ESXLuIs-$2H*e&8Gbu*d~p79=!7Fe zlxTJ_M%g?;Yru5HR6>sj5CDe%eRn&=hm-d)~oBTep3 zDRs4?mj-jjndJO;KPRaa1~L;TZBP7X{_(<@VS#98I?!o;RBlGzx~hcNrf7XM+KZL9 zQ9U3WT3@)+qqT@;HOXOuwW@{mCE0-khGj0kq0>{U)z>J8)8cDa{7gm|oqM!nqxl)i zI_|Pe)yMa|PSHGu`aoT~-T`Q~9@MCequD<(1vv#)8w-}7Wxx{zrPUL1@& z$k>^sbP)B+qA))2*@vj<=!>dP$zkt8$is9!B9unVMyizh{o(6oeZGw(9kf)`_-hUJ zBbQqgDeMTU)=i!loMeOmW@Hm-s6bC9B3` z+ax_ijHK^QG90%Tv&JYYNq1vTT`n}*iBkQ*HEMi{*Sc{++4=xHX>^y(zYq8FD~Jw} zQf+Xu9%`lpJZ9NwI0iNuk_b~oe^QvIeGGzp4zjKNVL*QAV*7_(5Jf_yOXMcjvBH+! zXE#r3dF*eJ90h{kzmJJNI6~s1eUy%9i6tPp`h;szUp}Oaqd;A+fVJ*6H(asN5TT_M z`n3#oIZbSrUAg;qd6?cli#kp}rv@a>7Iy4*iQ(!~1EC7YS?FiwlENY}ln_wPe4RS^XH;ry?bI=%CUfsyzWK`*?OhoBKW z2Z3QDz3`24YX-r7C*AnbG-07aR>XLj7S<*qXy+$TS!GF|AT?-U0j&$ZJNv4%!5cX# zoJ^987_1{Bz0#+-HF{PYf2rqmJw7lBd!Rqsc? 
z{E*(K#kp#QBrVwX7<_O`L!i@{%9#WOKHcoCA6FOeyxd;^>Q3GO86i4H@YQHz?Y<7p zt3M_;M)9JR&5i;WUnK`aKS0n>QHi>najcrz^cW zGvfgV$$o~ zvMlz=)`&{_i@ND$>AOoe^t>T$EX<-&4pJYKLnvYOO7NEUPmaw z)iNB)A^qhd^(ROabI~Rym`a+Njkj@=_Z0cjfH+z9hk|^EyQ6vKDgS|vI>8=d9Fe({ z$!jF6k4@{h=1s>jc%D^R!>{kEokqiwO!jk@L+Gz4HSQpLYC+#FRp4c7>7^%wbG)ky zx`X7JV+ZKW#rjemuN$qf&j2Ct)MyW6B&b`*STV426PrCPM6EjIp4=s>KN!FPHjiZ4 z^1s9|O!+Zio5Zyu$Afp8WY`z@>7GS>TZzY%zs?hh%ma4Wy!@gX#yq?mp&O{-z_#;( zjff$efEq3W98Kg2a>60ylw2B1+RZE7^dgvsOdo$yao(-VA|8vaWiJpiIW-tXt4YG8 z95&@(Fq3>jRqZ0+hJt>qp(<$pfuXRT6DLS31eO+;+1;H8W^!=pltT!V&0_*D2t2hd zA^ds$3D_FawxZ$Bt#8vdYk!ecf{8tdq{bBiYNCq|u zp)j`;*w`0>OyYtx#^glOeHh2WHaLOuBdcLOs5+eDC1w16(sWh0_kS&Kp006M2e5FN zWrW&CcmQ`?edusbCelF1HL$WJ)K1npCP1#<_`n9!l55w#$~p|DH1rwI%qrzG`lI<9 zt*F?E82l^nzU*E+ezIn^P4J)WidsNx;qXq-S@`B7^ z)1X^nYOqT^dg4`j91;FnHAG8aw^fcuB1}dtw0iQ$)T0gA)emtk_6BZcFV8nroysSjr9(9`1GY>P%6qqWjt;?LU~FE*0q z!!;SLQ;EzbpXap&gD9y={Wx3-1V}asi2f522ti0SNtR*rEW`rmj9g@KDEgbMS{~dK zSp@?h$5wOy11iAE+L9uG_befxUx_8b@If9gr-fm#q+0fV9D^~lyxq&L(?JOM#(>2Z zX7W<8_RXLCzw{GZ?E6oLK(Zib;}|6M3AUL0r&LcLA|@(p&{sP+j`WvPKOKM(p8Hh} zsiaj=^2j>*Avfa6Ocm_$#!9}w5&p*-pVn)Ez&{D5c&JrU6dFU4L*VE%u{PRPLA1?- zt8IH1IgR3~)vc@tqII0ichTH-7suE^P2b4ZU&5GXf?oeyp+;=LMsA*$gVj6ojaRPt zBip3#M?>|3HsxxoKX1>W49C!ca9|P|6v@n2i#ky&i$Y&uy7xD{BV34)YVqX$P*3dI zScAb=_{IN1;g+haOe;t63v1^T%rb>^ z={Dz0NU|zU1G>c)kIMY*r#+SYVo;G*4so(%}D_arnrS)emKjH>iPg5th>c^T!Tb;_Zwm~dpV5_{*^E#hhRpcn@=nzQf&;V!6! 
zcw&hquFd2En8bVdc1zz!Tj?CfTh@AR^QdL)!SyXKL}Gqa5QU%ae*)-0Sg?RUgR* zM$aD4Z`j;$rJm=(9^xAL)jCuO#;nIdOzw8wswTam|AK}kMWp|DW@X4RvGV8Wek9%s zU?X2g#426xRhr~U;; zw_mo>94+Mg2BO)OEmpk>lK)Y;H)4I>1M5aT#V8!HJEymU{I0FtNgT=Yal7 z8O<0YKVuq?aj;7QZVWgfvQ)5D#R%&u-5;iMg!l_^`p2Q+0xzV2UeH+?m>X!A>><7f z7U}28@Xt$vTKp|DYORi4tS+nWxXZSK&Wi8*VRF2D;`N@aL5>c;6iyD0c4@{`E%B$V zPK@Qt)&~11rE+4Kehv^c1V6N}IvSq6V_VG@Y-{z=kod%j1ONW=@1hia!{MIM%oU0L zg>eR466ZgV{GF3N^375DSdfGI9VWg@>ixOV?H90vmI+X^z_!UKm zL(}y9?&|MNNP;#AN8Qr<$Eb47*C{AAc3npj5>%eKx@&Yx`EZx9q=2m%T`2vBj&=*{ zB8H4Ls`?Le$XuEbPe7Q>esZCDl1ds=cTh9Tp6>*YuMrEMyM$H8CxCAojB9Y9j#O^} z5ulxz!((*4Qs`1|VZ83fyXv#IYZ@o6p>&37sUVuTTinEGnBXrF%dXQ!eLV-%^xZk@ z%m^ko4J57K8CV9vwrhHpv+oEH1v-~AcL*Z3EdykqFX&5_imtXJDYnAW81^3HgoM93 z%eFCdJWo6n2U&H0b|{rMd}p5oyMc-#lGPbpuXS6rH$L&RYvg_+y;&3(X6BmA-z5&V zl&&sT%_Q}=JJon7Ja&pgj!eW?dU$Vw;6?m=3jejY(o44Mp3uyP7p?Sy7`RmdlXMp` zF)IX+wgJTZ1s!2nS5Cu2rjCNA;=E~|zX7_3I21-eyK7-Vrf{bt1pl^BYzlQLNY!- z88Hf7^HnPOMT6;J&>WgswoQz@8PC{yrDAk?=L2J6r`{x?YW2f!RH)&QU@S7a8V_M? zp7w6c)O)mhqY6bee^31GU{=GV?DmPs4@0Rm-z#}mT3|00&veba(hx*630=P%W01{$o*qX4Mi$7rXv!g>9-2D~VCbQ0eI%)h%p zJ!}|TJ3&yB2z<@tQL+_4zm+#f0=HK{&2T1S3m#8Ls4z`3HGOo1{YA%|gDUbD8)ZqT z+UCi{HfA_7kVzinbRHDmC`2cHehui+KNM*lb4N!sKk$ zI0_@`5%_N&SiRGbU(c`m0Ss%<7cW|~&^sXoNC_F<6_%020JijfsCb^Abx(-L9_&X> zy@x!8`m>Rem7y)1nzh_U#oPUpbbQKj>}^b=*CGGD?(YM$!$^kGlz`WF*M>Pq`?sx? 
zv_?($U^-7SY;|P{1SUI><{NOEqM1Y7;P~~U>31D|{3kD-IvYE`KZ8vUn*2yT3Fi3| zNv9(!6jm3)9GiY8oizsJ$*G!kW`TLhrt5+q@sX!T-;Q6-%728~P&aWI;0-dT4fk1$ zP9CwTA$}O;99b(ujVp>H+yccQ-{vKsYm#9dOFj^)9x!t3`gZJY`-|s(V{k9ku?k|# z;!8A{tfo(LRU7bJZlUiRS^15%@{%%JSIMrMy)vymV_?zL;C7(%aFe+aqbkAskU7m4 z6&r7P6sf&};9(1`7D^aju5mhtCtSL2|MY;?%MAQJ2*@E9*Jy0D2AyVsLQ!C#ZLo+m z_p#)-n36N|C@zvRIM$UoY;r7B*kk0gGi{CKA?1(O57Sp~%wN&Ja@;Tp(nT{+?;egS zB#~#=zmy?t$-DjQs42y$aj=ruT6ghey6+oTYJ#r zDgLzXrPOeO;3)61r}-4d7#w`}3leTBZLT2rY*?OHWB}w(6|*$Cc@L6@kMU86TW$-Y z0AWvdT>;1fqGh2$ElaaiP#~C{*ZeeT$7^`6+xrz6BsG4N@%;Pa!}%k}DE%qZJgf!p z)zIehGGe67oRJF*b{3TyQM~<&;ZIQw1YeZoL!-dq%=5y-Xdfy>d)vRZKWQI7%y6x^ zn2HeedhC%0Bn50{r);;Xe8%x*(Xj!4u5;|d1=wk<4><6%80xMvBC&fQe4-9;XRYfj zSR=_=KSu1}sL0FQyY}DwJae6^&c#Woo)RUDGd(aqL$o%%@KwQP*pXzy`HQa)T40ap z_HSO5Jz}Qb>S0-xgfC5*@S^@`J}wp z>n6)jBX>tomEepCV@N3sr&Uq?e*6|7|N24J@50NhNDsCAQ@k=Zaw=Qp(RW*Q8|J1O z5(8xI=m}FZ86O=t|5_$^cC9dm%)VN&B@ph7+^j4!DI24Ulk@^$~1 zZb_1;hy2+#c9=c>&Y3xtF(d|)F~36~6jdxgexC=y2ajcxoy_1e2_iW$3wmo`dgKit z<-FfrS*F;(nloxLpQww-|38;w{Qq2z8%)IFWYFK0Dyeh-?Ff$kCuTtYJdm8LO#clB zLosHxJNo1T1FKp(oz4r%yfK_+k{SKq2+Dnp8m!&r(TSMTKcY3z2lqlo4RogP+?T;0 za&>^AW!N@oE1>nYm&sUJ?c-^Xpk*CLqM%E_K0`wl+qm@JcgPOpR1w_^`*P+aRiOz66*A#)xAfeT7b}}C~ zQTJat?^P$_JjFVy+DEb~Rse@Wd&LUTObS{otm5`jxwwMvjELE!+Uz!U;^!ev?gyT%!ALA^vd4nciKU+!aQ%HxJnw7(Fwn~7@Med23UY;(D{w1LQU;_}oqrwp+4djGj*N{+yOoi7PckjoxcqyiEJx#~FG1~Ly znC+J-IbrI0lFC~nb0J;C5q>hYxMcBadZj!Rat#Fzr)E#Q)^DR|b65}od3l1}mW*mT zlC(GVefRb+t7HX6ZBB%+Q4Yz*gkDzjOKAwXyj5o^LR6&Yx6pd};#K)201H>9!eIZC z+g$87$p9qaz=W-VH?;zp#J-mY#uedc3hip5i?qAxNs7qb!!#b*ClY%xF9yW;A{EV9 zRd^pX^)6tz_F*iuLUU##S=5OIQQIm3*^KA(>4Qq#)1R<%(%|2sQ|=rHZBO>@i)mFh z15KdJdoQ0aqHmwzx<#{R`9^(c&%L zo^fRP0NbJTS7I;}?}wpAymD3v9_Y{XV`w?MDTQhl&ECSwQhBr|d3f9?3@)JJ@at|M z!3PuHYqr1fvMEvUuUyeey+pg-$G|)&t!0WGctu<(1D9H=K6-cQ?sL}lAzMdN*_jNY zodh}U>@YEfb++gNTQ0IA{P$XvUzC;0h+>1d70sSZFZjS(G98=u2B@Gb8R7d)FDu0g zDcq@aDdRnF^_*CFX)K3{epw6N%$S$13ZH(Lfd>kh>#0oe9@6I>m;Sqa`H6lD3Vf+R 
zEfRF+(t;|Yb)iauOA@#w^g`i~e(n^cIdLzB2@*Hm00hDWxy28`i4pi{!MU)RV)sS0nYFSX5=(95QvY6$*dnYHsB%Dnvvhp(lY3IF#`# z!Se5;(tn8?M_sHLr1(|Dym#$M^4#CJM1#uY?ydbEKguya<>4#FH;})-F(tcCJLs*c z7uHB<*Gcd@+FOaio4+Pvyz@v$N^U#){BmzI^&~{~Sj5mD#mK%Z7L9-rqu=E_zna;t zMgT`QMNJ-}(bY?fqG`t)0pS@hko-BWVVbx=w6$7d34gp{lb(YFS21lM=>>l&ZPa1< zMZ`28qKNoYM+Wp9gN3h;nf{^|<}1$LkX4K>lo5c1Ie3_OFR zJ+}>yCuv(3nVOQ(c-kA%ID{;;BIM$7hR`s;UUwVnf*g=|y|4!Kt*vr^_~hXtVjh1N zb$?rie(T&TUD(H^w<^%!bbx8XT$9ypQy2Uy`On{B!UO(`9s)!zs_+6JsQs&_33>YF z`-BU};*)YF-fR_89z|(4gI8zvBtNqV##<)nah){IJycLffID;O7uoU0*R!K16|{Ww z{YQS?PTGhIa9&9uI@F!leks3sFYsI1bR)3jvxOnX3g`?)V+{nmz>`bt@{&w3u_495 zE)#qpw7k30?*^@Z3H?Zz8URzlH$hPV7U!UJ+T-@lZvoJmy8HZFEMP!cK{5ap|BH9Z zNBzAmcL-Ik$Ne-CQj}JM&7fr#=0ZACvqFaL(}jV~^B|8SKlSWQ4%rhxq9~1RHR%SE zi9pnQ_yp7aDagA+8weHt5)#Yy7P71`HJwLU%K69CDmTXl&=q~ICMA!(!Gi6^7j}61 z+93}N5f-mDNpS1wB;e6lu^n2=o@7qjK>t^c$lJf6dnuGxfwMwg2dB<@1))$vju{Uh z7?$k7(^aBjy$h@5jZEM>ku%)NJ=oMnvKb6PC@Fu5;ZRo;6&_0w_0~0!`{bgpfu|Y6 zIW&)>BK@?PJP&Drz$5MK_@)>MzaohxfQOU_Hi(0_HIc_I{F91y8ykcgs%)|zg(mko zMcMd55h5`n6%Ne{vcOD%R@mxaMlAE1_W0=r%G+)M0Vm%YkAy}RH&ma= zf&*Y9Ystk-o*T`N_!s<_)~mJR_nSg)-ETU{3W@7Kj0d24u<|$IFkDa!Mxs> zdV$F!uS#i#Ks&n!0Ws7sZ;Yhk4{3KWj)u(9>XJhN$8WUBI{%tora|ViQ$sS8U>;ze zCnQtBqoMnXy7rQuGYMwKI|%w9K2a_IkCODXd-AHq?$4;=zMVnM3j(x3K-E=S7y+xM zW?Xe{z9_~UPyQw)LN?*8^Vx%NM?{BaTSN{izVOoZVKp^?F1P@HqRKFMAeFm=O1_c! 
zKL2^^wq7N=B`(}HS~Ik}GTe67$X7Av{uWSv5M905)dl^^hMh-t52y>VB0v%HF-2)# z{x#~OHJ&plYcS+_yW6+S@6s$tA|M{_eIsS=+X##&2h8vR+HTT#fKA#qcZ`tem71K` zIOjNl?F;&tMU-B(vx|@{j#i{t9KEIwC)`I(ljQU-jj)%z3%{1Y-@fc-2PHle9UgLt z<4JDJgY0n@-Y8xvoPqPMF*4Bo|fDD|ePLMf+n{!#tY@6Z2s#_07)F?uN@XdC&(NjxyBPV3r+0FVbo z+FF-YDa3n4dBsRb!X_rhB(a)XT9O!o=Y21TYt87buKE-&gqBJd15oiLwFA6(}DGJ*yI;ccU68*p^$%Ff1BoO;_nsx=pUDE%!{ zMQc`@(1tjP=9!a;5SJ=XTqQCW_S)s|`QvAcL5uCepB*ZK6f5ds_w(I-m=Jp#v_$a8 zZ%0fQ!=H%pG$#7j3J+-~Hwc3+pEuy&3#_qJVK{QIHG>io@IQ@gD`Q8hV^@|tk}r3z z=n=(79Pbf(+RrTHPFtbMpVId41lyz;5FCy-i4{|m04@Q0)n7eXx;6LrKPk$Xsf1Xd zRIuNpLSr|xBE@iF6uL3K%LYBAVl29%nWp~j-il@lkaeDI?oS%B2()>(3towmSQg?U z!T+mD-!d^qPt9T1q=O{c+Tvw+T58|0T;vFN1)+ zg)8mM?p{{9tV{Ndg|i)08VzD!6dbIw1pfp_1< z$CVXb?;Jj76w5R`d6cclMy}6Bj>G2OFBIi0<>SO+HrlpzM{1WMGalZ{dl#2KJXBik zHWI0*-=Qrid5eDsQ+m`A5_iVGavoX!CE^*^mro0;a8bSc=d#FCmj0E8Bciva2L-V4 zykoP&>7@nZU)laoD~t} z3oV~Lm8VFpD&yD6$E*47-iQ_N!{ZghT;u@x>TR}q`0erJJ$=*GRjrkskbtP?;-zZv z$okU9{8jmMW`kmqFR$zMD!#z*bk2YHv?I3!z;Na-Y*#=%L8w)y#uR>TM7BoZfLQro zk5ayi+N+j=9c~w|uz02Pjo-gUjVYpkY)piP?8ogLWRM1$byGalv)+~ZCbt>9EU8XPJ6;;@zOdth4V@vlo5h112QxK&Munxw$2IGhGmeU)sKg4d`77-YY z>q0?M@rsFw{y(>aVi$3t7vEDRRY#!qXzwYTDAZnQeT+0e!DJk2DAWXmhbg`M!EcDh z{3~lIsb$2Q{Z69(Fow|SB#rVX|JGwMGs)?R%_6A%cY8mdT`9z6bKhFk6R6oK3g=-H z00EeD$mmh-g#q|$cY0FFai#ciC)36gP|j!zc-Db8Ba-ZSzU$$pgiU6A@7zUyCFUwM zZ}+@u@XhXqm+ULpy)l@%v`=ZH>#Kjg{Uu?XuETOwAt~3>km%v-=lpN}yT=>S^}peW zhY=`%cf7|wTMITn!iWcElLGqV7ABYZ7IpoK$FzJHFKu+1?QQ5d(g05m0GtktU8vnA zb*6a;ooKI{XGfh__;ylJ5{JS<-KQlPBq7-^hD1b_YesTD8<+#lIl+{NdPGbMaAY`4%)N`l=vYduqu-y#c3(1( zg`!Y3f@=g!zfS8;e~L=*fE23(1`gI#;u9n{!{fjp#!fAX_nLt^f!}nz4FsP>xgzr; z-jx$u`aaLJg2zbkpsyOcQ&lo%Ar==S4g6DkW+%z;^J}Z=Vcg3FjC6=wilvFjx3Ak@ zOk515K4Er6D(-X*$6UQ0Vu`hOXnTfR3IF7lwYm@#Zij&`9a$DQAT!37C)HCw7ZF;M zY6jkq!FwHukA9MNTOS?ju9an>?tMC}Nh(uGRceRK^tG%vS`6g8XZHO*tK!V1>OB09Km7mxIAZ_fARofA8lHZ)S;iZ^gkqR2 zTEhS;Hu8h}aUepXPjHDYMIv<*%%fl4=)>XxE5jq^bSvh$=Gn3T%SAc`l!+e8Wjt*+5wgvFtLB4Y6)+Z(N;>U=bJ||gX=H5{rqE= 
zGtx^+A@U-zhUkvEyQ!rm%3)t$wN8m9NJO9>@;rrh;i2)o94X)T$X>EMSH)y|OR-Gb zBP+Fl*8MDc$z)SdeE_B}iFVT`)yq^(5T_M0Oue%a%PTrS*HCb2T51x3&5Vk8vn(+h z)s-k#V8u0ws^uV6rlP1ql~?Acd2wF(z(m)XU>}lxc;N<`{Hu=?S!%4MO;an`%5KS;PWbBj62K0 zD+9PlBV^)7sFot&d)mYmT1KITxJv1^PPw;1275J!G#W(qlqj8&zMd?JqrMHkG}iXi zE%?^~K%+V=AsDEB$~rdHa>{Y4l7i}U!cI+JL@JjTt%#l08&aGgf67JR&rVTdL~zP& z4{h0)C!Ln;Uk_%XCN^T`++CqF+yzm*Tl_TZD57>T5)i&&ifF@aW_F=~G-TTJB1nGW z*JehCULYt9Fz16d&xa}ap2rzYXAb4IvG-a1RU_m`;|(JifJ9ZS0KQ3NM3=ayJMNP| zXT!`%d2ZTt?B_}xYt3?^2Cp|`pRo6SOAPrahRdsD38Ld%kU=fx_k&zqd79N0w;p`S zb|c6q0;(vGiWfaU-IWvFM;H2aN!WJirPi`PSveFmU~pzUWVha)&YigaE81g=pE77R z1d@@{#-IOSYs}j})C!z^8Dc?h&E@j!#@{z zaWvrzP5YEm60sWiuf@Lpdl~utYd&I3qYt&^>j1oo(R29p>rsomjO1V_)lX8yOW5Fr+H6^r=- z-y*{Ty_`jL`rR%K{Ye#UjgH}Xo+^&PUq9<7!n)z_IO8V%!nj<>Pzbe55vXdoIp$fU zLR*JqF{(a>cHz^AWc+Uy04=FqwiGrq*iewNf|FMuUz;28JrR95SynVuFofITKDMfl z?=a{JxYsyQi6b{OC|lU^a!^PR!20?1(n(NtQ6<#6;Un1VzBizsvgh)ST<&Lth)-&^ z!C1++0T>@78Ohg9T*N3U>?`jlr_X^P);A6JLmX=R=yR^x*=Ig`3G$-<2Zh7=pM^%D zj-H;OKs?KT0V%#%>F-7ZHcYUvQ>m;cvG!qdFzxvIg#O%MV}&3tJ8^jUWD$|h;Xds} zMeuK9FUH%v8MwRj*>hzr*B8}HG7bevuZ>j(y7l$V^L~F0u#ffESL@S#a4*|^qbTlY zAEKy=>oHr?oB)at$Pnrv!3ONgEb+jvlbJ*Fy~N)xaT*D5n9}ouCuk8y{+}da5Ua>G z?#Ca?t!2W0{o2!Sj1S%?t0+A zqT*~29q!HQ;z>?fl{0&b(^d}k$BrvG*@v@3DIc+az?FYrUh2Z=@2z_;2{err3&M?_!p>gTMcJ(hv zn3gg(i%F@kO}}p7YqOds{K(m(Ad1)+dgh;-OSd5n{!AaS+%hjEfit-QPJZQ+4j6{@ zGMKNfplL->o)~Onl>9PsuLgH;BP-^Mfw;8jcNJK{=07}@FsW<_#*@s zP28OS8YMB4cj5iu>6WqRh>wN|ceB7`);Hi5_cET~F!x&vnaW z$8j|_&Rc&G75Q~VIZ!MhqS{E_aG&daU+-cJbXf8!Ftbs~<USp*f;axV#@bzR-mxN&3X9S%A@}~P!_C$Y;%=eatesV=mMfeAmio&xo z$Ixvfn^A;+re$C=Tr4$4@_PJkp86gQy!t51pCV}5HIKRWVN>xir|_;z4DT6}pj|2h zOkCf!QXT7r^bh;*9(>~Ut4?jWLj;H3zKuE2PH%ahKLsMZSg?Hi%O%KkqklJ>Oa&~U zz>}?;s!m79*c8PZ8>H_oQC?1W+e%M~pNo4QFB);ZO+W_VAeg4?mB$w6>U2U5Ob(ivTW;^NDwm?Hx>M_`LO>j6(@XJAEx5HJ4n)`njk?oDxODHHW70~ zN&-1C0qb~{!&}(4y)9)EobOn z54N}e2&|D#Fs4K)Ux!y{Dd>O4VA?=uHYhn~Nk3!&1B|>YNYo`IL>O(^o?ik7+{qyM zdn4a!Xf{@>6`L|1auGk1x|Zk)4Jm{i9c2UWd6(%!E;b)R<2+?AV 
zQ4mdasS$SY;A!x7_%Gk~e>fBU|BRS5NhP=URVerlM<8|cm|dEnDmY~l1zOCn?D&NR z-!KUd%cT&(H167+5m@qlqf5{!RX&{3#Q`w%=J%jLAejIy3mWm|-YIcBK56PUp)q3? z{_7E}Cw{4G810vlr)Gg4IeqV=Jd63u9RXenbK;XhkC#3gn;+hOUCi^RCUpsWP5+#|B&K0ODnjfL4| zItMCfr9TLR=Zr)I%kq^#9mkDrI@=np6_!B5TP$P^muRCs#BH9pxqrN=4?n?mt&5CTaTD7uRhd6DFJ#&niAldzq| z4Z`Vtcs0qtQZMAObkc8q`O2|s9=lt$UQ&*b4H?inf|p6T9g|}ocu9V(SZ&z;Jr0G&UOEx*5F>)6&v(KpNWh{MX)3{$0QAKT6=>BCB=|Z4%R`EIX-c{B=8u+$ zqKRFyX`#&BlGYiRUW@*7aO42T=#X(>GZz`}uX|tS&g`JE7@|EK69OOAVckojC{*tt zx@p1?g|wyLK$D+CCJ@2OmvJtX|ii)qgB zsep8eYLY&tWx8b=o`%pu5<@vY5pH^aLnBwCS5*cY53qdLMX%%aqY({gm$F<{2iUpW zzXqm6qnf$}P+$c45}b0r=T<7&A4&IGNg%$W3%qu@Pu$wQB)i$YSF4cT)p{@PhY10H zSk;2~;76yGn7^7pVlPYt%)YO~Az`IuGS27U88qx=EW>%jDs?L+joYL=yfd*P)Wl~0 z%CnzTB@WC8M_ROdISUno@?6`TRLvaUE|%R|@d*!ih|nsJt!?&>70Acoo|wGoA7zjb zVarxB!WGz_8BHgXjV$F}OX%A&`Y(^IGM+lIA5RwG^k#DK0V)DtV)k#F=JfqSi#~@wWMSvl-#WTYmq}lWE*d;-b zkVoQY8qJ7QM!glKs#&RegXzy@%AYUoj1ED!g2{GqnS`^t`C*ELITn-!;vst)#P6|G zlbIv$i$Tb{sH(=3B$vzfJr}XV1bDRAl-NYU4!huk>rS>CG0IyXR74Lp z8vQwywD#y=YU-w#$?uuw+GIze+tRdXW}lwyMT^_JWqVrMBkKNRglvNQX-476G=w!qYj}Gxtu4-uM)4dU1x$JTGo5=WA28EKC z9p#>07J`1m(K}BQ-SsnhS8;^8G_bQvsnqzBxzr>fW1^DX-6T5SLl=xT&;y*zznQWu zd2h{*kNNbOXgTc3Yzr6Sqjt7u5;u)5$$X>@Xds);Al?rMmBGxw6;8pdcX#`+5fR=< z0cUT7F73E13LNqHxNbtUH)yFhB}*)k?b(EWL`ssX3v|w-?i4W#`Q0HK0XkGUl_zV;ANAn21~Nrt8=RFQ3Ef<<^Du95OnH?_62o^DKLqp|1+ z#gO5SW4uHqqB~cp2r(mn3J_1y1`aRW1iJ`@DG+wyxMcfPBibKnLNC;#a$V<8rtEd4 zT)@f6cfUuL^vahj{UvPyWt^gx>oV`87z`Z3*wd!T4)Dh!zZ|I|HKr?}ya? 
z?reff-U8Ednje;5bB zJPPlgd0yNh{2AjM?V>n8+EmsQEp-5yCHsEZHH{uHY&v}%6#sB>eVg_egZ)O zzthsC?CdbUu+sgJCmNAF-F>w=69p7i%p6}NP6{4m+vbhtN#m!osuu*&L`CT{PMyA3 zTie)p_D#m|$Vx-0LV|)~cC%A<7qPIuxlgt}22MJfXc`-TLw#AgFt#cCTXB2Ze&&0- zRv~ooIQc8ceW%tgZ~^`)9}x`>r26Mx)2Eb#_)TGIY1?oEEpy#_Av3Jlt7&rydqrG( zI4=1{Y_1?dd}@MF;GT60)QBz1cAOV$ZaONO6aqZ=YPK;_&JI8k)!{I2S?}~G%rxGF z7BK_Mh%=c#kIo^Cy_(HHNKctu@#cUPQ)Eu(>v56>X8el`A<$gxBqYwOIU>%>F;EN` zq{1{6M5KJMdCQ~#3n(monCjtj>@L~q_O{oJ z8JDJqUxhYfqO`j@-nFWEfcB3J%;P#@j>y#x@<9R$!}mQ1lH*>^H)bbZp_})}UcyEA zx*xjHr8IgV`~pdSBW=6=RLdyDCwu{?%;TcO`;*oLoasaUmJS{YW|w3Zt@u>W(zrNl zNwq5F6gmb)y_?yfw<#TRCa(Y`tH-)*@yCJvvWm(4x{#tU)*@z((19?+8E7hn1fKmE zeH*2d#zk)CN*82Q?Gt(+Vb*0b&#Vbk#F%f~17uZ#`r?Dg@I-|RHpL~y5hq(ZQWoeA z?&wD;KB{@V*BMzkAN+u5tGc~l2X^+xVfZx_lzBLhJO$KY{XTG4BWZn9`puM3oYWax zKKO57On~W{xU)w#hsQp4lsdta-Pj&Jhlw!fBy{GX2tvq?U%dn2rxLDMSG?JZVmB`S*P660NStlTOZX#zz6GdSf1~xNMNz zNNiJCj&qeG3+B<4xsdhi#E+FjaLH0@+;{+5rfVG~psG-!0KT8-L_Ikm1+)#XV4rwX-uD0$=g-Pq&0%Wh#e9&wCPW>Te*QW=p)y zj#3ol(Qqzk(Ge0_k)htgyoqX>dZv5o!o#jc!R2i=Zhj4WY7>r_11w$ z&(BeEg)u3<#JT1E-W_MP497#6dY^!3&bW@4xgii@M7ImH?AR?)r;6Nplfn$OW!=ZQ zTk^B$M2zCuD*sh2d?=Utih-%4e{LG>Qx!8vBb+dqDbC+vc{#D`+keMggDCt!+42?f z`c2M1w*Pg_oV&=qB`Ht09^N2YqKId54fgas3#wG(NfE3!fw4ZM5x=c3ZNMwnZYNAr zckvGR9nV~0F-x6;=b!F;Cm4aYlvGm)MGR^;uE#bS*dc!o%KTalyMa20$3KM_@NTG+ zwILiUmONIQje3Vhn$@3|FeZn7zuw74G;v2eO7)<**=X&KyaKdJ0iF-#!}?qgWAk*qDS$8{kLh)D zalh5+S{T;Iudg~;ecZbo$LP)wJMG+C3W|24x{5kRwtcBA^HhV|UI)?4)CXx$*#Nos z?iU5na`?2ypq(m!A?JD2L%`K6ewmO<81MKs*qWH1fvmy1jVz56jsD_N@{jw>ppf3o z{BfE>P`)c5N)=!GLmqo(9S_uYl%~a@5@64r*@(ODthi7>7Q{gHy z^fIEo>cLa7GPUFQ7v~1Ugvtt67b5eah?}l|JVB$R{X=IAt-P^ffyVdadaZ(JRtX=z z$wcR2P?;ekaQXx3Oi-@VT2I1Ki>>AV4KPB8MMT%(mvlPsIp-1FYZ3F$;XpcL*hrEL z=tz`oeBa|&c(6xKe~UsP*&N@@cqbntlAT_$MkV&d@kPiFY~Td*{nA=ruDbHEsCX)~ zYym&riISkKAE_vx&Afh6{$>aW7r#ca0!n_U2)n;9%*gb}CGmiwm2Ob?ufOHQ$tgJgLLhAksUjp)`vY&( z!u+4USpM1#B&~#e_(F2)b-QCCQcDRpAppM6&@iu9_v%iDIeWZa;%J$qOz$!n)@nRLZc3y1^{D3m5yt+5 
zifCCu6=^_}V++{5L}4clT+AYdQyyZRaBO_t@W$z5u-jM>$M}I2Ojy0~aPqfVeFg`4 zxP4acndj7UBJ8R$n+&`L!tNW#AU3%z4_muieAswxrd2c1qz()fQAJXU2t*jghQB@Q z3!9soH{C~6b$T0L-TpU@q@r*?R>$DIoSvj)@5G|V(uZAh=f}|R3Qu z#2urrwJ`g#&EQooJx-d0L5G`$@UKQQ#|kgV)m|T0NHLprMcjJF^6SG_*g4`PMt7o4 z9aB}$4~&~6f?sk~y68P2pRt13AI|~hOl*-$C$eB2={d#ZRpi%}OTPG!@}!0r61b@v zHFa09*Se?o_)0m@Y`f}WUC5~Kuh=&fK&(8p(QY&_d_e%;`Yf>H;d|UUDfNz-nIHDj zPUUu2>H>{vx(asw-{ten)DW5xLvJ3LA^aXNVbmRlU!i^$+&7KUgL4G#XjsGI2J(saPO)20V;GsHH@&8F^A8B@Gjn(jCFu zoU~b82#A{12KOexGT>yf$Uyj04Cl;xL%`;NeJZNuO+5{AdkT?*Uvuh?z;!iM_{YXV zj*6Lzw0o}jRehx1yQ=miX1S{_DEny+XHMYv;2yScN;H;*rynI9fEx6jDo2x2kr4`i zx^HS^X#eP z-U}E_cgP0tM;pw z7L@a1(-}vBm&_B2MmSa)3GNQ%zfgkLF>=bR-|#!?w?4=dzr=_AGSTX&p~+wY9G!?g zvv~BN(s%N;H+<`+sgjn(h?Nvw2>R(7M(lJy^zz-|_b;l-GGi)eecEgtN#|D=pMbcP zFWakQt+tQ6wvg=>f%J;B1@RFCSigusooHU)JB&`^C|MGU@_Iv-l_aOkfSAo7p z+m(R9(xQJ#@Ss(Bq*3US30pemE4181=oB+o3a{s%n$zLJ^$sqhPae(Fz?g5Wl3KEx4KO-($@+g~Z?YLB9h- zew|A~*gQQ?tD^GQ&e;T$y)iWyYocr7Z4lJf=G_@#4WuuO8+493b!6C$iP6T&x~@}f zsrm#^4&n>cpl?E>AP@5|Ty^I8>Q1cDQaz`4v&D~Ey%T3TYd3ME&?tb2O6wE>! 
zBxO6!zLJeGp}Qy}jy5J|DWBn===zU1d3dR+GS*zZ!dXUn{D2p{ixZ`0=!j6#$^E6XfUTAFYHC zEHGV-6IA`sQk0=H_I9GS7Iuoo0Um(>_TQ+g22DuXY{rB5ps8Cx(@kE5B=K1YfYYIN z5bcs{2>@XX*y6unmp8cz7P&H{*9W+Enz+ps^@C?VjXL&fPF?nk5`cdlW6 zawuYoNA-t2Y47xlk>fM;^Y%trSZxvh%3pses{3CQpYVLRIj@8fVD~Gu{gATQ_|I(Y zco{iRxPBvx`Kx9Cy)%9)bEw0?N-}$O{OFy3NX)D6J^|``-#wiC-AK+?1*hTDV%ZuQq$sd~pUe9gk(iRQFyqJvLN6?U2Or{ccvpO8dWkin!leSe&R2uLmSiV`ki zJ8cOlQ8iYdNgbEZ8H;na*B{YuBNM%5P2qsn4%gb?JKL1S(6URid)pK{c=)o8?}l(t zgUy_VK0;g_gkSa{up_c@ddLk&d#h~6H#-;82}0j1pPmfqGjWOJ3Uu+;U&`|$*15A% z5YY;<^*{KSi8xSfJFWOv^ZCyV%!Ibgs%L0n+So#_HGIYW1HJ~-+K!b!d= zU5Mt-*cBHvAR%CnGRjC7W*B;L7`P&7Vl*_d-}ih3hAhwesQfrMG_LJO1(W_Pbasb7 zI{dK5@7}VQ$K9{BlO^jGm-ga4bA>19HQl z06RKGk?x_%yDWy^_X|8FG34qG`D+)2>J9j-iUSt{jEk@jOaUA;I&evU>XCwnTXWor z>hyJYE(8UDIW#9I|Mc~xy=(od)UV?OBF7rksni||9)jW_!v|$~Bq$o++j|X${lOib zsL2Z@Sls?Y2R2XqXWg^sL`!h+p~>3*o!`=ovORL&nAXt22LjohslLk4qg2^|9~@oX zj9j`ZR=X^RAe>OUcRQAAHQlr7)xEyKmr<%AyFIwO+(UO*L*PCUOvt|fr#~(4=ai~k zww;L-w3DuVpz-!Ta7Fvsn+H-;IRCG+=|5x73m4Dp4{AczUv$B?E5jv^>j*DjDN2&1 zIAQ>LgmMREG6`{I*@A@CbmgB0N(?M9s0Njsowl`8Fufp`DD|I4RUyCBE# z5!*l{{ecG@v5?=oF&0+x$8OqqzwiJ=63Eu@f%)7QgkZTxC~{s~wPwtWOkmp$1z)qY zwF{GlGdo`TJ35x;&<;3yyjwY;%I|fz-((0*+0eYnk(*>^7`4yg@98{>2LRHPk#->g zPMub4BHG^zeL_jD;P}L_c4+eJMEyCyu${|y9S4plzvlCs?1gnKiV(C)f4Sb=yyHZt zM2s%~fjqscWrO%bW8n|!vjLG0kRvCbpiKa0xF9cWl0)}m9Xq`WEKoJVOO(NK>r21uMOQoW+Wo(gC~_Yr1DZ|= zUO)o_Ro2rzb}b6tDc3QcJo5Cll@fEq;WsP9DD-OAgwu%M^B(k_SIN>s5g*P4ab7TC zBShUmd}N~F-fL>G z|6QK~ga2r0FK=X}~L?tWxh6cnaG{5++b-u$kBO|hSeZ**To z$q-tcT*p`M!^D}Xw|;fhti00{XHCyh`PEYKe$l^H0?cbK&^;AT=8{#Un*B z3ai~1DBg$`-mS^@k#=cuv==auA%N8AoN4VJrFPDY%5DtiqN= zGV>1Cl#gW7&mS6Z@~v)WD2!@Af7IsqSN=bDamsp$<6mk<-T`JE4yBwoqS1oT*+PCa zG0js=T6vX}M>o2p`)z|}Lz)w(pkX5J@BQ`e5z_yih1-918uvDJMsN&W-n=!9iu6#<}2lresC5%<*KiXi!;Nfnsjc|Sf#GjN4}dL+sI1qrBzs;LgmnluCVk?R zI2owp)w1^k9AfIe8Cd-|zgtSWW@EzN-C?5QNCl$i4Chz9t7H3u5|tGWteWiig52IT zR}2V5U(kGo1z=C?mwHzp6$0k7W;0NlKs#%9iTvdXNc;d-#Ftbfq)J_=vJw-iJ0NEX zzx7=5o+wN$DNI5<&~DS~B2xXwmiHwl(N~Dp8f)yM 
z!X4T@oWu)f@*l;3iJnm8E9%#15DN^LQBNId$$8<{SvHl zTWH$Vx!h(KG#N^4qS$tq{TQ&+5pjysQT^E4GCw4Y_~oK~N}cy-=Clai3k%vy%eGi5 zt1CRhaz~>`-W(DCf_1x0f04swvPer{NA4_1@5YOTLMGqVe*p+$U7F8fiM4?fx5^%dW^ zCI6I^#nWb|vynY42RNZ}AaV&vibDD4PlH9l96O_`Slh3EuiTuQw_KUC8p9#4p`qg5 zmzzSNQdYrfcTc69ZYS+d3K}?S3^5sYTh?o%ba`>{_4uUMU1zR zF)8Wd#9YhueKZFo?=Ps0tG|m3bZz7XR}^xmcZ4BGU6oePNoYV9hKbzMc&mFpfYxhp z4=j4{GA7)3GOA{(a;P8nAC(39J^%Y0jW8-l1#>?Yp7`nY^xRQX_&zw#6&da47_&)* z0#hLKf4+JW%pW|$;s)rE+0_3#HJ`_|CYm{xh=lsr`8JhCc zC^d&9if)haP1k<)*N7+64$JN$a zgbf!;fAbfpkTl2|j^klY_&y9Qal@;Ox= zQ(q@YjkLhe&LA(Ea>{LBSd---@lkdWt6h#w!|1r-`Qh(=r5|hPOCeuIrZAdLir zq`gC5m<16`P?Z0mqf9jU_6B6@wE^XklrH$w8XJ%Zk4_i2)G)1mM7wm2Blee6=_}pe zJC%qRQ8f5lT78yAZymBIQX%5QTi03r_;=<1v(57&qCHo`Bl|d)=5}$8egD_2uH)+J zep_M&PCT_frDjwvwma>YJHbdHc{$>l%PM$YyYFytw^w7A=9Gzp&d|7(SK9=k>wj5H09dpGe~!)@l%X zkRnv{0bfLrY=vscy{4Cr$e+Aqgx54Dm`lKr)3Wvi(%VVV-T17%hp=0gK-+)osMYAT z_T8@xk?CsIy*ue5cxRVsD%b)IC$6dKSOEea{vISP)A+d_M!n!ZJ}|hKZ4@pZs`+o0 zo28-tvy_ev3P6;XWGc8+hDs^Ex(v_X*SxIfLbs56)8OT!HG9)%^H@a)MG`G;%{3XN4H9Cwzcl?I8& zjr{~5{_3{atL7-GcPETSNa{yVfc0vc5lpz3i<83Zhf{%c7@f2(%o>Ng`=?h3l0%mH zmM=K+X2JuAXtxr%fcW^1pQ^IVhi)ZC-a_Fk-HqU}q&H6A^&1Yih2eGfc>|xgk2#gY zHF51j3wt0y!U8VE4gQu6I02Bw7IM*W`do>JLsTtpK}j!Ys2U6{lik1MEbk@e9RKDf zdj$%y6(dXSe4z6nhK%}*Ijl-mFo)$)arf`Qb)3fk{m{9cT!bSQ=m+I8&B-q2E+}_B zg?GtzuVC&5C+nW#8U~fDH}4hOf|N~BRRO`g8y|BxhA!-iN>!7KzEN7-M-$hf=he~E zjalEfg9&z9_3}4g=!!bjItT0(dCbN}ISaG${M_TOB6CpZ11PZa*l!ujyoew(N1~yF zSX4q&^75NxLO7NyU$+sW^Fxd0rdv6n`o<#kJ?j$2v?l_HhAG{PG7O;Lt18yE1=$c< zl6B_Fnqubut@+(_FkOXSD4^=L3hvMj_ef)a3qe~;Of#Z_IXZnPlD_2P%b*^2qG{S- z@kR2Uo21DZuP)5B9w)hOh3nQ7I~Qa7N9`Gchg7&SQ^z|ED>pgk!x+qaYk3j}B{T7V z<7?ZfXxyIAJl3=*iSPj(C|S)vX=|aT{NL$pae;!A_5crOc&@a#v9$Zl*u(QX{gqw? 
zACwjrP}`EF>!$1pCDe_j|DlQM*gLEmfw@8qDku6dM%S&5btF(TDlv% zEkeB&#BnqSqDaKPw|eA{Y1C;S`%%5x5T4e|D;8=x+@H0Uq{aNaU?tC4k&f;@jOwxNu>33FWw{b#Ub2RPaAB+6D$Qd*U zw)wwxi7Hph>W`*hFA>y+E<`;-O|Wj=?fpr+rjESH)(KERZb}gYdURkzsqLUNJo_I_ z^V=8RRcv%TU`eQ23EI819|!;H`j4}A@5#pm2vWq{j|BT?HHvt|VcN1?U*_rRt(MhD zNWbsFHUvH@f}-rSCD(E$!TD)q3QotRWru=_Yv2%cD5*o9{N&DA!TNE%yEE|0NAr8~ zZay{j!g|2<$$zUq{u3R1E06q-hgO6Y&#MeIq2f|wsou3Q`{$Q*Q`T%&osmD=94Rw> ztoOU2gWKXR6-pE{@~rW^KJq5ni_g`^otZ2c`(g;2H3n5D$AjyWvaIJj^O)W5?P`^; zqo3<8+t2ov1nUO~mv;!}o3^8l4HTbTHu+8*zw_cu_3Mva+WZ z-mC6E;!s8u0bcIQ6oS!8ueo0WG)1ZmGm2XmMmP&SRQJ{INzDA6Z@aijVb?eN9zR1Z zZ+&_^PED~uzM{-*7I0-e{R>NVCiP!Iw`c6HHJ?KmhcPo}mwP(e+2TRthfj*oW&>K< zj$!a;A3zTQ%Hbb%=2)}O_QwBO;xNea8aBKa2uu<}ogUGO8=Vw;qRdEMEdTXBF0OA( z62eC6)97Xo%DOIs_y_1rN4$uVq(z)CC(VDwt4c(5M{Xh_$PS=TM4&KFIF~?lj_ZFt3<$-l!=WvelAx>jH(wJoUw`7}jmr*>XaGK? zCg#IF|6LXxiySy0Ch8GdAT^_mBt91lO1+LlML4wjJDcQQO2#!Fv?aI~??B}|@EAeG zP>JXy2_K;ikKJ5M+3L^Gfj0)6nKKgBm%T3`1JnO9`Zlomt%NzYOPC>zSWtm|cxLHk z@~hWS*J5fb|C?4bgq|}Co8B8>fEBI371|nOi{z{wvpUDQAUP8vAN&gYG6l;Jz(qmt|gJr zte28$a`*Thh*dp-wDC__);&Lc#a2*OC-xJJC{(C9G^hk`8h)Z`A=fHKitn$tCF?Si zEF8l)5r{~n!>u!uHGzjoiQfr)3Lvk_rs_41Zg!p4XtrVHm$^)#5RVSGx@1PZh^uq7+AWXMNohjsATycgjDxd8H* zIGCV&lx*qPy{aRtqtB;a4y)LvLHY!hX)?ab@PQaui}(Hb`7c^JAh8jhAlM-Dp>Ap+p44>w=g$o0C$F1p zy53&`wCh^pL;imPUytNAg#c%XH5}oA{U5w%>^TEevw|H2Sbf!M&-tE^o*YPZ;Y6aM zrvJc_#Tm^P9nG!j{UzL#BCnwcMUt3scPOsMP1pM27ye}JWJ^~x90<7XEyi<_Av>K% ziY}^I<1FXS({WygM=5E9M`o-6Vz&hy1TRf~0rYYupRdCwq@p`Kzi@#TIf=Ig;br?% zO@K_n>b%7UWhpB1K=@L`rB89~*kMENoNcE^GsbX!^mA4UIiqMmnW1=lNd@ncr~9!D z2>WCq#|TFMmmr`{?r?S5c=)ffKZnQ`RQN%bZ$i4MQ+>H7T+o$~)>p3rg&wg!N?1S2 z@3uW23JMY^64xARgR3xSC%RF|1v_bPorRVM#8I>)=#JZz)LYzxX}_q3HK+k|r6l#l z4PJk*UaCZqBjY<@m%7;1UtB~JJ4)L)@6Hel`c>P$I9HbvMl6tzr`{ivjvr-;S-}#p zwnwgIk(qv}j>3Hx`B-bQP5Fy*G<9>7d!W#?5Hql`+#rSeQOh{0wO$gI!6BW7g*8fi za3!#hKC;akmC0!Qjb@J(v|>!f9-$y8BFqQQA{5aV`aK57kh6kVw4~B}r*>A-?pY@W zYzfz%NC&lDtK=n)6ntfIIo-QV8PU}$Mjr-sK6u?_`4L7agabHW)A}6@AWw!0<3MSN8vblzy-5}ySx9$y 
z_GOC^Kbj9HH1R_3a`T@1{No|v$?A{?{8%>F{@I^rF3suAnB-pZ=zQ+=h8Gi)5DhQ>B=-w228pE>KkJ z@^^p3zKm@0g3Wfxq#K8e^#C)pgG2-Lt!n7v$YZjwG1&zR^EV><{$(ABicXgA3|D)bF#vZB@uYjH`)zmA-Npfa$u5zTREi1YkgsrnX9<8Fgr{4XY z8%0~g;WfIk9}SVC_*>ip(b(TA2?qo~+ho-BCxF#gVVcB#1sa(FwVAjasknyUgY%J zOLsaFAmk```mExEop0=^6Q0Os7lxuN0%LG$FknEwxuaw;wU_@Nj5{aTG2PfElqBCd<{NaTT75CnlRKWU}8g=o{Z?s@&SDN!W=)X z-@6q%mQ)U{nwuXcS>=h;yxM(Vly5RXwC3uMxlrOOUUx{*Vm+H!0)NCXs`!u&z4Pd? z`3LdCUc=$pVqx0??6Ap{CBkUhrO;$_dE z4gp8CcOjwTbUY~*dkTKR*S-zH0QM09BSCt5{*!k#lh1qipZW(U(Z%=cQ>bstoG&$4>fo<1-jzQ-XDVT4+!b)uG$6FAwl5`5FsPVh*p8<}Yf>EosH zmGq_DhCV{K(6cKQdGQQ6NXNnl!0fd^gRC$IdC9(yH(U1&&EUqIMo~#0An1**@O%I)$Fr?1Dk5NZ%2dr*% z{>#U_C1O@>!9x7@#YQ~mAh~F&wzPHPB9|c}w$O@nzqh03fd2h~Q-$h|J1er9why(A zQ4U33kpa4yg>kr&eRFi^;Dod__iYVQR3<`5KX9pQGQ#H)u?aA9P$f>qUicuNtqQA> zH)eU)A*kzXmQ7xS%Vn7Q1GA?+wBtReA#h9sqCBQ+VZYC6T*V0XGp~mHrT`kQeD zN5T;=?)#3F&nd|DSNn82Lr`)To>0V8eaq+3P#8yiQ?=h3etEMfG3*+u z1jFA)!$~4whP&|AblL3C%0urXPov(Se&Tn?PJ;n3aU>ZS@aT|wg&6&AeoA+<1IADb zP88UnUq|wC7JexvF=+$B+>_Q$0j=zhIa_uebBm{;m7kzY^(k8clkq&Y7Zf~yFw6*4 zOhXL70ZRx1o_{EdLCYc#NKGq@ehaV_`6#^O^LvAlS77S-0TmO@Zs1FwS*yA}iy_e6 z$^lBs+yJF_^r8`%(tu18lDB?v8u4qs?&c|N(LZcNWD4=FH{JldhK+L9=-bCj&+uu{ zss&hkAa<~j7-y9Dx_2NwPReT=G{rQ+@J5Uy2O^9{5V=@3@PoYCHKy?TtF+s9kulAh zZsPDw8Rfib?_{(n=Ej^YUZ~V3*LL-Dqcg8Oi^4LWyEv*r-}lqTeyibVrF~4ZvGXBl z(dC>J@%p6IkGk^7mgA$%gsooO6$gBjX|(-e3^UqyEQf1MeZq`Fx@E7p#bXQNq#(@4 ztxlOw|C)jPj+owaNSEqQKs3*&)IS9*Ge67Qm2T{g^#>dU7uLH&#<&4Ld&@Qj!dl;I z#Egy9v~}Vad7222casgqLClvF?wnf^nnc zG_dC&LYGiRR~y;tT__l0=|-Nn#j_C7^o}e$!={*Ij^8O|>-LxIebZP2+31FwQTFh4 zjd)=HyWWp)DcGdGPMhC)tj@ZY#7=JFIqv>KIMEHXEkjcl%9mk&#^|5H_&Wb(0mOB5 z$Bd`vcTLWWS6}#eh$k!=QPSUG>ie>g+ULQP`k#xcQL+qy_YK`UJ=l^O=J>73s>I?rlqLhaPA*pAF z|Gf`S`yo;=hZGi6>G9PVu*+LhUk+hdo@%+gKNz>6^rx98j~u<)d5md914Kz9kP?1= z`A{vBesNK44>>4ygS}olpxr>MP%8L-5ZBk=e{C+QP++H6qcwGE|G?4lui`TtQDFcE z|GOF=?w!f6bbOa)$p%>Rte&24^>@#>+XB}(nZg$jTw=6&UP7-0RkHIhWRUAy4`kG+ zXjcL;l+$ 
zo*aDzQGoZw?Pb{QJMMLx%w*zi6*>XF(v$Q;sL(w*0I}jo;?$s3E(OPY%_W+f}~HLtN}BML00xApBy$C%NvBdFCRB{i(44 zJ&9|5c2~aJ1>$QnT7EpPVcn{`FE^2H03}Xy+>9zZAb_Eys{$#-Bq*- zD-QT|UuYz5s24ranWg^Lmo0H9|I#$4JBHu0#*9DKSD#o~)U~#XvEH z{F1l_nQ)vQk2G(E~-dwB$~+bZ4)_Ogr??)vbC zl-{OP^MX;117F>dC$;^sXN{Gv%CCjo_%CF{m-Cq$Q|ib_!VBrTJ@sZ8$-gfANW#d# z7(;=Mn7itrKeI+zuG1t~J+o^5YP!ZOCR9srPSX$bFD7O1M08&k`H?T>kEp7-appcq za2{&UQU3WskeiaKW0{8cGigVjMj|oreceCG3%81m(;EprZ#9zK5)t-PYGLlpG&1}L z^VQj|We%%mfNZ~Fo|vY19OZGmMSi@lFKfS7cbvQ?Ycgq&eQs*@t8v!dq2WJBm@?PbYHymLeN%|OdT-@DD? z3F_E}mFk{C7Id21U&DYyRfE*U7~e|7_RYNEbWyPECrIQue7*A}`tq;&-Yl6`(Y5R0 zJS2YSVt$iiOlB~tzESW=d@OwF*{Wse&W*l{CxWS*|71XGuU|+@3?!8BLKHCoI8a6d z$4u1VMC068?nUxB+p#dynR8)|J$}JkkZWJnN0<{EO}_t$=XyM+u8p`y*OK&2t1g5p|0APph-lfG$AzS!w0entGPErgV{+Ez^0x*10hs^`J(v78 z|5t75oA{w+2$|ZUmeN+Y7dEk6!Pf1@GP?nBe7pYCJir7*>Go%0F_mD0Utxy4Xj&ASR`H(+UMk)8- ztuDnWDpA0KSBqRlqRRh%b}J5#gWs#JtsPnbu97Rh@$(~Mb;O=Bxh`}(W8SZnScHgo7-n83}SS9r^zNQyB;*4JvW8*Yp zWWpCh$leg+_%hGYu902@BJ5{M_?oap(-^uoCQ>r`^gJA_`CmkhHRq!!lVNi6cwYjC zLW{rJILb$_3^&=cjXR}LB6Xc{l4LUjMUJWDZkfOjp?S@(aU0-0$_w2Q&Ts+5QFkhl zKa{@=6eAB^ksvnkAb1{M{?|u)s7MX%wTn9y^y zB44bPjj8^QyKKeJqg`d9BF!!(dt!V+&P9{ps(Gc%x|L}EIb0aQx~32H(br@?Qs?zl z$#G{FlTI@WRgGbwZllV!Lh_beomRv>Cel4;gYi{%7wehCay9x#?3$kLei_BE3gOhhjkauWE!>8u|An(McRA}5ib>Tr`S<4}roOEoM zw>C}uR-c7}c5hL7i}AFV`L1p6(32&XsH#8U3_3i~pRfFh5F3v0Q6QV?)?_)`Ztc1>Z)CWr?>Fa6VUeA6FEwVVL&rUcv%qU1F1^c zs?j(rgr|bd2_VOt`Cu!}#&WC;bn@%qSt2|0zos4_3lzRG}enRO+$02_bMhw!DN0C?zD|<51p#K?E`GHEh+uIvvW*^#h?}+e1wF1ILT7 zjnwj~HkNb8JCZ8`S`BR(=MXZ*(1}!hu;3U!amcG(m!wW5a1QUKUX~aBPMaRNsNfPi z7v&RAT`yt84n0?LjBlj2lUSLa`iymyG8U>BiATM7H>^J`Q60M?G=_aBhOk18O#VCZ zKuN3nL5EVGklh9oQL-2FdT(JydKbB!stbGXf z#I18p4K1kkKtUbgrVHIV?VAV0$SF#kL!c;vxI5E4QX^TtR zG!g=uk(U}9xo?YY=%!+Tl8zEdavAvtu1osth|R}L2QbZxQrH2(-6yq{E- zKy=6>4gOKp@-JNEoq#-Y`|T%>?#FxNkN3Cmyx7t@o`gYhBwlio+d$BdK`JDZ5K~DG zqL>Y?{OUBG`wT1mxbKA110-jS^QaE5?_mAuR0ITJKU1`Ic?RBoTYb)W{$cmkvCa}@ z8Y_kMJ1djYu4(_~3=%EGBFDv=I#}m0mgJ2;%2INhVnV;~Hsd!F6wb&mrSb7y)Vb>h zSlm)9f&hqs&ztlzK3VR 
z;F8Bg#};HVgwY6usKJC(W8f!#CGN`(b3c+OOn*bGkcV3yDuD@{*t9PdPlmsRw~=eG zfMmmUQ)Oe3gj?|vlyNt_B;hro7k;0AVR_f@ghc!$$0A~F1C}!X+lIz<(BGj)m1Pvg z?vV4iJxsF7rD<`4(~~GTM#M%^@iUzXzfKszzOL%h>?^C~eetl$VU00i)|VkhUyOB; z*u(m+YF*pt(2~QPLPKt)|Kxi~hnnTuV)_vn1Gr-wI5AY^aK*Yo#@mb(T<~df{vr<> z>*4FS$>4yL2!Tw>qs+vw7ft{a^>rbMC7Y}dVnu+@%{Q*P>xe@azvJv2&xz$iT|z8O zLtVh(MstdsS6%RZ)}|VK9b)hUgl-@M7D$9R7#jzWn*eBEN`}h6(sRQo-#9Hbl}$`> ze`o35<*+%8I2<&U1C;IGmc}TB7m{@Jets0Pr)48V2=~p%YASdH=d^m!e_^Lv+gwG2 zMN@xw`!Ue|4_ogToLSgz>%OsV+fF*RZKq?~?AX>D+a24s+3DC$$F_F9jkEUIwf@du zvu4#CbwAG-_ci9BGA4?GHu09gE`+krT0eU<@C!wfM?*oK|0u%5f~XG_WxJ{S$!B3Gw<{6x0^S_?@fi6Y8z~2Y0T!?gR8r7CE?y9T5&SVL}ZX zWl|e~FM;P3%h~2gX{-(SOaULiri#WDYIy&qS@j);bWInfLxHt5UB%*F^xgqMLVIRE zR~RX$;Ytr-{xu-xO*$rT5zk}M(Kcxs+*79Rq4Otmba>OE%Zt>3(6HPNoFW})06B01 z+13dA{e1t!gYvCOENIBTbpPF4@YwRlNB;{r1mj=ZUs0_^#Q|K6P&j8VYTG0Aqud8U za|~UcX!|$}N09SIx|0ngtC~Tuoa%ohTwd3YN1iOf^}{BJHRS44qFt)Ec$C#uJPif7 z16@srp+LS|69{X{-0L1h7xN>Zu}2eqDd=Azb9ra>;7+J#(%!F5T3w{yN!r&;?MA#z zgQaK+W=Ah>u%E0>uMp0bCSN`zOI(g;jL)uUls7wM(irvR?AHFV0|WV}z855(tC;zy zPz}gwX&^@4+89CV+yY$(; zQ(y7^ZwS}_P89g@{U=X=C?kb&Ts$^mW>Y*-9W9slL#>Lg;qc7OKsr*@B17=a%BikC zVdFsS#eGts{w5YEZ2~1L&U4_$kq2b#ev^)8Bryg#$+5wjov86GC9j%A&R}&dA`}8_VE4Y+eF1&P8a_QezzIj^e6q#|!GrEtq9 zg5VMN;~BAqK!kR2g&y_(f{twM7|=r*Es+Pnm%LWs1IM;+6R29N@GP?@3lwtff)&*Vm@)i7;8?isVM++~ z6?m$8Z#k>QO~k}gbsv*f3!?{5EK(1}AKK1JY8k7_Kbc>y{zzOVga>@*ISo8<|>{H)RQrX-o#{k}($X`THP!1DYw}C6OtB zI%3XUGrkLv9A0XwL^Y^{h=b>@9nhVZ%VTkgA0rBx-=JA3MZT zd5YY{zslFW$Ivw4uygJb)2u4x{v?=;-wnm^X8449a3T=L%sA^FE-r1s1$*z%r)y+HrmQ_W!KAM$b5s?jkZ7!n z1k}*w^e;;d97@y!1m+3%ON~%I-}jXcZ~Re61al`zN2}7ZhI~KveQ-Nf;7)f^ItuRv zT$LUPKeHy#$TCvMV8%VcSELsbvt)vDQ+OcD&UEu~RB-LLV~GHnyq~OKve#GM^-$1$ zG=$&y0>-dif@QF3Ttd_8w&Rj2)?!`Ko{VV^YOpQ7a1Edy9dpo zIaked^CU#06J&Xj@pQdF{HaCbgz(SVcvM*|GB!v2V2Pv9V+U&Ij%xE=cH&dNx0zXr z8m`hmQiX+uxDkkh$7>Q*T$3A4G820$6qdU~66C~Y4O;5vL@0p0x@|BDE-CtRxvA8@ zWow>|n$Xa3a=RX26TnrnEy3@FEpK`T1H^)$kl$Ntz9|uIIb#3BrUvzM<$hW35BDO0 
z9rL{t-UjhH%_foy#M5?oVP^^Qx%l-NZG^M^>HPmOnE%(U>Gk0MoI5>V-I~WJd%Uzc z>^CS^OP=*2i9`_vNLbw_{-VMb@%xP{wzB`*yorxj!u^iQ>1%SFwdW49ZKs6= z?u5J1G!+%SA||ttK}Iw6#hgZ=a{JyROT{OFE?T`?g6x1=1?A6lo6d)z>}`h;v6BCQ zA4kgBdETWu|7s@FSk#!*9RIoV(^2ko;Z6|G)Sq>t@i6IU<&OLg4IGHMRsKmZm!OO& z!3;M=7NDayO72-<%-JFZtRxZo_cTmy)YRR`Egh0Eex$fr11P|RTj+HLAXo|l-V8A& zv*Tkn02#>`k-F{n0dXJ)h}j|p1U?)zz`vX|KbZ!7S$e_tdHH^*-;r%3dMUY=UIx|< zyWTXPQ|F1;8~dcE-S_k7!nrJErOiy7`qcxbJ0>i*{}2lhYl2-f?{|_-RFeKHxlvgy zzy<2(k*KbL-b%R)jdYnQC9ckJC-~k_AO{xsM(-1W);)2Ip+HRSvxE9YJ5H-ADPP-+ zHFNFqt)dFM`(ih0b)-U#SJhmX0|HibE1?b^X_YoSZ~@#ywctYB!VV6AzmL10^6nJG zsW6}+DPYM&f9{{~IELQG_}rCK4}8fm`Z=z7yc&&iIc#0tV2Vl;R1ka!(e>g~=NY#WxGNePlVeAhCRVTEUsP+iL7Q4_(s|iezSOq7UiOxFoNDnH zD4I`H)O|HZ^rI69?Q>G(fD;o?6>8F}W$L^`pz4S=AAtO2fEKKmbWiMb;=d>+ijCo) z)SO4qMQ)3ZGlvQ9tIc;Yy3lYyZop}fa>hdHZ&O!8^gJ-t{RGZZ0JK>ucd@dgfs#Hg z*zy^MDLOAG-_j*mwE<9jwNoK#vRab1vv6 zN{QG@13X~~ZfNHcNGrb31>Vl+femf5PgQ0BX>#B6@RsZlKp>Z0Ik(JO1@)LNIxBLv zm5dJmVGcd^Do0px5G8d@A>0vhbdfw%=!1@DPz(4YEYuFG54|;(Z{gm#1dXph!w5u( zxQgaVBR0Kig~m3vEs@T+R$D0vaK|+LT#+$Ggek@~E$jG zhS^HEILRu(qCHUvK7Yqq3S)jAxn4sFDN$R|2JL9YZmEHgOD#&rt`X6 zXg{Ye2e9%iFvW4BO9@T}eWl`_9Tt;EbNl+BhRkk$^9~K#iOJ3OQ2*E%JKIFlD zzOyjC?yk)OwS6^VfiK?mT~LZCo)eP*7*qBjjo0Z`9^T z_F}|)kMLI^!OiNeh^8mBQBK>sarvY8;mi_@?qEHmbN6AUfB+L5Eu!x+$6=UE1SK(p6qQ<)D}{n(J3| zDO6byu;H{bo!!h<(m8M0kW?G1C;uSN})b++XspiUwU&`z_!`p|jH?BGA!v zQm4Q{bg!^FO`D2q4Y!==${qZwH-Kp9CEMjTUceumOFa)FTT`7n+My=s$g}z#&oqXR zyk9uq!~4~5HC6^_Ao&45PesV=Skr%_IS`*OvK8d|UEBNFtuG8}c5h9rvIJ8AGH_<;)zni!jH7j4`5h*$|77j+kA-L( zB6f|ZXDx#1i(OJ3TLG*~ znmY#RagJ)-*8WRhT-4t5cpdUB0;_pfJ|OWVt?01mo_!DVA62c1Z6TM#z7h~f;S)_$EXS+~gim0L~L#G<+|8q5>v?d<-%FH|y!0o&<0Aq~6+zR{Q zAkr*J*wA_mV)(=Cs1lms8NH4t5>*L#yv>B_ECS?<_GBld#rfPxt}GPU{+Zv zhy+NsjX`MsR3x=|r!bB3ChiG4c0h)GNG4Zvk}D)d6jgpGhHsNU7n%)1--svI0^N-) zf@a&FiTSK~ii}9qm7;+NlH=D#LqffRODI3u1GQ@X3TIcU+}dgW3DW2-D|Mt?5f)My zX@pI%3-TWM?6mPdqYA~Ej)DF%%2eq#hKA8l6~-)%s7oJ_c=v*pa-}-hZDQ=1{l<&x 
ze858+sSi4*7i^tvtLp)Jv!q`m|7hqY&r{PVpU~7aN$RgV)@%ek+w1ZHnwz7AKG`?mlHK@Edwcv$Il=PI?L3ORI{)1Wf^Y)KmET`4q?a9wq0jYN0`JW}khQzCjaB9OA6o!v$;2vc4rYJY%OShe(gsv&7<{t(9oLCjTXl|6dkW zZ~&Sw89clL@wtDJs5_)%D)W71Al|lT*aM-D=V2eu;rb7JK;o|V!_R$#u2H`mL3aeD zg8FmtC-&yN-Lo|pfm6zTgv7 zfRz^jc6{*y9@nhUQA?IxiVt=t{OVK7t^1HHK;lX*>6(L~iWb-_TsSeiBzZKL+|7MW z;K4qYper7B?|MO*^f5A-`m-GsF5oL?i^MpXg;};6dymV&EjDqXK?kNW8~o!!$!Q2z zbZ`gMbV;>`Y_>w~GpHhw$|r0L+&~Ja*%8RsT0J-2ldfI{FU$+V_nUmBqK(On4?2T{ z@Pmal;BN}V?NfR@#XO6yIaJb*azRBI7GG!Ik|8RCv&y-%^(k)4Z+eKaAH zum*%!V_{3j>OwKa`CB^~o$qaOv82R#^vt(8zMOnKck2dunLP$CL9ceP7x&PZ)q6;&GvWP>-F}a$&Ip1(-xAgq6(@08BRb(GM6bZyzOs6U6~H6P1u9WI*t0&!E69Vkp{ao5Lc!$r6C+ z;7>oc;chDU0fZ%2RQ+DAFW&N+`_OZ(-(m|LWyB?8^pB7UmFd5~+1Y)p zZ#XmozNVrFu(^;BB zm_nS2eAIZz8?Eau!Ha-WHsk_Y6sCAbOj|{7>={Jwduf5N(eqg}dl1HySc{Twi;FTM z6Zp60PitzS1aS_CeOqutvZo#V6U*apLxD86(HKM-*TdIjfmn{Edu8oTjy+~X9mEux z`wPAuURMfgybiYZE=je5hwIY0de4PiuJB4XyE+nrWE3u3zUeF9Y4+zH*401!T~ajs zzYyz#lE5t#6{&DA4*t}cZ;CEotTm(GI()gXYWyZ;3B+)?XV!_XXy%)$Y~4k2Ue@Ql zz!l$*ZI5dVIsjj?^+W85fkjm3U-o4CVES)#?H70q>Gd@#z66*r_SbitNcc?!pRNgM z+Z|58<@_n@3sOL_4%u}FpHa8&WG}7%$qR2vePy>tFAY zqMs0=8KH2?hsaH{@S`;z?vsfPoR-}RL13`@m|Q6g+o5Fx;0I$b?4W@l=l9AGSi-Mz zS6BFyFW5>!-^H3NG{N_u~Bl^GbOsUHvfar}-6(Tagv|6U%J~b%>LdN(Frp zh;STMt0ci~n!SDMZPzYFO}P0X#or*hvB-*m+)SgouWV)KRa)^SO8ew*uhmS6j`Dn} zP1l)5JHt-0ZFT%^3{sv^R{XFZ)eAr15WE+Kc8kU=<2F7W$*k>3Ny?;u4CAXFMfdY^ z2vTY>I}znXdHy8T3=GEzKB^u!xCf9NeiKCUqgG5^xSrg%MSNptd}khs=Scz_ve6~u zLQa~B!pQ7iVAPS0Ny2K?}Hhmce8z9W=Vdg3Y{`om!P=Zv!N%Z{WPJH9S8&A+vPDP9{;`UZL% za?6k5vD=Eyb?>~{>~Th$KRjsPolvACbjhSrt0l$+pD@s20yQr}G{Y+=9^)hq=a;V+ zjm%KYzBjVQ7y`Od>r;D3;gx)WNJ!apt-opJ1CO74B}izi8V+&<3ap7t>;n$;Xk#|C z>v!RA8POW_F|J8q#LRn*raWgdUD1TiWz+h&ggcj}o1z9jmE z9*}F0%r+~C^(2BVW!y8Yv!Ngo&W$kwh-h_ewe035#FU8HD-2~wy2Z)PZQ+3BS27Ss zuY6MV4^AL7!R(ok8B$dDBkV>|ECTMf->-37z! 
zgf5X7a-zODyEmZmxX9GW_JufYPDRDIv6oh!MDM3;2;gcsro;1`1gOW7JT&9wl*^!Z z#n8Ff+Pn%8zQ@Qk?TKX-4AhE?ra$+CTXr3Q{MnosC*0Q!IE6#7C2iN%RZlIL+3B5-WE(N~dAdbUdXU z>xxXMDKyQ!$yqs_TWbbc=8^&4WZt=$p;v%Fy>$FQ9=kX_C*+{axGq&~HPuS!guixD zCX}!-Rx#qTr)j+A7q7(a4Hs?l)_mZHEM|gPHKh7!*7GT<(bDAI_lo%6cprqoZ#MTR zzb6r3rKlPg%z;ew=;ZQH(0S3<~_q|YTCA`V=2Pz(Tr86z;LcAZ64d|R5l&};i{WnY@ z9;$6Te_aT)o24)TgYZzDhG<)hRM8V5xxR-iGr)+PdP_1?Qo^d^U2=i z>)*I=e$FgXp^TaA1g00*?}a$?t%-PI{Cq}!F7Vas{{&O);+43Ob19+bd`XGDM+RTk_EuW9qI ze4r`iJJm`J=5UaYHsjym-(x~gJqUWJ39(yEda2;}i8cD&rq471#Sq|im!|VJc$&g@ zf;iyeCY#;D-6YOF`?RS zCN^6Qf)MVS<|}6)sZU|EeOdu0;Ayl%;GbwNgNSdv2ZExoPX@_An;X>I5j6;&V@<3v zBF1;|jv=nex-MuKNTJf0c!xY(n7Cep4;lN^Y@wFA>i4M|uktk?kk$1v=!oJUYQD_S z8)#eyWb#LJ3`#hlh4eMl`WrT-GG)P>71WkfdIn}n(Qy8zUl81ea3XLR(glFRwZG8L zr63X2j3In*4XrfX<`tU*e9zyVZgN2^?&xE|_YdB~pnzh|NPM!sWbHp)rPLPexRCp} zJF|Y;2ttF`Ab|KHVib$#Z!1;&nt-bk9w?n(p8^)43k&9jCna2eI|wg^{P0EK9TNR_ zpWjUI{w_W^1jqTcp)DZbI*_C(E!ZvT6KDAx>{#u{huJhkD$H?aXz#s@b2bXiA|Pr+ zRaKqdsOP81`~+p6EbOdUrKE!f)2`0-XVH^9B5Ms9N!D+GbeBeOMb^~1icT}J!L6OG zspQiPDmc&vwr^pnVId_AtRxQE23G*y&Yc=CHLnjd6y$>*6TuA zEs^ggE3(~Oo$AZ&MQ^A>#&7KF_fNg#J3O@xwQ9JG(U5kcThST6R+S0724Uew+d!nTd~}+O+D2vADXJ ztpF|IHKfxi=9(+PpeJ2}(w{JHbtsk8Rj1Zz|YJO*ANiRj$im z-;Z7Xn|TR|8I-5r2wfM5SuL$*)%hQ0Bg{vAJ2W+$QvzI~LSh18J6VY`LW7~?tMDp> zK}?=7X_Umm1qpm6Z*KXri*Um`m;hwAp7$i%CmCLxsDJPS3!! 
zzK(Ga{NulHzu1P!`Y%{DRo&@M_{KHOq1ln2{~&}-Y-DEIgxp8rGO7}5I2r`Kd-2El zXa%p{F`wbRArR5pl=O)vzAXYS!7X{?0Xj3Y^Rzu3HVOT>Qz|0`cjkPE>xUP2CrPs* zEcbAdS|DTZzjvM~Cp>Qg>|6^JAv4g?`F(WGXNM9 zN=avh$z|#7v7iXA`Qe+mWOTRveNBh7>Re3<_AUwi@*Lu3C}Z^N>X`4nk7;M@B^C4N z6x?Jj<4O?Z^ak^It-+!Ib%NMFttyLl$%G6f?(9CVYo?d4l{ZA@$_bYUq@<6#@JxH#W)Y+rd0kWjbd6z1Wk7Dsv6=(E z|NiBQ1nWH3LCR)UjNV6YScKJ(F$u$JZ2s5C#ts%IaN1=;qqXbXON$$;xmw6Z;bGChMvQM4YRY z%Sng!q{DRmiLSZ>taWr4DEfD;O<#&JJI0AX=Px z7^8&QjexLQ+GjMSR5S3#6vRu#GL-~7x`C;PU8PjPv&RAsviQ+;f}>lOP)GKt&JlqZQ{bnzQiL%GcM_(T z+BWX}YqLq@RC$4!@R>go@kPVn`zb5yymsI*$c&FfeU%dZOw%)Yzs4MUL z4p_7cMoyoQS2oW!JGhO$jy%Br!Ao_2w4x%hbds74W54$QkIb>K5@kXQ{s{*pt+?oD^!}3|C#M z96$l8@?+lGxT%+o6&H!Lo)o6{^oBHjHFi4EtAh7OHlQ9nBHA|s4`kz1iTC~I6p`UJ z7vZc}tSGwg*6=IU!!$x2&LUi;UR)XCOB$JmyLBYRTaU_yF$GIv{weC=qj$BSd^f?wI4d5i%bRHpw6B%=Ciq_aYOUg5C_msx8%ZlV932m?7SlLr{c@Q zWpB6e)orhTgD7Pd57llUs!Cz4azD^L?c_rL)@{M(U6%IQ1dHqhb3xu_UX73ivN1P} z5kbKJMf5bsyyL)M6B<1u-%v{h9c%aZjCt7x};OrqnefigglfQV?eFz6f8q? z-X5<3O@DiP4%S9fg^>~yv!An7D6sQ)jgz*HgdL{zcfqZd*~r^@pp^LUW}m+|iYEbB z`Q5Z6!I<(2j%0mIa0%m*%v8xx=A<;rwvH^ViE+9*DqZGsTTE3Wrox1WM|a{5=5WJ&O46)jbXRL35>4S(@M%#qi4r~wj1mt zB9QgT)hKM#Ajw;)+g74*B!9t;D)TVAjml-*Pv4zbI4kmjtc5#$T(F)ql3p@K)+aM> z=Uh@Qv3`^sVEm}Gdjx+n%JyrHyyB-qW6t+U{LV?Od+WUYBS&6lJMsT$(0_?i;9J-C zF!m=Dt@vA$sJ9eN7Z*bic0mQHZFGHCU!@g*ZU@sIZ>dTI9hH;(u`MO$B4-(Ty^ zw{5l3>h9~sL<amP6BC9QWO2 zlpDqpzl$myX$MXLKIKMI$3(KNt*&0@NIa)_?P4}&0{^qd;-52#9}BaVmv!2_Nmp_^ z3{c!i;Vsg@;JNpo#xj(mxST~_S#$#xYEV&o8kUB4p5c=zAh)<17)p7s>unp$7mg>L zUyQ8f7q(NuR;_&tZQ1R@LIB2h;n-|XcI-@0%N#t=m4)M?v_zKk@WyUuiLYbVV(ML3 zlQ0F4yNHa(E!s7?3=HSU81!BXb~Kf2!W^n-1BQtNl@tDzEQ|Z8-J1bO)H?WqLjU-4 zpbKz9>_7lyNNf)?q)ma`YLa1rz?6ahSeMQ#Bo@GlKSq#vMs7aM#mOH2I|_P*_UQi zWqNgDa7N3K_P*or3ZH4|QF6NdddGE-bkKy$EU$dlh*`rh;|QHn=v%R43F9 zyICby)iZXD2F&w;R)i-_;s>`z`?iso^yn9a2&R8w$2j&~X$KNC-z3`C4dPe1nFoOv zCFo9|2@ACuVdS=oj(}GOZP2;>Z>nXPkyC;uVK_~ao3-OScW*cHgAiz^!p!Lh;g`*| zYKAVoBVtok3(*;MkUQP3ZVDKPPV9na&)**vfL9VhPKz04}8o~UA;qAwReTp8ETYd>MxlGkem%i>Z+*7o( 
zOnT6Am`acZYNLM5-(fk{yOIO)^#A8T>InV0JB{M+K;Mqb)mjQmg|vXwMoe~z+^ zRmu*3-G2~t)~#{Usmmq!5(ftkX^5bcSlmt`%e#q|EJEIU z#1GhBoir_dhoTE+zx(@CDEyzS7OPJ?EkQdvS9W`AKM5e|`JS^K$}m*K~`8o_i5&*B7ea*L2(6I;sH&6MNI=-Tv@x{@Se&3I!q%VcQtvCw9m{XLK0IuHIx+Z+MjIn#QycKHjn}TxmmDupbmA&_@>wc;9 z?$s=HY7%9(|J>2~>?Nm+K95|;onTh{mpTjmx#BJi&}2EjBtMK`5`<1_ugTuvh-6gB znN(x-Jfb9q0)sz{L~MNhUNr4&$}oqbMLtqn+;d_UhY-A>nHF#GSL-hfrM7?9;%eE7 z0lcDN1iv8NxCB+6WR~1P-tzPqPfHEYDMq+GBu>ga4$W* z;YA&@Ycwh`9>03ur}it_GQ&Uxp!j|(EB~QeF|~8x)+>)Uh4pPhU{urO!a?a;dl>ek z-`}@f33+DO@d`x?H!h*`;6VT=Z`aA{js4 zsO8&mP+>g-@4{k_zw2>(|354M>MUH#JnVN;_2kSmjf<>h`_*A`^h@sQs(teF9$Ept z)dCzT{e5oS@VMDLL89BVNF%;LCBT?Bcb(sCe| zzE5tdkio9FJ>5Lx^Y5KzBf-(t3r2zkIMVJEx47NAO{-AwCMz#fdB9db+U#Gyibmxv zC1K>xS!@~as`6abYfjLMU~CC~?l}TXaOlu;PfkjRlV{{cP$G%^7eG*^c=||zCrE{T+by+7zMf|X}R;Bg%0<8xYctuejb1mPp0 zB5N_kl(IYf6gKi1;cRFy(Y$7^_A4M9lt1c;#e!E z0h2~;xN89SXTluqijxWrpRF`cMEBp$G}ax%%@~2LMQlU7sKRNgHgR*JxuV{j6s-Oe zXAo4FgjW>^lk*IJfnHq{=)X9AjP4dh1~UN=yp?H)@PS~BigF-_*0-(CMFlabicMH( zo#aFn5K}PbI7lEJA#<|wlY@gWG!dl27TK#LsSZSJlLJ8no3h%H!QE*$E%uL%rlJR| z!9^MQnN!16DF9UU>7Q7CE%(jC6S=+0j2{E(c(<1^)yfC`_$H7L!1dssvW;{5(US;n z9`4^~Q|-OiK&*IYLw9tBbzXtV=_q^UQUh7jf39;DOf7Uk7*)q9NH)6QQ`8rk9W3O? 
zUnotBjK&%at_jP?)4PT?5yhKcob%*F$~_#z5k0j_DI7oZ8>;Sp@(dSOXBn*%=yN9m z8<&-wpFz9I_K>67-IpPIYpL?Hq(g(kLlo)YIxtlNk z3KBJLB-~=F?wWU9s~|V{n)5aEPnE%Sh}YIIV4OxTR9VcXUkk;Dqz5H07h|?B`cjQS zn?5Z`eCYEdBDN2nIb+NTA*WD9f=%S3$9eu+mp_#&?L7Cv#PUq4GjUlKIN?no5r+H3 zk+xm4E;yW#{E+vQ7rbxZRL&2@_-}j>>O+#~5tV78)Kd?(($$iDN70hmeD|dv39fXv zbN-vd7LgXjcR4p4l^hjUhf-JGSVcITqKV|AeHZ!-{JUM(IzhAR@2``IGuWoQGtR>n zHik931581wrO`8Z>{^rDzDfSvP`B@BH}dqm0(APUiohsvXBhi*zLvm7P>p#U8A@2InOhBNgk;KL1jsnGb<3tCU`{6#;k>Otckl{Q+A9!Jui5Y!^;i5bO!B2U^A@pAgAl zB2nWWC~MprIwm_R9m&XMNsiz)Gbzkrx9H2MkCoW&RVwEB7bkjHq;T&3SleHvc{QD3 zqRJ;)OZ!gng>LEY*N$rT0j3fQXqLZ19{Kj(8~~!|ma0kxeU6%Gl~R@5;iaN!{WBYw zUl?zae3%X3gSt?TUO|}~8YX7!AB(N5ss%{A9o_|yZfa9q!8-0Oj7)@ab6!ZzS~SfH zB0Qgyv-%~m^R|cM_UTzGG0A^|CZGO;5Z>tlu5!4UTU&k;y=yJ5(!(afu5oDp{e8@L!Rp?mLmZhC;W`edMft|a?}j4~ zO-UL~-ml{=d5`co01uRH9pD-cJ7$|v?zpr_sN(TIrTu=KlrGAgoI%MN67d#6u4k-6 zuW=MFzLG6%Z9o6O_`;!1F62k}`J<}B>g?tcvn}SbmX)?a`j_uOb{flPli;m;2zS40 zeMp1(a~&22S{OdmYB;_H2Aj?;cHbF)Jjn3W&>UzU>T6fnK^!+sNWB`X&v;cdh;&nR zCO>Mb9!PF6A+n@$~_e;fX2LUwb@MiBCdx zO5;Y-b_5TwsPdV+!8D~|1)p9sAHr2s-Q%zjY8}O9b%w||tMT%-nT-H@)i5JBa~en` zGh&i9_VV_zb?3HFT~t}BqJlmL9W|J1hDRMp8v+pY(5`ag9r_o(*3(Q(xZGTRZ`yU{ zalD&dH|nQ+KCmT<1L8buG!lG%t3PJgQSU~EwG889!)hC*e=kQB&I>Hz%yn(FCDAc~ z;>IVtBBZvq-a!L(f3mu0*GZ0Of3T!UKm*-nKyzisdB)#O!$GU6*$>kBco|WyQ|z6& zZ?szGVP!<6GB19VkC6X0JA~#LFPL+$mJ8 z!{fC{*7A|-=r2_qK`D=HBG7n$Bj~(({DmkoR@4zn)VJh-)vd3aZnclq4wCLcc?PjR-Xq>GNtTjP#F7dtkfcA5qo~J$$ z1hyagosS&Zma+Hrf$sahQcrK&(gAXX6^6K9%WI}E0q^>^7SyCDWE`?BQ(I-FIe)Hj zX802hM)(ZeZ9IBG$TN9L<_kV(0*%I1wd5eeDHbuqNQ^e}hKNLChn1_~4Z`%}cEf6U zx|TRlo_ik=kBKgimX z?jiHoGVnBJaArn|RQq;PxZmzr8Pw6GEGE&S0O07=G`ps<8WF}U$hXNGaQrdyvUIt8 ztuLZSL9WxlKv--{W`D;b_U2_GRmMpPwb^FSVsSOt)6}gM6wd7V48D|IoZ3OTbtG_p zZTt+Y^&_hs(YZYOc;k;6(PCM4Od2HkU{e2b>^b~waY^joIsNL~k1*})<_BF<<^Fwc zU-Hhm;B_HSZJC)fWpjS~G5R39YPDOKAf^~up@k%P@+*=22HT(LU@vO93&lr$(I6Ww z!sH_CSSjbc;T2IHU0kym{q7e-yYK!>O)FXxIbalJO+M;j9NReII1-VnTPC|C>D9)Vt0h22rO&F)1 zM6#BxrjDMo1nY5ps6>9s&chfiV%`1;4IrJXx(GO6_Wee6w+Ipjm)@3NJ+Nab-YQb@ 
zP9XMGfi8ICWaOEqp&{JshCGb|tb*&+3%nw(QHSw2Wik`y|87|@TKqIY7Wj9E~JGc70@GD^(lH*SG`nkB|FAB50@EQ;!d`)F$P2Sm4RM?nVDo13+hvDA~UI zLShXzU5~T8x-eTIKu+&mZjD;WMJEu$uP{NKUrad`MQ&Cp|s(-XU6ycJ-r%tH02l#r^`EcO93ZMpl$qRkW$!m&>68Ho0loEV_8y4<(pejXl~QE}JP$ z$ql8f1&t&C2^x^|rxlzR;=tiV36g<5w-23j3D-uMq1j)2V1i2pjei=pkrMlrG~Qmv zu@BEmgoJ{fMS>cNTo4T{jzG9LWDQ*#~ z<9#|6N15JY!`M+`-#;wsYdo1)$xg4YKKv`C2rjZ=%U~1uLRilC{1Zk^&f}Rz>GMCb zz>D5q7WS2HaNR$C&c7C77V$|MA*S#^sz?DkZqrTCUGfCrs0P-SbKJV)7Bc1DxsM#u zv9W?q8dC>Gst-F*04xq?EN=9E@hZdlAQs}j2}Hi2_UAGl#p3WzYCiHm>w+^P%Pv!c zpgRjZgK&_HeTxWGTBkF3b)4xf?@aU5XBFPb;DZPe-^zs02}qv5iO>&$TPs8ukeWKw z4CKmYW!}C{+#%?7S13c5?@&rZ3A-k0rrBpGL1~bXe>={OjF!32+YE*;e~sV&8?>Q| zH68!0pBMrCXm0U|Kp`UcN>MfO0lL3AGEZt#!4o2*WAhQXY!RS`EP9wWhaatB1`aow z0kzxTx3xLEIHCE0F(Y~_QXx%zU?)%r`ir8gdlPvszE~WWcXSh4iiZuaA3M3qeDt|u zMa~pW>WP8AKW0D#994@QV{FTQz4cW%K(2yxs6r-eMO54Rf3WqB!Ieg9*9MxTW82OO zI!4E~Z5tii#!APwZQHh!j_rB z*(?VhMj(wT-EOaAbP@yx_f%1Jb-}C4N|*Dhxz7#jJ5H;$Z2c67Rs`3fiY<;S3;%&> zHs!H4_}jS7*5^4K{&rK>0H0^AcGub>)?*_@`r8ij{mG&Yt?^sn!%r%s7udOPW*Ro z*JdJH$R2*ikZ5{c5-nwnRa(-OjnEmM0Y&XbZzmyLtvkl`#zao{dpj9099 z9Wq7~Q;?t~cMg+tADiema|M?N2Q~Cag*fx|3nMo|yx_%pansHyflrf$o zJuu3WCJl3W6N5b7u64&S-Gi0{jy9`q8vc~1AUBoSI3G56qhkBD-$IJH!;k3nlnu_W z=?^f6vZL`CO$*X}{yc?%_&339q?@Rb6un%Qu7T4E-?f>a8e&y^9>~UQ!2?{yueo<8 ztQISba;@$(jg`CJGX`6UVR>gy?5%LU`bJy3Kg^5QHdGt=q$>Bxy-x!{GKeVr!r?+R z?X9wh4EF7xfA_QFm9o8Xm=a853=gz9@QJy58MhBF=!0m0?h(AsLt4BOC;g05rfGWN zw#J3pWB)MkpPR&o*dE-=dap4+yNt?taxc#+!a{=bT8OZZ^MH>?b3!)Q?vBtqqMwkU zYK-L)wC3YedIaUS8XWjsU|RCwpmVpl_^Sm0dIu|MtCEn3Z1o*wzT*(#ysJt1#0v;p z(%>`V%zW2U0qiZ)Tf&S*$IAKH07dBTh#i-%N?4WcIF$1gB&-sUI78oG3rd??A)hV> zZoF$GD>dJoh+W>8(Z7$&GIAqv z!Y^zJ27XhcUkiA>Rl39f0?sBO-kXl3gI}IVaayr4Hz(s16N1Q0` zSp>LnqWFOE*3N7q$Y!o9S5Xtx8IJ=3EEc4Qo4M7NWWh@BS_sV9`XgNrqW-F!pDpQ4 z_3>yiKe#qr-FwFEC(i0B{s#Cp!500Nyl^!>4b=m1b_(^QsOyC3S8KrfX~UtPzW}LA zWlV`oTaV$-#yzOs(*+khqk|)Vn=_bd{A1HI<;F-+PRp^EvKa}tWClHjc&0C$FX!US 
z&=i<_%02CMhdv>j8T*b;vSu$|ywg7FeoN-!skL2So0YD9Ne1;7))=fftuOGd4N|b$GHgHzVYJR8n-%Wo!*AH2cSsx=!7;b*Q!cxw)f9aGYgb5`9`mF!>ONPoxiwIurh% zuNWe=|L#PNrT=dy(p#P%`21(bLC_`+?Am5>ElAVo>E5-U+6?;do;m;y{JndbEbL%h z8-~cB{K`az4JDQC1@a9}&VXAo0xJy-)sRT0Hm`$}+4YF9H}1W_oyUoz{}mxHDhDdP zLpSswTtqqTmihSHZegomlG_iIfjm=_)*{=$q9g{zLSa=SRNZ(hIHihA4_beEzsInx za5H-MgMXfFM?;VHy?v_B4)UpB?En%$s-|P7tQoJR2f19#>7K977>JEoo=nWBl_5++ z1=d1O_EsLP1!_aC^H!r$qw-f{KPh`+*UC5|&`R>zcv>7AnN8)VR%5bWJXUSGK_^Jn z1djEFrk=kEW~CvB$G`&@x^v91#0(VUxC%ibYoY_ec%pwI#q$vk! zyo4|s!E@PNe_vQ9g+>$k8KH((=ON_dIBIR00llM{F-|v9eTrF`5MJ?;SQgt3mFn2Z z4+z!`hp4eK=u{yca`SR#!1@G~iK+=aBxAQ6LU(_uABtw!e+hv;mij}*)|*ZEv=nK( z_Q3{vt|Z-vaAFM!yvb;lEd`sA{{EUB(S|mgzKP;%JIj+n4#YUF0nPe3EGFL6s!V9S z-{?U$1(-P5A^}iU2JRp*#*n<7Al{JJyn5iH=3$COk0|rle9$R)0ZFj(?RK{c4$37O zA)}fA&}(-K3F%(etAf)> zFl%n?ph!YNjXo#ha+m>RxUw4TSZx8+y%WCAam4BC&8+UF!<5o@nB}@3Nw@(i48(*atxFqqUzL_oslCQaBa!C?*2CzZ z3}GqzBVh_fh8Ddaa&caa@A2 z?N=@|4DCT?aEo6}&{JhBL;hqv!gXgEY*Bb}P+RAb$(C#96~a?A&sBq@mRn2X0q;ei0$ct9f;?(FeHY6!WVX|&nUp?FPR5i zSsH3M)?=i2KGEN%Foc{q;9g#ofwac6S%9IYar91tFFxO4oj7PH*OKO-3%he6kf1L3 ziD;VY-h}geSF>-IipnpQ-EHHqNc%TEcWP@w!RL%0L+ehDhU!Ns@D85f6QrvIm8+?- z7=9(V#dTxpX}As8sHaXp{*i}M6Dw^kk*VOqhmdhwno4{Qa*urp82v`ghI`{A&C-d@ zN(=uQ^Ms4&$23Uf20gN6KLUUj#$(L|l?NvyD_y|b%832Rih@^*VW1nClz_W;hPP<= zwA2oFvMjF>KHXj(_>Ij%VsT~+q8zfblI8Vha8|DKUpqQV99=TMkyk=Db@~f3i9Oz8 zPZhToK?L}F_1ZLujsD7P@#$2=36HN9J*%~UN*10JF134S)N_d*Ln$pbNu3Ea)BU>i zR|shJkJ`~3Ow;$8=w9`$;UHLRXEUtk{aC8=*H^G7B>?0(f>x zdxF;q(Q@gwx_n%9{_x>|SP&hg+deqJPp%)85F62krMF^c;yjX~GW(|%E43YOj4T|? 
zD-Jz@yK|R~@+jO%)<@jfrgrG#ZJEm zl!tpgWAqvi<8v*^ib4%nJ3KI$1i%Gz~y0hUuSaU`)|7AG}ajY0MO`S zv_gX|q^F~t1sO_|${RfKl6zJwkVP~n%Nf$+p28>2s;`zst)1mqou#tf-eSB#qO*V8G)k^#57)hIqrOJo*#IVu`PZE7QQ;f3}>lkw3T>E700 zVXNa+L*tg`+`Ps>@Dch^UXxm$giZ_+-pMS29pR+t0Fc(9VqZr62^R(T9Ue=GNGXZd zKCv3fL<0CtTZ7^lP8jBxP;ntkJK|Y*D5^6!!JmXN&2ZU|Bo^jsw(RUmYH7h6B z=WZoaJ&krwT!z=pugNynyS1K0roSc!72Z@aCj%I5mZf2(`SZV@$Stwe6$cay9cCcy zsOoGcwHn~ESyI;lZTu9&)w&(sPV3ZY3(syrDVp${~SB1axNKf`PA1JqWlDeVfo&Y!C%fM=SnL|`` zu*S0!8i_`I@AuQA{?c6An*;F`a<++{7(*!Z#pU{zbobCUr#$GAKD1w88?I}qW#zEn zNEgr_*DaFU8ZKhk7S`OES7Hvtc143cr$ zPoh+&XlOR{-*0wYd0<0DcLK5609y=tmMPVPqi4{z*qq=!GnAN=AgeUobrj(C{aTl|;Ko$mlf#6ukA z+=2wmvD#67zU`sFM1f>66YwIjAc-Ls|CDOe@bUhCKzGo zatm?hl8396-{T(;Dha(Y00bxyM*7$MwzCU#UgqXSUPZ)5H{jo2$ohA56ddtUK-^cQ zcnQIwSx=yhKb+S$jo0D0M7A-OBb7yozDgk?<0;!*e}W2EMaUMUsJ9%+*T_GfuOuP? z+A~5?koE$aqc0`wA&8?E9UdramF?oc0Ps`Xy zvtAhB7ooUcN%l%?7arwp%x+7U-fq`Id=l}@!A94%f%V$2 z@Ndk-)+ANoy1iWevXY{({i4-T5M3M08can5s2<|fcmE|{_bK#M54s(8O&UXlNPHHqkqtHCC*O=^+56ZlAl+~k>G};_Fy5nv zeUy)yKr{jOU$E^Eo1~Mp ze>tcBYju#To_qH_28bUwwH>m5mv&E}eiUNG>O?G8cOek1anK>@&E`euqk~gQ)9+3E zMR+FvZrQN$x#H}r*R;GpqtJdbFr$wZ_(#)WZBvK;z?RTf#lcvZXQ|l z9q2J{?^~OKkUgJ%%2j9Y-@u9f|cvg4^(s!_W#O3c?KN90g8ur%|YdR8F=y$-mMSV zmPS~p+Uk=zp}?oKtJK#X4eyV@V1NWaA>aoeg@}$|v5`Q%;2tvtRlT{ua$# z%f9sY^hU5J1XCBSJre2qV-dPQy%Oh~#dkSGOQ943|2Z4(%mhoVJ~XC) zfQy{lH92be!7Hy1RM+&{H?SlB%JH9}Juw-uA}u@95GEL3ICnyObNOTx`6Y0zaw)ZZi-yR`PrHFg(`ywT_?mVi?uq2AzG*Ty-`#fpW~;! 
z;UILZ*ruE`-T#46(^HUMr=?v~hM-%>+ON;6su9EJ8coaG0z-G^-C)o-uQd9&2bmL_2d7H}jG?)4RbE2Hnb zARjT_{Tg&$b6QlEbwgL{XvBV{0OV}gzTa|4>q5lv~=?Z+RN-XX_Z0)B-s zVk{-08>fbEctbHiq}XL-kjrH^|f|(h)8k_{P^Rz?&KENRV)Ym06_@Mp^PPuk1F8zKg>MF>8=coO`QlU=1d!O1 z&jeAdwB%N!`gDm@|jvzmNb-&}DC*am@SYdhtQT)+s!@Q^qg!VpC;qMKPUrhoh zTIxZ^iBU+C`N)2kuqAJ`yKF$@z)t8QE1cUHa<&qgHtMw&XxiKKgNC z{nLD|nAHo0azaYrJM1oLuMR!;O9HgoRTzAcXbKYK+_;3-CBg_5ED_8|G$qb27J5*l z_Ja<49;5QpIy!o^f<5SDb=9(Km%8k_bUu(A=YnltCxUE5AN(`s=JF>7@k#)vD_IXk zcC-3fwV7dX6b=QV^~o$&lOzR;R3%KgClj84G$PIs8Mye3VPH7?V6rzj9O!2x@Ne55 z?2iVRS;#KNmJsk4%=RJj+;2Ve8}g-`kru-hk54E+*~?$*>QLL*3%2HlQ{7kL===QQ z?TfifOK|7L9~?Pq&gjak3izBTZeA_MsYfv(jMp_ZTjrCOc+~KG#Ff(E&lyTk$wFvQ z7%y;Ruh2oUqc)C&vXjRE>yG0jjqrL-=K-Pv=W6m0QZ}Ruymi;g#7(@u9`wILT3b2x zWm`V*7i;)GXL{bn;)EQt6ZEyEF+%t|E#L~9kg14ya<1||)O#=)*U|6XGS++av6Pb?^)H2gT&)VA;u;MUS(muyjQWtD z24_Zv62qnt$9JrG)djc$jZ96qxxp{@p~{Z0K0Gs@EcJxY&F87dDP}VYrx`_C!;&;H zZLR?gxlj2W7-C2BAP*W)yYyPl0Q zub#AfjJpzxFUE@wH?l^@?=*2mpP!$O7X(1y8|ffltnLTY>;xOCu{=lL(pvK7hOvsd z*VdA8`Hwe|9N5fXF+<96G;6m=jq8ck=c)Np)<)g^N|27B%q#+(RR{&~JFhvVwm-{G zqYWa`CjUX3%{GPXo*~v4DK;EwflaGa4uSU=rXF(_+2C-xnM=_dA}nIjNX)_)`ot}1 zNwv@16|&?!vcNVjo|ja1<9^{$3LaOdp0W_y>3JauS9==gFBm+_5|sIq@8wD4&U6GD zG8jF%0~S%(qYG_tFSZgWs z*U-gNd3P657iWTOx9%VBNRj`c3~ExwPX2)CI}dZkgl3=>Tuf3aQn=A$f>5tE-@E(k z(#Yg^*rh!Do{Y?c@=oPsto04cOuibU8cZwHnH(Jx-sYs+y%_iWvpa&?eX@$^Pqz3= z+nwGqRg`2&8`W`UU??n834e@w*lCj60hPO&9v<{CqU>*JYhrLN*6~jTJvPc5=|8P2 z2ru+TY=Lo8iL}aTv0-3l+O5$AAzEMdFA@kR3N!D{E%mKVM0V6r(4zp7XBZ4z<+StFA1i#0&E~A9(i?t z&I132l+YBY-X!@yThgGR10eT{LN@nd<1meZh4<(Tkz%ujYFOGNH7ESc3EA!}&2ux? 
zG56}b&s!a`L{Z7Ug5v>c=W$F>TLM7?;bU*NdF^;5N+4Oxl56}J>ht-zD|iBUgS15} z6^@SB{pZb39hpp45)9G4xu{cS{-bDU5#x!BnzTC=|yEK^ZcCQHMp%lgs_gnC+;yW(DH27u)E`+vay7QP6kL^sUfp2gY zL%DI3XZ;X>S@X)5*Z9jQlv@>?l{Mx0ig*+wD<0{DUcJ}K zim?elc#^KBIo8PwPE8h`rJE0I!V`7FY}DRdI52TLB1+u*Lhb(MdZ-~U2E`>wC-c%m z1nu#FK+xIjm+!(`3Un?;3fqJTSGZI6n*7cv*cSXFYp};ecR4QZ^)by=L7g`F-A__` ziYs(Nc^t4DF79WB^uk_t&dZ?5zp?PqlxbzVhG5U=jLPk{QBgW=?h_sUgk%kRNVBk}TW|)TJ!|bJX90D0N;<1CZ z&#Cho@6mais~DSgA$|XhC|#oj=YzU38A<#>w=*w})f*HCaQTc@5f0a*`?EkdQ2){s zo1K)kKAM0WE4uO#elu!~7e)o4?_7p-)|}JE)#IGn!}Bi8I*W%hBSN+-p~_@nL{J*A zMd((%L-Qw)y&NRTL zBA$GLV)f~5C#d7D-A^i84DCCQ6Z0s-2*xPJbp1e@c|wI$aKsRTHk~ZAkC@Aebb#s8 zO;qN(+pV!c!^Z9SGdXGCxXW#biSzZRXS*B7xrMT+rB0jUkjX#y(I7PBL+hZdZ>_1R z&FM(#5JOCeA}CpN-{5dg$b?mDcq#*Lhof|aR}l$%RzD#*0v}a%3(mp=V@LA#oOFDK zvNYF=CVK{PvZUNv7Z+tahp?q@@3q;#Gs#>Mn6 zHG5VSnnOmY5f>g+!yf~FsNkWo#V;N5G)2k$EHRVkHi`4%)p()4yY^T@k9?}H|C&|# z8#9!1YW|n+*Y5u`F17z^TyNH@Fr8cRO>pv|ypZO1(M!L+k8ol+d?(5mq1i&VW#yYf zLPjY`9jO%L*og+_ubu@SxCEN*17T58#*{q<_$|U+KJbQ8cg8CK{6=kBL5O)RpAQ+> zMPMDN1t=}5w4%h$klUDvhF3>!V>gUG3QzW>S!jL&W$oHN^eZ@g|9)nR!)zH$B-VM}Ad1wnwXw#?vz@?^dn!6-093F0pDBse5b#A8TJUgWo}78JvM`W) zSZa+)30vsWW3hD%5>6>OJ+qCZYVuia`X9QJ_Lfywi`mz>7n z;YfN0$hLfR%@i!cBJjl=!GPQlWlXwE*BESQaX0aDJ#X2y7)jo3gXU0fCnPa*6Ow!L z%8(|Es)qFI=_2R*JNsVY^hy}eKK`P5vX1(6P13s3b6;E*?~;AP9tRtLcbt@$)WLWs zMI15RKLrqT&>XSYKY8(HqHm)gCFv4uKN6f$;DJ1lSFkrG@}gl(Ae)0a5QM>W`IF~$95m;*fpzRDpV)4R+-!1+Dd%I;2p-fp=1EOBi$vb(LT<+;;=&35%&n9 zLO*bUr^JE8L}5nl4~va~!E;j}e@GxK2&eA|IxNYVMM3xY-HFvN!BS+D#{B+H6N)TmNCVkC~#^d(eE;?cZ}F=o#27X|=#+;4n*uE%WwMxuCm^@Cx7qOTwiW?Ov>m_fz?7>zswN_?MYo=P-P1 zNPrXLyp07O^v0@Zn~JKnF~MN@*i3&A4oHbYhOP+mDG7vubn49r_{G4<-B)Y@&X$Z1 z(vloKe5o(6us;eOeASnUdtBW~Qy+*jL#a^RL=~@aH>KHuW9f^|(v>}YLTf-$l`}Y8 z-r$hts*Z=_sB7hb+O!#eQ_}qf?C;txt^Q{lm#vXi7Xac*{n@g8<&lXS!4&u#0eo;_ zdsW2W1)DBhXg#1oNUziFkcwW-hC4z^Mf`ajGcmn`g~&I-{rBEH+PYDfE^U$Xnu&Wze}jKL(U_ zcBhk(7`F1L*gYeNG!IP4J+elXDowriaYpT!&RVC_*O`cnmHvG#J>73mf!m2^4;>)c ztRdtQ^mLs}i=xQ>e#IU}b>@ni)CwX_BhEwg3BX_gefF1vg<8X-Gl>q01+UnzSLjnG 
zDL98wiZ)jNU_?a(>sQs0Qft2-?>%)Q4ov*2*d66YWi_7x|o`Kjo7f6POwTUAS9%zobC zVQl)hlr2O;={M`r0_KI_n)ZvIk5ZX24+WAbX1|A7EucccP5A`{*v6Wu{xf!cBfHu9 z&O;}EbS{#jT(Ukj&*1QE4mEH?68}f(=fY~ z9OqtqNx@NBBj&Wxen=-kr`fE*ex|06sr-FaNcN(K$`LWK>9u79VdU-Xx@>aJG_foZPgjwJ<8ZdhBd<3&E zD_EQpW?YC`-P@wVga*_8$>7gNER}xny|XtltmxZxyp`#lO_N3O0r~;O$gXRBT}a+ zTaDTbmG^%|XzbCO`}#^_#Ksg(7;?om!qu`&*)iVXY#O*b@uenWniw4y4NQRny~b^o znC?}^68A)9v<{8-{qQp{8)k9!OsA4wh0|irmZ0>^E&Yoxu=FWJ-%;gP-f{MgIt!!Ht?={>|)B5_gpZRSEt(URW7rkcwiwWc~exiZBknPteIJ+`9NCSQ%FF zvCq3}50gx=lmpM#D`m;X{o6r%t$!9VqDM5j_Pbjw;GYTF=LLZlL_>8s$qMFo<8`Tp zqYqI~vkFR*+Er#8bqO6jY&g>cZ@n(=3{90~{N=ad*S3>yrMGp14E;HR3u7it*;Ev? zI%~_+$>6g~!T9jw(}KPan)pc4BY*Guu(@9iIi8YpFKow+&i5xYsVqmu%@!zvIU#pe zYY$6*|BLR4>rErv`{u(1$CCsrvFHm+a7jLQ>^$ddm(fraJjndY41FwKl ztS&BQO@kOUY56)SWi;_sNGI6ywYb$IFO+axZ3KoIVBpid0p{md~v4C6+-zoCn1yZ5e#|;LuI5FrD0hr22m~cIGOVIu}7e7SP{OE1kPTSB^ z=R42=;OlaIAT(Pl;nne>g$yvGnv1eVxie5i4Q5czWEsC%ec3)vz2)U~#)p>>OnTe| zK?%o%RM8M$>u1DmW8&IaE{t1-KI*Zmpu8|-!8(m3n`_?8Nsn7fkMc;98BNy`T!M{A z7-gDdk2-yTp_fr?FW$N@-=vWpY}{u1+K40@@+#NVpVRbwvFkj+3#_DuCT68D{Y=Bi z`G&3fMxwPzqO+$N&-I638(rpA?u(=qs6Z66`G)35y7^un4K(6Y1DkTZq%Xk(d|61s zd=Cl~M|)>n3-U_QA16(2lDEmKgwOkK%Yu~(S+4XI-$u7fzAI8x2d$>Q<5&g*o;WH3 zzz5&Tn)tSa$4fJJ8A357CWW%MafxuK%G{6?IEBkOXv0SW~G02|8p(k*3_bpYhd{{>*n|yFjqSLaYt>=SXYV5*Q`cS6)ZFLEBT<`3x=q?##c6wY;OzcS1Q)|@>WIP7@C zKj8jtelbG@JFY1-#_KpKsdMri&1F3DY%HZV(JNzJeLhQKPyJB`oww9gia9#$?wW(w z`_PQg)UD_$f38(yHtSI6hM3>Lw(pP=a=nrsJ)G~TBEqp(F-r6AY6}bQ-5eRNJpl6O zM3#mYx1>263u~oM>GYicO?|`z%n66WI|MDQ)XkO8C&W?NA=Qf9byW6^>$!paj)S)^ zo%X7-s8Uz{6jba(#@T6{;8`)SpSe{Qzw(qkN1ueGF~l;^A?o3p!rTjGbe0d! 
zP9-4+t#)eF9=S;qqVcz-QSLo)E4E9)ass%degmBPK%o8D0>Q-V^8)IGmXn&68KJUD z0G&!Sd{=yMUJ%*tuhJn*iRmX$vuvGFh5?!&p|$hyU}sHv)bEF@AGlNY2{;6KMjpRS zZSgE$MfRDGL`W#hSR#Z9f(sfeWEiHR@t<~qQLZb^f;G*=rpdyOoT?r6HMl3`2Qm*h>5@kOred`tb9L$R+OQF}3`|ua*Kv}G25(-NXhq*@_t}xd=4XIn zkhjMHywPOl?D`ZaoF-n?VbZzYA2OJR=mJX{w|UWn!^6CiP<}Be5yQVuF2B>VxGv*46s~Sb<= z*=|8CbQb*lel5GH(7HW9{eM{i^gf3FbJA|t_;1J!^#4L`$!ZwLZu(+OkRDv+{^oQX zR+cFbjl(Z+PRz^|^QvM)`+X5iD@{FRqIPF2eWXP}&q{_6(dyP>f;Nc0V)knj<;Dhe z*0o{&-|~*}N>Udbe}p$8iR=AG50vnqgbxVH*3Hdlh!#AHeeoi}2C&D;i%L7#$#KU+ ztau9&N3BEN6>!gjY?sYqp#hp3?11YBa8+GeJo};}r+ap!k$KRV{>jQAU}<%t|I3`6 zeJ>{tT>S@=^CNf?H%aK??8azkq)n^D-f@Z|Lob-z0cd zrrL*3`ghGXHMrhWe_Y~ z8B%z06X~#_Bl{rB{}MO}S4|t-s|EH!4t_MkslJ==U5coB5p=_Jd9B|o_YVY#m59E$ zdV)@}4I5PknH{hUKEKi8{bNG8H)s5Uy* zIBk-7^yY3*&&`e2=-CAjqf8d85J5OWKNL`rhK-lvACLewz?UEL2gUyt{>P?UG5QX> zu#l>KGmn-04gR?-O+Va6(2kjL-erS)>A%B5ryWA&KvqYVH@6MbmzPFd0@R47 ziyt6KLq|38%N$HT=-i+k5Nin2?oN8^Njy!7FZd>R3@h3gL{-YW)%yatL!ZT=M%(wc%%c=m*t*1qoEtJK02%NDXhgGnav~cy%t~($FuHp+v(QFL#DEQ^zJ) zq4<6fS1I18#mpNNJOW@UARqL$&!M$k3`@7tV^o|d!iVTKh^&Ryyey8v2KA=XQCLnC z=zW4lvT=qg{SZ8y1?1lEB?^?;6K8;{hGim^9$_By$U@!&7}E#LTJJTaT^Tnnaph*! 
zI}f|Go2{kL|98>#U-{|M^uNol#cvWmJ^GclR#U*v3V-zxl>*J=Sg~+RmupMBa-339 z*z*3i;CJi@7H6Dy@B(zwnD}WE5!9T#@C8INS?Er-mf5bqChct3V5@t7tJbHek*X_r zCSiGR^_p^ay)1OMg4++{zQG0Bp`O(oNe&}>m?n;)w<`m!9JL;O1)#g4rbnLz)&DINK=&Nf*+>vXSG6 zY7k6eW(BH}&`LsVx|`^nA74JY1--3|5-wW*5L%@Xhs5{H{nv9upcHf|U_ z1a&@{gLxjnN2sPDsye=Sj!nz`jow)pG4|=B2dp0Kr|{|rrvwEfZwm|+;_=3;f+C!E zKk5Eb3}^!4;^@iG9$84g8p^=l4vNg|mTU7zbiYJ5s@4&05}lRyQ|r&d9au)3pN%dS`B-X8=4J1+;)`4@ZSTg`)vG) znXBX!50ZI)i(%E%&qTY)h!Tx8ocytMQe)AH16Hq*yyN6o(vqU zuxm$3GZLBK6KAnnW-5QFBgn?=kPn0W$L!`Qr0NllfF21ljGI|@rAeLP^@4#39`zW= zzcYW&jZP+ftDB03#!*dte5s=LTF~OghZEH*<#MPEOstQ%o(x9z?QRk6ViHny8*38v zdu_e}*ZH{8{nH@^(myK{MH0ZfCLFmfbTPG8gxY=x7>gY zUCm!{vlR@qs-WX$lScon?OTvlXV_3}6#=C@9$Vx)zG8nUA>Fy0Yk_CZ-0wn(bQSLM zw8g5Lxk2xQ|5_VOM9{A}kGXu2X(dduUQEc0D$5<-=Q&%y(Ij*EqS!2EG9wsQ2WW(e z8qKD2m-3e`brKEnXYZ8Er)9Nh58>QNHjo*toX%tQD0si%kSLS10hpf4FZdG?eAT11 zy-Dv~PBqoXL;_W!7x6u3jjz0WqJhE^f*Hhi58QiDj33%^iY-;`reXKr)Y1PxiTFQ1 z{4@GKr^3ntRdf+Y#t&-U3v}g?6^?!%v>?^sSbh=)(mWrbX`|A!(;5=~7o7UW`qRFX z#>h`h@t|_aa_(Vm;BG7?U#MX3Fub?YrBNIj_pSPTdB zDx0`x-XQ`%7n9wGe-nOdVmqXq@lrx3HOpJWfSj<@OWyCQ5aQUZlE9*zyrj6B9dwn) z5p@6#A-M+5rFBs7_^)@?O9Xi7+mW3-oaw zVu%}VpN|FO-J^B-spb0Pa}yk5tU~oYSfmymEJ2)v(HqI7ppznW4rp+gB=Yf(1Vdh! 
z+;iW``hJf?U{Xv6YzrHH=x(~#l@gcOD<^Swvf)h`llz_t5=U!m`Hn59o#|*j>fSqj z3WlRGDF%yVBgpHXW%H>sQD3g*zMNZA{u~kHyhJtHHUt_r;})NQ&biv{Z&_t^wJ`Yt zQby`o0%Hp%lhOIusWF{{&JoIE156Bj-t5fjOW)~)l!8pBItndmZAKWJE5V|`QWd@< zAM#^En3^9GY8i!AjjZuHsnFCS)$#5)(iIJgjm(zZ@*5hQHC~Ds?s7z+p<5(k;tMY^ z`MGh%C}84n)2`jaZckC*bysA0PVtrnAYjkZ`QAIdmC;_TA|A1I)VTQuaXheNj5p>W z^+}Wi7j}+QONW?sEPhBjVo1bk=tIYX%F@>yXH?CqE#(EJ+W>`k#-Jf`xY2JA0C#@pQ}(@0HW@>}bcf)+1gH6VS6s@uZmq7X-+ zs``|D7l_bN;WUF8{bO=`n@BYwAtT?Ba-bOZ6a| zgj>Q~9 zsAkQ;2z3z~TqP+hiDFhmsC?O8v+T{foP(`FyF0LVPiko?E;!;RajOk;I{SQ?epdX{ z^f?q4RRDM??S`M)YTyMnJ3QayM5?5bINm+26eUc|v=jw&Rz`%vQMg6N+npqOHAUXs z%m8SDM63!*t?#}N?(3a7Clb(OocD>aW+3p?6T+m2gIL1!3cEYyC)`a+!wDnZ$g5z; zq*e5O=&M?`z9T(*0^n3G-VyQ4irD9Jktr#&J8<4OC9x^CH0(V!FRw|ArHN?lV${Gj z#;3=A%kBq;Kb=XQ$@Mc`&RU*i?_ah9Pn`)X;lEvw@Yn@}744*lm_8w`3f9$Y(iCA14wk9eYQdGjl4g ze{%!T$Ezv&*kt>+@JGJvr%!G& z%%ciJcz$F_kROw$ipWHv%7&CTsq%h2csx0x&BK9>CmO*!qh;eGIY!$f0xh2!=p1lr zW^S!$8SGdcN#}I@y~)j(&5a8`XQl?kr{8v!uKsvvcI|HVuwR%4{?aCZDW5j`Q_otZ zR0v{Am5ev67MC6sdG*-m^lmU!EQz@T{bN|3@rPjNr%NMZ9KpfF?;aJ6jIa&h6gKWz zKpxIkskxzpn+ElskYz2gJxu)s!+z-dek$58xaEm>`?TlRmABn5kxt1b{}45c?SY-! z1OU^gvBnc-h|JI62!uJ+HRO*-OvQSMI9EqtG_FrzCkZI4-JGPQbz=l zMUF)w{Q)!Dmw9)MOzl7r&!Pw$6$-c$rGp1QOH$@={?gY&-z<8O}NOS8LBP?|Db5iCUq9;{~;}LPO zG7#PpNmKtt%!WMAIl4SvF!L;}QV5lR58w(Q8%V`uRaYYcv^9n4TvI>ZvNj6&}yb)Kd%w$|4r)-Tu&qj4R~-=Wt`5 zFF>n%n5#R571cC3Z_G{ad>--i?$_dyiLb5UYfztgX=EBM8`tu>nE>{yx5DSRRx(-% zdU=^=ynjc+!`Uc4&4Qz*{Z`H4&`Riwl?TrS#_Ebg;%u%6{IM)v6ZC7coXKz4w4jbMTnFsT2$Inw4ZPY+txRLnxu7@|T zZ~3-TuM2lJO*ZQHifu{zede|w*2Jny^r8RPtOeSvGuHEUJXoF$9Q%|G6J zCa5RK<_oN?|EtslR)hpl@d>n%Nm!CZRiOLY#Y>Kq5Q#eyb!2Jn*Uxk>WQ|Yk5I)|> zgPyDWNv2$*@$}R-eoA|l`*02uU-?}1)j6cr+UO%hR+HU|%p#!SS7EIeN>H_-`w{o;nN$o^v?zF`ZPwq? 
zldguQ0qK2H-#&v=WQ(k$-&sQyHarw3;ou>!NUxyX!otM_7m>g6(ZEbKMA z!d`vF3HEB}NvIgp94$kR^mzoxoOzT?(cVSUEh%1=rU*5mlaTLn{eHnaTJ|!~(+DlG zBox<>qELPQ9hB~at$X$ffqx!xyPbSg>R1_^T!cu91I9n^L>X}5!{!~_%PDhrh=PIJ zgO9%6FISwB<2NM4>47jMug6S!rqSA2{{_Ndw>9*pzH7Fn7vRRr^6fD@J?2>>Y>>h2Y#X!O7|FRX<6uBRwi!jk@5VgFeN7{G5Sbacb#8 zx78yJNg$bvr(m}Xb&_XRGXme9qYI8Lg1)d&GQke39B?(&(4L$UZ<|6|)nMIyDTggx z^+lo=E`7|cA5?B~jN#d6*&Lm(%q1+D52}%W`CTB~(iwe!D9tbV10`-8DwcVFGVtW7$-vEqlCA=adCIHv0%{l8G?_ zFkmfYQzu2B2p=%ALPk>Jr2Ksh=~A}ed|H1 z%$Ccqp{60SC3_n%VqGcyO|cVh4K=%ho?;$s1)GRDKNGLYlb?HL{w*LsV#lct(6MsQ zOel;fdAc~$VyQs~+^^~$6Fc8MGJG<6j)=#u4~!>V-?LaKHdwCIuy-laQG?bp@yveg zu&z`*+bu#qv3TECEtF&*vwddzwaGs#-Jq6aHq`9};;bCPrx@)XmYizbv^Y0E$=(|Y zhqV5+#{ceW3M{I4&-o=|jDI(+|1GQ^xL^^xe;^vA`sQG7xb4eUWie(ADHECn2`uW* z--u|HPn@vo2;hx+qTpU zYl3b+XKZw(SO-2?GBg4i(eSRQ< zk6qH*LQ1r_M7wH7OwvwcW$7z?9ICiGtyG|(C8U$E%k}U+U8mu3;vF~(H@)CIsYuqm zBdk-kMMnpn_Y%r3DUd)G)&v+~q2BDvs4IE=9C-7_NGvWhz+{2h%;jHA&x(X^bg6UT zmd~t$w{LL`?I@zDMyKeV@Fvy0V#D9gdaV99w;$t%ZGd7Ba!dXBNTw=_WE*a*08x<9TT3|c`mCfe z!AW9bSmNcj z(QaKtp!-LHj^g;WKSP7+ML^lIMTtd^`1=PXH(q3Js%gQEHGV{akw)e3Rq}zRp>>8x z52hkD+uq2CZ)5v$J2T6gnwq$-m+Qe@UQhkg({f5q68z$!8SfyYHR21%5~0;~``dgaDaXuo1%yiDlN_8zy z`=!NDZIp*vuq|o-zaZ{^>){C+RMg%X%&53oDC!CmWcJi8?skLA@M-~VGKSb_KRqqxJ z;tG?LU+90D>@Bx7;7AQ+ll`7~@0s6OG5ool&<7u>d)i^XJokET6rUAE+WyWRudu3j z$AOr9tnlDyUV_$831`AlX(}X2h1I8_Lv>9rfEALT>;}S9O~%*{E~03HI3Uaec0IrY zy-Pyc^Ol8T`7}av%|KdBd>Fy#iuG~8e{WgS#2QIwny*h|Sum4sHL@158ZRrs5t zA=jA1^`tg!u8=U;mTYF_#LhIiRbeV`V7{q_SO@h1EpnS!HVKJm#!*3pWfgO99`#W9criK?!dkl329Vwnup6u_~rnhBQhjU5->E@*a3OwX$D{x z{`92&;svVh!}GQ?M!C`q(UY8d_KRxcY>dWZ>3YP7irL*t4c$hbQpm-JnfP(paZngNK|Ibk(`JZhr8D>|1CZ6T620T4hgRx%Kb<-S>x)Z_)EjiZ=9|z-8=46hs-H?k!}_>I!(wGZ zOr=5fXqgLp^Gn#4;MNt`Fpup}(a8Zs{GDl03ZGZ&S1;C z`A#eGZp66TPK2St96hEiiT!?t@o~ZyY4{i6B$L66#;~I-D8gtad``021!p8-PMDFD zK+XI89G4UMT==b|yH?-bgFuA`r*R?v+k2WHNwbX0kxJGC37X&%jJJVi#WD*sHpu`W zl{6&#t0w|(4xQT2#>MAA0sqee_$Po0GbA$qMD-h~L=t_lnPFNB6YO$!W5ph&cO-w~|BmS6jJ^BMo 
zACkPh48oE{VN71Z-+D`LA9BboW+z5BYd^Zt^_%p)nkc7#GY*(LPghV^L9YXo_INrcMS_irdwyh#3 z%Jw;d`<;5#nJ?PO4l1jwho5%*Y9Qk$rj93pCg(UU&F%5))&fD^fbWqQiP@Mck9+<% zg`&U3uoL4R{amT7JGpJY9vB5vWrvhuzq*`>zQMK_bT#?S5EZ|;IMa_emlsHs-T&8h z4+jbkJm`$X4p@E|eGXW;E;ndY+|sRogR=}fWh;~nznM2#ZV>S+Y#^>->I~k)Q%`0 zN-LCPwS(CbCuD?+f%TA8(iY8ztA5yG-)OY^S76o6C_8wyd~?%-Hbu>=K6c*LiedZe zg*!kUn`0_A-sakHM|qI6D@Qfg4nTiaiZ)l1kNXQ!a)t@Ki61$hlRIAF7bNYS8%MuV z5{TYU*j-7h5|2p-lsiG!t;pZWbaZW)Px_-g4Fu!1g=CqKZf>s|9SG)a3tWk9=m{rG zqZ{e$%7n5JU>;D9g@=2JwoJPDL-BY^RPTL4Y*n%e!2Dh_d{~&%NVWT~*vlgPiM;W42h6NRb2M48WlM@i&~#rd zK{#|o3;E@p&FEifxA(=po+KbKW`_qn(D1| zs!O{>>p!Kn&>SaVGygU4?Dq7#MvXhshJp90E3p(`%ia2Yt~_;J;d>-U2VAR)$L}PT z*lYR1<9#je<;E95TFFYGF#*doRbf#x0vtcIUaN-$uG5-5b#`~WHAEm{%I}(3JQ3*? zkkV3=Jm&L(0N){Ze&+?t&iVWqYPDl~8=2pR^D9F9rDZA{ZdAMth-kz;9+o!$3*o!P z{37MN!fz<1LGPP`7~=CI8M=4bZ1d}?MPPU;^84XtCvCveL<*ic?@HR=2Cae#j*Ud0 zB6d8Q&$qNfTNzppva_{Bh{oL+=7HK}Q2~$*L$y%+BVRK~5r2yl+2`wCwUIzEkz=e0 z`oVWa7IlXkzUHgUhOGf=fwgn&2N2+nHu{~A4tc+d*ZQ{=18EcFqtWLnP= zjFCMKR68cTf#B69TKLq3405Xvl0U`B1<48pmW9+EzdCB3dJ~&fRREDsRCsYY{Qkg;KnQg>sX|LsKPT!r0*|TY{*Z;?Tti7O6fxX^Gwusx_%&u2|3}DJ=0Zqhq-uN$Jdf!w z=Tb@=&jGI+-H72bJ!^4{%dX~7D3ZkmSeA4cydn%e|}n|;?CTU)zv!DfYhX3vlBz6{9Qd=gQS-P=rs#5`Xz0*Nm;(^ z>M3ugu$3NMZr94=t(wNMgg>3(_SM!;BSE#Il+J_zYeMx*;edzj+>Z851FeSqMNE6B|b~zq<_>;8@O>%obt<|j+MHa zYUu0q;htT1LKGvRZI6+5E$8~3X?=81u6#E%@+O-@j3zFU*^~>sjG#2vgQCAi?onbRAN?|uunPM=MiSu$Z;3;GE* z5L=UA`u7PfW))BA)@bzCm+zT5X~nrV<$ zg}NFxmW@On|Gepb0jlYkrpAjLP884(?A{_`Dvw{=DX02>X&bK8Y(qrium_;n(9l z>Z<8VMpE;$xK^Ez^Cbyj%a&90sKa+XWia6{UrmeUl%|e1OYwpKOCb0kNc}JD(R+#V z6Jq3Y*5ZT(cyz&ke&?SyB~hm>iBPA_M9W`rwLMWj*<+|9BAqP5^S=R7vV$H{TMxtB)zWhQaJXpeM0Z#ga(ypR6W473J?8uv@0{tBvYOqc>lPq-tPeU zGjCRvrktve$Y@8{E`d!RYWi62Nvj-z%F`GIRnX79NAzHw77Fh*g*M8q#z!W<7uT-g z#>Qsb^HwL~FPE2GL6Ts9-ZgLDk!l#0kBTRuH;yx}_GI+UoCMT>PvK7= z>JAt$b8-rZ7-MOPn4#hzTAP*+fvfdY3Jq+-8j~WYz?qA9I=Jin6#V(U!8u6;RYfNnaXbisaE98npKYw!3}df#d694-wa z7w?K2>Ol8K-D}jlO^^6SlDv5Z zD?EiF@4gew<7aeOGOce9(SL2$MSPNa&7K(Q{6_kBgRu`@uqP$)2UCm9Q!IV7U-yD% 
zA#JJ~Ec|{9d1-w2Ftw5zcW{-^)2^s28vl-6Jy@5am!<+>h_ujYUY)|^Cwpt+0`MDZ zV$OvB1>?u-i<7}xYG((#>mcR1w*hyyCz!9Ws^IX2pq$C#Dk$TcpP!%Y@$&ayq1gKDw5k41_J^eYXcb z9tn{&X%o3-#lqnEX;X~kgOJxF4B{p_Y$+jp;C@;RB1*}1`@__<2wAaN`NZX%)Hm$# zhxwn+LF|$VnT!;(7|@Qsy{gndLVi{`6#RvvOJe-ShQqWcN|1sDJuO$ur zo>3USG{t!GXi5d?sX({7ux)X1WOb9xjlIK@B^LFGe{3S#KlckIs;Fz)2w~Q?B6&r{ z@(h&&54jWnb+XEH_fX51e-wxflB)V|hc?R|#t*Rx6K;uVZd|TC>F>UTm zqAE3ng%;QGhVCtqwF>EJ@u;ltFcu3Dnoh}VjtYPfXVCPSYPH}p`6CuFV|e+OfCUW@ z#1lZB$w$@3IkTBG#OH(R&5|kvr}0|TT9N)C*;8!%%h=|^7-$lx>XfUXX~~pIMKE>p zmUO+7v(eo z1r0FGK0eN_Oz?ecf46JIeZdiCBwjyOS!r!%?x_= zlNQsaueNC{T~I-Svng zWhs3#)3qHGD0;sMIWXp2v=UH;NF9lGrT+Ox2#%MBe(R^je=)O5-n#yDjyAr-l5IT!$b$ z{TN#MAqT2b_|j#)2mpoWV}nn#v~|L$t1$~1k4S!j0 z8G$4k(~!mjn!hfpS3*}D5xALkmbRktUVTx%L`T6!1M(fm+}2WqOjYfqUb>c! z34UfFpIU-U4A(v!bOEol+&yDi49%AQ3Db}yuvuw8_4?`1{R*k4$w{e}n%hm5%({?s zu~(>lljb@4h0(4=9pJxQC$!8MYD6+Hw;)-aB{SNK?E@7ug9A!Kwt3{GH)iiJ_B1;u zHCX00N^r@^pOrFWezJwfgVq?qa!=ZY;k=w_N({KS|I11x5m?}l;JS48%{#HLI@ zI@qXUs}~A?rn>#%mBPliZqeuM9;Xxv;&I{>;)|yk#OTQxs3AAl%-XB97>OuKI(JVV z&!t@V#JQMgpO#sY&?N4?_VSUHHOgBz1{aKUBDwXEBmUK~jHoFL!7f1hf$e1E=0 zeyQZ)-*D4?6xh4Pzhf$+;{OJz`7Q0{ApTXfiB?WEsi3YCLcO6gIbYiQBi`M< z&T%=SO^DSyqZL;Tu3B(~5pnBxKy6yzKE@{B=T)2WGpA)@#ilrYzjijx9_)N z91t;?n4eZ>R&~@a-wY$F_gDCKew9RLHY;sAm#*NzWGf}d8rXtsICF#s88eg^4l*dj zqH?v}_}YT;PpSt?9V-Ec&;0+4eoC~|5C1?$vJphVe(K^fLY6zs>d)lU4I#pyo*#sx z@|p5gXB+@3k>+Vk@CUB4aC<-vE*@e1*Gj*hnBBU^81{yxi%GI(sfCGDvFguwou=x6 z*M6r90ala>XHz1H?$K5d*ud@e(MGNV?tQg zgRVIp$lTb0xs8jDeJ7&tU0(U-+Q(mOAQp;AxQPkE5qs~t?sQ+$5U$A|(p`v_obHk4 z@c|`N%3a5V+t4ihVOVfC^p!s5hJQ>qIHj8424CvQgvelLm3CLD#=`3>yiSil!&QuG z?c%Zw)1V#XM*3!XmeO3^MKp56m7w$fB2M3H)W=y#RT*HfAkQRC&fbHdg-!i~D|v7= zf3B98%c+9`j%X~vfmm`#T#+rf7d*7LFA%!U^T84w3(IJ;)j<>Gg-kE(=-P$NhM>X; zZf|y~Cmx`>a}9m9*1ZF++N|>A1-ba~Ur%%$aNt8GtuinUQxSs0riwrfB9uzX?W;Sx zFbLg&&Rm6L=-BSh9D}OTmiDE_pBUlr2TWd~M*vjEgH^Fa>reE+nSHgs=0_%(8v=hS z+WJV+U4}_{%8D70^d;XQPN?0Y+LERyFyU=6D>Akqu4{eN7n=|+Xr{=4zuq_p#ecV2 
zRlNynT>S{?Q{)gzP3>75Nc;B_82c-cY~`bRa}It4nC0;e=Qs{5b$8jPF9YnvgL{um z^SYbfMv?{(8iHzW5p0S`b-b)flueEkcUPgGMxcAi6AY%Rcr%B6!iA19lcK|>BZse{ z-g)rpF1056p1l?L78NuUvXjQP9yxv{r+~+X2k!O9*=mFW)_r}33>$-5kS&G`rQv4I zOY6fJXao6HS<^!245B_@v?8^sE)8K75EIWsWH)nzDiuxc{V>@mzf?J+h{qXal zBX@te-$ZF84&?mcP8H*`PSp1zmE5KaE;+^dwiPI?-(iwcQ;iOWqcuZ*Idl4bdedsR z-emv%$aN*{`Ib=dP-<$+TYA2XF;Zn+(Y{|^%0%^_>W(05GT8eq8id*uL4UQ`)v3UJD!X@yY$QL{)jR(Q6%Oi$7SVm2iCKV?vmC?{!J@Yjjh7j8MkP<4Q>hrxR(z>eHj?*};&Y3Fe-a9;+^bUhy zy}7gTq+AOJ|2+`cVD5E=Wb$GwOGv!^QQ_!jS!MG{qff!F%)Gr2>BAaz9&$`IA$d}v zW!X=7r#j)*3h=rPmtwKS+2V1dc@)kk*sh1ZDK)h24~}EzzmN9)hs4N9@gnN79h7FK z%A?hORSYpDSyG9UF3`J7zKmJW64+I9vYLNA&7ei7YSYIf{#26MM4NW3_8QGdx{r6g zhO)V+&g{Nb+KgBzB?rfGoNzRZS#qDUGr(EoJ-%S@GYmR0=cMvjC?e5T6eX+P830V6 z4Sk=85pD|ePq+K*zYkcvY4c;LY^GOzIr4OxM)hKFsG%jlpaf4Nnf^{=()Z48A$t#f zu&D00=n#Jh7uwAe5P+-uXXR!vJl5{XNy(ZjA`bImURq{GkkGC#S*~@()e~Il+kdM( z$p2D#uKuO+bSgW3k4ST?)9pb_y=Tae)$*f-_els6KahK6Jf@F=wIBA=QwLRhV}57e zZ^CTD7LxKt`GQ1)yO)sa46;uZL(`vnV8y3X4-_z?>BRViyP#d4;YM*0V{#k)T6p;y zl-^1u9dANT2A{h8Ev2sgd71p9ps-k-4Apn*9E-BTc~H|-9w9=uw=@s6^n5So>tvpY zam0x_pyh~oW8quJhYu6oBwip++BWTo5cM)gO39 z{85iP@8m|^UHnoQgY@vot)5}b7i-j=w+zyj%eR07ZQDB2lD`1>1-{MonJ@EmQ=lRv z@!|{*I2rJvT%w}D@E{TE)-UZ`MVb1Zo}i)|s8Z)CQ=D&9izP|d7Y0rpE-CidY;=?U z0}9E&m+sZ#{F8u--gv4FpBhjqf$DhK#3-L+^8%`xO@9+FlaOgtO;;swJUmc;i~T7t z-+lnr6=YpQVPP!lC|22d)i_2*X7I<*i9DU$f3oz{@;ziy5^OK=vpJ-2*@*IDcoJpW zqc8{Wk^Cgr%n;Y~FLDTva4=%)HWktt(CiZ!)tf@6RckthYbw5EG@eAIiPHA{tLmpL z5WD=r2P*~+>l{xlpetdDrR)6e2yEBoaJ%`rM*5m`k}8c|2|oRNcD}|> z?HKk#W=C+M6bp3^KR?<;T#B^W%rC~Fn~Z72&JmtmObfiayr#cq`UCp-=WixC4)he$ zsY@rhW4&HCs`I&#CL(&wtPzj|<}RD>hHBnOV#D8!R#!nOpPGjKwBPB3r}P3wV$hA% zR*ckYuCR9xLdr<8WnHBeYq5|WoyyT^FKrY;J*{VWe{G$5rLoLpZ3uf?$~@=3iMsFU1h`@ zu)2%GbV7X?91fy2RuZiT^dqZbQ+LJUPyngxP@=$oHs1r&YT@nA1{ik-C~FDc&58Y1 z6zI0jFRznCR7ph>?j_;ix9R*W%fQ(dOxCT6_FN|DLHb!}ZcW;)vMwo%CF+ebLkUXS zks6C0mlD~qG@r5;PU)htbMS!O{*w}E_UhB7`qBrh8Sfk?r0a z*m(OhS3Aja`tU&9B%xeHi2C|^VxDR8u+8ytDJ3VF9R9B8h9v`_iEdX81*K^b`1*JF 
zeE0a{%uTD=2vZ;y-JVFl*!G|AE>Iw*2>{9k+;u-Ac3Zt}#?Ps5t{&uZW`9Sv(I*278cTbP;-LBuFhp_F`W0frzi?>ulqo3dn zlOq`<0Ykz54vu;OTV}e~+Q|zq%c%Dy0#DD#VOHob!>#D!>wrBG9^{H3Uk2Po$Zgsu z2HS+@2c|Z4XOy03CnQHmom+7oX0&i4M#5xJ>KkNl=0V4X>Wp>FX~7-w<_xh&4S;5aigGcsmovKJH+c4hvA5u$nP};O?OFK3>!#^`q(RWhz-wV$Y7sTz>DCJk%V6@@o#_xZZ2iFR zxx0NGT&`$oeD-)+u)71E;xzA8$GnRoE@Jo*O9z-pTd zOdtu2C(+4)z5^Rbq+cZ|3XFzl>6A1gMCTTADx`2$FBWbH0g=Lk`slBPw;;cQOulG)*wJ<&-HJ{>k}( zG`ENgG+T~pdkIXW*k53Hj~tjc_EQgIYRb@^e+vT*A8dqwI2l*FLyu zs5)!fLEGx5`6#sr@nsq<{)Hx*G3 zei2s_X>{4>#>)}x0<7!G9slm3b=vSNqt$t`lWXKr0jg%d&m{oSN2L zw%5?n2mWJMyFNzVASODz;)k68al;Rz7IB`|;PuJl2ml}0qkF-tqEth0$NkKv>5Nd3 ztf*r&=MQN^X2tJ5??HS2Ab<@OIyURv`58! zsv;}?ogv-3-wQ^MR<}2-4pLX~ua;o`)dr9gxC8CvN ze4)pOXV#<4w@E(y1wo$)Y4s$wQO?t42iz3YedxrqIs$oM5o%IPXH`W6&VCK-q%EO| z#m^}9RQ+3^(^V+GRSyn&F%G*gKeUBVC98g&l;o|h^3@F0zs>4ujltWu}Et>xqn{i13 zJS2}O?JsR{IHnXl!x()AV1!I)js-W*CeQTQv!=K8<<$`ym_!Yn=TTUJ*#dpViIkDD zA?XbS^Eq%s8A;cB`g`GwSRt}l+Jl@mT%15Tp9S?3#?6J8%l^E(zQO0ENSSj@eSUr% zQIEXF^!W*rnvXZHLhF|=A-4=7CSH!#%I1{{-4Z$4-w)wOWM%zMP=-t~FPgFA=fhzs z<9W?fFNO1g96Um*6t@B3tUedk?Gqz4vB9SO-ik{-sk?LQt1c7h-=$ z#8rCHHQ^9VfeSd0MyY~)hmcIU{)V3K!k}lhM+7XW{p6*GAyV@WmBbQe^4JIC3}&X? 
zJY2J~LCOL61|H5EcMjJ+2=L@A%e564p@noYQxK$JngUF{)5Yg;0s_^C03>E7;bIe2 zAUX|PuqmPd#m_!4cLx{5k{fXO$e+4Qv1R)6U(pUnom9Px^PfY2L^v|V%1V&zx zo`4^*YmTJL2fvuFS~@+x;%eP;RHG+HQ{>n6tR-`o85B-G5clvv2&9qvvVyhF$rI~% zPw~g67;+nRXY$+GbWa~CTmi}j3~n{@aCg?K2=p~o10|_Sdox0}7mN&t%0PSTd5%Hj z3g@8N3p4gc45V?*##J0gb}%K|beytA`ZJyH!ubhI{nPjOxnBWihR1gAh=IIN2HiW# z>gigR5aLu$sQgiUzYZh1u#eHhpI^~08}y#M10Pk{OQ~p0LIz0EUOWv0W0AqIwfXo& zpz$!Tq`v{}DxICCu>oWl3@*y>5=>o%tsr0S7rd`U?g#0&4E@{DmZ7eikdg&EBi-=O zXbg5PddtRDOVcJTAFTnd5{8k7C*}nv2H1&+EnT(I$TSHo@pV@qom_gMSDWG1ibV_u zn!XO?h&V;b*hGhURCrMyWf=j6XhJ+qvo5LrTN|wtU3`_0eTkElyskLDGcqAfp)B=o z*&=K?E5I6OkuEGU(G(Z^DO9x3Qip>J!SIM={wR~F%OJ845=0BYQro>8jjP6^w&k`* z!#6xh?l=c{V@3rR^&q{Of5&P+C4M)TC!gumFq2?ecq@B?^AAP$zdxC=A~~d@qWbp; z5FkpM4cIGk@Bg_Jrj1HH|Bv~&f6L#ScK=XP;TS;io9+hp>>MYB4)QH9BRX~`@tYRn zFeccZ_MA$HeNEx~E_o1#yYSZTGocad0T00 zwCzkJ8B-LkqROftxe-o0EX;6RM=I{wPPHfQaPlMODYEq)Tf-Z&WU~pP+$}7+=omtTAAuq-Q z*v)LK)M$FA!08(sb)r_JaC1TXF!JS=$)eUi?+CG`7@vk}YQ={5wohVbgi2S0ukcDu z^}-tCkf6_do7pL2_NbN)F?9{EwgI#0(GFE&<%C9cH@pAHa-^6&6YN7ud4+bst4XV@0R^~4_p*S)$J3D~pg>9YObm}Fe! zfmB04FEFB}*7Z`;rgfo2exy>PVRyg(I{qIOS)c@!v8gKK-%+wWTQz6@m*e5zQ(Vut zs6Qb>QHO=pC0LsB6O4+_5C`fyJHZz1Ihs>bzteIjDi9O!_f}3@zGr

dfVnx68Nq~1uz`=u%z36&)t<$`mdpm%pswt>5Zba0PEJaUBQ03G0 zuAtUOTj?66r4qX^mMfn%mbyd^- zPzaUF0cfQQS8%&NR_D^2ifKfnyuG$*_Sgxmo*~7+(1=&v&Ggr~Oro8Tg)G4~Uuw`; z<1?;E!htInDnXl?i!Us*<=|V3d9^HcYwd}d;Z5t+huR`6G9Oj`g7yZO zqFOW*fUs2!Q3u5JiSyP(rg!uc)#4MtCDivb{OHAZs^Q$L zJQVjd6p4lNI;RzSy0FddZC&7y28rtb-(7(GFM4`0IiLWQx9+w~Vr(8R9jQwwtt7ti zNU9a_dcx5DzTU=Fh42&ojXwZC;6`&5&Dght2$7bU=jlA8va2MI2q(NCWm#PcP2di; zaH-+0>qrcV{s{A+2~-f_#ufm6$O3EinFTQdp@|DSX3&avKCW_ud0KY-Mz5_&e8gHD-BHU{RNDsctq@Z1F zPcGaaB02+yaa7m`V?S>)$xyBDuq$3f{4 z1mpz@v_B$zK+fPszfCHFb*0!bnYhRBk*mR*+?z+73< zv=CrVB(fRk(#nH=bya@$ZdmBk^^JZq%_013?Rhd$-Zl&j|wo zfQXbb^eaQlz~nU4_JTOE(UdNfAhaMyaIkLyiUC!IUY^h5-7^EzlpQk#qo45CttoIs zzxEji>Eq0&mAw{=zO`Ln<4fRqb;FzkrG6g;enmnoNkEa1ncY&gjX(<-e zpR9}+X#;l-*vPbZMKpX+1OhuA8o^=mj?(S^Schsek6$Gvv-xum}Q+NF^J9O>JsNudoEYT4Xw=a!VhKygyZAaLD zE~6BhQ%1Mk)100{upF3C@aOGpbm&|~?D_;J%-=I2Q1!GJxHf@y`!?mQ)&^t+l`zAj zhCv`X9W(HCd7o+eWY_)&NpF;oStATHvK;I6nQU=w+da%}RqNnX2>p<#BcICkJts%3 z<9h-+Md27mbt=#0xHo=fCJg?-lnAU+b{bMo2}R%d(PKI00Nc}kO`II0I77jqC8_su z3~Af`wvdlw$rGygf`uMM-kDQ}7n$NpyO3B|oY86U>WJA(YM{{G;AHId9B{$Y{Pb@< zt;roIG<>A3kG)rQJLAe95HxPzpBY1kCMCj`+B!-rFu-Cdz*JOJP&Ue^GC8b3CV%r_ z!DG_qW;8ls2FFrlST4i)r~Cs1QqoGv{tFCNhXLtD8Py^-8tO!)jBVCygz z;l?A-Me>n?H|5y1Z}AIOmLm2bTSJzXL zu6D)pc9CTH1+CsJWX0)OHR|va&c7Rx(s;T6G{j|2E8)^Cd_n?Wa0u_)I`8kp6o=VcS%S2H> zwC>haS8+=TL5L1vXk=99a;_>OF1|p&Al*=f%kTXh$n4D~7c7t}24odoPLC70;#Jhze^o$GL3T}RR$J9KXTNNaHJNkPlcO@Mvc559P;h0{)Eq+$-sPOAz4IyaP#2r;U${!CQd#(A+l`XH zztM`I%&7dv^$0Yfzlp2K6BqbGABRqg==cyc7hetn%@cxdnpTp%t*mQ0exwC3Z$=O9 z^OaCrMAtiK)y}XZ)9MY#wnk(Q{R0GMlH;_@HGxa zBnXa)NI5V82+xR1g-nQh@~T)^&V~Nd{(-@r`VbbuB^ChcG!Y{m&;eg=*|^`9iMj^( z*sO-7z>?UumY>Wk!dzmlqNKKU^U=x)Cr?GePnkx#`OSfAN->VB!9hlu(T@5M3XwG7 zHXVzpa=hFU)=-@2JBiggpw`r@Z8P;l%L%!tFs4p$*mf*G^ z>|AAN$?!tn2A!{vs00K5-Ai53<6w~oot+zlaXWcgGWuvwK)^M#Mz<@YO`k1J z8wr;5>GAXQE`ksVeTfzwtm>ZzpgAH+pz@#p-Wl>=@L%O81avqAX13FlI13&Ll3>kD zU~(dO1Oo-}*B?mt86SMt&vuHSNK2{O_hD+~MU}lB(ruy7y+}%aJn4zN@LK0aapom_ 
z=uT%!#D@`@%_>`%^ptr=+y?q?{pDeldnl2tNI`H|q_LJ&BK)v^6g^(~tQS~I7i)(dI?5J z|8mco6_>=!+)70$qDS`)+xOAeoyi?l1*Zk>)gyT+!zD?(k$4j?IBevsu<2Q45D(_? zMF@4#L2hi4h)X!(d%RTCp0BMkT&qi~0KF5>v~~i~JUnXvFgEiN<-#=3?nD+`vM{R) zjkU@QX&_0gq6{ex>S2iuAogfJ7-ni`{IIK4JBn+QPfgWS2sSh4S;YoMUDgV=ldzge zC9LX?gbS3C`vi#@803|lE(}rHxW4!_ zJKdQ~r!pRPfvbU(NoI#oe>SV=c>6Ej{0D7JDEY4+Y@!AM9eK;vG`i`DaI~U?`Z)2O z4N};JFfdr{D(K`e&;AT!GsqrJGC&?pRln61yZC&V_#YN-d{lI{K7EpvaRnwu=Zdft z{=OuDtaPkc0w%`WR8E3Wq@~jvA*KuTX~eT{eM@7av2@q=6PVPnYnU)$-&LFSf{)|w zKV3<{}$mB`FE|8;_3AbRuE^z)W!Jg3%GPW(Ru^FM;b=$pQ177wFu}wQH z{!(%+EOMr|9<_v7Oy z3)ULwr~B!uuC69#H20_$j1!EzG$lBKT`?bZX)RRe6uA|5XO~)ZhmW#T8SAab@`m2b zYAN-Q{`x1{5*K3S%@$5J`%hrr%VP3>e`tWVYFSy)Z(-q0h9ZgfmYVHk*|c{5I@cUi zLNj&ot=#vD|DOHag+YOuEd3j3AZ^nK6Eeo0qKM(6Lb{h*6dRI;r?rp$&8AG;BFTSh zUWw_r!Ij7Tfsiqwd94RG%zE-j44j4`2_wk8xKQv4wfCgDlTO9d)bKKyCOZ<`wf~X_ z%~^gj{l?~>oeJOxSO zU)H02$P?f*mE2@+X$6c|`~zoMY;jQd)J=Buxe=-iFEvLXK$m@itW-dkmjP8-#>HUh zmSdhPRm~aFOP&GJ?%=`fn)oA3~N!Olb+Zy=qhtd#nB6k7tRRbeac<7I@^7+1iQ zlJ*ViuQWUgA;CA(QObzh9GKHY*y0&c)C8r%2Y9zHHLWbrG~9!(AuK5zlk@ zY-$Ax1vz*(-^?op_bKStsi{ zC!y`}elsaYhVQkp`-3r`)dj2AE)wEBDU#mybmBp$F1IQNd3b41B4vL7fFIEZ`+Tix zsJ(wJ(4v}8_2p%unp?W`LswuB#cAFinD)=c(mCoYUPlCach9HKeRMf$i@sw011&GA z9{2F)kd&3hW}~vPvE9o$0_c);pY4~^=;a+hep&yaZWFmq5u}3RmaL(ItiaKT#o_4r zalK|lBOl7oCGe%tFha?%xVX3)%PDeFe$boA$;mX^*%tfFliQlVT0jgm7^pi(?neb4 ztp7c4@k7A<>qTI?j@$cIv#!O*dg>!7I$9rlunTmH1BI0Y7}2CPt4SayzLs&|*A^`3 z1$yS5IXfj;N$%@-uSwp%E?Os+0qk$olC#vk*O8vxTjPGCL0Iy&j5o&Aj(iqdWsek- zzeWgI*XuO_Age142QOtD6g_A+N7b38{jR&_u6< zfVwrpNbi#t>uP%)MrmXFm#M8#pHDBr=~|Z+k!cSeEX9q9W=2%_XopBbObU87aT;+x zM+#=Eh>rAZoN*?6!#qn%MBn?r>uUj_>Wfwe^lVzMAa}od&fq4a=|H!;3}nnvJGWdg z#kYjQ&CZtY^UGFU%y<6xR6Fx2F+qTxZ%<52n#|F5`Eb+Di(0IoeA_~fD_OvQhOGJN z2CuamdaWt5FHsXIZW~&ZEhP3EHO*tM&eM3}a(`0=FE!mazhi?Fpy`3$;qY@rn6#_U zXS;cu+cUXEs;7l#qd>%-PfCQ;{+jaX0?H|J00#~~VOBbEUd@7VVZit>5+FH5cX@fF zKu8SbMNCYLEDC@}Kd1*B=v!C$5bLY8!t~!_JVD>TdWtvd`o#?XundZ01HJvppgMg_ zeA&BvKHK=7GTWEfONMpy=OIcf*@T&-jlbJJ^PGSw_#d*Y{nfv_R*{VLcoz`d&*R|D 
z2MagiXC#D`2YWWXPgoJ7D+#de6Yp^%;lqaX)?zz_TNsAdc1u(4MCzl2U>z^1`Pl>EmSYBU1sO+eI|g*Of!IpoTa+qd z^h~%?2gxl)8oTm83a^bTMbnq=AzDjXI2=w!RWk`RvximIN73RtWDJeL{B}IO6Nz}> z8MM$t4=LgED(&F33n1mXGJJngel~!&!AE@4Yg=8o{)Dh^r{vAZ8BDQpqd7U|Igk_PCa5mV&W&L%5uKRHvP}+)BR;FXuWUJrMyZ9+Pr?F2JKJN zhURTTzYpEW)ou8KB@;zlc&7H>jV%MfTMF9wd;(PAE)){&vJi?UO(}ty{R0$2#>3|A z8Hti%zhiB0`nS>`1;R75#rQr!`MKdpBobo`b4~O0DSHZ&yigs%nMZbEmB zt!dJ+CaKwmp|0%3PzM`|ypD`U->69S-L$v+aOnZ8g7aW6#R7pE;Aql z5TUhQMVv28=qdF#<qZWAf^DONODm`Ug|f0 zXlXV;FU<`;#3c)2L`xc@ErF1`&9KAJIta zy(mijgI(uKYtgA9ByQc#B1EVj$2CwOcs(d)kj9H8_QwRgT4m&gV?QQ^r&mG?_IVKF z&sp}qiWth73nK*e6!|`r>z?%7nSIT=iX60Q=+Neh>ik!%wzwj*8U{d6{;Z9lf(XWk zJv~NrkYMvZG9%!y#2Z95Q?FId)-O`;nkli+hAUH+J< zRWH$TY*hAcDWNzA``E1n-3a$3eX)|?8k^eCsI^hT#F5oNQL90R8`jIq3$z47dIQ=a za&KqPH=fq%XKcqa&W^R+;Yu{ zT6oTmU@&R(64JY?(c16*C5N8HF5XVf*f3zAyZZMGt1|d{h}_i`w{GN>Yc!*22J5dJ zXJxiKF2}w{e|_A<=39i+XFJ4?RH>+|%8hQfiw-;A=KC-lRaHM0e^gE6xHPlMe$T&3 zV?ZOj>8NLQo&Wr2;5~Tf^2=tXxl4zSDAD&8bZpIU781yf_6uZWNfk+vXd$Y`Mo4RL zn$Pf$G`D7c zwUuoyFdDk0S%3LEcp#86fhrtz>pw>o)FFCq$q^B3Xh?^BI{&SkBA))TKOU16%AIKC9e-eCKzJ3h)PH>*!W2Je^ zT~s4Nt^Ip;;mOEoEbl7*WDWo0umwG}`L)UL??Y6tKi%(G2vHs}DH3$1^TMe%t?p^v z4rbg_mKHg1%$e}s{5bX&%~cn#W6c8EZj|GmkMiMc5nnP`Zdv898`#=sek-@#~!PZ z8ju~t9yGZ~M&O`W8{{H06+6ROKQZh%mkb=UrR=jEGFgf;_|VH4>UU6|LT@_4!DzW| z@U^_6q))f6miS!8b_E~1gDv+2Sb|Hmxi-yK@dM@bEWkl8!y;}A>c50=(l1Zl$kpP3 z{cgYf_MFJd_w~cY0lMAqZI!inC}O)a6&8PN#3T@sdl|B%DeSxRDEg)6avxUm_V4Qx zgA^t;H8rg;8W5|muWw@=4fR430bQ!XY9l&n{ne)E7!vWY{a2!f!2PpAQ4oV)q^;?o zU^7esy&2)xK2iNu#P68bWsm3tOarg~_zT|(oi%ELxh$skC5IAW%5rG4+w27ygM=>% zQe#^qK^_)*;U@BwtP~W~37t^+nU~>+iX?~b8dPAdilOenO9SQ#&W?D@NkfNjN<{aF zb|M%jlKFF{B%QQ5lN>_Y8Q2Hl6}nABWXf954x=U{32Ig*h*Ptw-12jgrH9#iU4QZO z2j27GO$R01lV}mL@oJ;4e4?tc+ej=9n{ni$ zVM2(?j3xY~In8B3_vFWZ;fN+vY&x;jse9FI{Z>-3U!WOVC@S@n#q@)E7D4C60s;b| z;o*G~soYptSm%h;xkVM2clMZ2C?kemRi^X5BMbFdh4xS9sKN(;kfRE*&Tal14-b>K ziHLK$@4JmuWwazHNp&%%mYo2J`>h;4C!8JWEghUJOFqaf2NN-k&-~UX3XtX$7xjk7 
zGWuVju!F$siAy)+#yx>HiV0SdSzNS+v(ZHtROvZ&dlPfq3_>MVKZx;k*=W|T@CPk_ z*3lUFMW*c&p?=V%l&#ib1&h6Kp3pabK}hn7^N4;uMzlx(L_jqBAY`a(FnnJ`k5e9c zZWl!HVcW!wh9*QJ0RvxCRCxG}^Jbf8t0=;-Yu3=OE|GU}5_LkT{y6>DtS=7H!OGZj z@Rdbv=5(VuFAaXy5uzhyW4|2ui|$qW3Bl|03WEwT_THt&ZPJEUbd5^#1!{RU5 zG5U9iTzleMv;oQw@1Nj54G1V@WSWK9C-V&HH9LBbEp^L_Q7|aD5&|nBVVk3+XKF8C zwC&0mbkCJuG81cZ2b$xpXg}%apJn}vKto9U1mm_2=c?!YM=Xl!EvJG3u@QBW|34$Z z6sRNwT|?>w@|>>>n&A}sv=+_HzggZt9@{%&20uWI4Ko#kis;-R-Wwaiz)ujdl`8m^ zX*H^FCA&q8b_RUwzQ^B6{vgu=gz_cfOOyFvSzsXxDjUg^w zhR9@6&3!P@k;-=p9ad~6>_!T1Mv_SZ3K$qZNWXtCRd9=`=X)iAJ?**L?ZST!YUR77 zYklkg_?oaZxnOzfd?UbjYwY9;Ug7@kiWm*bGeQB!U}-PSI}-QW-$U2t9|rZ zDkiz0CGx6I_@L)4Y^Y!=oui+lV+&)n%KkwtWJcOJzp+>dMs~%Ql_xJLMqO3xbcq~p zoUgPguNY;Nx(ti%;&K3C!&ZLl7IU!Qu$Ic%BGYa8qKw$fgPLq%1MQ;W$Sxr#&wM{przeXounp0X2XZNqPBIQSq}si67SmDpBinZmKSGSVun~L*L;N#$y?USXu-6Lk&oBDQya(oB z{Y3l%NJDx10rc+eLbhP98dcc-HpvZ;Y8rNhx}KBiYf8d`yc)8fm4tA@aJI}?^4fYt z@50Ch^FYK(mUxXmrEU^~v>c+xr@&&!dLYw&Q8FGfhqgQ2PNPay>Z@y(c2GJRQPahIOpUT!*4wqEaf8Y6E(v~5 z+_H2+J4*MEULlaU-y%5Z;^-nqwVrtW;~OryXb|-D;h3hparl%urR}IkV01g2sEFxt z)0{fr2wq)O8F>~vQ(T}i4v#XRgRLY=@1Vu0n+Jy2FDc?LMU{Z`7Zl4xlZL6~%TO^f zm2IQT-5LNviBvh`+J+YNQm(T93EO|7m?KS{gA`4d(eyWbOw5P7b`is)G1Li7E1_#w zUT^i|#V-Pj2Rv^lVT%i)N;N?+kiWMzSoKyUs7J6AGeZ`*p2x8QG#Y`lZeU~X1NlF zMvG@wH>m*|;Szm4b#e-l`TUU(SoE9oW!CL+fYj-1Ka^;r_Y?ahe!@on9quwVo-dWe z4xu%!&0{R&j+cjEjcymB#>j_HXPxYEhiUWS-JXb%tVL?8Tvk1CZ!<>VnzP~InCiK~ z&I<_OzJyNcuy_mtBYsm|Vzn}x3_H$Syi?x^5*}2gS`KbTkyv_|FG+g$enltBR|Xn| zo;bmB?Mii-Ez+I>`FIZRj_1=%d;#4RtEe8l_wSP{+hB4IC9VH=@)>9#e(}#oD!wdM z25r&NtoIL~W}J)pam^XF2zih)DYUJ}xLFCaSfoEGM${f1-p?-xe3&7b%ImZ&P^i#J zPTIWEXT}6M+T9sXnKz*@6&(Hc2bS_DGj!pGS^oU=K=y@X4<#vpNiZogGI1^MdGpr9 zP99G?>{C-Y2NpO~N4y}JX3f@oOV@P7G0;ZkX?p1`_V2@>Q$TbhE5!NgOO*XQ0$%X)UwB$!v_~xXJ7WLz2C~Ebi`bV z^EX{bFcq4jD6xB{{RJH|a%pHEm(HM4W2JBp!g8=qs5PygCcA*!;az(QDK`=j$IY|Z6pjkx_$FG+{!W;_Bi|vnURc;Zb&1j*=Nn8}Ow< z70{~JpbNM;St*|(wVcY5R#HOMZ7MVF_GWbvZrddGD@S{4*mPhm1Z)$7R@nbnDBr=b z!u>6k>q>@E0NLDx$MZH+B&)uphGR@oY>H^yQ`-_JfgqPv`@LPxw~yT89G&fihRsZa 
zhaszhAz=2b;~NHH!)3fhmh*(WhiMerkOHR!Of(Bq8c|sV>BQ_GJ$q*KSj^UF&1dg_ znh5JCC~luByl+o6i%~TyRA6;RIvdht4A5HJa9PI$F%-WUFT|a}d zr--`S5YbI;B3E(B2-B_pVy2M}&LE%|ELIq-CKhH3j4V7D>;Oj1?O(^T7Y>xLSxwAO z91C1z?A|Z$bTSA#5XauGv2Blgu_w?ozn}IjPpIpM4pf{^NUf?1ze&>K?3#7l*$cC5 z&rx?C-YQy^Zq7Xafxf5o*kHz!*@x+~z4G8WK2H!vlJKE>dwa9GzHi%w1`YMCR+II< zdO}j-r-4wcGGS7AX|jLAR}=Ea=d?a82>qXMo@q$F$07Z|zo|pg+nacst>}r8IBTOl#9g>@Dgf$5*0c%&b6J9yM?yC{IVL6ww2z5`x#9vnzEbW4XyQNvMV+Rf0DX;I5ZAE4LcvN+|LhYkp4d#uLdR316(Je0+ z5>_>PCg27Ws}UUW)}0lr{~05jIcC$sL}3>sMS#ob=;VDOV2z2tl|cEvgYR=_X`AAA zrHClyX~NpS{KVNCs!USwaN7vh0klqhiZCTeO*``F&@xPa;I4eot8{kFI3IN#zO zDHu;KW=CE=tFZ2K9;3Iwjq_KZi~W%svxw*mGa{|`l*O)5Jic=QRft719bX(eWj~m4PGuuW+A&?sr#Wzq~(u&YwOl<>BJ(^4zrE-7Q{?K z9Vh`nC$Q_o7+vCE-G*Q0I3aIQaHl%aVJNJu`}0k5OY>Ya6jfc`CTS~o!BtL;6W(?W zjQL=Wx>u%vTNnYo_p`KqizcmYQK`~fl44>p@U{A~_)_s~Jr;MFHpPJ8AfhgTg^H0k zqV?Xy%_VH8t$w#gV@x$lMLuYEUvvMrT#IbT43FoA7bm`AosXt3y#(K>Hf@=N#abMu zj~8Wl=ohshCESaAQE4gYy&DyUf~V;Y;OG}%@H9ra#6Z%Vgpsu9sz58I<>gdv^=^P$ zj(*FXT?c(iODOv=3)CHgb=UmYlQer`b#TJsB6aW~cj186yTrG=gweQaNi3KX-!QdC zD>oL@vWCNF4J^T+0%OM34$Y!}@==*eq_aGH;Pkep)JSrSjK3r=0^->yUOd`$b_(9j>=H`L`Q zmW7X>?LUaj<9h*CML)Xb9^%{yZt8zkSVCVLt3|w>l5kjH18jSmUi3zXra}Yd)rs31 zTYpS>aZ9cfj{5MG}E(NLbknOQ9{1^sLoRo@T80{`+!qRTaEXaI3RS;LJG_TkMGSou6@N*~$IKL@ zIF9;)qXaNo{>6p0nkLQnV6s#pd)c2F62fj9AI{hIj+XGaO;6&@BJ4slO{RtAhbFm9 zZo|_G?Lxt`qT`?QJQ#RQX+g zYdKkn9kn`!`!(=<%P_8xO;Gs^Xo)A^SX~D1_1S7)dVB>of;66AWpPD5B8T{++OOT zOCE#xnC9rY*bg=d1rN59h3~47eTJ$UM$sw7bUm}}O~ynLTP(Od0k5Mpkt~cdSI@v- zdSgDdD;QhPexJJ3u~|l#KAK>7#;GHn-Se{#Rr8hNc={~6Eael*IY`CF#QHaTo5so0`Nr8c& z-^`~sELt26G~3or2%ioP-bf7czwdYL6=BJVc7hsJb|(G@o*5_uHa0i^8l(J-6lP&j zN{zuF7ry%!-2DXwzl(J+l>ZA9AR#sw_)But&dzF$jE_oNHKO>#JpcUD*hrQa@Hm4_ z{dqzznh<~_kFwe7PGWOv3=QTu#`kO&^24dmxy^UDq~ol8GgVljIC=S{;@3k2`{^}8 zmh30d#8Inx#3m9VVge|y!l|L}re+Droq;D29XNOr{vEE?SkYT_ChsAvE!8WkZum2h zqxS5%d)paAdvWPt-$)m3nOrRrompvYc*g;-X-N#`N_3Wl{gYK-RJRr)CSQj8C3hh7 zG`i@@60_t*a3rHfC=Qe41G0TzowidJgJ5D_++Nj+>6x@9je<(?=1B+XovT$y2J0gx 
z({^l!=K!N!jKTS+Nh@8b*nl}ZLEx$jnO+z>%(&l=1W(Y_z67c_j)^kUa~I*Q%^Ho#hgeP<>vmBx7Ha0=mYDXof4r z)#-gxCm!dLYx}A&iDegOApDyh{zYPkUnX0(Y36P&QJQw-URhJL9S4Gs75QZnhxxnh z6V_GMfBF#UORU1MT|2R|EaCJK>b?$}FALTVlyxLiX)edQEBy=co@XXo$gBUw=Kqs> zA^<&xUbTR{4wj0K=S1jP6H<)@CjF*u>nF89OyKBe8>EG_G;7>~%pMX)ojeI|CkWSbEX+d|TojJO4s zk)j&{QQqX4?ID=)cqkui%vX>#)}wFLuDaI>%PpwX+vB@h=ZuEFo0)VWliETnj1OE2 zl9Mk{r}?1|qz!3f#*=ZjO6_=&Ao7w`CN@fL`OL_OP3o%9T1*ZHHxj4y)a$zJ`^R>X za`TckG=&SBT)i$dcnb|FsVk4GK+%_A$Qe;fA;Ox|UfH-4Z8wgq_L~U>fX2e4g0TM4 zIa=mm_%N}TM;@JS*2=pJBFE(L+wn}Qfx@eOHjbruG8vM8N6{%lEC~XU3cFq*=BD`% z?f;Prfnf}3+`HpEVl<$fPQear4GC+#GedMhfv~s`qFYG#aKT>9o5-`+F(Xstf7SMn zF=sa*aJ3Mo3^Xr|b4+MHEMcPE`Q6y5ZO8AGbRGOpBq3wKn9R&59-BW5K!>EVG%gF| zp(qm9ZJ{F19ITdL%)dQMZhUD--hcHlGypxCUNyBiaMU<~*_Tg3huf zZt`X(tmUAAFz)w&?=|Wj{7~cz2@^f8q^l*%MT1G2MK2xlau)}#CJg0%h@cv{D({s<9|`+rC^D4yS@s&U*^F5Hn*RFv%Yeg@4My5>T$ zBC8!`+b|St;QmF3Od_0Z`u|Do|NE*;#`D9M1I_n|cq1dmPckU`CiR@ZC0`l zY;P~%iJpEsk$#|p{MVVYiTsnV1mpAIhagWIzDB#)1OpGVH6kbEvw=>sJ_z-*qJAx0 z;(d_1HProJ8=s-Czu4Fku(D{($VHPJb+;_Rg4btaJtybn;3{2)Nowo=q)I#bTo$J; zCH9V(?ZlS-AREZ83v*z|`3L>xOMumMlNwB!LgX0Oc0NLy+^B_{k03y1?G1t2J6!rA zprIUk((uIrX%H8cP|~Sr*zeUWw>nqPJ4Fh)ts|UlWNdki(1X1A>|HpX7@$a-8ws|i zXF(;8sf(x~Vo>ySl7L%#M5@2mH$0_TkaKO2a=B%l)+1}WfJ3isOhBBMAo*s&fc@gWm#=~JN`W%+`E=uVlI4esCrCV)^ zHUyjlP4jmfhPGX8(@<@%9}U$TU8Yi%P>i90;1X;lB~y06wD798tLjON1#0DITJB}x z5>GE@%q^Ve94GkQ2y@z14A9AbY9B7khdXSdk%58vUxdWeTfQ+;k;tQY-E>*lYH^|T zy&FuK6AvEr;VLvh1;psF%OE+!Hnou`@3VFz)H*oN#I0l+(L_{k4=HMy1e|?E-W-Xh z%A^Y=+n~MB4j-w^#r_?n+Kab%NSe)V$nKmPh?D`490Emk<+rOMX~Fb@0H6Y{NCtxN z>cXv#fZPDyFt;rj6W2VdUH)uo0-IdlPU zC$h+y1e&`&Z~!*hg|fOjE~q)!Uw2ont>Cw!^5eDF|(8G3Hgn)l}bqx)imi)A6!mxdj3E}DtC^^&g;c-PUk`wpgX23v%-q;+J) z9^k%4fgQKuqA4F%b~V*R>9qXg?1VTUF`9{rq)Typ@H1y~5ua3GB+&Izbjwz(Vg|d* zAu>kD?hEv=deHcz1UbiN6nxulQv1&_ngUFsIEqk_Ly<9yztp;Tr1gL!@GyOaK^TPH zHRr7L!qyHhhJ>sda#ngKl<)`xU`-qq(3|9Zvhjf@CPX%p=UQZOKA}2(C3Lzn%myPk zX+HH}nVMG$WWQ~yOGk-fO!tn4z{J|hPT8Awr357uiwV37xxV4)OEY6_KTN)lDGbkh 
zCb@LAb6qb6|F+#XnDZ_6z$BKB{xIiD&@~SJZ{5!>1JEQ~`ruOqZ3Fm2eH-hvaOjjk zdK)(suXhOEck3aF9-A8*{_dAMqo=t$jI0W#7K7nk0NJ1u*t0mVwL@wg3ibkUT1HOJoag&dtMu4s%1SZ zfgCn}Dg?P6z&>}df=)+nzhZn|F0+9^pk(K5gz*#1^W$yzp5wCw^iOEH!EDj4eoT@D zX(pInPfO1`wvkRfKdAvc{=8rrs%1o|?6Suto^)Ldk2>8obS6OfYb}{YAH0MXT|nw~^X--1!CKHZo3swDH;_VxtSIKVxa zMxMDv!ShEWa^SEsZsE`Y_wLWZIWpH!`KH>7?D7{^#XJv1578oLWa$=~)2J!Z&UYH) zTxX{Pw}JDa{gTIYxwba)8|9pHIW*mbD1(;y&NUQ8u4vsf@-N~NZGx*(YBFx&OnLlN zMDB+!p`&iDX9)7WS!RfByZ9}Q7#b?g%RfJzR1bka0bW(rOe|b-X}+E^3S7$PXo=;Sfm%;i406~RS9IG ztVOGpG3(o5?2p$yE496QRE5n2ig$FZDjaMB+#-X?lCfq4Q+%@aJ1#W(Nf{IB-#fa^ z5T7ibWVS*>Mr4P%IdpLa`2rtGM2WL`c49YQ1O*6W9U?gE(@j=xhRtd_oi7%U&LvY> zo+<6~CI5ZsCS`I;O61}a$`g?Umi|5(_ERFaf{z?+B2hgm#C||obhDK+!g`YzgXr6{j_;p|^;sC*>nhjn8J_ z3mp%a-(N1;RQJ!_POHzcnsT(o6TcPj8Ie!z{Rs#mLVNz3Hk2@FkQRj<@kUHi8_|2h z_Cb@Ug={f@%Be*FD^j#dAVhBDdN_hzxjhg<$*9I!zFxkCL8KQEA6inu(65v29eJvf zu+f_RLng3d@-;T6#jPifKzmx0N!+?}l)a*hok3mRQ%o#daU`fm#~$5bRW! zw3dCTx?By5)=WVDI-N!Cyk)dSV?wbYwan5~7pU`{SlHxY6+Byl9UBw7dqZ&5C#f%i%lmG=J zxRfQeHYhyt-JpbBEs1BEKP~63f8R7}R12UO;M-0waBOKdBW!~UeJ2yG#wLdE9<32L zlEyNiBEK3{)#ynvKDMlTb2MvwtKCXlQEz719{a? z^j@WzcQr$e8y3Q zav3Zg<2(I?>cv7^rmpYY8=(@Fv{mtJq^YB|(NZF~!?q9}rXmt`xCzr@(!H7!*Uun3 zFsqP6TNXKWU^{?BQ4~IABJRZmA3W#5*rMx|{!4Lv?Qtp;L6-1g{NOd1fgd{}p5t~# z!mZ71-O`Y}bL3|lv*9GEXh7@2UUrq`oV!4}85{U2pZF@5OeCx-wpD&WTr5qgU4yJ)%;k>a4fqb9qMl}P2$YF9% zCB~iBmG8GJAz?W5dMn$BI!3*#ou0FnR8&+Z0+l<6u&_;JWMnI(AwO-6-~n3oH$-Iv`Z&0FOUdqu#DiX{h@irkr=!Y%w34t1l#$STp%S zAQ)q}+E*6L>9%#E7jGYjWpG|~v2l@6=&KjD*fSCz4}J5wGVidgh9gOUP(i*B4vd)N zY$2V)UMyl_uP}rVcYRB-SBM;PIL%uX;HfGV7E1tfcrymgC)L326nO-OOZ#!#iq5K% zssZ)$RsIxD%qb^#)L-Q51h(3==7U+4AvI&OXezhhM6>#LbJFRMr5S@G1u-zA`U9gn zYRjR-nfk8O2wZFv!z-G%4126JlGO;d9oQIkFK5y+Dw5Ug*aeqDEW!o68o=9?XI;au zY}kG+oU3k@Tmpoig40HhRy}Vu@v#c-(oCQmRChR{I{(A*Xk;*xain5)a*JM^6VLGJ z%hu$Jw}V6|2BYV-ld)P7Kb1lOh2u6Mji(1fSZni@eS1Rh0jta!a1o)DOIMjc@+aL! 
z81*X4X~eEC#Hz={?GfV0 zo*%9q-PVS_xowSLJ)MJnG@-^@?VFvqtM&Vr;;;@6420)5OLlyUqf{QIgb}MHO+SBe zVf_Tijtkqh&7u7r{h{?M+ErrRF;YUu5#>89bCk#Pq8+!th@D87c+x|zeIqJd#h#vJ zD&`5nNMI5F)mYJzG8Wly%H#$EZ)mK{^7oV5p-FK6eFdqjNL!uN} z`BZvrQW{24Y`lQ>hH>=|*QI!J{u4MCRH_|HbWX@ng_63}RDUB+QABJTw}cxm=@`}; z?s?A$BdZPob&3p$#lwrA`-_3H#${xlL(3>3H5^WyVg`4{@8(8~A=_Vm$r(D9H{#8O z|FT{-h+mhQK0w#WG_tqLhN8(LstnupmNdJIp+E5s`)wSlVzLo~K94CtMo_oupmjH^ zoY~?ceeJ18tx#!icn%YI0X392?e&MVE`YYUeXlR4uZyHSuhE$zo-UcWC)M89%vZ-t zb2~Rt&k$>RekNvSyrF1K{4XO-H)qy+t(DaxO@j{R(UG}bE|F6e*FT5=2<0R-$Go-~ zyM`mY0^cknJM;4T7{AvCXZzVYia300YctA-_;OU6wkga5SPK4XV;6}wSlEh$ZuvvU z()jWQ^v>s`;_~-TN3)ZF%H7GnIqk*L3k942S)ISZ?LZjL>%{@79$MYc<8Z#GNZl)}1Q224RH<}kQV zDWcN)G$&rqa5UWlb8U8S&YUXyQgMdum9WnPWTN}`s7$%M{qAabagE%P2*}pjmyywp zUNkMA%%-eRV=gpZhXQqJ*%fHRM2t1bv$*Iyk-W@0wrBC&F1NhQ6AH{ehgj`xkXd?Z z-s&e>wThM+Th3f$nICNoa&x?<2V`UeD}w35f+U!{j)292&jJ*mJDN}@W*mol0s z{ZBHtav20q$%wsJz?~DbO$oBY&;b2&-34_cmckmPSdHGix*E+9{&~&L^x7x9VI?i( zahFU2?n{x>`>}ZQz=Wy;iZZmD12|QmlK(tfKZ4)9s#r{ClL-YwWtU}vHoCegTq51iH%o`t=ephAB0H0r<%=k2XoJvdRh6ax7;+n( z%57N|$Sw8?e!auE+W$SgSoFc8x)mwfuInCtAW}RrvT$rD@_cJFK9M-2C9Y+$+ed6N zTdaD-FDHj`*=;jwtbSlv7I%YyT13RqwjD|_Z0+ondD^(S@q65!RGu_yj_IOY+GpB- z`!NhTJ@}B$yWZmmDzvSu_!J?^!sdw_Cj08NfE_0tO7S_$`hRp z+SyLMBhYO-2wl`sQ&r80y9JBAv@EhvQHe8_yG22l0u(a4=jU|?1_%A-#Eh%)DB*u%e6U;dS@x>uW7rGzw7zLxP=6;$uSa4I(=BNQWgyOG)7F9I1?t9It?9*F{GB zfT;ULL4x;`+8jmL*DOoy)2Hb5d3R%zNMau3r;gI~io*ivSJi8W-LLV$TfNo#yI1J) z%kj#4^pk|(x3S!(`ZDP^#R_)H5^o#EJBt$o;c;$a^7|dF=@$4;*9EqL*%ew{EU#A zhXI>(jN%qQj04KAtW-1UH!~K13PV1FZOgM*@<9|eei>CO>^nzurCthkk9j+PpSFx z<7v0?Zf>q9$8&!HqoVNwHFe6_bN1^!MI!=^zt7{+nx@3B8L1Twy4%->x$Zxx@_3i!>ab4&D8$L^Ktg>NGPG)akFT{MD=xb(rvX+k-(WP<=tYN z))=>?O|K_w7!_S2YrmI)WrlwKcRJs{pDshdama1_060ocmj(+{@%q9wHQW8P z&8|?`03AyxE>gSKI&6xsb1N)sXIq5(n} z8ipudYY)pPGs2BCP^#6*+&C?uG%d>E@$Wav`@IL|-er~-Pr@|6n9)94ecp&* zFYA6Q@5Pz*{bqrVUMJH&^NiGPwSy>vfL;66SlKtYVL{5;7viewnBq*FJkIGwXiJM$MYn9Nt8BLq+Bdp^$qwKCjd=(W%2uchx zWqAQeEbHct#Zc{T 
z^Gm-Jx=Eh-xKe4PVP7@7xev>8ih!js&XXy7?TzHXd^eeDfI&~vOtHIoVmYk zJU_OJy5A zV6<$t(}T)DTgXw1Uq{ywk2*k}FpiW((ULU@cP4`4K z$(s(WqVwO(@;vEHFJl^^Zt&9*D&of3ka@qr^1SqH_24 zo+4F!X+kx;e-`3td(7F9avoSY*4Q25-a-;=tU8-ABzmcwgHfG<8siCEpD_mMHc4B`%H z9C<7jX!#PbyT%oH-+OD*tdA3{X){PNiSx>PnjoC*lx-eKOw z5Bp;DD8sS;NAt0m-RG2gRB9P>u5kqT_i_r$5epHc1DtKsSo*``m{h z8j|j6eu^lF*Ul}@uocnJ{f=(>|6%K`gW`IV@8JlZK+xds8YIBr?gZE179iN*4hhcS z?(Q1gZLkR*NN{(8yUTmo{qF9k>izvi)f9H0+fU2s)2IDDgpL$tddSj;xxI}QYv@po z5R69R`olJLh&(zCi21Py0~j)xAF-CRsfr%(gd#(7qfp36uuQg&9+Rl^*8Fm)}U+!-Or>Y zIj-@lnq`&iax&^4E#r^GFKM_nDn6?>QpB2RH#w*U1HHu+emJel5E=f&a6^iP)5fTn zkW~z3%_hQjR$6Gpm9!J9CA7B!gGoe;GbrZ#?CeYx_E8$kgio;MEqX+ML@&btW@Tbz z!Uo{E5(+ky{Si7r-F7H$DG>`6gDJB7jir;1u4RGj>7U@t_( z_!|o05UYXIefV}@DdwXcIy@D3lW4lBKZd*pyQL+*eDUwJ{-0*YZEP6woRJvvJ}RhP zv38QIV?A1a3+fff@r0x+XpE(Zm zdXvt+UvVJZjolchJ6;stoA3xV2|aC9Q;C6=A8*Edr}S{o3D+)6@kc2@;Vx;s1|uD^ z)^P>#z{srq_=UVWF6;|ULz9gv5}LUlE!B;*243hgiGWA3`hnztOJ*yP-^DiKLy7=KcIdWpu6GX2v+myufYmu=blm?XgLcmTV zNe2owy_@T0+8-O#9Jo{(YtWGDauA~(#bqRoOsb5+;yC8g(A?CVQX?+`;n-y)`;T^H zoEQ#M)d91qW(kusF)_4Mfmx@sWSgpb+JORmk;Z192p2wVmbBaY7(=WKV3CoOd;Q-fez##0j)kYMIcFx)x^jYdQ_^d@ANVFZ^wh+iqXYL4sB+^2MzzZxP#XsW27BE(uzlQcCmv*$3I8dcxhs-gm=de~~`25rYd zd*`wq&uP=PBT$e5Cd8>+I&J!gEmkZduPg}8p^44miK7$}8qHHNoUubV)asjAtTp^l zb2X`Cm1(${4D*?D??EwLwfcuF>KHA8P-sGJ@34cds-AHuYLw{s{myYzh1sM$ zz4Ky9XOAD}nrBFSywzU>r|-!3$uq6YxrD^9Cewypc`TMAaa|2{_9htRQ!I5jpfySN zN~FE&Iv81rX2We1k4it;iDS(ny~uvOfA#V1h#O7&>Sr%cH9KNE2UEM_HO~=a+h{nM z?fEfl@aFvro4d7@2*tOJM2sn~;B;UV!mLAOvTJWZ>1eh)etlISOACMgw7n3WXtYVe z?P=AO6o=+*Y-D;;gGPOtobZXD_}a{bRR5_LH@v~ENOY6w@m=(lb!^R)Ul=~nIVu)h zYmZ%L53W5zM1UF*`J1&~>)9};nq-0;`A1vqvkDxYB$}6tHQ*=s=&>E9glNtDN@K-I=lo7taBDXtRNKwsvMTuf5op9X1fO?xw~0@8;JSYLM=Tw#dz;;^Mlb7z^DJRBQ`A(eHORV;-h@HTDk2yrF?J%Qv=wY1#zs zY?~@V-}m7{PK{*ifQQyXc(f`gr`gW;zK8Ue$WKPc8CfTS`Q*Fbe{N(yVCXBpUCQI( z(m{5!HF@+|+3Uw78f{T1F3sHyf_i>Hww3df^|rVT4tO1kh%jK`|BaQ)o~FY(E*~8> zRD9GSdpdnAP(!tmy6$600}8>fu9fVqS$;0&NuF~WqB6>HqO;LC=T56>mWy`FXu@-= 
z+?G=6K;T5d4V7?EWr{(RQc5iqON}wQx_Xt4(9&?!l=&(a8_M2J!VcTR+)<0D9CH8h zSw0PHoI0Z)7^)|)wNmZ_(FqcgXAbqx-|diR)oJWTe^4-o>e>& z+t7cIp_1OcECn?|AP!zr&4sD{GVh;@Y~eF9LxoR{r>0Vz`udl3k{F5^3kGCl^ zxx9+M4BNUCH+|L>1$j1&!F{o!8qacc$s) zk!kDP-RWTSG%Cv7H-PIsd(@Q};=Q?CgCdfa?@?kco?Ed7-|#w6eea#DMJO@X))>@H z43%+v*^U;I+G5H6?kM3EY5< zQ~}40+c}%aGgG{fd0rMdPyDjml74ppbhH8X0B+-Fil&dXO${4mlD56Wr=A?1PtK~% zCrcoEo2gprHM{}gK#nXN)x3;*Fq`R#pFburi(P8kB9sGz_ISxHZQQCCoPK|jC5QP; z;coFL#KvElp53rz9Tlz(`5cpsdh+;hW@=TAvtMV(NWAff!)kv&eATek^qg~BiKjP5 z{o68c(s}nf%t$&n&$g0X6X6im!Bj*Moweo64psZplh>c0C!-jX(pJ#PUpk0Ouw8*s zZbJx+0*Wz;%d5iIN-^6U&OezZe93jLfA zR~|7igya^j;%XYEvf4vQFA8ZgE=^AMrIHPoP1&bna4Fn24t7)S4fR{iAsL@2+>Eg@ z&%{@PyNnHm-OgGmLHuJ9ty#X6|)K`4iVD) z2D_^Z?uGizo~bwUy$#79)9@0A%+!piY`prkL2b8 zMv3KQ14b!IE#Zsf!4pWjwg2Z&nS`M1*q7bu*DENtG(zDk5YbBZ?IRY1J!cLh^OBi0 zh&s(>lS-V(L|lQ+enq7^q0SA=A)&$@%Y>6Nl>5O0edNhp37Hq|k(at)y zwt?8Zz!jb)vxIk+;MnC5nf!yYGSWeX$zueJBFL-Ymz|QCJP*zsT%+_Lj-O;SCl}%SDQ$>V6aZ)DyohN9MUV`6nAOGhx{d{l}}> ziiimX4StS(1qFBj;tPb~tCMN9PU|dPBEAGd+L0decK} zn4ka@8c_P0rV;-l0gdB!R4OuQoy)Jc%v~XE%vJSdldbsH*iw|HR3`t}?Yx3LIAxdw z(6*Hbd)uVvxbSXx`E2U#$jF1^CBOZp8_TwLSkp&X8;61B{i66}xCqth5#y57aDLLw zz3#{uCv-fZ)^QdY?WR?*xk`;5R2YgVDJ7Js(GqV4(nt<*RU8)U?0Tf5-|@GOv0qrK zMiAOK1h~a;%HB z$ji^hv)_IL-mY3gIzOukC7MJ7XvdT~Ka**DPn%Df*YtJ_n3o)Zp!Dx#ZJLzOSqKUxeBX~nQCBGwJaZZHzOeeo zFlj2{OPW%x!7e(3?hQg;;S|jQ&aoKhGJ@|}Xs>_-)+~2Y7@7{}`tf@oPGLXfi{nH{ zXMOTD*Pu;R8{*K;OiQIZ>B{Bo>})AtWMviS?cvV)q$#_tU42u^O4VkkQFtr)zdd4G z)jmQV`DR2hlVJ)q!l$H?1&(fcu$J5${ycsZFrp-#-3Nn}@eI_c%b#vH40uZ|>tiAbX3!S}bpTx{QSRHmFEaRbwG_hzQ??`#c6NkNRXsnvm+Pw(Q9J zNHr4RgDd0&ipYT4F|2k!>B>124ycj#AW^Snt-0s|=}Q^KcFAYW!T za2G0S8PFm>L~`$8Q%(>MB+09W3$CriN}S~If?RuRWsyehR^9j5tB#AExd&}>T0iRB zpmda;ym?BN!%FQ`yIp*90LhTSdB? 
z(}2o}r!hEV>K`i4JyTaKy75=~TzNkATe^6wy=}&&&0p1g&D*7V&rPN2tE@JyW|CLT zF_mB5SC00MLhv^(ZD77zn=45{l=;V3r=AcL^dR(q?E{BvBr!n?>6{R4IvGpbtnS$;0Te1h71R>$ zE2KOy2lS+GTU;l-FP~n#c=7Z!9?ClOpN(7ldWeSV5>t;zGGuTQ--M<8a=1lApeVV; z#dHve@sXMHVqxpuQDvph!lb&U>tsFdezw*QQV2Fq6`AxACpkP4GPS_{8S1EsgFCbP zRe^nE?0lgQhwGB(cABF?oR}<`3MCxU;^E>A9V4TZg9G758v^A#2fqvT9~3mrLP|G(oiqCt+AdKlwU~p zxAJrnZtT_icth`ODM3B>293;ffvJ~t8NVVpeW#X4&`_JBVZS@|{&iEH9q(re&Wn@| z(SnRBLBdvutn=Is(iIz(Ef)7CnUycHEuSE&8BXx=)UJFTB(gW6otG>(^l!=77Y+Ap zh^mA1YTjd+{uIOJ&p6Rz;3p)qQY2Oq`fue&=k#7 zN`d%AcKl3NZFM=AXn$BIeF_2gt34KHb{==X(W)~1-thzw1`vRu@Ka^WbZ!!l9dhf1 zCgtUhtyC-U0Cm#N8IR-E%A}^je8!&u+XPv#Q>)A1%FG4|jcEu)le^eD$TaP+^ZyI== z^fdt;pt3)kduu?GqU)W|eKx597z*EX(oZNL1N1ANj8E43Zf^$}fAj(*nM3!jgZ!o@ ztudUl@Q1LdDDI@pb>JT>xwGAhckkkvtu~Q*IBHst6a)0cqvPWP2yL5Q;^5%$&MrS* zayuQ&O`48N-3j~)(l`h7g$5nB`^J199rtHzrIsIiNFToe8LUU_J#e;Ng9(fm&aB>- zrcUYimyMkgU%nJ}@PSu^O-)TfDsUY{?50Scfrcz7CkuO@cDt{}Y^dt%B*U?{i!7jh zaa?BbAiHiOR5oOP`#b9~03d?yZ7l$Erl9q3j{QP)p6&E!g}Dw_V3=mLymqwobRm;e z_vv$;h4NTsyp#qNwOj> z0P&Q=iFP-m9xWIu`r1oUG||9S%b3H?>CuKI;2ilAL{v(bKkG(U?czj^X5{J z1MwzXbdpW%!p7#&XR^^rU6XoAJ0mc>2~T&bAcF$r$0Qw#{O_v?+)!cvf616LUJBmr zbJQaj&{uZcepQO0p#ZKgr&yOpFPs=y=|-N~i436ccviU{>1Tw?Ix|eulX{=WJOW#% z#fq_!C5!KeM@A-PO4M@JCO={Ly-w9aGP7RxJQ9#kVV8I}L|>Hd2k?%QL{P_{@hj`) z4!t5`0gsP>Qt0LYE}EZ7PY~)g(9qc>XPT?C%tNM*^|ZIQ&uke%mT^vVLgt2wz=J}C zWyxL*)s4@O)1nTBA;Qc9E#j^#KPO^dAdr9?^Tm5l_~M@>jDo4{Of;0oF5X*-3^7wduF zaEL#ab>ada2kb%I?Gai=Gp{MO|K-k~ZH7{8CC#n_3Mez5zcW32zY)13=zaUKMqkzy z?)x#NA)~P$IjBvW6r`iAp@F~v8J6VmykcV(X_}9ps4|J!W@c!akK{X z`BCz}C(4Peu*vvcqDl)6b|zb$QKA3plBa43a@tW+YyhJZHN-|wAzK%58~1$l<8!yP zHwxH5N;qklIZ`eLkWX={Fgn_F@GI#T@R$soC2e2l8!BidR8&-s0nF0`P{-nBWR{(& zoWqW+aO5b_^DUUb|HbgTE@n`>60y3vnpaydO0@|o(y$!P=l(>heJE8J814A>-f-cM zQNADs5tV_ohO(b>x&!gH=ja%+>?dTj#-+HYha=x>fQ#g$%gQ(5LI3J&H+1Th4j(W$vw!LU8l~ycny{H+$t$EPp4y%Wj9ALGznB*cW8d>t_>r{*BaE9;e z*6&07`(_^Jus_NvO5S0ZXNQs|sRqPtdY3UteX*MlOv8bWL!+$9jV0#Va&E=1w96Y? 
z4s*2($dr2mhF|xUX37VOS=HH+M7+cP>u3w(!u!Jl{1?J61|3C&(b3`HUWx$Ze?woR zZQF(+mR5Pj*90Qd`EUG$E6DzB7{$T`Dt#IaJcPWSm)32Mw|lo-uCb@VZOTq_CBo2?S5F~<-Z`dKV{-6ON)Cqb zwgj-N==u2%s?OI(apS6|Z|gKzkE_GwMc2>&lpkH3YcubZ%E2UZ+U~W_29~k-g~AUZBJg> z-giBgF`4pc!<^#>hK#pBenMvz?2jYiW_(dA z9cdY?!>8xJ6JkL;zZPgV&q_y4vP~b@!Hui5?~+LILSMgwc=}T9gd6iS5jnx1+O=|i z(}+7F`IoUf#Sz|MSc^Yg!L=Gk-zC|{k&2v|)q9mCLk(^Gjvpo3q()WN`d3x)D#wC*3c{7QK3YYHTFI?Dj)0D7L*Q^Y)EFN*S5Kf6EwtvD^K zX4g3>UkD_Pm~x3s<4M4D!iyh^@5QpKh@VMs^L1I0^eR*9B~UNGfVhRr)k8T+4YI%iKU_?Eo~q7%ZM|H5&G7Q?VKnAX zXRP(#zo-*JReSjN^L`uu@`)0q1{H@;uoOmoUrUis2Px}_R@sE3c3-MMAP`|qVR(4> z+f+6xX{~F_j#etX9g!gSP`}Q{s_0DtNaJh7hT#+I(v`rRAV5#8jTqAixwzg?7=2Y{#$~@7AYU zHiMMTSH1o`B3J{-}9oq&(e zbIF>wElrMQ3B{wEmvK-cKRtnwdit53@jSSBz6sD@{%(sT(jRJ6Y$68_`J|E#R1K-% z5J)LOXAB%}f_+fa+Kfu`;oDI?z(IMF@YrmB8&zp1!{z zN6_0vZ3BK0zjxdbg^<6N2CDBB;1E~~nH&6V~M z>vv{L-rHYZ@kQ~tdI}VyL7`kt0*VX!Q965xP~POc`7;)D;In7`zafzr$~4YrxQpFM zIfFEzbRBu~fUF?^-C$3u_rdm~g!^d~x03DW)3fBypdeXT1CbUBs)1CY(xcJClngd} zBIr{%z^dC;_M-Qzp)fB+?@oN=kvz49$oM>CtE#)V`hu3KXf>BfE0^oP(M{IuiidFk zDGvf6!h`}G>Gn%q@rKh+8dXe`gm2Kkax3AQ)`>hdoNQex!@Nruwi9jo0`YC^jpsF_ z1YFPhTiOv>_4`;uHATkWByhf6_s_4hrhyAiBa_qqZ-(?*#BjO8I~myYS*Vavs^is% zqxtPg9Egowe6JcPXtezNsqJyuLwkWSm%VFvLT=mMHup1r2x(zXLC^;)1|9c-I{dHG zF2L{Lv=l8s)zhx#H$mocHgu7>JSR*M-KWLF1!)W1v zMMR6QKD~~u0O&jvIHbdotvCNV4Kc-W-6q^?0M;k{<>|Wl8-FVa24}XHuN4ie9@S%X zIHsG_m2dLeGQ~>gs?gi59bP+@nGT-1I;{)jh$ig?A$yJ9-d-L3SS+xAprG%`z6$3s zWfEv&iOwfrHSAIzc?0t>)D&fpt>(6_uE^TIyKJ7{6LAJ|xb4ui2YaiHajf$OXaD= zrFIQB`UlseZ;$z_T=*Plyh9ssGa(uzIdbhC~+fdHsOT02-!Z;3?$hQ35`LyIm~&!!A5XivVh$+)^pz8mR&jY zYD&k>QlhbWdrumkegL%trk2VnZixicMJ85EcG6sD3t3?6zx^R7&f6@`TV=>lktAk- zk1kO9d0sEph6k)Bx`*H&r5Y+FaUXz;b&l2YCYNI1*|Ne`vSUMAEo2bjF%P>6CI&TB zQux&@+JVtlvK;jAbkD=)Id~OTOYedKpq`{c)Wd|wjJyNp!H_>EfeKju6$ldooixo` zEgde?@?R{H#bBS)Ttuv}+H71^UM+blVsUL{dQ+`JU4!$}syC3fysL-6nXMO@;K~jF zk|Eh+F&DVxlp(-$pb}}50ZqMb&Dn#NwF#J zMchw_@^@@$=hsX35D)t0r9e830&G)gY=$Q*E83dF=Ja$6ah_h!Q?KlwQz@oLib1Y_ 
zK03XWb;TvSXgR}}op5DBRZZfFZNe)mr*5Hb=nYYj`uYBBfk<DcGzL+X?cf z!z-?n7bYu#$rgP}W2T2$v;cmn@eR@pOWcj8w}gNBpAeXBPiC0rt12Sv+oz!UDk86F z;*Ci5m;abRZb^tKy8U5Qp#u~NCjrQsYL@87^wyw1OV!Ihjzje}?uoy=8CpO5(()$9 zxSU*0j106uq7(aysv3_rluzA;T)D5=VO#bj+sLz~akN)|@qj5vJJH4h~4c~u1kfy&1d2Lu&K zUdYR}0BPm#;^wGpz{}Ym&07SE+y*xz-V72YUNj^qe77QM=K$$qlX<0Yhc#{eI9DU^ar_xK-Ah8UzZe+tO)q zDxrbDIgC5rx^`eL>O)!fj(ttT^_H2{E26WJXLTAdr+GIxYC;wSlGfn<)4a4WF0+!)*JCyrDt2N!&KC=*5CFf zam8@nT29Vqdd)*BOc#PqL=e<}5*#LBqYeHQ@kb4Z(xhjJVpEbqy;Yk%5iuc{q5A7G zLR;M9ZzgbLtrbxVi^(kfX z&N5$zwl4Fk(tT4BqVOR~p<$8TuW6l2*R~j$aAb4-6V|?w{W3()R^0phyU1$#of-)k z7vzl-_KXOCk!FMfY^FJ#%4&ekAqQ(ggCw8jlr6Qfa^~M5Om7M?MB~e+KM|DnNq%B2 z4&$$9Yfvjx)s`+J9mQ{}Y{HtIY2eEgk6%Rb$D|>OhEz^upl7N=hjg+?c(Zzp$zL4NnN+Re_a|gMnn1Cg z$dE&&>!^IR3S((44f6^M>Gcg%F#{C4NcsVewQhUE1y&zL%YD&_)XG6RMZueV{k_HA zS^)TYlvg?WiFT3Fd+{xwR%o4I-dx4z2jV3DcIq%zYHz$vI)z}wx=S??=Py>GnwBvvlL1?g{=2|{99{YlTMQC`$M9_G^9qCQr^8-E0JtA@)bGcrQ}<;@=J;S zOD$sezpgeED`6|eBU}MxEU~sc5OH}KnQ?J#vRB_U)Wcgva-3m|KiVARqj^`mJ9FY0 z&+>D^|HQaVs3h5cU#O|!RdKi&4Ol5A(N|ZQrhzJ}D2HllT5Dc#D+hcx_D^`HGDvPV zYY(2bZEM=+B$BrDGq`9&w2xuF;5ZB3_^HFtkKi91CvEW+1BdCVVp1EVq!HPg(B0{- zs$N;MOEqG8)!x?@G{t95^#Xik z`h72u8;O`S#m8HiKB|&NPctw*Cu9GIR|umY2fbL#_qsoUW$!Tz79{DH`VS%8iX2uj zHPa#yF(QRvww*-Qh1;wX>-^Pz0FDZa$Vut~puSpFS-!mJ#Zt6b>&CrR6ndzm9hHb8 z5jZ~Xd3Ptva7Bpmid~Ul%5dWLWVREi&F%Wttm_W<#gS2PQvVF|PIF8;7E_PL-|UV4Ln6y%^+ds7uX@${<4i>}L- zjjx?_B%byu09yioe3c6)L=e~SAl2Vc9iD%lCrJ4VprL&3tMQdQY&%mRmg?1=he?FJ zUj;zdlLg~@ynab6%X_4s{FWdSc?W;CqMep;{BLb69zY)(sW>=o#n3vUZ9?ED%cz8W z9F_cMgMiEGk@!s!M8$oi{(eLmwMZIFburVntGwN8Ep5w25ORQjay4!FY_iHv+ zQlCO5G0apc;>|*cW#+vP7H{zkVI5QG;{ZcnJVqqtU7N#|&o7&4GMp%J@&6qQ+;>Ct zYXHl0I@Y*t6S!J%tdG*ZeZa+lgZoYi*vdW9`-lU1;8-Gx&x-^po{weiVJ9!jPb8B* zoV@!;6)0;a2&M6czCo*S0g2TQ9oMh!J0y(C)1*k;^T+$@er3c7>0?B@ui+$mxW@6G z_yVn)oj)a5K$g8eO;N^0@Buh7y5O)CIw7@4_|}e|`-KlN1~pRJglgHo-RK*Ke{+&GXoX3BVbqFE~}6FSLB?984yot`EUJ?BBiu(sne|96phOg)ilbI?XJc&48;eu#NEpXh%#?HgBTCcis7K&H-F z&E`bZ(|q<#T}E)?$V+bKe*e2-)mTIeCl9~G-u=}gf@Wu?ra}-&+=LM|-qEz@TN3@t 
zjMk;w-LM$nit07!BB3GPb|Ap!9{>!_Q})gAMymNQwdT}aoaxtPZ>Do=a;Z~sviwC3 z;@H|u2tR>;C8%9;4;_m_r9$K*VLv>VBN9h+7(CW3at-B-Fa*_D@KeUf8K)8DB>B*FY`uKV5v8Xc{=31m|3*%Tr={2-uM>?U8}}4v|$U-C<|Vy7thTD zbjl<09DxLl3=TLIl2>jQ{AJ-b0;FV?`gy|Hz_}3RcXx|E3kTJH84*=hj+wR(3k&Os zCKm$(Rc5w!?HB#uKS%l_z75Cq3v{gRWB`RrHUSIJWO-zEvt5(}(QXpGwR3&Hwc#D{ zm#;&3s0ITsa|uU`m&(#FFSi(u@yl@U%+%ShW+qhEw{55+IwddG^+Ws?Ax(IVTsp0g zmcyczxjIufD>E7d>c;Mrp!1oSumu@|+zTFB@=62TA7=2c@sR^iJ>RWmOr|uIdZKvH zUuj}!v|jUH86xXzcMuolQyFS`>@ke_mxxXLGc+kBA$gsTORWeEhG0;#I&4VSeVr_2 zNuzRp^U4__EF$t<%4ia3ARK2&uL}$JH`s^go#*;cMkgf&Lm()NfFxS4WW@q)bG7}t zBVO!;;IKX%FK*W+FMT2R<$qTO9j-f~3lOg(V|xv7=Bi@0hg0YXi$WmJC_lF*rOGzQT1 zI7{hHqUG%r|9!M-$lWbzu-m4`ZiITuOd~bncKPS1{CFzyhLCZ9@A3{Oq|IE4*mU8g zVm(W3LSTQ{py)c2gkgGR`Ny4CLrBPxBk*;{Kf4151tx`&uWhnp^8S;UkRg2(h65n@ zqD2k+q%(l`>AW%w02|z0nbL9pD`oy0vn@bKz{GjCR-pgNjE+XgHdk9Dro`9#`Z2qN zNijH!Nj3pb62YO=(D~K{zYgTZY>Ft8q4OKJ_5o`m>GCeUaDsycg6%0x1J?26DEgr5SQ z%tY8B-9W4=rCtAC+ESm4%mW**O@J-&Kq!2(6`Ro= zA4tHRtWu#9j*c1D9VOHcJ4nj4+YJSo`!J=tTKG3?zf%#H_?_n=5f*xJgLr(H%H$+t z@iGX_pa?0bHdW2|7#Tk#Tz!g=MNUKp=nXaNO>RSKh3;Z@3^8}Tm1xqfFm`fM zd^3r&RxVoBw?tzi+O9MIJ}W zDY-7vfY4Uaw~lV#$2BPSkF_iYRWfrp@kxV+hX(tlQjW+=B;b7}*I9ZuG!0L~U8}34 z-X9*=BN~6P?$0@OaBA8m5*L?pxA7f?s)QRfsy0JZGK(*Aots`SoWcjXhT6 zlTGf~<*$A)zKP^w&Sh({x?_L~K1~jUPw_nOB=jg+YtlHxVwwcR3viy>RsUyEUCwaQ z4&kIi`iO6dfvy5e%ML}I&HU7bN51fXzU9BRhYlC14m=W)(FX89JCLvUmoi$~RVY_i zZMdPzCoRr6kd@?i=6AItaW`B=$v$PBNZX<4PB#<7M34K;)k^ZbBUV)=+R5O z;auT;6;c11VXLEHYS`fYgLLa9zj$9g(o6VdZ8>$rGj5FoVKwmpmoOYNZE*BTa*nYd zld^5Ct&kw`fl1m8i*)8cj+*S3Fvic3k-j?1-%ezwKYvQwjIbH^8Zj;+%u!o5{6DPi z0^wiQhSG?PhJfem(?iJ6CW(Kz=R%uHiKb&5ot)gl^KQ9}^ry1@nX+19)X-L;;n%AO z>D@VGSJxJdJKPEh=v7&a%X(ml>^bRJE)YTZmQug}Y|_ftWmD7k`el@Gr&6dD{%*=F zr>gE%3@c z9AJe+Z|ndqnFDU|uh{>1asT&LvN6cX0H5yvmkV0`y&WT3qFF@XK;o|hnttt;2%j|E6ui2>!o)|>HiZ>!~Ym0nR9 z{g>>^X`$C~o)mxU*4q)Nuk?hE6=x1D8T$JR{ru--^UVGLhyWF6Nv{@b{t9O`>45{? 
zO5jZgf(-rtuD+jekI1)WVp{p+Z$OzT&V3sz#{xA=(hEw)JDO@lEi#`_`F|ML?!R73 z*P@LdWuqzXWBqPt(`nO?{nv?5i`>UN3%QVs*`F$=UEZ{X>;9xmuc_E9B7=W8D&0L8RcP$B%xQs>6vC%Ix3ME0zt18*#g|21>FY|B zF{`OM2?rX3`D$2uC03zjZO292m2QGFF7kbZd)QWBG5Nr@NV_~Y)lHk z&cisRmHoc)ApxB|3`Wh$KY4hpdtlNlx3My_S}d24tXf7J^^k(b^QG*zFsWg zHdnBv2HQQ$+y698p?jj{0&mi&X|$(_U>>F*<&@@D*J;g4k=q8F1gYtN*K_QEK_j%K z+x6k8>~Jz)tOq2(!KS?X+`Ri-792&T>g>jK3B#&>@F2<^&q0Wr$nuyKtmjca^_WU@ zyM+AH=_nas%rx4O7W01r!W)jJZqC!>^rdWT%02zxJAgotFPsEDR}!rH<(1OkU7-8Pk0RhQ8l&4J(lymLLUfD{(ps z|6bU839oRyW?~^B^4B&xL=F4LbB4|T^afpQ-(I+eMUe`TJd+eOhyZkv6X7i$`0crF z%M3bt@e!PqnHe>pru62s#0fV6vOgG;$ab0*PD;$}O@4%5SM~RAbg5Ah_LX++@$Q=K z7&!@(&(c{HR(wu7-=4u+5BQJWe?-Ar{(~QSu!|TOmex78y&I9x%)}$g*L3f#HdR4o z586#?+GgThJZr{ZHSO`24B`UcCRC24_XYd=XNhpDCC=7=H5h#co?;{78OJ1RH{iJL z=t|UVD;qj_h}nqea}KKFe)R?yz?AW=!2TA8>K6j7PK=T+mcc$1I$!<#t9cR+=7LMo z9Gt(MG+7)?(Qp>4)cn(8egW%3fL{GGFtQ`4>$V!$biEc9lbk%0eMc2xca8MN)i19J zC^?oq$`E&ykRx4}y>ShEAL?0-797XmD>SU_TLsBM)=rzcKw-X%`8}A_2k)6f`staI z83b@hJ6O^vok=N?ezuh@2|oddj2l44bfO^#p_u@Od9kIG@W&Q|sLn#G*^XFW|B1I! 
z(m_4TIL5ur(L8=EmiqZ;`6vHrQ1|yQV?XWG@xT9(XoaiJ%&t;e zH>s;e#MD2vEhMsML`zv|kW@+)Dzh;W0|)DvaA-ST=SrUteCe->RnJ)JfV1Oey9K}% z%zvtRK1&jlJL;Bxh5)F+e^ZcbC`EKaLLffJmmT2^fL)1L0SMNSE*aiYq5>yEWCbUc zje+Gyi7tBj{LugRbKQa8(21ld8R1~#$#C8<0lz#<$FujwYmoy@wR;siSkvvYga9NhNT)8?FFT`!2C-FkDUevJHB@i%!q2h%?L^qh$>SF>QJJRw!b9V$KyjK zt|YCLiKQ_@JMGGY$US( zGg4$88}e`-cgW($5CgEKJ>=LyfEHoP=yEYpMx%B~)hZ z_D&pfry0%hn&o86gj>S2oxX<(#3PK|qSHgX}Ie4*7kN8Z5VKS1N+ zeR0~$EJttPTMgu;3{b&k4@_B2lH%e-7xS~4iyeop>XAQAtvotgj%e10Sfg~-KWjSX z7rL<-fchtFbZ19TPaUSG;1ahJtA02g9Xg#HYTPUh+FNA?zW%*iSHWx9zG}GaSo^_t z)W9cGc_Ee5_VV{g&9GL$o;TQk1xd>HQ|I0q9S}&0TP48pqzo{QJp(b9aRvUFyO+HZ z_52eI7pFglm%(*Js z6or3(jYz@E``I9hhzIlU-@lpW&~$*VjjL7vH9^4xa1d+Z9HYOsS4+o$&0#Nc0ZH%P z!cS@Eoj}Vf7f?UlS!(ACLL-L6De%%KSAJtT0#r%~_ZON~v&_GI@q z`SAX1du-Yf0*V8zwG0ZfUSix~NQ=_LDsJrNu4m1&{$52-oAif^=;{(ZGtkGH0BB-; zm}BfuB^pn#wKhPDMpLock^&S`HPYi&$Y|OZds5?tLnuYT$b}; zj7s=}`9`Vj(G2KS%t3YF6a;A9O*VqEDq9bE@2bcCMSXuJD)FqV6T7D4et0kFsOjD1 zYz@$JS6Uj($4J_J0OGm&5A*Pz9+zqZp~vgIQR&7QuS2jAkrt<;!eXwt^#{8&pjk2SOKB2XB|NI?Tlc< z?I;$e5FySK^Cbu~1H)-J-?U_#IacBQqnz0JOja3{n6YJ?B9{IXcj~hY_pTz<*4B!iET!RIWghv5fxy zp0#P&7wt4+;|DLzXL2P6gcJm%SoN~z^CS&?cZS&hL<3-ar8Bd<&-=(&1@3h{SfhW3 z|E2rN;V?>)^avBZcHTZD?Z!jGgsczF`?yEwp6vy+S-%_B()AH760sTiORhXGy-q=p z1Z)-<1vaa^e8gHK)&a7Z-}(FdcpB;@_oC|~5Kr$n!={__S5KF=*q7xmL^iKO@R^7e z(G8*vp|c$e`}h7bML_USYc`%6*7^=Ey?((Ed74iyLwK@`zgeZBJ>`-bKG&b+_^FKWe={E8D<+0IlpJuhOXBw+n|}c_&KsJubbG zNol^mJS*rM&kQ`UVC~891hvg!>w}kkuyTdW$}M^3GA_5>T^Ebj3d*@W^bj<{JKFIn zXU9pq2QVtUyqudm>7D>6T2^|G_*8^y^Jhn-e9pyVnx3US3sbK;j_LvH+^(NK4`B|W zb^F7s;XTnAjnXj3M>{sDP>)U1AHe3&v-JC83~aZQ2Qu$lHlW%v)YOEg06L)1+z-1{ zLC=q|o9BoqW?o<1bT{Rj{S%*Sr7RJ|+mh_FDnJpew z1z6yt+z4ZDov;zF8R#B%$g(tNZ>)w1A`nO?;cSVH;3bOVmv1mowX1`jAbTD|~fKYVjJ1=wZAyKG zZho)FC|=LI?a}uR2PN;Da;ja%Ad}mPX0Aq~zCB>w9MV0QuieCyriG0B{y?U+rTdGwk)5$Rlwd^QH+|OP< z{-O(eu$TVFA}L|r_N8N|xa75!UEA2x-UoX^l~U3#@2zmcv(?G^Z!~Shht|tR4#qe} zE8QhuykF@(VNH!%PG~g7Xi>JVvEGmQQo}Z?72}ObRyxow-#k82p)P0jl1i>Q#^@iI 
z86Se##_&Rb+DDZjITN~Ad$6vZL!L$mpr4y#b~&*0-r3F*2v*hks5H8-hTT}Q?8Ha= zg~hkybYcJ6(zjzm#G|ZX2U0?w>l-QxzYRn`P>q=er)T6$=G5$P~X)# zq=*`QLV){>&sVLG!2rn{A;ED$BQo-RhlyvZuyT6^y#F@Z!mK9(ZNPghAZ`AuO@G6Y z-qhb=A{4F<3sOd}$DO9J8_4qZK})dDs~<+_-f#G+`O)B_K<|BWXo~m!6|heuDr};2 zBsvA{l$vr1do#Ch$DcahU7xm>x3x9{#^7PpMD%)mW_`-4DA{JtH6go?^&^hRhUuxf zF9k=l+K?A^>`H&B%_o=^C$FRRp^BlN9V`&Rq-EcU*vGks*dWo;z6obP;@jtgkOu02 z@PW$4!JWX5s#UQ4a-qIE6b4w?PCg@7)RV$4pv!DIEKTxa3Wj^;b)5X**qK=d~}22b0G?11J>qg*`bY5 z+`D{xx)oU4wFbUh{&Bbdr~DXG(mCyB%Fb|tOEqZR48Bx3;v9pEEkpyysGKXK5?G`w zHsW<3>7ai{3Y9L9NATeFDU~jbqxv!O($5-fa;h^ZsFtaDwkrKmjW-Xe&ZA@`Ar*tx z>IcQSM!op8cum$g=uV(=ka$txA!aG7YWFx6ru^vcP5C^Hz(Fas%e9pEjT&R(!sIE` z+9;egszmEJXPN7>!0CvXj$5Y%3!Kj*3^=&wgXgctrLwCiE%P24Gu>%j-Fk|VYZf1m@}dl zn8q{0fN}dnI+so0j4nd(I={!r_sCK13DS^Q^kWG+v3T`r^b^|Zy30+WhXOK0O0g#B zra-iMr_UtKS6SwIgeg6TFhQ65w{u&Cec4b4x;89yy;M}onuz^VT52OHcEXhe*Qzv* z;X9H$`lQ6+nNlFiBBTyz3c9=VXM@{9FV0<31m#FQyLBA2&ivVe?q-+i4P5%%q{VF$ zRpLfUyaK<+Pa5o(H4I#Fhk7<{5$$0NDGm}%^;Qm(ME$qOJ-3_)0o#5ttXvmAN0*wa zP)=dWT^uRebWdu#cKgfJPoh(10L=Wgd8ZG1WBIjUAQXJG}vk`qfW&3t47@0 z`E%*%CT|X~dK7$Vb3Gofb+%$qtS4{M!cD~C?j(L{w{NRorgp~PZWAJVf&0OiN&}11 zYUtolTDQhUs8T4kY(XXGO_}rjgzwp72D-MRwbDC?)JHCBwS7g48Dg8#p>`4RI@T@M z5UB_Mu)Ih1|C%sy;2`mrk0RTUwvGImpAz({y#AvnS&^f`SU?)(^k-QqHCy}W=M!v& zNPkjSvTP-XfTW)@BGXZmWxHz0a9y{S!uB*YGCyOE1oAt759OuCV9&y0HwhNg%dL%G z5GOufy{cKxU|J2yycT( zRW={2qqL6g__&bvz}41v5i`|dlre%Dh(NlKQQ=)ZeUvkAqAQJ{ByHx;Cb-q#6R3(s z)N3Cw`IynK{ku=x^IWe*ZjmNJV@m#1-U7YHr5ebAW$6TcLi(g#6 z6>$D}kFJVzDw2iz#7jN`JrGq`4&ZNXLlK(=Z%9OEojPJ;yI0UjrxStzBOx=aa?k7+ zm4N>nO_ri-!Jsolq~E2KoTXcnq=a^Cu5MS()LaqmKbuuY)jZ*F{tjIUW7;CYL9Dhm z{A3Uhk~Ra6`72iBzg_@iTA1Z1;b0Whz8Scf;%=DEfFD7|iw1!9#+VbL zjWH!p6a5JPk2}Zqg(gR3$)mjT%>(&sL=5U0qZz`})W*qzWv;d^P?LOW(%D~h=gnHJ zjzq;NV{cAp4ivW}zTABaq50>Uo1I62F)qDD~=>LE-eD?&=WI;@5id%v>-i3 zqHBA*>GVi5&Xl1%^)N?2op&?ohnI37SqaVHEvLCJ@3rbnj@AX{=pt(EJCq=Z@u+Dd z%}IRE)nJL+Ij|i6X!I_{>w0^ZGq#F(^Sk_UG;a1B6OkrSgs?+UTtxRre+?ErSx*!{ zdk%c$&+pm*Go_IF`>vCNR~;wlz^E|k+rmLnoDW!z^xKLFJ5m4VmbxMSQw|zP 
zhqDR{2mzmYge>XLjZ8>S5>LGHVe|7Li_j;!sap(z)sKdbvt7YIcy4{ia@2+goG0?K?d$UOTT!?nvV3&1pN`5SKB@gLs{$Fm=Q;em&+T?z`i}7W zUZW7=R>haF(^^2IMv!EM`=lKrloL0Hy4d~Nb|m#nv+?3w;7pswUH$IwxcCdRRN;pM zwKO(iwnuT#FZdScQFA zQ5}8P5nMa?#HXf;U1l%ZhcO{<<-wL|EVR@gpUE-vedik@DX*E%Txp!Ws5P16_Ri=G zcGn-D;M6Nww918Ver?J(tY$T4CLx!6l(WQYv8f>Sni#3@>-4l*u zlRK=oH`GHnh~t+#l-=_hlgHM_o;9i+Fc*2B$Fcc@(uy&k=M8b~PO$Dts0M;s98i-v zEzWkiwO<+eQ>Ty^bazvY=&kZ3@eqfOx1G(!|JMJu%Z937Kbn)wi5-|8mm8ww@N3U%AIj zjW#C@4}q4)xEJ2Mci1Tl8@MK;w$mo6U2R5tt~VSRo)#X(gZ1pu zI!5o}*FWK7RNqc>coU+=mrCwDeMa8kL<|||uQ=)8eP=!L_LSF5WxzU9${vO(*Kul_ zl^9vSV_=BMERZHhNrC~TCyJGS?@yL7;2H`OKqPD&eLR~ZhH$O=lY1Sk#u_=67ab}-~kLW zr6>R~Jcn3axU{k9gDfPCa0jo*X;wIBGt>b{U3)qb2yhK5keXuGc1c}&l7-hppNi6y zGC6xvi`8*7HHZqJm#BoQtFZRJofO&q3RDogJMdVVy^%|~3K{q(!mTUeaUL~_0Gyvl>aYk3D5X#F#Gx45b3WHso1+mev zXFU_~Q2YPf4XXry4ufl=js`DU@EoX=RDF##%?0lp&k?Pfu}XY{xkJpp>~fTfZsLW|+whafsUZg>iPbOSvqvA7eLRBYH2M1l!VF43-_|1 z9J*#SB3=6Ff}VSGzXV;1?wk#O8_pM9eaP2^mV5k}n)K|t@RnPJMWF=JL^!%XG(SR6 zYb|G1DLP?rUi0aaqFuMmk$P_HP3YQj2C9leraNat?V5sBF!NdeJ#1C2yOZ6piTj5j5+zUjF9RFbsd%7!N8rWwaGSSRKJ&b{Z)!B*|58I1 zyjaqZY4Q%OI$YtkIbu1r4Vi-SJBby|feM>0iR!u!1dH==5pesfb_yR=l<+Y^Yoj>q z;B+uZK3(1~DAgHNQS!%;Uqz2&&DD!V3OEZ9GI0xP#fL43=hW3JK4;hI8p;0$*B7kl z2a$hEQUG}-Ca>RDpaMy^@MqR+p26?6YNjkS4JG)4bM6f#j~roVQp-KzlO(s$UBZ(T$;ZUG zH+Xzn^>_Ru^~w9SOe~^yOY0iEdyR7^OSC+`c8Fp$0zr@Rd?VL!H@_s3sY%h&&}V!2ZB z%y-6~%>_F|C*AvIaCqu#*-pfXPL;Sx7!4GMEY90tN!l18A-Nok)%MV1lENNh0-5zN zc!s;E zpmU)JebS1#x0qTc_p3iDrMQ^kD^#4kK+WyGc3XZO54w}FTEHeb_~aSG?iaEXRJKAf z`16S5&-s%_`8*G5P0fNtrg^p9=JY7E@9UO6$k-oOocQquZYKp#nx2bo)XkU`Itx`3 zaUv+&G2L!F-Rwa9bd_b?B=X$--1N}TxR`RIB>`L0hc;duR5vmJKC}lcvsxTna0qF z+6wo4{SD0uu6UVW$HDwo$}F5zK&LV42)eofgQ$Gcd6BD0Gx4NZ+m||kzHVHGRwsCx zxwvjF1+KY%yyexZ^G)mhwFN(*k2j#~3F+5^`4bZ0)F|*!BxXBev$q+<6bApOIy>NjAPQabzuy<<#?MB0_O>|X|7B0BX{#aV8r z{gXonJHaW~I*(wBFLH0GwG}n7`z)F*Fm!qi7dZlOt22ipYzLPBA>pw*X}v@V$h!AN zKjLH7XMMgsS(5zbaL`FVLTGNK`{su|umEWvHJH>l5JwKjl*nyBK=KPdWM=yDttegM zLSwxtiaZ!-f@jXNiT4l>P}?uzwUwi74TsC{pXc+#7@n 
zjfR{oB<8F6^i!=FD@P{=Oq!k7w+P9{-ms)j?Om~0p^pUfC02;0X znfqz&Z6lJkf4VrdVBIcWV^J!{*GiliR}@1)X@Jt>L!5i&n89e z8!Ejy2)&T7{LTCMH6fn8M9N7JGbYSJ=PuzOax052GSYUH*nF;rNh+9C;wzKKls4Ts zYuW$|neS6+NXd-Mr~Goik)@wj>i~Huc#<r?x2-x*P5tBT3`h2uh!>AC3);9uJ=I89Xqd6@uym+S8~ zBmr}nsyF}XP+!{a&N3FWZ^s;u+w;rupw=s_@GBy8;1_F(*}4RxeBP9;>~WQ#)O&Sy zcYna$`FJ~n%)N*d2kaZbq*r-x4S~U??7&cgK<~D(dEjU=*Tv2_&%(~$NC;{V5l#F8 zpNxvJ`ZG2$Ux{V?Q?t8W`%ah?p#$?`AB#jmuz{UenHu%WkSB%TpFq+Cwq=R6z9TB5 zl#G0wmJZyH3lGfC6@g-yQl5X$sFpjUfu#JB`p27(3=fC7%-lUJ^gs-ZzYzTpR4aJj z^+x_xzkXB^`00)+v0qBp=in(ugV?ure^G8MHP&fSL^_vF)ggfky^*^Bmdx*1c-9h4 zSi}lbjE=21WMnp#YBy3{3cGpP&16XsC@o#alzW#L0tCo+PE_ z4}$^Tvv_!ZiU#ic-wiP>ypMrXu30>VoNA2W59``=jn_5mBCOeHv8SPMqM7W$ZFUQg z029#ffinS?gg3#)IUR0@dlOpUez6qjaPdl-Df%gH_n@KfZL@pmnP~gi3rM=>x%U3! zfv+~>X7_IdO zTXtOmxlRy};r6Ju*5V{r^X^xVSH}j`I{F;^lJxBW^WUf>na{%u$3BUWQeiti%iXB+ z{WMOjbpRH`sbD^5Bn%7h?R}^7-I~?j9Tdt@J2%W;@YzZj&ob}lf`}wp7&^hlJ_W2Z7Z0&qzF0hPeQ5%_sTjvds zdJe+J&3U%ZBVQ-5B=Xp0Ex!G^2?E4^LEU977^)Qo&eTTaM}qMfs((Fp85zH(!p(F- zo|A`7Rq z;e?shSwTYLivp}Skf;;41#|!v-Q^0o(tulHTNwZr7Jn1eM+0D?nOgiZkCGkcgZ@RZ zv#gWilrH9Yb-ld~;-0E?a(8L}1e0`#)tB#l&MUKZW_trtZ3uW@lMf0$(sVt>Gt_&Q zB{=oSanHC>=H--LynnfByzuGD zFLo~OD_)4e6sL-xi)(M2Hl1&@Y4J|gRN~^|j#YG-M>xaAJux2TK0sN#MXaq$62V=lW-Du9der#9h6DMpRFS((`^)SY1@`#sI6uK1tEs?~!rMPP$#kbl z?O8-pCgsq`-;{4<#50)`e5r`*W`i9j?Y+H@F<4> }eAe`& z=9y-x-AI(Lv}<9<52$tP6~54;dRkg7k&VV0eOxoWrltvCbDTervQx}*x&GIq8-ySY zKJ;J#(weTPOL*r$-|X(%-_9rinqs&7TSiLMo1S4^8@rnMgR;`{w0kVDY4#~VZ&NPF z4;{=R=C8PJC5ZK+e_Tr#*h?$_y{j9zS6&(}D1;u&lHPE*ynQdi#;lPop&-rc)yvdL z(vz1dKNy8ElaVZU{>HNc_n%N7CW5bs;Y_8;Y?3i}@m^)nFKDn4)c00|eUi6TB9yTp zYd9l#X%`lo6C3JIhm+WAk z&ARxB2Ntoj+igX9R1_SfjJa`-og{yHf@qM9z_;V)^*l)B^EaI%C{7m5AueGLq|pC8 z!kO9@-`GMAdJv~)acauaN7ppK&37j_i0F}1+2GW9; z^5~|brHctINo?+7FV0ERE6EzDi4OnI^uxYxPCu;?&ruM-#ve!TAt^P8`|R0n_56_+ zU@o{Ge}1H?YiA8vz42Id+uW{uRZUH0{+dCqv=6s*fEX>@LO!Pjn|LpR=p^(V;ZiH` z;KO)*o7O?VPK_eKh%GT3C)>Pk3N=YvU1KJI5KUVr{n=QRY2oG!+@hYPTMWN@q^jV 
zF3yluXKw+#$qpPC`ur8VSkM3nD~AglP!4?-I=_AzolHj`p)9ZF5hf!y`!$3KQM|5J z@i~{gejt5CNo2w-zC0eca;c!B%|R*zu3n6Qt`G7Lew&g20*tFQav=z9M`M|2IW72& z#mj|Nj)H!mm&R$KfoGD7gLhw^Yt%-@D}jjZ0#^1Cb0*5#k^l)_ENi`Q@LCU9PT+N* z1%zM=)2U6Ld2QlYkp<*0%eGa5$->B-kCASSbyZw<)9WSeR||_u$E=g{2lH5$U-l#q zhboD*r(VqeBn`ElFQ9hPIY5YsH=nl_8d$e+X%Js)=)S+iIk?V&|C;TmIuGgjd1XKX zd|4`7c=&Ei4IG%2q@_Po1d*WXkK0+MiXu`o1#IZP^Wg}qKloJ?ukiyh*z*U^3t}W7 zYg}%`evvLswinfj|0pMh^*1dw|Fo%2b;4PHGUXiDqjvF@B9fWK6wr|7rYl9V0_H-SnKtE%}-A%t~>#IMV$r+~oA7_yXESO0^4NJ|=Q<+U|E8}NNQ*q-0Ex5u~9FCLsM z0yo_qG!@?Hn7MowAIQ*W4EhqrE(fCStn4g;)*Y_^dquo&ss}7ZKV*fh#WpYT>&oRB zYg$hTH(YiCf1(@(szIMB$aiWNNC)C_s|rTO&i#FkM224x{=^5}V*b)7Rv(f$NxYfv z?Zs0V&A!VE21$|6R12L-gzft2?Ae=l{gPQB z%P-ggf=Wod;H>)YcvkWci$pViIfK;iAQ7anA1Y7Jd9F3`fz>ogyzeAn4&z2^q?EP5 z+dIeuHAKoH$?!)@X&ISz&MCcbOVT$jtq!^r9+Qz2b+TB@E0{XJ8TA{~Vt4_kWfFnR z*q=50DHCm7O;k10ix(rm#-l$CTiFq5=B=jStA?V8S@5<*NJ!m|7zLk9ZVUEh7YtPh z;t=POfkCv?ym~K-Gr*z6J~jJ0=EAw%!8P_K&!~N0{Pam8^YrD_x~!!I-oZp38mDf` z*_T&B^J%R>PqN1cmQp}l{6JX5PJ|QfL49dfzwIHk@3_%a(*st?(zd@jLYFknD0+r* zs9NS9>PHP!B|Z63K?9O+3x^!H{6eL(8^legP_h$I7oV)uh|icR7rqYwHw& zE;Um}f*7JnxqFJkR#KTwt7Vyb#)5XWrCg;U z&poa&oWqxEm@Q>V;9~o^vQ*UgeHUG)4HGR~`NgDGu3SToQDQd`WV zP5C$9gx60uiDRdL_ua_52&;Odv1`5t<#2!{f!V{UjlgwEATq(TG1(??M z7>ByXEz)~_Og>_RoWYYAHi`cFPR>CV;(XA>B%t=`D4s4$Tmn@`-x1PXIxMogFu$17ar(tLVO{0_6XF5OKLZ`pPcdlj~J-rOl0aPrF2jLliDOu7yTV_@%=P_46@ktZ*1~RknUmtfExQ6(2@T0 zEyDT8#X8?*V$dCP+NWO(3*Imi;IuJiS`a}3!{z}6_*MAl`kDy90d40P=3lFaa?PyL zPuo$VS|X9#gZC_vOno^LP2?}w=!jA%W?Q^ai#1&0PN-=b+HEfV2)7L@6FyvX6$+ih z%0UjbSp8OEal2_Iut(kbbj!yRiZ`vP6Rw8mEp{1(`B?fN3m#xujtFH;4wGzR%bwHj)*&36fLqCdmQ7(p19%UMTHQI}$ z)u3qwYAK)Vog_POB_wXAU|eW~FSB+pi?-5j zw9$h~3bPr3)Jz_=QZb`uI|OT9oc{_${^&&V1CTv{>G^CDl`7)qJIo0zDv1o6rIxeu zoH6SE%*Q_0fq(hE=LoD*C3t;P(L*|}K|Exd`G$l1Pz*yL+t?04RpY}42&wBb9LY3a z!f9qM7NT=LGS|T^yU_ECOPRyM2%tmx)iW4FrRDqWSczr>%G626m%F^Va40%8z4p>B z%Ap*gtZd$AF^I`dLW2vnMf>xF#gN#^1XtRt^}pL|!1OR?$jEXDtRhI1rn=Jhte9?= 
zmX=~>4b{s3c`-83v8EecSOAAF4Z@W19RcMtplEB-P1Wmtbc$hsuNCLt*)fdgdq4<&Qd++5c4$lV*g z0|Tno?aAb9Y*>ALeL(J7yY$!k$_MH>kfEI!Vya6J0=5&Z$i26$QoYKUJfYw-rx^4% zL4)$_Oh+w!>||0JCD~Nv1bQ$LdE*CsO~01fOzedzlQWb zP!ZT6V~v%-=fd0_Muc<_4!UdMbwWZyr@0pe`u{kKU}D?nB=E4eF1rnzm?k#I)p|{9 z4>Huz(M%?MlKWH5@u{J-0$i^3P?tJAiW!#^oDOlk@A``02z8}7a!GXYf8YQA9s>(h zFoYc@r&P_A8x5gWWz8CmUnPFo3QuSt1CyH>0>-y60oX3;>7~g#( zl5>4H-0WRa!VOV(L#sSy^S-gxOP+vF03pfDf+Q6JW24lR?WGb!|GuCfhx`E82&>bM z8l!H_6WC=5r0nR#(=g4|AA#oIG~a+v><9b2L&eJxBo&UVZ*4ku&4|YFFP!SNr?fJW z!-s1Q+A(I_?L0$}zbagzrXCQ5^=hzDR7?Hqd;xX)jH0YQgFjJRbj7>`0a@WcJB^1Z|YO{E_|t+~ohO>U%B# zPsC;U1B?^lU^`1ePcMx`LB{WD`|YsSQR)A@k)o0ri@>J*>G_6bNAd86@xYLN@N%HU zhfPwRhbE2@E@oPCl(t(WXH%n{iFzJ_8PpCIo@XRFJs(uo3>UeYgzT7Q1|a;sfDvrp z^QP|d)+aDLbmStnG~YD_`sxr#+&14swQAE)Z#tTNF>G=yD*wwTH7eKq;%OX#Rxa5#16=h}l9)D!k z|M%aH;FCnD)QXHQ{`hPBUWb3}VtaqF$`dj+;-47r-;NcFZ@sc5=nm!iB_?{gW9z1k zvLSbkneO@h`RLKU?HmdBH;VusaV0gQscsY3V`$LUL>2eg9Lb z-`)?=Xl;PGUATzBo@Xe2FJsbXhk^ZV)oFZcU*HqUZl#^P3^8OeEgaaci48tfx!~Js zsLzoZbdXb!Q-o3GPX`%MQU)X9LxxQ=rx)%{n@wGIImnFobs1&EyMFu-7LHVrj*a^g zbh5M*c5{1?!dttsSpQ^D$K#7CpEupX;%@Df&c2R}PX&*%G6F7~xGw`!2?Qi z9&9nhZZ<+bcV`^VEAjDcN1Y@yU&HhLo9||$lrXW^%MTM$3t!cX8p~4kr?5sY-1S>K zW`+Lzq)C2wbpXoBgqU>u+dvDR34XnMSkye9HWH(M!4yp<`6P~8@J|0a&7rc|N@ut( zCYTORCQP?zi7q1gzRVLM|GbQDeB&#?ZZ4wGt)9ym>Z$%DxQOMyooD6U$rc>58_>v2 z`Nf^VuCEr0Kr`mOQ^fAD{lub8xvadjj`O!`G*xf>yIHXWqrg7ta-@I?tLvt;zqWc?Wz*`+#M) znaI|BeU;<(7!BPg7=rx9bp`PXT&s-rE2#P)3lbWqYaNBpq*JF_F_)D^30vE~19U_C zy^5ZGKv*M9W*(KEfBpa<&hx`(RjGo;UtU^HD*-W^vEL$yyk}FM3?0HC=27yxTtP|> z;4;co`-MRB(wj5%W+PwtRX8C>ciJN8@3KMG#3=R0rpK7e3yGjTEV%*(Rk4NVVT0#) z;eOQ(0(pJcL;;F|!_b??xWnt?pq$AZ@W;+jDdixe7qhf@d4GCx|F%HSt*=jI31N|I z(4R3$K%La7&0JmG~6E0HM z@#R-j6Z28*`ga2kCIUFB{>bDVxj$prsvf?6yVoOUzt*<6{^oN1&%!h2LJC*uplOAFocou zEAYba9xRLQq4U_ziC+i6Ma>m($a4gXV%_>m?9?Ad{?{OQG5tl5Wz|ovF%V!k1O6MF zTj_Lo?aJIftaeR%MT4&y>SMotE7J?lhfr zP=W6Pgh8OJMu`^Wk--*AjK=rq5Je%m4<(x8r_7Pikd1+u2!2{3OP73N@80kV>%w47 
zrudadOOY6@5Aq6p7jjp3b3W{sjrI1GlD4-ztB>5?j$GJxjtWp~)x6%}v|`$y!JSp*1LV1F+ja>DvVHX=7<%s zZ30%fah$<3350J zRlvZBjW_*IotnsxP9OT&#Zm_K)gb!lJM{W4yX3ofc<=bp6{Jw)GOjA@j1>A-yJT~+ zvpb)BAtLboR_so%LCnOYPl2K6CZqGqoSv>$ATE+j<-@T6-wHRqmNxZ`2d~VIb$Ltm z-h%w2P(DMaqJMno8OX0OfA}r~nP3zm8ot&u>6`u9bjzBV_fcvewFr?jmhV?XSS&{+ ziS7J}Jx@oqHT^8;H_tze%X7y9o6Iq6qHWw;P^>Z_Nd_2k^LyT$$CjGcDfs@VJ80Mi z@lVqlzd*7OFZ8RbKNtDn#nx^WE%W*@bYP#i6n4bXGs#%4COo|HI1-0R$oBDy<$jby zkl7T6_)iAQ=`jROl^EN6jIg+kq=wS~b>VRFE$M=`1M)n`x5-3QX&Cly(MkS!LtRJNmMozgv73?E5XFkce$R87^ohMju{7s#tVt!1G%t{qy|%mt8l^ z6Vcl2jI@2%2xV>enDR)gBzb*}ud?coeeLAc-PcHcCT?zef(wk&RFEv6XgBbO4@Gv;rTd zO4Sc~{2fAM;(MuD`I+Xd%2SCKZ~uy^6%N*-H--=8W_Px%Jx5jpA? z$=~l!aGPfE!wjmvx`fJRiTG(EQ5?>E%Okd1SFW&o)W12&fhWEkL`TY9nytaG<$lgT z_@c5B>(1&jkC08+YZ4LCFk-pO;;zI+D68E)4z_$Z=Y<(*=?rQ4gf=I%gOV7 z?pTm+qPH`tPeM1!f^oqLj4B+Yvh*BBX(~I=`7Wg=uRR&n0RpgBPIj?y@#(t}isJ3} z6QFD-6yMl9-x?8fC-B`(FvAvj^9Xs~X)5)2T(L;MIOpbevnUbItrh2c$)9(MJ6JRI z71T*(^ySB5SULV^2>x?qF&RMo-gJtqThwn=r)47c1b3mW2L;$EJ4uCx6k=a(q?)eB zD(1<4dz#$;_>6*H#$;u0>D@szwzEpq=#IX-E_A^HzWZ9!bB=0&r?9Y!W9&1tCr^AT_X1NrqQz+J$_{>1m)gbelEL z`>g*FxHlLx@sPUjRj{-1*y7|>ZeZ1c=AplHS?cU|v#KkxX{lgVeIae!@rJ02DSeV? 
zwBbw()|&ZnhQU7fV9k<9EU`E77d<6k8CV1hEF+Hs(qi?)dBoATrZqlXmuC246JA^) zvA^$D*}X;y{{;7Y&pgItwkfuEix0@U~gsW_#L9SNDu;K%?K=JB6Y#0naSi z>&oxt{;cwIr+HBnbQfdCf1g;{K0lV*{5}hh*F_6}NE2wJ7&@#?%8pWvdFLx!NI+b- zCy$58ud@U)YF_|4%=bK?`_`GUZrsn3-ebGK*W{1XQ2-u{G%#NokK;Z%c3`dGehoYl zq@->6&n8rT-5ID;4VRX@Zc1QC*YlTVVVdFK(Q5KFsxKwsLl?Q*RR`hN#k^{c=gGB-PPNL$u)zz?55Hn)!*9bIaf-na4*z;MhF|MR^aiTE=pZ zt_ca1m4=(#`cT4;LUN^A)6-+dxelpxCv9vFdoQnK*UzL|WiPVu#!f><$5wlpRlHBQ zWNfp&-f({YZ~OZ{&9KsAA28-=TcE>7eG4>px_TQxqCp!*3i=b`Ha3%fT#AtkKp-;# znsw1gYojd3tr3wx4&^+BRQp)_2&o7VRL1?5_0XzzLE|~+hj+tMMVe)yIh`OBZGErm zVJU2^o5+Uoh2mVbITcLBF6N?IfQ-IjZJO#7$EaP|IoYvualWrPJiHeqb9?wLLJ(DR z_ONUEYEzAvni_dBi#GN4yLLhZp*UNqw zjPxr;eo7kP7ehEk=Dq#hVdc5kgGfk+kM5Ig2kj!;ry{?>*R`Yo1eI8Q=b6vN<|u56 z0CS=Prdi*Iu`hyKL?J8qmbDAcvPCI!I3~I_CX3v?LcQNdze(!K(zRGM>FQe7=XAMr zJms#Qlv@5`*w*PDU-C)dkn?OPg;&+<;m)O~7wQFb?h|z3_qJ1sy5K7%b}0piX(DJ) zZa|D;)d3XvEs?|2`j{5TyWPNJazF%$RiX~|JPn*?c3NcH3+c}q|3F~M2 zR}Jdm+}Oj)S3y9!{|1^&YMa(eAwrI#*XRd!pC+{L8=LsnIcA#1@5gqR7qV+m%GvIE zY}OOwzeuIV>Gl!kokxG!Eu&>5CKgeG7(8V|=rIuK=C)8bz#mLveDK_OS}>EHx>vJX zGght<6ngKR9h!KWY}qqdBJzBr;<*PBZBAj4p@ z*A)kCJ)KTZxN5gA}APG2xR<_n(C5BYU zaeN=SXg$++jAC*IX-e_nU}0ZV-!7F;v_=^V8zTs+B0fU#PquF8(_w{e0VoKKIX*sA zB&7*MBPsAI9ATbOOy(OS72K3ttB@9f+U-hI)ch2nkpq_ryKS+p*ey;hutOqlcV)`;~MlXMQw+ra5G)b#F zPMhT0em&4Aa67M37O!$ zmnmgrJl}k+>@(2c4jlaqmHAUJ!)M&TZUP&{kR_zZ*w%{kVjT(4Zwx+9kOw)drFgc^yt|1K%;D#G@00HNJDA#Pc~1?UqWdok<&NzP*N! 
zyi^S(Q`MCn$D z3cDtz{GRmkC4ywu>{-*l>LLR<+wTXMiD_+`j=spxR^_3HTI059A?B+Q!=*2OGP5ns zQQ9gmOo?qTUNuE*2?_Ek!jVE3V!e2{H^r|^#4GO&CFBrAWWLhRu$dRh)N`ryxU!cG zE|=lj!KQ{4i7H@5M=;^94*c@LcALmop2SA5AlK+NrfKV+e|N;IcQZEgJ}ZF_BymDv z!{sE?DG1czx&ys26glEs_^U$EBc(unKlZQgpU=W#!sKpc0@x~efBbcXcLi)@u#08q zJr@oAp1uPOI%nF`jT!aj1OBVkUg9CDV&|6ePT?bIY(|9gS;NJ!&zKAYi4Ig+$eH;$ z_mJ1tjBEZ2SR2rb|otUi2pNHEN}>1GNH=*(7nCw)QBSTTv0(vj>goI0Ve zXQ}6&e6zCX4p~n|?epylj|pyCVz!yD`Om3D$Ad0-X-Ie+fA%SYX31iVS0BHVeuCQl+d+}5D^Bi>ti}suLMPw)Bb-MI)v0601CFQot;h z`#rfgI?KrI-GrLCW^1@(nx)_u)%Q|!9O&`wAEEMf95zuK|BGn-7vJ&$XRi99_k-uI z=p#f%>!L!HzZSpmqhBEeSnz4EKhv`nNsMiN5pl@LptU)iZWEK4(h6Q0v7GvW0j_6) z*Blf)fImdi2uW+Wn6+Tx1$fT>2h&x19vZf(lnl&H9XJM*yi=Dc_61NF4kl}lw}3VO z6U{!3i_XtaU9$pTyVij8-j$K8E~vaqa%q{jLDXUY#yzkn(*-6;&mfA*=cAXPs=MRZRn3y`x>N@axspjS4vvEPU?Mo^!utqo;dE2-JvA& zqK&4_pK+{A(e`)qYJHm%3G;9AR&qIVCW>pU#XOMo+tvp0ygtRkJsQ2~sp@hcxOm(q z3Kzokz`O48=Y+u+15N^EEGL3Sh=FJa6fGLrM1&iS~NVzH8PntY~)h`2p4d!F0$Q@uP2X zi|9F1>_w#n`#&zf2N)p$Ma0TwB^{2u=vLgXXA43ZHZ?(49=hFKt}bkSZ>k z8mCh3P?M&6IT4inDE?o(^j{5Q42nM(oCkm$kO>|cvq}Nr!l8Pn|XTgW!1apld+lT`pN}gUtQ{!Zzv^^#>=uCB0lm!gSQ* z>56K(><2T6x=V60YvvTjO$b{g3)|D-Pz1VGgLdBzR0fEydgwU4NM7#u&x%uRNz{&rl!rH?8&FiE%6(53{QlKj)#r>6)!TlZ9=?(W3XIw^{lKX=z+ z_q{vAQ0TZvX=B6su0S*+;w}xt_VVxXYXfoqCtIVUr^=*V3u<_a!9-ouXe;TDgTE%I z{_|z|pt|kaaCeT1*)mARaP1(WB|&?#=*O#k{-l5$axO^d#*ivVR@Z()x3pC4H5E)M zhVqO7q|S)jFTZmqTf}YM1f4L38cREl)EAL3iC@_2P#W~FFLB7aB|{Cb?#?S@J4w~!J=TF@1ee7w_mzU;;RG%E$0?4J4EGT6`GK#~y>#<8 zFR(vk;314ZsTBy-S&jRsy;i3O)(J5yIvtS-b)Xc{$FF~bn_e4-vDP*AS!~6xdRM!W z44alpn|999#<$+y?1I${A`9|)G>|21CF+D_pNJ);bzo$~GBT?7y1Fmjs1$<>m;Om3 zIxo8Rt*iDDJYszP4@NkB&*2oEEc|Vlq2>z5-1Wa8PJ2Hr7=+48oTcf4PIhvaEdEqz zyGX?~R9Gx;jlUk<&jVOO#p@s|&_@3UitrYP(vN4rw&$c+o(`LPfGVL2V>u}qP*kftZIPF}}<-)mnr1r4`KXz<|+rjdP> ze~$Ux%|#N>D}UTqEubiVWrfcJwaxdryY9X=!9rIliwiZw9Dm}@*}o2~B?&+WG(<17 z8bY&on)<&VKzvyU3+c2%ow`)^HZqvqV0gA+P1We}9;n{WYzi1m|5a8_6DDD>?30F! 
z!hm~EJq>6l!#%ggz;)5hT1GqU5I;`vh6Ak;`Ws| zxc=+pJ^k#qss0nK7tKB?+sm0qyja1!aptlh&^ZOQYDp=B?N{%mUqq2H)vYSgLh4oq2I?gS6PNq`+-0R- z-A3USdjpFiLB?ycFEYtj4x91I@`14#x(Liz;1Z;sk<#zFYKRcY1bO*Q7KNgs$6VWE zo9f$BO_f9X6`C1jJVw_Jjh}u}@M$EfBs1GbiL1Yv`c7&S^o;rcp%DLGi4tPij7&_Q zfRI=P;vJe5(*ES7u-L%qSWBULu$q%02>Gm-cWBJBOZ-k*$V$lH;_?TeSlBu(46#^Z zml7e1%Lqvl+Y9DTd%MUaG!cE=Eahsiw1#ANRo&7+W=K+)d9ewsbDd{h)yX|&mA&y3 z$xsgM8~7^l(l8c>%x57s08>~V;4^spZuRDdN-Jh)-x%AS51TLab!FnmDy`K>ZT;`G zR(t(J4bod{@W>L5n{qpT8mV10pP@#*H9H2jaOn`dl@{yL7H}~2E5LsEnhufrZUTcD zbF;^hfP$niazF@v@S?CH5c~02YE~3@D$-2?=FP9Uk|C1tHne9p_mFK~<4`-dW#=yO(Zc9j4pkdHm#xeDPK->r#(D zqqcLIb|6qS0`8q*y7mIbTxfKRb6d9y0Y*i|i%>YAY8m|sU*x%_@B-5nw6B}(ap)0ad=%7uL86tg(( z_+?5Rv@pF~z)eAI>$N&W5XSmX1GID^$fu+Jb@`{Fw!;Do&~&6!FWsa#>Ei(CBNH;} z-&M~s2nN&4^D6>hNVUmutfE8H@rx@EmSV^K|GR|{UiuWTAwitCgajOa!Bu7#Kt2mN zzJVK=L{VaSqPS66PqT5p?KrTVnohmH<3O(#mCjzWJN@aEed@sB=##`bw$JjX&pZ|U zlQ>u<-2bC5cr0rYNm=H!A|<&ZPw7JI5OhLfGec7NlR+>hmdsR5-AJNh>!L6*RRlk|Ii)F^DUMGb_LkAAphdJ^ej%S0!= zy(jmMo$GtUYH=SBOY)YYc`P#dRV-?unpbNHrWNfY27Mom+;G2X#K^Mn7X23(q=f{x z~@Bd|#0zd000Hs#W; zwDLG)X~KiN2Pq*@b~^FPPV&EXaQZ0zHo@Bm;i^zI{vl zHq1}NZuJ$DSd$3k_GsSe+b7GS|6k9W9}Wh*J&{p;&bX^FC1b>{?M6Cfaisx;x8xm| z{vD=c=8*N)RR{OdP0oY7;OPJu{a!-hWuGcC7YvoWEQngR&cGjTkr>Zvx&gfPo(=RU zn~Z^9NtSjgtW9}Np3|1loR*zP)bc8<>Jruu^b74vcftAALO@2_n0IV_zK4`cQjV4VjNU0njtsB1Aq@kcPOwkJ zd9{n1Ru+Kvx!qo1FNjfbuY~^){e-$0!`U>`di%)F2P%vRNqWtI)?4=;%bo4w`$TeSO zfG8XA;XeF}x&V-81v6hKJXQx>C?;hSt)*KW4jEmK>}2Ij<2YyZ-hM=bG`6KX9qFFo ztCt^=isPXiJ$)Z<4rrf5BFae)59wya+XO!Qe+5kcbRT&%oaQ|kk_s}ECtwuU%UoyV zDh#+p=!Ic3jKZOrW#AjA7YpAl-u@a$#A({`u{+Yx2!#=pqv8x1C3xrrU-$m}_Ohg? 
zusfV^-f9f%X;oDv2pm5Fs~zS&_u|!Q!@{{tcnYqJTku_N_y_4v17ve9?WRl9tMdhc ziCnt-U4Gatc<%VE0IEL$07{TbH+K#YBG>pVSV+DA5Ovc~;cxJWUc7eHI(|kF+oQ_< zua9b&;ngsJ;c@?16qVG(J_ls%D!1(vYtehR0qYC)SV2-~z^#w5;KwtXx|94=hc~F4 zztHD`#-|k)3ccOgU>9(>Jq*NxeOAbcuj3W$m=L&+zve{60zPW$J_BbE(cKc{mZRby zo$pR3(J3X)oPlic+BElH9&Sr_Nq;}ToXoDT+aSODH(JyZpP9>X?@iw!f{b}2316UN zGWx2|vOhAAqou88q1Cpe$`nsi#%Y1#Pbq)`Buj`p z+Xzom_*YZFg!U+ike;KSUjja91xL(NQRl15t-O81yUw&Pu4`Ryw$f zM+Oi{f7nqo<(#bZ$sy__q5O2ia01#KeH>^&gND=>vcPx+dd5npM%*nk7QL3=)qG!) z!)vIhB?Aoc8Mr9Tr$JJPL@ORN;>u{J_&q z2aI?4XRr;Tuq~ttjUFIz!5}*rA}L>kfP8ND?GFej4<1;Mc_NWAc-meW^V<0}z(ZQ^ zcmAa1Pb`5cpzO@N;Bw~01(k3)>#}=fGAqgy(kj*{0DVScy~=O$pqI=w!l=96N?)tK8lY!I zetHU{%y%FI2@N7>Y6;W!*(a2=kYc$nE^UHmssRmf%lhFB_?2~_@?x!IPPb*WcW&+W z&J&*<_^dgUH}4QX2cf?zI=#c62e@kmK_XA?kw=MAuv{qkG@luk0|yud*LR{m@_t|v^xl|g)@OMN^ef;3(n&MaHK zSp$Dz`KwEl$EWAh#s=u(zi|36i&NiYflp4HwDI?s;Ad!yB&+(Eu{2=9lEE1>m!)+7 zdwLW5QKGc-WL@UfuoP|7nUU?^hhe0O8aG5-DG2l0P?kjPP$O0kATtbPGyNZZ`boqi z_U9nY+lY!t=$IZwDoP|}hq`XFZ3FM1k$>(r4Tub}Pr+}oMaQfwlKK*kTci+2U&HYY z<#TnOV@efX-lCnfAn%(62uW(IOwlP16VGDRP)U2Hb?D7lu^tbpU)qgE%0A@fd42%8 zedm)AK!I767^1w5Fi7=68L(dn#5qg5NW1Yjc(!m_V)M;m)TZ>NQ9qh*6e(;gea&cV z{)tDvrZWO8qyGx1pX?sPbWc009Tlq~Hmb6ewSS*LMv*TOL2|;AZmX=ZpBS6E-x;Z5 zdt+DgTzb{8lydY{ayvgfw^*nOa_Mt7x6DZwhIJzG(AIG)jpqM4g-`vN{o1l51B#2t zq4aB>q)9KXG-Y)B4{Y)N-7NaQLwYQq2kvon0t-mx&k_Rxo24I5fUv6j3;x2St}%HJ zr{ZS~4$!enjQqt18S(JQtj4H;%c4JM?f=XZ#Y;1;y4IvVI#4xOZ!sQ|Gp^shVPvX*+9W$wF zCQUknS^_`A+azJO)a4nz@XvhzN24Z-5TYcxY_Tw~{Jlvo`dP((&Ad)`{+rZ)KORt3 zk^Di>D`Qn3Omv$D!a%sb$ykkMuyy6@*Sq93j~t2}EB>v%t$#ngU!f>bn&uh&mb1Jb z?>pAUORjKEI;%cb>~?*tI3Ld&uW`g;DlSrXASkod`A?nJ0L>9-&*mi7d7mm_!s!6R z^R;%%;+(|4e=e(m=IGypt|=uZ_Hi`|qiPetHs18#UY&0AQuxNV2Otjuu+vVsm>*!J z{JT?y4QmB=x(Fwcpt*gvicKJIh|NTQ)Xy{!f1pZAW+Nl8k-yVD{}s z#LlHGN%+=qFikSTX_GyM7K#B4O-+JP!`n{>i;cl-89dfeqL<47f8I|UxhK#>2~s!Z zi~I{FnnZpKr|S#ug%g02DRh2(QZio%LIiav9#s|m8%b)@4|#Om0&?4p%LbI?AZ<=y z)-f0yemibU0-k6e+uz>Pni`rz%b1|y8x_aE@SM%l(JAAW%lq5j#TpR43}TKuZLw8+ 
z<#N}{WUANeK{TF)d(b}ht!U9QtiQM1Hu1Eors{1=!m2 zs~i>^6l*FA`O#XFi=)7nr@wOmF&|y~^nQ4d6AV0s1=InYH7!-a-C( zJM-vK!8MHCV7{iW|MPe-{jtPv)gcQNfSQ7(QPFh^06F=a-~hM30xlSW3^BlFWM+M1q4gM9`c}T&oLO)eyr}(~*tEGOW)|NM{XsQY4 zR;-D4zA4P)QHSi+seA+3Jqwn@0Bw=uG&BgBC zqnwF^YVo(yI(?d+&&i?YDXfVP2@7<10f(L3sRUJtA1l6;uU`tW7DaurWoK?O=4XA4 zpI;Ov0zk#vQZE2z!bI$7Mbyp@PD5P1T1}k8-%zx$K^0&|NwazhL&mc{Qp#|LHm&o$G1BU6a`LB_N zy*QvPZUk|a@;~wMx9SLp zsNMyB$@0xiUS-rMW_Qm*l>h*`K`f12`soP5;V}gzU@F)*0pV_1{}fhAAvn-+meclv z^%#~pk^B`7MSzV=T2DDE{IAiqiyDCAtgPGV$}&z*69@)HTD>VVNADHiu!$(vqzN1U z;b2UR1dF(#9VFZMulm2a$U1qcU&l7-UPJBonSvf-?9trRW?Wb~e=&J0wy@om8GZ)H z6IQ$RiBvy1|I!p z1+1WA#aBmZq0rI@YHm#K{`;H#Xo{?^L<*)8*mbD{S2$Y4f4vV2KTz<9{qR*jhb%+1 z^=UcOe05(8lRBbAby^e{-^bj|@h5B=xp8k&jcHB|IhUeDeaRc`FB{jjV;Es;dO3%0 z2iCfAwQq5^K|Zje63p$^eR`w+oK}7!NX2iI&j8*D2dLsj=P9CKN3n9H?KK&Ue298Hxyl>}ULZbHg@@@M z@b*2*EohFoE&8xMK+~P)wm&!Z8NNbV_J8ljf89yr(%=hu<`kxq0N$BigW`26uz+ul z#y1RNeQNf(S7|t?j^zDareP;!yJ`Uhb9|Qymp4W}3HjqaNOukfWkEk{p8ST5BTk#Q~LyZT;MXxmqh#9`C@OWQ?z4dF+hb0}X6-M6)Hj+Ph@O*8=hj03~`Hj<=qnTdai z??Rn3x6L@M&17N19x!Q)mz&GYz9}jh_<2a7eh9PBsFAzPm`;6%&E@tCw_v+m(yQhV zs6`|oq>=?}q(=+jHwN0K3IL$bdTEde^?NF20EdMFY@f`0phy`ex$unm$<%k_rOoL3 ztpjOr0p$TZ%pdNH$J1uZFAWXtZmPliGgwL;yLIkueE{k>0hn{=+;7;T)cF1H27D`@$!VlHXn4!;tDRQv>)-?$8nJ7f3MBg&A5lo!?O}?TxqVrY$ z)|s{~>I@e_))bawz#n7-Ul%K;y3lO@Bg>zTT}e@K9o&<(mVO-fTD9{IA~$;sC*M9t zEVqp|&`8f$+cJPl!F(n{Hk@GA%>{ZhRbi>52f80@gO6Ki({v`?A`>9P-{pagr^XgV{nJUzGjNw5eW=y)1xXi|OTg`s8r7vfz%bNWbLg z)PKSV+SreCZeM<*gnN-f2K>Vu$$Y!fS#@=^td<_fVaxZZYDil?9_~)VM`P{5qs@e@ z5B3i4o0<2{SX!9svcDGG84wMjW^mNgh`Mdf{?hVtIa1n=-uUcgKQ>mrZS)I=4!%=V zDL|HgzcpM9<6}}2&QoOBKXZ+AYQdJuY1bxu7|8RN$CWs$?U{7@j2T{U0nsT5QMZ#F zalD-PJpepss1JYvjx$r^78yYOa_{%%T!8i`L1e46Je|w@MM?#*$ZfS@<0+gH1vwsp zN}AZeg}LC(i2!~64rDvfvJQ3`phTGcvSh0@YI^Vdqd+aQE#^hJCDAw?XX%dtuuY-Q zaUrLBGw<*tBPv+3co3`m+BPJl4f_r1?y?Y2iHg8UBhj)xbYh8PEZnRvk@_xSllfD% znGJH%CYC(FR4y3)ctn<7cAgQd;$8DGaQ>(#I_yROuz)*IN~`m| zy#w%?{b==lsH290A)j~1b(n8DUP1c7FcUN~5!}-l--up$0y;MUG&snj!R*bge6*G( 
zeaYNcckn*J4q!h?TMTFU6->N4e@`6yo4@@`}nBHT$?#Wf*NYBcYLxOb&Q2b=yj#;v?!MY9nJLSfN*(A zBHDgSvlDA~);$(Vl&EX?-M9Q@br56L+0)73*oNwxllB-+ds3eC0PlLsDtbU;*eK?` z-t7hFVoA66zR-xT@%2O46n=Zf@aBvKSwG2N`p8A*1G3E^M5|H@o*-&JCmE~$^MgOJ&umOsTgEOkJyk)X^0ZqO!nInQ%&ngZa_mrgR6vl zPWfCWs9Y6w9!fn8(TZp3U=Uml!MKKLSLc1Mz2Wh$M<{3`@yzdVhz29dUd_3Jlt$z; zS0L+Ua*gEADUJ%;jnnI=h*ds|+-fh5;+WtV$l4Q>)No|Q?5L4=D)H)z-3tx1l=ZDc zP{&zx5zvlQ1QvOf1GqD-kHJYokI0isrU<$Rw8<-WUttEyj0iq?2=e)u?+zyc(u&7i z9}h7D;KsaX2*!`?VBUm3(f8wG(`wfyti%KOYbcmIe2FxsACA_n3M8(osZENSNp2T7vI?fEG7?rS_&VgNaIgN>=VWe<8*>v zyRnw-u_mh9L(3zpl^q|_CsF1L!XNPGM`Y~fFK$0m3#P?R)G?y~;y$>np7A7dH(ed| zjiC}^k9LA~l(~Zckyp#f0Lz!y%Ca&}&Qi|~?wMm$%$<%fKm5xUqFN1q)Y4~DlVI7` zL&c(XP@^qNpmaQI8+B)$KFWl|2boaZ%w-9#!LU=961iq;>EYgenI##f9xN*c3wx^# zJq1_d*BHAZ2ZLQ~D9P1Z8r`IQWV)H!jY#6Rid)<(%(~HR->sCsB-Fvya9+pRru^!2 zwUNZF&;sDuHl{xsnsT~(H#+zQnLZzcScq5!$hJGZ|I9=3vuTlG^kWF(&HbHeCOVI~IC`FgT<4xiNLfF9YM_mL5rQ0X$k{(dggLSeAo8rABKeTRMY9Q7 zwHuD-1phs-|I5crsrDBd_`!I$YN*A=u8(Uda9y*!%5j7I;Bz>?+-n_-RJ6%4lAXU` zeU2JM8Wx4_M1#*jL7Jo)CGoy-z28OQMAAuA&nv*bQ(HC|c|wT+v97_z-i+d;o4j&k z;V^Gvpqeio4}>UMU|(!4u$$6hyOR^br6>xs7plu&JAAzr1}P4Z7tW#GvS}vuEAd&y zZWnZlPRYrS?>C$%(BW!H$Tq5JH01rHmY7;Je!fWPD#iXqh(|+XHwf&fE9|o+YED@a zzJQ?}FucIh#35UM=iM`@LEXL|S9xMWif@cM;>~$13v4pwtzq|Ibty+25?!Tjc&Ic^ zG6cu6v=JUJiAK5ijS52y54$)4S7_j52IGo?t(g|BV&ah~0+t6A5iaT!2;^VjFfNmE zn#QfarGBpXJ_&Q2g;}h9rSZ$jgsf3eGcbYQTQ)|MvSqG!><4q)H*Z`81%~5gyqtdZ zOV1dLA`32qY(C{HVu^ZDydA6EgufW%noe4d(z^SrY$gUEBuwo%!Lb?t42nC{oWA$B z%RpfCWKrh{5ogv8zm7rve*ogej}hXRk0j(wyNuJi(zguB+K;yiyW2N%?#T^#SG$*| z+GzrqdZ0P4$Hwc-re}mQmx**Vecmzmg2S+ENlji3$$?ei^a{77?g;Znp! 
z&Bbx}u6}*vP-tj~A3vjkp7vT#Y|IQ!M~;Kxc@JM)V=H~1o1K0iF_-hArv&mE8s)r> zzqHWMZD%y^S@l^TNIVM=2Hg|cwyI*Jlh~3XF%MaT&Udu35(oNEl>3Ld*)InOd{l-D z^2&MDAY))~A8^m3nBCRI7o#F2s6v~-Wb;=2n9yPtJ^HUv@s(0FPuW>3Y)DdZ`xo*v z!-^Ge$jTGIv{D^ln?}3%<_t4>Q8o1GU;tfGpJV&?KR?TZ2o(#U2L~?t201dSEJnPa zs06!;Yoan#Kiq6RbbH*NuRRHW4gdfqyc*Ftt-l&FV2* zcroie;0bb<_F9~C=w7Ul4eM{tFxqu14|GFFLo{Z>^u9QJCxw$7>RLR=1VTKsK0ofO zTlgKkJ7F9;TndK4kARx~`|2w6FoaCijNXpP zniUIn41!Uok*HSj&rBTw-kK^?Q{tOmY(z7tT)0L>xN{|Kj}c^?b)gdm{pNeD1QB-C z^(cP+)%x1Ydk_o}wrZ0l2D-n4sh)Rr1#&LcNQv77T}0vsb$39lX1F7YxVbE}QcfZ| zjECN6b4^K9SoFuvG>C(!NiK&*J2B~}VdXANFF%T8iJzf!9h7%<9XTa+wbs%TH7C~* zu2laX0U}_gkBn6Xk>~k#6`2e6Jkl!|@NXTew0_h&#%)EV=?Fbi7-xE}B9 zqhK;1qA+9S?C#PlJR3Qhw`8U7{1CyHM?M(Dpt(oHVtTbxH0g>Jdm=KL+zrvl@2hus z#h*CSQAfNY@B!cg#sVTF+moPcrtL{69Qw?s6wwIbBKYa{tn>Xu7Z#p5*Ry_&a_LQE zt50#mcB@mU<|EZAKlqN4p3v{YVcO1n7s-1T?Oe36uA*;c4lOmj-PZhckT7VzfQs&_ zy{S3Wn$CI1m$=WgH8{i+>1oWh9zv8fu_D@=5cekhKJbddw>7zSo5yNqiF)mg!4S3b z_Ds2XAvN=ca59l2QH`C?k~?sgDt?ng9~OtJzs%TbowPLun+q+RM?p)39!}7Ip5vJG zndKtpdBM+w=eC53q|#15zrJJ%pL0o_#3AF$Rd*=4`uLOc0k?m7+vRsbv7ew5jM$~(dR$>X7)U08>HtLv5B-Fl`ca{b z<`DqWdfvqlu|&;q3vRQx4l{dmO;Pf*;c>^egrJhJJt5&Oy9vEN#UE65QI4@o1!5Xy zJEZ#cSS4J1Q`7`NJ+9@;QF0~!P8LLi%U3SV&Q<4z!UyreX9%v;JHunoA&#gLF&^(p zq!FQ`^askA|FB%moXL&|FOCmjGw-J9*=f(0ySf8`4ikJd8w8qzW$RQ|i3!N5Mo94`~%V&Xiq)D!+GI?W()RHQzVq zpraO~bgVm!dL7QP1yB+KJc-3uA2w60bJa4%+?E{WfTFAgzNg#Bu=>w}h8#JaW6M?k zVy1|Dsl+2HKu&?VLXz}7a>VX-KEnsUMq4A8Pq6VM^1|u zT3jhH%aHQ|Ly>EpZ_S0_WPkS2W-H#kw0U>=OVhl0l4WwDxI!~d2%6(8=~|zEw4M9T zTSHhLDZca-qPJG0tHmfu@W<^_(7YL)aHL>++3qeB^x(KaA}2bpb40At!wq%4s>(MQ zW33SF*TXt`vlW=%4(%MklsNbAxl@LMz_3VEE7Rd8u1eO9Q^4r94bJZ65xqgT>ciqY zdNcYFXh{dtkw#z+tr{K7A+q4U6I%QN*Xnv#O7`h2=qSoKacXScNtQk93}a1^C^JRX`m{x@ zSN}ofAH36Kn95IFjA3Ub~m8bgH6wnQY#F7@zYza(6GO^5MR6jS?)_$#n zR62weF{s1kpxC)uzRcx(w>R@?YZ~=*AE1LX`q+)G__L)YVctDU5h5v$*|vUKTF@M7 zwYIs0fX7Md;-O?^{*RJG)6*^;Bp~Gl*9>x+-xXXs$$W^_+<{S@uIwh0w;!gQVs)3oiMl*==epaB%O7A)IdbSG;okc1vei{IYf*#D;F_rUvNIHxCF6!}LGdWos5uP?xx 
zZ_C5R6AsBD?z!c|gCWoMr2Um3%puSrWt=HMOjs%4?f8uigcz-#3qaG7UskaG$OrR< zqK3WdQBLuv)`*)t(wDd_ztzHt zZB-G;IcD*WYPFVT2?1G03rOK>_uDp*rz}GeiinZ6LvYDL)yGf&-k8PdC}r~SO5$5$ zXl7)Ci@!UH?L2is@_AvOlm_@K)0^Jfb~WDLBW@u)5kS&F@Pw<|+kg;Jmm4Us&3_Lh zIdiSO>|r|~wpf`7faTKkXiR;P2>9+J{{Ueg)jrDNe)_ZZXG=+vSMw?3njltDPo8_M=F? z7-Tr{1o*mTpy%9#Mi?8twHQD{iWen7I?T|%c1=0ELg5x`vXFp0w8i>Z?nH~!m0a?s z-p^&{tg-9x%`ZsLxvTRCt&Jy}&IFAi z70C1tJ&d2oAhN|V;NLj4CJ?7(%XIcr8vFzBRrClblg+c{x&tbcg_)0|?FtO(PdRHM z@p9BLl8x)Ed%6tY%Whz5@8i3YCMZKx&Br_+Q|@i;kLWCeP%xM ziB;>xJ9T|S&31iehEy8CQpO$PAvP7U+4tF(6jbeH+>lrKlAxmG$48;=`c_>Qa-Yj# zg#~0U4`g@RT{nisv0&zS#F6VFLpzgu=mRJ@S-wcg7Kv|jI{0;*_E6K<5YxYsH}TS& zJB#7zsi0EMRDyOw&;#wcA18K#rA-m@-#GVIyTb=*odH<34&;mx)k=^Sy%e4N+u!Z| z44GtEGXf5cto+NC%9r*&PD*r)ZIOtw*mQ3+H`W@QbMTV|iT$0m6FG$t61k{q_Wg;R z(A@EEXgG_G_B`-*sX|tN6NbR`e8P+o*XS2$mo!k0h99kl%6%6O6tz)T;a@UwUnKGH`V0b`OvKwl~eQrRH*$fa@b0 zmz{5<2QDw#HN2^s!7e`3i14x*c8OU3yiP;ZC`j^I?q3pj_bD8sV?^qcqO6t1(Zjbj zvFzpQ|ra!&o# zxzvK(?IYDsI_8_VG|)YhKC&H-v_9M&ae1JR$N236 z(@UpyZrk-0D6cUvbI!kgC1SMv)r8m+ZCs8jw(#?Evxw>iF-Oil8Ar6*Zw~VC4+AZk zYH^#Mhm?^QbE;?&;@5@L>`2J_-`L)540WVhu0h!bc)vq*6k20bEr6&XLpa@7T?qelU1llhS$A9}v?uWA8pXUUPJ^L(($q z!{Ksp7ktWt5^_#sr*kk%^rUN7A_Ht*;$p$r7kjY84G3tm@-siZ=E4y)^@*_O6gwci z^vl%vJYRM1+Z8qgn%3?Zz7?Cf*R0qbYk>8b`xA`Ue-J>C*;OZ7Az5QYRb?K|R)Yba=9on;u}#IY0UpB`0BX&j^S5u zc%y{Sx30Q`ihK2A&j_{G2R2$qH1&&a^LpX~AF*+AB+B27(G$W$AyiiCM8j{QLw8M) z3QQlJmekA@Di2}e)>E#%V9HD)BKxUun+yyls;{FtE?;jDJNXRfjLv|74jVm-D+~yF;cS=O19PY=qi`fO_V=MTBF7lIk{KKVfD@ z{GIPq=k{dGNyOqC5r|$~>mSLN>Rr3UXzHq*33pWUn(tt$bU_nf9+Qed+BE9k6_Lw~pf8?@X3W35EkQB>DO7`f z&Y9EPx-zD}XLTN^knB9fQ;)ArT*YyEd?Hd-4mgQ^(qOb0~$Mo#GvyfE6&B#sju{bTy_}wRBAP7rZ&X{UPD$qtz&E znUp%m2tE%tqgx!^XWOA1ZA}ApNv;L7ncB$ZH>zBb4ULY^=Jy$G4W~ zcn7wf*xMEYwggoxynTZ)$w_+N3jqm}ox07M8-LSbPxjKtODy5vS=^@#s~s^(TS*?# z8b9Yeh%RDV0dcD>{Ps8Y^m@cd-t{xi3%>Bwy@e@=z-)zwRkH5M9T&l7he)UA(1jmR z4HOK2<$ASRM?^NXfqM3Y-zzo68m|bBEJDPQ>lbnpuFm20C6C(MWAZDjV;{=%ok=`1 z3}LQtg3h|&w6$4*DGCDj&EYc&suqSrYw8tJ1goa-KDOEgGcLvWmWq+aLv~@jGpq#; 
z&b6EIYFk|qRhWh!T4bUWrXhbb(z*-Mg1;*0i{_o`L_ zj+Wk|6Ww{j1uvmA;f;qq~3P2P@5RRn@$w|56Fg4B$_$NQ8}_fvbU=K zc(+1FgJ)O3>xO74<;3YMxE>>Ir-PJah1f!_i~~tis4A!Z1hB^WT@0evSnw3_7Z{I% zu_zpF`e}Oz&0F-XRx{C|0Vf)8e#jk^BVAHBc#SecXkH>I+*V~~Iw3O(Pb^U4EX&-? zhMc+~tuxYaUao3Hby7$mS{*1*O2+;DIix&HVnapmFV|Km$002!kC%Uo@0(O~KO!L` zmZwCRd$AHN;g!|JNx=E%a)@WNY7{%brzLLP(!^G0KG1(rUfwvF^|FE@>l8z2JOe?+o@n2iwJ z==l%+opnw5R~Wj4@WE@wG#C}_L*Jb;80x^iNF*oaaJiQ9c1QY~ea z;Sw}n^yH0ddqj9FB>M$1#yo-TKC638qc3pzIT<%GBJ|Z-vRHPS6p<(A)M@wk(c+!z zHKs}uajo$yn_27IVKd}DY~NyB73A_`PJ5;)Uv;vo>B zU7LVrV3o)zW z^L=-!Qq>mmq25bXD;nSh8eazvJa(SU(?M&oKC<|$dJeJsdk^1Cp^6D1ClDDulyp@i zSyAyGr$2DnPkGrO;^=_HHp&Gdrx_CZkavAB?%smx1;5zDJ}ZJSJJO*=@Q4;q-V!BC zD;&e+JYy`;t|g%V8rQv~!iS-{qxI=vm)6hVpq@dxi~dWK+@fExq+}9)g0nRySVCWp&?chw53~{%bohXag2iD2 z+mGv!iDfb#5+S5QMmM>BSNVkKN8Kb&T6AK&P#>~6iT*(_Kgbx86)qz8+C?AW7Zcv6 z9jY^|DC)W_ck3v`M#3s~l1N6W_#A-GfWa8^6;Y`a1+xWT`6ID9kBJ+Uda*aW9a%hN z4F7$MOw}YpT<|%)=D)K5-1PPn+gTJHjDMr!Er+4NGa;3Qg_Wm>Ri`Z4y}4`rPV zIfgyP*@b}<#`>{e#gr`|9z`=2OXGJrN!@8_fa%lN zu_fyyfuxPAuz0Y?Znm58r!;Ae06Mz<=WcNmQMbJrC%cc_nB62(B^nBOKhAbx8BF_T z1o3^vKP25gM3cTrUmSXBxw2QZ7yu%?-DTBcEBe(lk zq#z1ANFuEzC}*h6s~@($1*^cT?PQMZDr+%RssM&Zazjq(L2nsj zSzzBTG4LWO#z{;^I&KCN4|6EPmM$TUUVyni!RB6{5P^z-+z_RAfO?as@bs=hGSm-JYECY2nAcjpt zoz-!agq~vPQu)K6>Gq(Id&{t> z-*@d-kw%g3?nY4QMx(FIojEY2@EwGcyDv8*wkwHGFlB05HklO_--ydxH3z|kZ%HvVol_{2(I z_o`Dk;7iGu544X|%6KzLk+ldKS9#FoGhaP&mk9t$BcT^B-8_i3#c7^780T%a5ao~9 z<;q^Y6o*uz<4QeAU_)+HO8s>tl3VcUscJDgB8u&rPC#m&aH@~wn)tvUuvz0O7D-Pq zK(r3z_;~um#s6&ey-`Ngz8p+_+ReB33RCDnY-gATp9DxHn40{LvX;+Yl`h~;es2sp zd@w+Ph=dD!f~VH@47oEtshbiNCcpK1QaNSk$XhC+G<10iAQq=tiWd0U)K&HKyCk)l zIxl*e5A~bhp#NyRg75~=7wJXIQ>qTD=IA8`Ni1Icc&a7Paz!*>Gt(1IsVXMfHY&ns zgkB#r1KDDYTrnztpv0!qDO^H2Qj?0V58K2jS1hhC#=AZl(`tuZCHua{FUGQBYRM8L z+mtH%bIzX7o_Lt2rybm4*aI6od){l*x=p8a`Iw=t?(vAfU#$L;bHioVLoL}MDXc3@ zoj9Afk791a0A$#l@E zV}?|m|Hroorx7#Uf(y>c^LA{qa-wWW{p7u*!>OUSYwIvAPOhPJ+NIc z@Y>)*rs*g1TG1x&5hhlx<&HOv%{xtZSwk<|n(ne&e0O-bz#0@?55orj+qEx|C3)jC 
z9+o{~#9TXG{KdJSUtGHf)Czf1T2`J~Az2MbOhW=@J07ReJ}*qNp;1KFEnl6YtMDym zizn`&V~$T6*VdIQ4RBTaexoi+`4BnzhKWYOl~|wVrv2!9SOB5n52mc=*bGce8q~y< zYDQ8ISq)hvFQ`ol5gsvX`o9Vmf0RI!ICFE6U*HCMU6#i#LlJlj#$4ejgk`FExXWg? zI?c|Ozf#g4VqA>h>!U66B(BGa3SpFGzw(b8&LuY&yhU)@dy;@Mmoj7^Q9hR?%kQv` ze;YEh+T_wE|GbGa?PtzNEmKWuEv>2qbI$H)^FLdLCCCVkwO!u|YwlpYTvoxvww7ca z{`cMl>=&K;!V!8sdTFRojo}EqZelaxy{D=glZrhT(PqpS!&3fb`CiKgB zi&|kdL{U|mH2!8LO97SKlV=Uy6R4Mbi!D zm90X3&dzebJ}d(?Qn}eq=B&UbPv4qs7%iVBaR3aeZkROYrw~68C|TISh+E)NQLa|; z{k6`|0v&w+0dXqq4psg-%i;*JNpY%}l`jHcphVmXGgbl39y%F*^8A90r_anAdi_#LNj*;yB*iRJAG z7*=(etjKdfzl=M8cA9TgIZSHmfhIa(7t4~PtMANhBy1@ zc3-mQ4a*MNg;{(J!HS#F%MGCyA0BDq|2lk6$yMBFue}1H2!_%ISn_fy=-nXpFxm)r1hMA?0f|d@{XXl0WxLJhQ z9bE0`r){mMf#F}5wbCw!-k|eXiMR|V(2@4#S_Z~r2TDc4GGA>jM6UQBa&%A|xS;+o zVHGbHz~$4jV)CzJl|FX0i_?Faj~FUALcq&Z_UAO|dB5h>mB;K>1A6Jxpi6@CmpEVL z>cWTox=bZK))5=nhR0t_Z>WY=$b7Qr@XaU{ZSIbEgp`C!KyO6;n=VOWqJlH)!+aYJ z=`_nX&r?SYonYDs=^%|R?e_&Z%WJgLV6fK7w4`$IJlmhI-E*qxi-}Mm&I=A#*%H^5 z9T5rufhsf}{JrVS*3#sM} ztyum{arvFk`-G(-(o;=NThC<8TcIKy9q7KB?crToKe=5u4J5Gl5cC-bQhr>%S$>p* zw?gVOF3U*&b?>LT@)LGuNH>8?Rnm(Zw=qV(O7!mnVB&kWGCYH}T=gaLC6p5kC~h<^ z%MGvyyDlA|#Ct*`?9kwtl{F!b0o19i;tM(oGxWXtysKAGg|aP3&Ia3j9wJH*`~LIJ z=R?o4!qy!(js%11$joV5-ENSnntVqbFUf14D8Qh9KLP1E;0Yj!dvi?(2DFE$UD#x- za+(9=Fv=8BEw%)8!hJ>;=q(s=>@m2^Zw~t?5GYQSMTilDaKZE_l;RC7;*@JZpkA;S zqPHe(<5!~qnRZdevR5Vu>o<>TC|&5+#(!)6J+#jCq;0oyU9-drBP)D5fPa%D2&fCW zOcKn2G+0Q(V5xbxT6>zK(Os6lEWj0A8c$rCsDQR>31XM)EoD_cn9truL$5<+4_#~{ zWnd%FezN!X?Ist!>RN=D2EfN$&VuqjZSmZb5zf;hqJ56#XqZ}|(C%-{5i*2Z3P+qXetWGXAVh6Z^T(q(A#8P4G|L?Zc^3+ zZYq8K%+xpL5^{&^k~hnr2HZy+PstS~8Qp|apb1SqDLk83UVB$x=eyubwnQ0;T3`at zd*=>K#3LMFK0Zs<&HpTy0mFVE5h|}rD?ZEVlDOAc|?LG4H2u&f96ms zLrto(W@O-BxkfDiz{G(bcR5SF+PS~{(~Ei2SYaFZwBi%EwdB)O>*C=N0c7DU-RjIQ zZm%GrL3#yd@Qzpr21%IL2MjJorme6E{{s2p3r%70R=g*L5huFZ>EpT>yh?Yb$L$s= zmsROAl?@Q&^q$TSvM8ry9P^STSwO&Mnqij8zpM_udTGcjq6xMLNa+uG(?A;r(nE&bn{21(zo|uPRg8 zgd98Wn0}9wNsGlFP zfb;vAKh9VFbp)Dvs8CbjQX_vc9D*e6dSelzP1i*yOE0nn2hmw^Uou=zS=`x6nzvds 
zU11h=_8W|B?3VzXs3vtQ!WW4mH3G8HIYsgr zuon4U0!4jQ&?+l=4D!x5X?mZY^gl<~LFjmBra+_0(DcDcl}IP+Cot7%0(;_L?RS2; z2y6bkxj!yWB?tL4Ul^HbiT8Hw5_??$9iO+NlUH~OzwCn&U0XbjYWkb4+Xp4!q;9_D zp3oq=8Zg(q$tQA)m;1CmO-NX1t$*D4wL?K3%ouAn~|B0fuln^>`a+(03?`34q7GiWy027SOh>Ej6r^Mx95Ut*;5D zDnVtA{r+U3EZ}#ao7LG5JnFicS!vt)hyH-@;X&I%6uUkkL;QEb ziC5V}B!<6JPQm67%QpP&j#2(S%NJlen{FPm()c!ga#2PpyEVTo>zRQ-wsYK1`$e`o zHesL$Mnbq~Cl~c?{q`ckW&z;~6T;3T1Z{4Us!$_;5<+Mm!|I2fAON@2MnX=m0i(^T zx>Wiv?>z7xz1&`^EB|L<6F*igaiu+P(v1~1{lG1(vBfc%ci&?;H$(Q=dyPcHCs2NS~$FcYu+ z%$l&WGT#+zAJ0kUsdjto6+_Ecq?Bc)MYx*3L>R`GnA0WyiPNrWBl(P1JWJJ?cC7y7 zxF%L@2RQZBaS8Up^pP&NwNC+EmT?1-)y+d$aJTju^DPz0cPb&Bb|sC~m6ol;q>{3z z4?o4IM7i3-;|uT)Z>|ufU%XZnA<{wX5X#UHX=IJ0tq5xfXqf>R90o&&s;%Q_6cJ_> z)G-xoM3i%_zL`I`C2jI_Zm_#=;OX(7mkMKHJozE9wkP(i@sQf^14OGH8jNR5CWa~S zi^>|ZnG-ut`ON6U3JY2|4w(?^iDb~eU{9Sy7v-ay$q)F4nU|-`=42;>vhyQH!f7wt ze(Isc)J~PAI)TN357rhPI{XjautvBb3h|oBD{QBS@&jsgJ4at4D`Ywh@FMcO*RDHaS+0&i z*xgBzgH)Cn9jtWJHzT&61wLS0eDn~eqv)RqGIqN43M>paO}U#X#9yvzOlj-mEnh2s zM&giljemIMz~TMO5pa2OT&@Ghg_WpuGgY;nUBvV~-5&dNBr>a)Ic6r&v|+I7FONRG z(SD%A+Es#%BKm>ryw}GODsqXosa-E!NRz#+hKdRGMYN^>`$o3PnyRpYAz5Zqt+qq( zx|T$&@ZFwFz#WVBTyYy3O>~=`i-rA?98^(de?|In0z0eGw|9cU7o_BZt|z5$-sNm1 zJA}CrBj^~gd2Gaf3hq)}6Yv#{2AIA}|40+fC-^jFCl+-iYoboz9?(yIo^g0qocZgJ zZ8<>)CT~zfF_M2?0Ud;kzI7DPMR24cLpofB<=4MO$E%D9`QQCysJ5Cy*glyLEVP?= zjg(S~_P=1ifcnXSiYEzCvCyaageO5ARyp@5g&dfNRzh$6fNU)7$<^<1V_VX?-)9qB z^CQhdn=&wDDIX^H=!)GDk}w5$PnRBW+O`1me}~h_t7QG z^DrbHxgp?AJX4*Qy!!X;up8DY!_f;)#b+|t5r6V}1xwmfqw9EwH3q|=<6{Lo=s2@B zijwd_a&6@`H&%d_8`g8?#&v)6*UO;+LMD`Ydx?r$bx5!Ys8P&;^M-14+AB0t~@mkLsGk)}HWQ-HSA6&`ucv}0i%kY#C2)fRFf{{hGvZ;%R zw6K;(b7``lyZK#aXL-YNSs9mk2G7l=F4WG;dXK%`e|cboY2Gg+p~mCQAG||*R)Aqs zhbHt!uZS&!oj0~gLx309Al%e`Ez_xdBar18_S5obWe}_7|D=n+Yt#~Ce-kuJtMJk% zWi(m2>*8aSE>8n0ycTl!I+}ffjkcqxX#-O(Bv|6$oqm}~;eMH84M!~cMFd`X=$bII z%8P9uHENDgN0Hznzi(=2m;vtsnbCFVa@5A$XkT3&;gDe=U_DZzQfzkJ{q5Uh=d%DI z490gRF~-sh<4-M`@dI)Lv(a2}9&75Lr>w%>gw}2NFM{F5Ia5B9C)*RYqI6*{=@)I6 
z`5fONe$Ky(KZne2qu_7ku~?IJ%H20U>d}AnFyYZtAr+Nj+Q!5dARgQGJu#FLf(CeJi{_jh@f6~G5PvEH z!Xmx1KRCW_)+`<88|TeS8RiE+eUH*B*1!4;{3t>0Bd_gy-C2T`C^rz4t1M(0Ro-X`|7j!up$$-43N7I9J zJe;ybkbb}!y>f7^B+?+kRId8l7kE*vCgnX0ylrovw{6IxkL&E>67-EiSc8xO-nlW0 z3{wn0TsdK?#ou^*S)UXxS<6vN+tIVFE|hkA8dho(dY4WYyzWly$;%o zeN;&=s-xJgm}_U&5eR zf&cssI*`uY=8hITSk%7;YTN`a)1DdYKvE6^fDB4fPm%B|U_LYSIo0q0KrS2AIFmBA z@S334;_hHYBgl5U{0a4H0@kwkvEr~*(EFjMDY3Rc^R57<0gnyl1Gnbu#D91h_CVBd z!Pk5Z%gsj=!dad{g&5W^e&1tj5N~#SLm&ycU>%`)sLEnrH>p{raSCHQU|SvNW7IW$ z-c2^dOL0*krh!8iH_Wt6edPVgpkwB1JXP`&AI_Lpr~8G~z%{}=j8z(Q6eYa$J35CU zmF4?L4T+)hGpLtuqqs2bLQC^N-gExf5boNNp`Z6R-1F`#cl0f?3N7kz`^vqVFZ=` z1zKd@IT*)|Z2HljE4E5E^X_tZJ^4!iB>iPlbnI{Xx*g^Y)fesQ;(u5E{|G(*im_z= zAQHYIGsk9S%gk+km-L`K+q)y@7Hkk=QFQ($o#xSgC5e?A1>ZDq(j;8@johvHrbCii zCm9@z`bdXO%C}!0`EH_7Dc(Xf))C{2v?|{2Sv3BDXOk&N9UZ~cq;QnLC}`*MnB*Nu zXE_W821H*Yco7DXPD^y*GFOOQ|SDLFzl*oD&F@w@(hExo9(Vstcqzr75-nyeULV)|xN)shq~S&K8~ z&X~Dx4&mQ9hsH9n9}5m{H4p^^`ocZULu^Nm#WHXugSf0W29l;&eEZ_4>-U=@QY2pg z*Nfj{o2?pF3}{e#fEIZV{E|0-6(_ou6<7I+RXD!3!`iaEIhch9Hma0W8W$3E`s5yb z&IYqMvOGm%l&pfUKb8rl4;%_hq39tTK*|3{`((4 zA0x_9^EFDA$u5Hl-od-@?hi;`?RSS7@RQ5nyq{A8at5WDPml4;v|&B6$s3n)vDw02 z0|1RFIZf*@z1eo~BsN}9>93^Lys+^TMPs&!Mu7U1^iD|iJy>Y2LFb-+8>}y?8*`tb z!Cp2Dnd-W0`Xw@L;ezOUB>(Y6x5&m7EAx1{q#y+_G~*Ey64}S@W0N6;?ypTDyV;CQ za>|;+%8}hf9SHV>9+kyK$SuefzEA(PKhQ(%>BF|=puVv{;6NdN1ur4eYlrsPa+Xa{ zF-FWj?lkRAziOxFJxFi<1eWRUr zrm{O!NR!DZI7*Fcm_040KJL~#!e`(^RQn_`D^FfP9sxc3Zj=kJ949L4G^NAm5Gg2U zfj^%yKF-6C{D2nHGJl=Z8nALBrMz|X+9UjBlYKCGgCiZI`cQ01JGE}KG^Nj*x1e9- zm5PsrWg|N<_5Q7|#o-NsJOpB}!0`fB$y*O8!1-#BxejnhKxB=K9Lmq8>($KD30$W{FH0f-K4KUiSG!4`P zJ~?BtkeA)su-+;e8a{oIp1r%<>%juq1c?;aJ+j>nFH|?rsGVRLf)V2Ben+w)t^Ej) zTZP+e!aPKTPFg)eqT_uRNo1w+-y?#P9`}#iR#g1wA|aMg-n&9)50_81zm^nr|12q5 zP4(3pF@q{ZT%_Aq7KncN+ky8Lo28?q?l#MUlb;$6v%WG;+89`=&Dx4 zkPGUbEA6GfbdkS_91*==(7XrcfHe>EYMN7!df4%8BL=zjV_km#>~D5tdA*&bVKC)0 zGEyVwe5D&nzz!_Ed6GVPh_)zZxa}x zGHs>(>o`~ivon=5G4}xjw7E${Sru3^$}}Ttxe_EtQtU0hRIWCn$n~G+O{*cKHaz;C 
zt8?=Ir91zE3XdiaCxP!XQE8AxQ7h;E4-Zu(E6A5xvKLYU*$dJ<@0Zifss9hzJn*_h zf`5nyoMpcZgizK*GkwlxtDfQL6B}MPDvQ5iCdWviV0-Xzfgjs_v72@*Net;jk?cQX z#sN6UaU(>j3_ps}%=_!=Gh_u4mK^W1_!n2O?KS}-L8=bHMC0Gc(zSMte>VS)bkMJ^UWu@g5ZetJVgoKF3`LCnb zfexXLl@EO&HyrO_U^}26o2(_24|L=KWMc9H6;@ruzX&)LRrtmoLN%br7pCzo3Etro zL)XdqU4C?oJO6U4p1r&+P@)AT^#`5*^!ruJ;n(KTii!{vEGctCR@z4mH1JeR?BM2G zaEYsI29Ist=hFt7xQO;{LPN6V0vcoBoWQ19576lan-dBDp73JDe|?Ss@Hw{AUpkIl zq-@Nae_yYq#EdVZ9El&cpk#L88O^n!Z!i0gWdk=N@XupvToG59JJM1&-<_-D24)iS zj#w#yRO@hJ8GwBC!x=keODd{}1A*-zUYY@3eznakH{7(g%(?^^xutw&`ti^1c`hnm zFnD;RI+8N@zzf={~Bulhxhp3zw?g= zl*D{QR8SHBq15bK#LZ5)WNnLkPVk2_x|cIi5pEvOTV~j)@gIoo{}cB0*F}2;ejaUv z3^o+}#GmGa8`)?heeoKG{W?xw^7;G8a-WweXzED8%|$Fr{|9aO-xBYCKi0qFc<{;l zBEk3dpq5j@qUU2^*Rj%nBh1V?9h0&=GNa6s+M)*U8dLNArYFp^_z(a4e>wI6y5xKC z=7)(PTdD$NSD)&~Mi{uefAYE=_k(&B*7}b^_kEd3*>qTUW0wuBrnLg{ALn}h-#_9% zzl7-_m`KSz7-8fC>TY(!Aw#NLMqvgQvy#l`#VIee4fN(t=SQ%}T;v-g{x{_JzkTEX z_cQx5fx`6_HuIVrN5`0)NapP~A%2$+KQZ17c)-p-!d>lqpnHGUG5^0VCwADHzUoK1 zy+&n7t=)v&Qo4vV@-P|IlfWj&yP0o=Zxdcuj~RQ(u>(UJrXA(~%j5H3pE~yiqScFz zhaFgD2CV+!X{z%#-QB}7fuT1tRhRl{ssXS<=I3U8nZ1RUyWNgwzxLFiiWrvj7H-8F z4KagxdjDU31$Y!5_>;&($_!dw6AY!XFDs=n3{Foa#AG-py4vc=d=sd{2r%DGpX9cm zV$-QFim|D6Ahp!4aK{07EZsDGGG(9Ruinq@EiKjUzh<6o*H?EGk;?Aex?bvFliB;# zi-km`a@If%EgkG8%d*LSek=S_V|p9_;ba&qne_MO_b67#`X*5{qDRf1K9!_6$CLP# z$CBaLuH}EpePFAxuvU|yAwvZx}WRWdtLCYzL+4k-Ja>ehgP=i08mcJzw+Tx zCked#0&z*~y2TLc3%6Gf+H(F3K@O1ySUJnsfEGtbkPf86LByCQVlh6BW~@ zT+P*|7ia;qA_*AH%#iYd0=@vOX}mmtXC?Ea^n1%K$j0KpQoUYRjOd*^fX3>{{_+7p zsUtW-1dy8K!KPGkt^M*_fb^$_?`>N(fs;F+^<-S$@u#Y^Y>tSJ+Qbzj+!F-E>v%vT zdIbilw4f0j2Z}b`vt>U&OW+}#^X9ztAp$4#rrH|}MCuvfT#yq`;gS55EB(jL?*Qno zl;P%~{(Nv2t$Ydx`rpxghaBN${0iz+c``As^kBa7<7D%36_TI2w4#Vl%-D^6R)nQ`aD05Oy`oU|LE^1gX%G5FDw}438biuzpllv+2z}UjF z^|WZ*s)F2?2eEhM=~n-Jb`F8K_Zo|~s>N^T9M}9w z2UmslWI;^M`&_!X52n1D+1^L8dKEx(qZ2G%S8DdMd=wCPsQ|ecxDwCXjW^gYTj{HR zog2aT2lrDMNO0`Ey!InY0`FlIs|+?CvpXrI|F@HGLT z*;mIG=_E*7#1@&<4GWHupag8iM%XBNb=hA1%Atx$FM<BYl*Ip((>m;DuJMaIwkPH+ 
zoi2CUnKTO&s9t;*ZQflXrX*UBcAAxqnuM8Uv;l+SVh?>Qg_y{hqGiEYl1$TmLp#lFPR`y9loMF3aO#LJ0_j zJ#5YI{(9N?BiwR2%XO~5oGeYm$J0GigButQC|u~dl@NfFrG&umuj|f)APb+Pf_MWr z|2ReSPnU+IP7=aiw{pe`-Di`z5A}~bwZy0mj^e5Jdb7le+$R5VzOY|!C+FqvT{GYP zt-*bC%ex*O%OF3JZD2mSGUoW3zYv#aeOF8bh$jr z1BBCEk1(YM91g#2(Lx#PmK-&W-M0j=OdPVL<&eqM#cccze|Z*7V=CZU^7Sz(4_k$x z;zIu_G7K={FgiGcRxWx)@2COSyy*wf^3C@F(14v*Uz3Ke(9<$G|6g5&jHZaJ0&hwv z9%?c-U$WVC`QGjrYWWn_BL9;r0;!KH!e7Ujg10T~on@A+?-TJy=Wq7gj`CXX4$b&s z3(JE=UrA@vIkV2Qgl<+6s%veP`R@xT)Hst@QO#EW*l+lwWPb$P_RL<+Z>jIS+=DWc zJJ4fZ|Hv3zG<`j|t`S(!8=@ySBJN*1XKyhJx&GltfY@s)`K~{|Vx(Hz@GxVz+N|n9Yxq=M-UN$F`&*)MUE$|;T|r*al&BY zbDK3MB>nycB(dDSH|~NwDYB`KxeT=ie_eRytQ8Ay7S;e*mfKQQ70-v?v3@uCz_|2Y z_LVJA)^(;H_}!oT1z5i{Ji;dH>Z0yJzT^Qh=@P8cm;u1`QDo(HTo(Mr*!6AW9yFVJ z^lwt$?g@8!+R+Kz*@asg2q9pRy_B6QyP|gAu0jc9t!QFT{pPNreTpx?iTrfg)%xJv zPZ*m`$1;;h3S{6$lhsHarVWck3yxdQJ5HJ+1BgvSkp1ok{Q{~c2oSA@F1@RJ-v9hD zeswV7y^aJturP|0bMS|Bs)ZU|FQhy8DenyrhGqx@!tW`t*e*;0R!yhYS?gbC?%xkY zt&EEL+1(-m-uMOGkCdB+i<7au`FF{)uR2*tsjdKcD-I4rTU{6nD_2(bf6GFt`1V_m z1K?!RJ7V4n^zgqm-t-7B?T~usGzK>c87UL66SM{{{e;wYd2|Wgf5s6W4SkOt&~nNc zC``#_Q{36&SZ#kfX|ex}VWjn?=2ez+tIcv@^ibMC+rp8`9y;a(2BQ4f7m(U%CUY1t zfW&;5zs_osg=-;}-B`%5 z8I1N*z^dx;wUq%?AFgLZGUnHv`T1Y1sy>zzt=U6wE9Kg<>c%OqrKVr2RVcBiUEZ<~ zKR0iR%D4)2v8Zzx^c;7ZXBf}3TzrLE-so2nmDSAAzTuPN{IN*1-u}gRmq$f|eQ5H# zt9m25Y>7Zlvr`{!WIR(k-Np?L0k#M{1UG@4aklN=&vm%o6(hj=5Fo7ykh!u2TuNGj z7u3`fF~Fscru_4^dZ)Za?whCRBJC}9!xM02>koZ|b)yi*bp_71CwlR-YO;hps_Oza zS018}8TrgAXY!3Te!6JaRRQVT^u_Yh!quD|m~ksD6;LAL`GCjBd?>ZTyfY}g`ml#| z5NHn>p%>kpW?%p$H^88OzEc6M-+Bu6Sgrdn0p)iP?yzh-SNjppRBb+ln~AN1gySP< zbuJg(!YDcaGECZ&c37tx;qZWDaQWWw>3t?(PbZDsFCVYl*{r_a^O6D8vd!HuXwICm zB9*Y(5g=Xsn)PR zgmlyJVYhG;*=3sT7Qkz0H&BDaHENQ|W*|AwSaQn^A z@S*O`XH>DqVnw+EPhxJn1?^+rx9w#h9X=Cjab1+~Jo$w!n635)K0`8ytJFnTFt|3B zRaj;qkgRz7ZD8fcOW;%`IdQ)95zLs=+vw@|=agufO>ffbHAnFt1^ZzHP$_ESPZ^9@*c&-!Yul18og2FysYWC@!~?%HMrG zvURRVIHIu5Tjwaz?uk$`=kowRU7TmOz^06?M|$o)l2fF?Cs#h@hl1D;xvfHSz3pK6 
zf^*8+Y?`oFy^l1U1Md15z5uoBM}JaE{XL)FUb!|alIJa}}D;niSqu5?(jdv?Ek zf9>ZuYrYfU`IP7&)yJ=Td>f=tDasqv)JXBg5iwPMHG<8u^_*2(K^IIkMa-pomm8fgV-B;$(2Jq0tfkg1N7ZxO zgpPKr)hsn}ju*bGb9cOo)R*z&X2JHF8+4O3LcN0%%O$LB8%_6W$GQ2;wK*ESXIpoX z#nU-=&c%(Xt`6co9(zPB=$BJW_+CHYwkHu{8q_Mss4NoNNpSJptb#Fbt*qktr z>>T;6yd`3XDl2rnq(nfhZHK~V<2|m7x2;LFDA|}e=4^ga>q&=xll4k-d)(9euNjZ; z-e*5{H_eh~6?HO@^gh`g(^V372gNSC{LRAAPd9F}F7FH^E$zhy=2BFiNR##2o!-71 z`cj+Bn9t=)N=&>M;{5??<7F#xyh}v!QXhMyPpdxdUlgl<-e3}o6}e*S(7-_KsWhe> z*!OiSa`<#6mbL?6HX=8;rA!h{PQdz1n;!CF707{^03N%Y1@Xp-Nt~l`#oOzBPgI&C zkj;?L?`rtsv?v>?n!LT)G32H5y?7>0GrFFn_0|~g{CFU-!(UxlSi`REGFe+n#9s>T zh(7D>v^AX2=JT{};4L&4?mP{dkEa8bjnc=x;rtJRU?)prqg-&$UiECr_nO=h49u@W zciXH{5;vO*uBGk5@99t|7kXKk^Ubj*O#0$@*_YyQLdhH}#mX-`&29rQ zIN~zwSavkK>)7p7MQ)vg&~*Kr**s+9=E2INtnYEg@`cpAk4!EoI_LLr$<8V7FDz1# zH*Wa1GHv}tkNWaoKlGDuANJeZ95FQ7GYtd99olQ^Mj8c}D*)iLug5w~OQZt=c|Ryh zIeO&gQ9KOTO0b{9fqamHpDX9_}!k3}JW4 zblT{##qembI8+L6sTT~1ECD>ZzB})$Z41Ql$DM(fC7f^`d~8&we#xu*Z%L|2c~_~3`IF@$$u<9xNj-`Q}SxiM!zI^Lj>Abp4=(MY?U2)N~{M>$M73o5O##6ZE|oka38hxt*G=gJ_b^t@|( zDn>r$98n>AghiX}GYJ35Xj`~b&aYEK&bKyE?MpA0pM1Az;kGC;G%~B3eP^(K@cn5l zy;BX_7O3}lkYh`2K1x$Aa_*mkuF+YgPgL-?OU(Ca>RD=6dM@qfflrs7Mw5Wd%5JkM zm^d$0qv@Sp%c#j%gT3Lgw@rGWd>iFn(^kd?aKf(b2OJq@u;-iZV%ZGZ>BWXn|I0l) z#_PRn`RhN|Bc;!u5kG$yMJ$@EP0J^93rNR#^4d)jL%9h4*gU4w_^a1^!B4FMLya-oWg)&gayps{vvi#D@QcG_pVs-# znIWo3;jto{w(QJ!3ERGw^NCg0Zh4m!MJU9!N*IL9A5i2gUKqBz#hlx)Ya;O;S5LJ7DM@o(ATopxKHC= zXV~cHYeDoOJMQujHr?je1nhbZo`tr@S8Dgt2-`uoyzxG+HXM(r`QGYFL;hSxZgQX0 zASJW?uD3MZo*QE#*({6WWZ*#Sj~8yRXGqDaEKUqX~!gb)~;Lg>BH;8*B;)D&DP{TCu8-)4w_cw!-?G zAQN)0H1IHbdUNq~ZAIBnR~piVP0Zwe4~S1B^wUsVAv`zxl@4hdZs%X42d3|VgWXwn zY9`G}9|~?nIS9nd%MV1wOkU>NMnbs8a`nYe0VyTOHO5U@jxmy$V51DgWZLQ*Dmu%Y`}x4 zcsyC(v@svNjRh!pj#1cg4Soy!Tp8&7j0*{mjD~-9@DgCXEh za&H^%bp46+Ttc|W2pz7_Y=Z2d(Tss4<`- zwWUA4$=~Pl9Zx}(iGkjk(0|=!VvRfuJnZbY$=m(KTKi;#tALC$5p}?;M`sTSZUFj( z>$D=Ehd+aQ0|*x!w#?Ou2sO;{ObbP7PI#y9RUkF-v`Q*_hu+-7Cx2!qMD#ew_X!h~ z_ZHj^^`L1|U99zh43;0d(0D7cv|HGwzE-|N 
zd29+fSTZF;B3$BcO(B-6`t!I;qJ7Fe?AcB|zg<`Z21p*n1&f4_$B5wIFV$e7p~|T*`ITpn)!w|L z7`_Q_qhJ&w{F&vk4xBaU;C-i2WuH%ej?frqW09_!E}^Ve+0J7$SD?LNJubJ1`4#2W zcm|qM*=`$XjUO?M%Tbr~zY}hn7p#)f$-aLS&~yJ=3o7>l?0b;9Wi~xorWmq3Lfc1Z z6Kto~bra=;A(l2LT;WN8i zPk?dThRnnJ*Mls2{G=OUcggmz&)LJo5k_!sbBP>Agh!}u1NbEucq3U8&hc&o>Wwm2 zNMFi6du@CQaTZa!Dw|)P{Pfdz`yf}ShZj|R&R^+%0YbuxO=# z!DzGzhSR9k-wpBB_6VRU@M9GD(Q`h@qPr$v&}$~?9)FinTUDO4#-P>uIKP*F6E()e zg>--xH)L?!)4sonG>6KC;_8(^rgxmpXfeC)O8SDd%_60*OOepwlue7@_kG?JyDZYJ zg#={?B(ERYWxayYmPOEJiB5sn{OqaYkHF=DsQqc5R{fa~ZL!^P8lipHZ0>1~KK-Yb zcACG)>Mg8i&GM0}jrvJE&X19fx_Ni-W(@>wsHU^Wy5A0hprD5x(JGNhRlejs^ITSv zIVN|(B5w~*-dp9<7pezag(+{X7>`aQ8Jrc=_Pnhp+ClZhajmvNYD;`f@{jiw5h!Tx3ij; z)b9H2eQn#0==~2V5l5@{)Fm z@A- zz48dz+^QB6%SpSqN~)1z`mh_DY>&w2$vFe2dKgL>;iA=(D-oYZyi%d$uBktIq&Zkw zG7)0hR`jIDc)51`mSoYXqeg0uXrd2qeQX3d#8@Q6RPnQbv{qC z&zN#uXLq?V8Y<+RP`;${1-mAlA_|ve_w&nl1of1+3tG<-isBq%oGs&D18wTi)qWg6 zVX-`ZK>ii?HpPNcoEV_Gcgk2U6-VDM8KA<1`NZt_R=;&0|CYgkIIMq?2Yxt9rB5#azSmca@gOX)APB1ZcgHFv|sl$lDYeGz;-BN(W+i70#|k%yv?`G`h?KKI&@Ao zUa98lI4X{!qZKVw(~7I=cvNnd-R=ETYnW?#C5sZ@5b$>fqp4dNxzfe_Bl5C5IAF+~ zLPTpI|0$?|gamB}>8n}&n@`-M}Ev@0U{rySA%nB1FDlk zgClkd^n|vDZj-9*z}NBE<=%a2S9w9h(X}t9UlR`^=193foiMF`?>!gqVZkA`V*3+jxYDJiiu0 zzn6;g15-sBQJ#!+>Vv@ z+P{1%q55Q8g%+(=pPt#Cy-U2vYXQu~OqlW8KqvtWvu(Y|SeV27Z;5D;X|kSMn< z#NCp8IG*T*(eoZ|neG8|?YhsZo6T+A9@2dm9FyjUC8edCp}X^U^X#+F`JJ<${XORo@ii!mnR{L9T5DbL{`?A;{GM=vMViFN+U$9w zo47;|ff$qKW?#!uJVM&7(AR|^Uz!AGoYI0f&4K|OsJU6&`9(Wq0M^D?-Q3Wq}QfYa%y zAsQNjJNspgNYlu8zgN0+J#D(#NBpJ)9lAq8;V`JVcbwin-9IsM>mp4nnd>Vl>4@AX8|l$255;b%dNDunogX{6;u^+ zNY_i$`>L1pl6BqV+hbGGUTeZu_NaYA>6}Q34{<2ck^;o^wq%k>nRp;m6)%e@<2d}e z>_2kpz;TD2E$tOu@5TpqtFut3i>4v@{N3Yg*{X;rfj^el%nJfLUWH32Df z!|1FYzA6q6M;$N5`(j_x>OIO-LS{*8RJy;P_`Vo>Od^JU^L?0q@aD6UO%w1uzUN!H z9ys`-wb|;s90Io#Vzt)4`X~D!6{`R|ecsT~cCc%76nc7R5;@B=!fZ$=`A8fYms2ib za{3wHpifI_eVvEgdxeh^Zh#NwlZyGk{?g?wF1s6}r8QoyF@J8*2%n|!)((5TC zH(gr5_|f8T1#Ou}y4E&LX#`(4n8FXfu=w$DJUuotgIdb?b+W1Of#a~G@6|LH>f_e7 
z(Vt3V|1=^bx@?G}5nhM+@GX);+|p6Al=faW>YXb)>UoCK116_G<^X%Y+v5kL#IO}U zQ(Hjl=FwPq72)(7X{xNH(fo(t!l4|~L1C*)@epM6+rCp}=j;jiC;&u|l_g-oTOE7{ezQdY!SJpJ_?Fh0@r7LHCj z*}?ua_649`&oTbmR^Ct+=M+pz^kX>L6gjAqUgJleAX543kSmnf8VTJNr`}awSjOW# zvAN7J-(zTOk}AN@gU`i7AAott;7U{mTfNl&oXmBR(GqLYut7|?#<+ZGt%;O{WST~Z zH#eexZDpI%a|Lx;-T5@{MJIDJJE=36}2uKFp|YKXBW<1|DRWrR3|h!1v0o*(1n zqb++I+&(vOt0dc8?mMs57V1t!6Y}%}AVK8uFY07`u$ME0S)69t!%YwobU5No^Hw>W zwF4>zOS}ZvrMNkNF!x8;{yOJ7Eu+K|V}4U}`&)IjG|yH?Z1(Xq(b;=aI7Q*7ocJ>q(*iipiXDRc!GHma3uc=lF zlR< zg(X|%9btW>kAVSnf^jE1ZI|}Lk-J5Q1AC_))j?=il3=e^sY1L75RU3ev48(4oO?gp zVP(3Ewdw{a9^Dj1%*F~|TAZ8M_?Wg58jREd`Ll3E`P)V=n`kk)G-vGwUyXZ?h)_zF zfr$k&qbTP~*%o31Q8&4Uzp~pC9}z1_sjw zrW7FI7O1tZBF_%KrYW<+E^v_RO7?aMB8n8-7$?2{1`Fd{{1a0TngZkElk-LIK4pPnU z-?mjjP@?J-+-K={qgo=4+~<`DB1HPR?NH!NRSBn-G!2{?(<7pyR6S1!Q<@hnRMSWe} zi8Rg4{=_}tUV`4#RS#c4^>D8`ZgdMg+J1-wUqVUcz8sDG}Mr3q0&N9|sQAC1SqnGLP-UJKQ^xjLjXG3J9 zpL6^;pQQg~qJbU)J25sb{nk}0n9vUBuf0-T6-mM89>%|Ux(4)g9OePgC7JgYaoHef z9p(${cA%->ST8kby70C4&6(_n#+)N3mmWW{d`}7$bf__N5rU~urWQC%q|iv>v%Llt zbwS%{jKGgtu$+Qnm8G8G^C(Lz_;6VHkQO$+;fg|sVmf)+WU;aKyiIy-#$`_j3+w4olUmdVl-CzPC&l^-Ly zszXnPQ2UO)%%f2kvZ)5wXxG8AS!+fv!@Ceo2K7Zs92-#FyJ|U9TWZvB@yGiLy=%HiZK!cTc%eod{wq7~-XUcrlyO$W zDq3o`n=g}Zy84(=N3I3XQc6*WuclKgqw<`6vbKbo&{rR#&@gUQqpSH31S85#@>OVL zzSKq5MD&OY$VNqeQK5{Ja>(-rYRoIJy5*L_E5360crC1SmsM!>diI6`;*1%x2`2cy z5Tb7i;#KIBfs!Q^5NFf`t^kDHcIt1+jU1IKr#k&q)g6x)^Zly?qC=-2Ob^|agL|7a52oYNnoVyyKu9>lMJ*7|=e27(*#Uz_$d z+En&YqArAbD#0IyDmx~JE{Bnv@^0R^Ca>ja?oa3^ij>=Gzf3Op7{Nqewx6>-AmCGJ zGTbPMwkzwc?O~z^_grhp2ROVHnTl5dN8ZJ|i(9)yy`%Azlfi&CaDv5Q0+{hekWX#y z%7=AaGrGeq9W&!3+Mf18Da_ot=bN$vb(!dzPYo#ya(8zxzO=vLU3fJuH+u=_G$2yk z{}7TN#Sa7M3P;S1yAoTuqkEK%dLx&PBfhJ&PAgxOiJiP=fu~Q3w$x0A@c_ z0~W&k<-3}c>z8Q<{hX}n{ZS+BKloyv9iy4iL^3h)UtWw~-_k4Uzuue~<)gY>9_Fm? 
zLpfwhdBNeIMBRQ>B5*>i)d-2BE?2GDY6%T(QaK1LbQOQM#DSk0Y$7^@x zF75zDQht%*r3$ZYQvC>knBnSC>_5JQip_b~eG&{XffnRVrdqX-ilj7%=ss$!V-?D`a9 zlUhynSRsbKcX(xZTXh-ldIThNQFb?Xw6+K$p(f8$kZ9?>8#(AbSq^#-PgqT5Vda5n zaLg2IKdCg6QDR;%zSrw;Vd7O_*VeR2E33#*x~*ghTJF66AiR>>H@Wo0dC6@>l>So3 zckbBV3iNmWe1q409TY3C`3PLiWH&Us8&jRjw!p%1nX4II@--o`n;ULAb6{a0ZteIz zCHy(|>ftQZcH5VB>V@op4CR_$n!;y#cjn)5l^YU0?kexn{y#?I3vFPicA43IPF7r8%F_soy zk2niMMWK@oSS44GjqU3(Z2FMppVH`pcSl{{a6rE#OG-d?7a2c5<$8uynqfJtpSa1? zt8Y+YkkbzuO3lsRN}1mg#p=JV2QrUCzj{9rY`SJhppf!RUFRt~OA-WwkQ}D{+`mOZ zeA{;^&@~w4h|brI1l4o46Jmz|C!x#wbA5_Zmp-I@lldy;9{iO@W$QG zNf;`W+A`8u-=AVQ!t{h~Rw2XwE}1`w6eP!o^^89t>ao%hK=qk_8%$6(4}7uo^N{e6 z&06G9X*S#a>xBKNa~m3m&q%(me2d08E_No@8WP`O$J*X?o7+cLUtwc?mHoNwY}!EK z49i$U4>`;2_Mwn)bBEP#>-P9ydCfV|wGq7<{ZF&v$~eoz%;U9ijrwsN;-|FvOk7w+ zssUkOF?`)9Ro*#@E-6*!UPU#VS0e6p*Qr(hNc!SFxDa#@Ii|R16*Xv}rH7t;&uRxC z#>dWipJ2&+Uan#lHB}=fIqb0qQs_SI#JT|inV1D5q^3b#^FNwu@F3N`iRUL-hHkcAfZO!PdC(0ulZp(`QnWz zgGpk6Npc4)!%bu`&tOnG;ND93x9xAR+i;yNC7{fz#JSR-jq+M)S-QId2|BGG=Q51{ z%4{Bs^z9ad$7(lLsCcIee1Q|D5_*#n0vF-Qd|GThJF>t?*7a}ap2XTRpfVs8b@T#} zS}KZ!8pJArI-E7-svS*U&Jhyz8R#=mM1I!K+0#PJ*sC@f6WJReB$#BMfreYKm-n1_ zWG8Lo#Dd#^hN<)dfO(yYv&fyJ~3F@D#3t1OR40#wnap2kBKA|t!ovW>wpSNyHI0B+@ z+2^$mW_L*fXwYS>F}v?LOW1qpYxaqKF|ZRpV5;N5_aGAUFjf7yh$(pu_idqul+b2w z#I*FYt%F?*h>qQk;eMPCDE^^EcFdrSl9iGrrUfBZWp*_pux@d7cxa@KQ=^D=z7#W2 z<{YtTPf&KhJYpsaSfDjGFphF|ti~ovGulKpEd%kRj3i`xHZ9L1BF7Bxq9ohM46!SF zxGJDW2K4@^t-iK1YX4}0g?ef&p%J02|2dV$lWnj8-O7AC5Kb#mx|%W6+LFwyIrV{{ z?Q$oeq9g^Zi2uS9rtPqlbYg8=Pe#+{e7B!HS^q3UvA=qqQ1A_V6hvhB#bYEelTBO^jv(5jv$ZjuMGEhCFE4u zcsV|7qvw4#xMq+aZk&N7q0uO;4_hnCC!eRECm4FMr8>`XLi?{Hrw8y!vlZDdGv9R2 zIItEUc!G6OPi8}DL;E^1n*;^3YMqJ*!yw1ild``e_zkd=`?a+^ESdrkmM*M*jJb14 z&4`E$BR*6w9E+PMTur5D#qb|%I$+fbC|25lr=xW31{mSpvXW@DmPfLL1nOm$Q0K9@ z_^cpqIvaKM+}%NCXvb4I{a%{9)4jInoHlns{wn&Skf~hH&F;KkS0~i%8ji^(&E_ zY3B}bg0W1EUn6PxgH5#B2u;--5b9G}4I;N<;=L38(9gNcYMV%K;e&34-B^{FL@-YR zTkQsdwk|yD70c|1()0OhvH1MM`2;6xznB;ItxDk0iUK{} 
zawjXW(JjD`{Guw}Wm^RBUt3o9`p=Q^Y67TOH9`!{eVk-N)vi!X%n@mQg({O_Z39mGaNMOYbNMW;B>IZj zZkc(ejaX-<(bW1R1D@nvTij{4O|3oU%V)dE>U@c5`Oif*@Mo7~cJrE7c6THdP^S%g zuNX6^9J27=m6t);o4+h3scuaju8msq>V$LzP%X7ob7(r4&9He3hDjzUAJs0;qBeN{ zLvZ(w6ibCr-GE74>xhsJkhOvyPd1DPmIxE!>ha$}my1X2kZIevxKbf~PSE~fS^`N3 z7@YEbsxKlOdfYEf+vph~czUrH_wxuLUWEOll>6Nf_Ah@F{n0{$%Q{8hd36znk%guc zyG)z{M!gUD3{y4KyeHl=)q3*`_;(awg^c-Qh`j&t=>eqty_+F+5)D*L)Izfmf{F!S z+fs|jeMST5lU(7KLjnW}P~f;#_i{@SNRHn8d3%ipmJG@v={QhoN_8#U;uHRPjjZgX z{=$C)FBxDfRSoxrJ8=hk71PH&)Ir?n0bR5rn*aox@zr@15xwfBw`a$)@8fUt#3zs{ z$<10@S&Jz$^fDE zP_pN7#2emKbk-bvv8o+BFPmJaw}vYj1QMZ+-YJ4sVE-JKqoj54sb(*G$Lse6cX%~% z9-$?^@w&_+DtzzgtHvVkot!#8E}Y(PwgEZP@+_O^p7xjThV)QqI1lN%o7UbaCN3Mz zJDRUM5hiO?l=iQ@@|oK}$THFb%@Kv^uv~(X4_UMB^jiFj< z#dx{DD;NW)3~w!G*dn0_dr8ambx5Clo;vbko?3dvO8|(|dfqm4=_T~IQ27w8{Q9n7 zIo)yznRe-1yzA9TQ&8dt8lk}v&y7{0!gR7Qg)%noIbn9!G--`s8Zp` zM@jZ#Wtr+~2ZHQa3S&vQ>NYr{zZvF^y|Zbr-neX3Ml%jqQ}7;j?Uh5>}jftl-5~%Qq5KtUP#mtj6;Emdn?N8q|U?mE2xl9xMKqb3+%WQyjzr zbk8Ezzw@FqT0ciRx8TU8w&3$5N}$$R;-c}8#3k*e(GSbj?$7>k@24(WZrd~s=^yS6 zFi~m*QOd8n0Y$i&hiKLl)M6-~^z4JIawERyK@Qy2=FGW$uw=%CaOpP^f;?NCM~^ve zGvpcBJO!a<; zLHW_s>K3p#qe-*)Bhn^bomzdaO-;b;CZhi+m)ovsm^a@@1T-Dn0R49^p)R^T$P`RRHHdv3*`Iz%= zEe@;xwl)xS@V_mh%=RJI?JHcou{!TPkKObM5_QdRI8RA#+_LU>UrV^+ppa#4@)nnI zXgvYnM11}eK~37t@{$}6vS&%7O)pkW&8xyHs?i-zj>uKqDFw1OVGimKr=}5%z=gLJ zppkm&5M)(Hhpr?r(!+ThPf7rF{CfbcYo8f<$F;X+(Agc2yerX`!&(p6-<6T=ba`eQ z#<`qYw}H!~y0N}i`{6oJgn|)^8^;@dK1u^;&w%`W>eIqf9nJ zPn5*Ehgh2a4Uk0r0dR-bc z;~H4h%A8EBcD{C>WaQ3T9G%(iN0m!>NJYD(9`UjRIbM5vTd@1=aE!BxRhn#kS$4u* zTANSr3*G?24b&-hvvxHq0cMI0Um?9aQ!E~^z#neGrtl3R2?+a=X7Pv(B8&-SmqSENQ1(hS z^!aRZ$*Hvg4+4Y3Md;Q?0*XSzcn3sP)Ivd=vSSfA_`K}Az_gWE`Aa*fu z^er}^EqRHPR16R!(ekk5keP>6i`QUZlCaC$OPQAqAhK%l2?fAyKhaMxp*$vznEtH; zT!3W8?wW8>A`NuK^z)=}UVKMH7h>8=6$O8x{Q&rWH{uXS!A~v*GWuh)tdYkEJYfB1 zXOy`YG#w~muNVinEW1}I44&`zF(PTqUB%mHoQ~7((_xcp3)eib7>fhTJHVfax~i^B zkjQYI5y0UR-LCNbK|^&9PeU;xY)WS8U&3bk?!$G)Z&J@j9IWpsh)AIH6BN_x_x063 
z_!t3OkDD)N)?;tt`&g^+*kE6`ABOhW+JQ;A-ETQ;7w|o_L*(vALd0tLLD#+wzrYk- zMn@lJ8Q>>)I>?ET2*pB_4MBPepO&2Kc_%~_-3bob>U-9YPBQy2<7k}m1(I~2`(|I4 zI>q)(5sR3sV@&5{>Svxd6`sxc3+CgHPU0JOy!-?68Zz!ieZ7fv;Koqid$$X0{v+|r zKdbL+_ELJ5;bP<*uG(ZT0hG)#ETGd<(IIg*EM}RAwAmM*ag4r2N@m@H1$T4AjMIZ+tEW*#%`-5bIA1p zNacDmn$?v+&%^#ZE5wzdI%`<4JJG z8*M)_UOj1KIp+O&=p*st2)sA{`S**2C@*Hi;(+C&q}+*LJwo8~Uhnjpi__gN1=JPj z0fEhxCuV?HCo;&@#Z~v`GhY`K;YZlRz51kZ`8$ZCpVzkv>A7HhMS6pNDj+jCc?DkXnWPq(d4qp4gwDC9uF zxIsCT?KRTWyzp>qS+W5e5_#KqPwL02zlP`bUp8{lp7)yn78EeU(5c$UPndBaI0xX& zHf@K^@iwq6DHOHD_Dbhuvq!w~Mhf1e*D!YJqrG_(?PqjncCdR=Jw~laznc@uiaO32 z$`pGA9Y0+lc_G@amfS1_>m3xk=g6%Z&T)By9&~vfY06px26>va0RgW5dGUsB2T#mf zwhI0glAcb@c?o))(5PG3OqT-_+VQPY``1~Y^nC9%wF-z>?|rqL?IZK8Imq21Th75R z;0cXzHxH;)_eea`8q&RdLZ&a3L3P_t|#5(xIi%9DKIpBJiyjrlcdZVJD2nIhvJCa zQd#7;wU)(|wcZrKvpOL7C%FTyppFOW&0igKtEscx72n5e5@Q?YQ9m1zj zUI_{jwgYeagj@dW^@Ofwk*noEpXxu}=m?E+qOcY5F&P z$;Fi8U!fs-M1LJ>4stw353Fajxi*8cJykPKuz^ZG`Sr-20duiTSiW^`vg5L~V!rWQ z^4Ln_J-{5B<@4|<7kS~st_ehkn%mk$vMY=B+9Wtz9`AG1IqY^Et(wKifcn{xv0By4 z915)_{8aMbTMmfOK=KhkqXySxL{PY77p1zmG|X&_cfIW z)r5ICzxw7qaKRrbRppwqjPSbYVe_HveW?unvq=I&Wu>Pzo*F4V-er}tGOFkn=)@`j zX(|dM3ik@-coRg+iwH;ss887%^z)|Z-R^kzHagWDy7uy9(MafN&Y_OOUT)J!9a}Dm zg`8EMhJVssId3%SJrV<8ji0dDOJ4MpCjS)c>aM_%V4O4U?a4@0&)|dIllQ9U5ycO@ z=CJ$KT`F?~6kL#87Mhrd&6xJ)=cP{O^TE=VUhx$1^`WcM_NGckRfVx%5I5MG=VJXQ!p3@RuM61CWZfe`3q;t#E-p@NYcWdb;Z_}G(V)PigLhHi{PqC2 ztg%mngzo2Q*~^24eMp^YUraq|Kj*>My}KB$L0<)QtQ5g{3ssEsxBX@-B*<{mH^jEG z9j^f9YUVA8oB5BHq0(7`npZ;w+#&QoAd4wa8ScL64z@>Px4tIL8u9UaW-B1Q7cxW4ndStL@1WO{Lk=x$GY`Tfp*zat}q6ISiUGb5!B; zWl)8rR)}AFUWl}F*h1_g)N=E!BGc|^hOhOugz4vjaKlMm&B z_4#MO^>bP0zaMad2+RWjtcm|PLI2+501LFrB@?6ia`!w>HYKKdbj1r`GQ+oj;T8ZO zZ9g&OS&A$uRmM>;aAmZC3J=S&7KcEiyJek?sE&AqX?$9OP@?R$dT`|%mPz;;i@NHm z`W$T0wC-XGJRbV;tGELD)lUe!bH%PmTBDa08$fDO^jwlcwklMBJ`qMgLn-7iqY;KS z%zTh&t;}gRUGR$MYh&lTqF4bM+(XvL)dBq%>n~Vk!6_?VMw|n+x zR-jycA+r7!St~x@*0W&gBBFu4pDo!@deCTf|BqzLKbVy2v>`R<(BnO~Q}Y!a35N+U 
z)#l0Bcb@GA(qHWAifV=OLIKlUF43-eTK0RD6Y-x2o@k5DY)e_a4PlT@+&Q>-gaNVD zWnQS^0g#~F^8v)Iej>ung8)XCKp``;?eVwRY%l!3Xf?&C{(v(m-pQR-2jq0Bf?}O` zUNo7*=P@z=6SpE7{G=>_Xdn|QUM2!Ck_Qv7tE&%C(-=m6#f(WdcTiTZ)a&OB`7b0b zhb@)=~xoB1SyA;q_!D2855v%qNs zGX+0Bh~r_o(Y9YdSMY*y_-0lo>y1hhGUR$A<=ZZ80HtP~SD0F5Cm9szvNlE#Si>*| zfHhP2@FQGsz1AmfNdb|YW$uVu*7vuUDS+U>fQGIu4N!rF4Zl$Q7Zg!t@+tZ^$?|nt z6VVa-3P5ZG`Rz37y!p->;@a5w;cq^Ij!mPE6sxzM=g<1;t~m8E2&^pMrNS_5qFnt4 z{@=3~MM(iTVE4D2K0N<^38TSO(A2X+kJHV!Z&5om%XG|{egjRhd@Dh?j1?T7G=I%C zK#O_;Nxf0bUe95Cg8w;xYSUOud;GqKyGK9QejUhl*j`pUYwZPkD1W2*JNus&;P)5s zWHOPY2^RJRO2JuohV9pYN_8L z;6uXy$Ib^78hn0w((ui~(5l;lI`dAq%K@mhTMDZ^OMG&hYdk@VUD*@H^wZxdX~bit za$3e<<)T`R<^=0R(!u5T;rpYCKg<02T{So8%He!>I^_wG0XQB_G0kz4f3j%(-JbxG z%RkVXHKKHj8rNc$-5*+>kD1o!oGh<wFQiiE7Hfomow z7l-tdm@|CP@q6T03{Q+_a%?k_5LEMSxn4LIb>j2%B0zk*`bMk zYhCegXY^kl1I3d_TznkEGGOfJhNn&Fw+~d?4SAtlUQ1Y{_ZJCL>2`UD(r~<=aQ^@E z@Grys#~cATVSpJ-0Ag!Xc4%!oksz)+lIOD+Js5NTqhRG`V?Xoty)F%}V5_s{|7B)= zqWv&P}&js}#V8y@v>pzyvyBG_h)t7yr$c|Hm`*rv@IKjCg_HnlKc+bQzmQ z>QBmJN>_A$?Y!;YPpQ_S&MD!5rxHi|pT6;b4DjCv_s;=hC0&7NKj6cgb5@A}w6%A@ zns3cvO0Nsa0C?Or(I}CUSBV=@I`U=G@-xAIY<2(75#b~$OrK^KP9=N!YGhC#UokM* zaW9d~d`NRV?@8Ik;FOjovHb5SzyG;@|LYh0m)!wMwWT@?Ji}Mn&Yvb!Zxr`vOL~)%vt`H5}H zlQOwx*-vXpM(IMrLtXL@+UEZ{&t_3ii|!dJ6y&xD(QY)Untyw0w_U&TcFfWlNn6OH z-sUFMP(Cx{T%KMZaMlb0&IP z{~pBsuj}z2ct!;{pke{!vCXwDV(CXZ;VLrouuxZhaZ6L3rVah}SMl=y<`C)O@dKY` zXBn6s%f(9$<6n3lHV3$lTImSpyKWj`%(@Ke87|$AmWRU2x%A^mF(TV2C~cREh|g8u ziDB(UJcowh3$?&94Jsa6PKR5@Prqb&W<8(F{7#>vg@qqi95(hmkBa*_{I)slQ5e@E z!^SmX!G_@NFMDC~KQnHNmzi$uHQGOJ2XEW4Clpi>u|%K2{r#8kN`B9zUHG|4o#07# zPwRh{Xpe84)Dy~9T!+#O(~Y_G2XiNGvcn@&_&xjkFB2MHRgrkPM6qGobP`Lun~F|G zZ62B4-a~`^pSe9qHlp{49c^0%@=p8s;^X4qWYT5t<89#on|Jf{Hg~k127FWJ3{T30 z=}Tk3*-FFH&?Y|X(`p$1#Zsm(-141{&<6Iu{Y0V^Vb9=VDA>PI3&6!}giik~&CqtV zoCD65p-VDyw;E#)IiJ6M5PSXi&*QIQzXt;&V2x&26}8SV!IRFMY=2AQxb3uQ8bIXb z*xZY&Js`{5=j&?Io{|1{&sFr9KR+HtCpKKc3*HM$^MTdC?16cS)Ae$VX_?s!+xXFr zmRey`8AR^)c>j-QQH)E`shX+uvM=LV-yMRyIe5{esitQ#{syXHFjf86f9%ESs952#C- 
zh#||Djm>iLDgUSeL@6=lvW`D3G}p}oQYfQ>_%2j_qIigaEFzLE8KUd(h`evufk`CaWKP_8o;{E`R34v)`Brvd z{Du7KH*c!%O+NT6Y|M#@zTPqsmxmP-M|sIC=F5izcrKpard0 z(-hAE;Ev4`m2%ITiIp{ZeckMmzoL1E(c^qlbN{l%bw6GylfS9&kJ#VLVwfJVFc3qO z{MPRGJgS>NCe5e$1XDwf>S~F36gXmbvBieYOSWPNvl0Tc*-Ab-XUOQJ*u~EmCsC(~ zWU{7zup~51O75S=6rq>&y7=SLOG6{417pR>{=z7^1fepwpboMFhlh{B+R0^grpqsG zwH_j$N$(#H142astHN4rwtcaQX9m$Q*@e1}tMIgB#wA1ooV+DvU*ft^+!jM$#p=zG znnNwmY~iu}8xoo#bEexibpzVlMQ_cEku=+mX@-)Uhbv2*Lys@B!dRxh3xE_+e0n+) z+0dEJ6SvE>)TL@6gQ>hx8~|uWA6kF?)pQ)ina%&o&ZheYfl7Jdr#NoZm*vd(jg_+nK@~3h`Nlov^mJ4T9 zEB$7frR+OF#^?r!u=Z*0Z8sxR!aL4b??=Ns9h|Z8k__wHi9(vo0k-`!J05RooebVm zxhxU8&{1ykSEi*76VnF3fm zn!Xg8hCb!E)tQpKITk`$lbYgOw7cTcY5T~bFGxLJmtik>>qSlzy;+Jf1>{4~*s3pG z$f61mVrwojD|DGOJesH*?0|pckF3ahhgxr&bjPBbNA1)hH1r`MODKN^95@L7N`1dB zzV+p<`G9Hc?0Ma5At`=Ca8ldhqb4+Fzj}KmbZ?!24l~7$N8xxxt>khP)u9y#59XlZ zTAG>t)V9o2FS0#syUa=?DiOBeFBc1Zk;XmA^5j?s(|MGikim3W?m1B;Jj249ELtB4 zOs1{Co5Pt*UW1-APgi#U5!W|-I0V~c)2&x%wpKsG}sQqFIxjpVU>i3 zuK}>oR=_XCFI?aV_1os2d0mzcy9NI1-qP5y3PX{nE4JCy(aMFh9u$>Lzxl(({v2J| z;0?>GcVibFz{U6kn3*{jJ%DNKstd%GnJ|UU;sowv%lk%4wK-*Zay@IiW_$;Rhm+My zG=2e0fAg-8myXMwa6kbj|HQ?4A0koJla9h8hnf5$$s>sG{gDonGxZN~O zj~G~=dgorMi!^-N>+m=%n{|1+WXM1H#LL`2xk($nbp%=`U);}(@|U1FY{4`L38(h~ z>8p|n3>d5&2x)M*?r8~(0D^-1PP>*RgHameBS}670p7y<;#nQeRZcEy66Znyym)UU z|3PruTd&D6KU22@Q~X~0;=>5*zwRqoUhoAq(Phc-!r3FUHNG9^zl-Tgm&zY%>bxd( ztOu41s`@bqC#r#bkj-PLQ)$xWJpS3ptl`~6TPnB6{pZZXbWMZ$h#HQv3ZF|~xPda? 
zWuE~=`gzC2%+)>ShQZ#@hdrSWRw7OhdOS;uZj0s4%<()ULYEolKA!s`7KqZ+>gtSh z<@y3GO5?}64Ejr{O0u_qVtvl2tUInV(uS29UMlIHO*)tA(qfsU@Q~$d0HQ<1>yfds z4F>>epAq&uC>)n=_8JMD$Os<~Ppd$pLC!-cc3Y-F;VYdHnNrSnSp7&sVj^B8#`vg> z5YSaZyQqGES9G?MaSM>n>lFGLH?DZ6t>MMQ<_bL&*uSfJ-*DdiF^$!xJ;JUOQflK_ zg484>B)l&wZ3l^AH)L!dHMHOC0Qk)5a=S0@4Gg_5_N>yBb{SpVai0XQDR!f^<1n4c=e$DA0BiEOa zEr6R*J??wAM&9C)Y3*!qZaIBTT=C)8XCB}~)hTpx8hPBm2Ki&|9`}*E#oEu-GN^Fp z{mnLcuH=VJT7XnkpWZZg@rrb0KaoA4V+&e_9bxMGv-f6di&?hbY%0R+BNV=TP#LJigcF}O`3n+EkE;fE{J+h12e z8TwqGi}w&o-IwZc7(%|-5W^(}MyT3%Xa z3mJ34=Qz?Zj1{j;`NF!zg)4CTtUf#0?Sw8p8;qrD*OBCD3sDxi|8U0NHU~&1I6d*o7H+@7et+iX-w~BY{oY~6Zz3%nwsnv0mJUFj zQ;lNv6%Mmw;(vdB$K2NK4br~~Hw%Y8bRE}2_Iu7`#(CLvZI(Ob=_s2sR93YZ zWZSvPo`%kN35$l7@0rMsSIIn9+Zy;6(qgn2SXk-~F9CF?@sj2J-p3Xf5^Ux&UhSyrBn$}CzlT%aDg}UY*!T!Y`=#j}Xu|Fn40WRBmR-Z!M zhoUl_aAA-=|I&h>$o+OhE+iv?kBZm1JG{ORQK1w7lP)Evl@D-;;SV5EWRy+;(pv{p z=KR+S&XzF10fA%FjiQB6d-*n7caWcv;jikoJSIFW7T$5k#)yK%LgbXkcgK$wcbkC? zH)Cf(lT$rmBkB_<^9XnX4{n#K2U|bW4k0iBI{{XpYDBJ2QM6BJDqo(^`eJV;DJC7m zdtld|e~*a_b0a#Y7%cU;b?z==t+S}GYrX(bO6>&BPPaO?f4=NflYX6saU-XFcKW1r zFglb=a@lMT|13#rC3Sn08P^DWnkE`}$(1Ap z`{;2$cz<5uTb3v-O`S%OrrQVj0kK7n}dwHkNcLB?G2_)4jN2%LB&IFlAo)XY#^ z`+iK|Wm3c2M=RS52Cr6++O=6hZ#y@~Pjp4Do}N7Rei8x+L&(n$jol0jQ62eZTz1F~ z3t%oWR-q-2d>O5?#(R1-u2;Jg{EI^<4j=U`5F^c{xZN=|XDb*wzmfIo#J-?`e=eHQ zKZp$1us{24Z{ZGs6jSiX@3OL`!{gLK-u4{wiu!1Y%V^~)^)u;1=>FxWf^mE+;Y^=j zQKXgQb+~g;dX?jogHc#0S_ZgnWRDiLV!t8&7$4h|NVtDuedNr2H~-aN>Hll*t;3>h zyS`CCKoCIz1rY|2p;LNjM2VqG=}=lEB&8%&kPzuE>F#bsq-#Ks?if<(W}oBzJooc{ zuRizw{@us1{X@r@Yp&~@bFFj5ZmA=FW`-T_$^}QU}Gq%a7-m$LFhrE;*hODuf*unWCU^pZ1UU;?nc5Y zm}6%@k>~x+Fy7h22Mkns(zT3ld*qDa#{FE{rpkJ1W7k5uvdVsf`~}V}vL&ubq&Rj zu{pt$rO|p$S_6)%J-1Y%w2EuHUW!x&Ti1XQrq^!ieJd1{ieCvM~5%8dUczxI%_dxgwG*;+84U+mz$ke$&v)oZh* zLfxm4F@)%Xxlo&3ddmrJrJDB9^Apl1gP{zwR}vXCwOO%YdvHgO0I>l+&kOgeg>Rnf z@A#XX*Qm=5ybeR^I;x%A<0^=mA-kUTKwQ_Z3|WLH^deu3WSv-!JKXXcLJEUk`m$pU z=md$+a%&4}0p_?=-|q0p$N6qb^7)3@bZcZLC)fI2b;0G!W#B;A7j;i)6$WYujjmG- 
z{49bFoT7#tux*R=s-~Rv(X4v=n0oIBS)6!eswD_meIvYox7=PT40cT8@vW_rK`Ch? zsXaedHAmLQYvG3H(ZdX*X@0@Yn4u?$!OR1hR?a&yyd2CXKe1sB&$9rSU#148J{U2Z z6o8`wdJMeKDK++Yvo7-uj90i~qBxuHyghC0TAYZ-Ufo>ahbDcW4)F(a;1+jOBP((@ zO|2`njD^*brq2zK9mMg+fa6YT=eTRt?N%G!msJ({3AlX8(c<4)x#G zb-YS$M4Y-Ey54v^HnUSCO>1M+Hs5^Z^{E@&`de3D`0=1$soC;%H?gVVq4mBxx;|E) zjV6=cb4{oIS5u1C&q&rccUzN>%yJGI*cmtUFvmJyj8ueNo;F>U&BSGGn1GHj<+bGq9pSEeeM3wYd##m}!gqGlI1UE40Sz;B zN4@J)TDm&iLV-c+jqD>bgoYtl*DHh=hoxd9^_g~nmgj4hpAJf!q4py8>51M z(5U4p;ZX9!mT%~o9VBO_>OCTbk7CW{O*lMtA~q}`b>WA?a~qN`_E$xCALPr@*Z!!O zzDn#~dKp7VfAH-4g~|DhCAs{~mV+nG=V^hx@qPQUT8=8Oh$-xb>H~w~Ee1CZs;`O& zO}91d?(F2UY8GiZK6tfa|0&siWjD2bZ(8VZL|%=azB;E9ouBxugfxeL=so-+r3f<{ z>$a62uHbjPM4`yHr5)AF=OCBBxY-L`u|&3rMWgT*w_|ML0k9r5hNY_@LQdcD!qyvc zOW$iHOF6%>Ha)-PGVjl@vEX_`UYXWhpWqMp`@U34qonb(oCY9;i#B3RHeEeSDi&h3 zp17#IWaZ_~wZb*S4~B4v8W`#DeB_>raAx}eP(X{ej^NX+l7O;YcE-(uSG#GKw(Hr} zvaB|-aR& zL+T_at`bZRsNIJRt*u5#F5~rIq^Xg=D|N}T@9OV&4tynTi>ZV6rIVryhrntAl`3M< zfSpy)mxp_&dh?KfPjty(Xiu4#Ufy<{6P+L{HcHWAF`*lid2&zL*`bR$%e}g&!bR0U znmDFeZNPqQ1qJTnji1%nuV@Qui)SfZ8=1~jh_7wWC2#p;X^uHj4`uLCfMg!FSUhwD z%X?lN$7cp1NKERjbD5YEJC6y@wd*TN6R(eVeqOyM)d>bwd@gwC8JuV}R%&8>0w$_% z(_Z!1XW!C~@0T115cnNx#i4kJo|zqhHbXx9%5v0iq)-blAEGwHiyjsP6Ae+VJo`+hW#Y?%*T4?ngBm0Y6PoSuwi!6sf0NpWse zelzh(1KE%*iVK3;B`z*63wEL# zSYER=zgk?jS}|mrh>(86QcKxtaCxHPHsuedvQ%$=l=52G_PngqX=0b6+FE9fDjZFL zOrlI^iHkC~!!27Svz1Y-e$XxgB9}GH1tpn05DGrYO6tdSeMO zqx74|Tnx`qj?eC6(%Ee5I4&E*dk8dX#zVigZ*;Lws@Q#Zn7=cR_d%xT|>xyRb^r)9~%DNjUg`0A}2!xHc;SlIK zOj-EFF6SfmeRrftDV)33C1%^7aSyqS1y;JfVH>B{Gx##*U>yG1mq*XQvE{%Jf&}9P zi`*2Z1ZRsPIn&yBiPna7HGRN zDc^~~b-j=?&?=*9h&(rcT1c}VYER^g<+bo7T6cEl zp;K%>Aw?%BYxL2%1kTAyL1R}VdsM;D!`KSGjX{FA7NLp(?tWplj3Cd?pvOOJSs@(c zo*I00Ne&M;V$-kx?n3@!9}Eq#Eo!e@t=JL@22iSUbE#sgmRMVF9y;~56DtPAsIL8o zD%{5h4MPu(Ib>evV^EBD0N<+5X*gM^!MTMG-*+{2x9|L3GogHd!*5`Hwhb!cRBbek z)=ny5S&%ritT}UL6THO27ML<}@))&k!95*%6z) zoHC3qY%0YL^W*t}4RqOB;X0#+6t>dw(rs_=x zcVt)a7@D7w*gCEMG-z!Wf_&tTe3C&7aY++vH4QbdqO|dAFwH@k6*}odJQgcw(gx5|V}T)!V~vP9 
z(~9`*yi-r3V_Dg=vf;J!<=%KUJ!QF+ zquS+BUWR%dJxo}6Bba3M-Abvrt!v+Y$(?dfba82^MnM1+tWE8Bo(v^VI&23kHaSka ztkfJkyxg^|L=I)unK-_cELIrJvwhK^=IXLNg{pQdu2$S%Y%)J7w-{nx5+Ok+Xh;G8 zyTELAG?=(j^8l*%F}=8W=Ww*-y6xi1%7mql`6)LS*M_4S^V2wfFr>T6!2`0Agk(V{ z@T8lD{%<`Z?s1#CdCqyoN5O=caf*EQ`^#6+6$>@a1ye<@UmSNe>hX*fonRmGO%hVv zZPvVOK7TTuS)@RnJ)Ezh`ocn4BC7~tbYgZG#Gea8y6{~rP3#LeT+T)tXlA(hZPwG7 zx<-kVO_#obI*H6QO?1uFX2^<^M9n&wukUeQ=z#334aLqhve|(?C5Y7zgdB&~NqU1* z-1?p{C{cG3l93L&LuhN<4CX#FI+>(?L_{(S27>8AB*;=anBAdani%?H}wtTJ&CKw83< z4nP1?o^~f|kyD+ek#}f1&WMc262G^_AU~;HXj}PU zFw68Rpl4W={9;c$nKog(jS*Y(9R7WCvmB>qLXg3e zTs2yO2Y8-?si`#UU=GVmA)Q1!BoZ09HsA_y_~fhZ+S6_qaTpZ4EB(${J$8K=L);cZ z4nvhNWM7`B_cng1x!(nd`LBr16?&s@KLHdfP+Q-@<}6oOTMsgSIi9U1i8Ze8dG45} z5YHD`2q%p_aMHDe)cQ(W2^I5(q_!|+v7O|irvgLli7YD7a~~KDPo8IDKUWk)rpq4h zz0NKfLND6?0AhIrw_qy+;^EFh;6&k%yJYnua=w6Z&UyNNp44A0c1s9s&4s4jcNN?) z{i=HV8pkhpqMORuYZi$P<8tGJyKC5gcZ1QqPf+3Spt?n|fHa!sl9`i*F6sOpcjDPs zCsvmIK}xI;s{zeQTPfSs4tz+-rw2ig#g1NQq0@HJeD^I<*~%z?*C)PH+(<6E#QL?JaXecB@2r^Fr1EgivSNKnpL z&=Z<>yhaBRA$$Q;!6!%I(~YtyY_^0xU5{_#KGuS@52Ptz(5Lk-Jjy8#NX#BMY?~7# zhINdsae<51_M~e2;N4MdEEQT4BUwsRmdib{eK}Ve*G?dUuB~FTDNBw4sVEOQDmd!B z@lYr`m_IC+As%9sWi5C@s~I9E{leT3{)R@8Y~}G4NWa)+{R9s_J(3eY`&E{QAlTRFvOU`l(IzeDI&tb}~?I{*hg&S~- z(Uvm{mY2BxPrp4d4TXo(`^SoQWZp+7;Cw)OYSzNa%x-Fh$2o-G2|5;h$;TrkiAI)3 zIj)a%f-#F39;XMW;O;4JzN|#gi%cUTD>opNzQ~t6&WRw?qRt$ezA%y!evjMuo@_~( zslVh^nf;0)C;BZ;sO%T{ho)5`!%nNt{lO5SFY@L-7SK{4kzB0T^mpWkI{CfxmC{Rsd1ty%L&(ft{Gs@{g^!6#py zN_}}xreJjS21mo5D(6rl){90m7dq4Q@EAfqC*}71OQQQ!{=cVoiF<&wBf=I45MmtL zo^sZ!?*5AnrJ8YjYqVBxVB$r6+XdLNX>E3gTWv#@z?u#24hc@ zMD`2KDd6L4b%_MdU?lEB?N((bWx zS17irtG#^rbFS~zEnpI&wO{1Fhz;e{DO|HHq;NI)lzzC*UeKN-CvC$)$QedYtbyP6%t-c{Y|#}+CEs+?`;=5Bx(sb5oN)etN~NLFCWUzW-kepAov>+|B6tCl zttIHr$x3?OG*$fc%?9uKEo9~;$r@uYD6Q&!-EZ&*Qa-%-z6Q+?hikyTwk50Hww2MGCLx+sC*uTRj2cdd2_$GRdiVu};Ppe8Alm%A zt%B}$w(Ww)Luo@_0+b?XiSG|8yt$iriXR3^VB_z7E(r#UpyLtIv_JKe-C zU?cJ16A$^glJ*A*iUXf9{JGX!8+UPRF~j{ofV8lZmsI9$gSX8Y(SwP0@1x8Oj*lV- 
zF<=(ded=eR*~#XDS)${U<^@`s|7|{@VeCJq&cvV){G9runKJfdPfm4s?N!ZAsv2F* zo>%>Tmu1+@JgjEv1dmRHZctwPu?q_aOVkb1Mmnq<$YS~2n+%KR^)K}IbLs^x{SzFP zCG9%3T4_1^!aNK-?|Hf{L&!WXUI{d6TqS1B$z~8}$SJq{#u}7U%-jXsRW_n21SxiM3YIGrs<-*yKE^Ba%9T>?Ud$C%{r$7V^h;{1u{@#NJ~f8CQ)X7 z1w-X_M}L2`Nq5xP%IaiI8LDMU?n^6ExSpE6V_n9QFz}GjT$%{m#ReQZYZ03=66La)Ov)I&X z`5tT>|(SKDo(I9!$Hnx9buBp7OiUm1x*rj z_BOsFzQDUodOzP-%y5p1wGirA#pi}R@zLT>b8C9QWj*YVpylP9J)xYoHy3ZML!Kri zya8DVz4@=k4!fig%Nuj1pMEAumm)teO%rYW<}&<8If(1wd{OsG%bqQk_dKxZgi|p( z>pBO6xD|D4*!)@R&RJ7(L}_qVS8w!39;gt;RamLBbx3=161yK>K)NuyLSNI1)F9!K zs`M=SSl+K5rS`4SJ0}|)>6b6Bq)SSbE}CwINXm`KRDCj9s`XFH@CR7-_so`{voi@M z78d(5b8R?0CnA2Ij_3wy$6Qsk%*pppVdGh4iPBl!0sKRzC4Vp*aHwzMdk+ssFqZ{C zS`Y?VYZy3RQc=tspx|Znbyv;A#*V6H-PeAKkfQC_Z&8ce2ikagCzk7HL<$tvB_~JA z!YxOzwA}U8F>h&zqbVxnLGqEi;I*`8#*(p}O~wcEF3w2yn)Akq zbm@2pH&dp|ob0`?7WdQ2%X=aYYYtV0c6N3$>hpC~cLM0lmL)7)E^y%MQ=NTAMOQ!H zBp3~Z{DG&vYj4ZkS1T$*{j?9iL=T-L7MIzHT?YVW;C>{gEp|=48v67)8f>9i`f)6z zGZ%US6*eqz93xpK5hbSsbfF)ip&B%?HXLCG(Mjyc1{NvXqr~8<-$nf2tf4H8!3p3k z!aD{%@pRlI+o5irwt?lH_aAQbjFV{zHHEbU&1I(N78peFy(K^L={%(=t*?nbanCJf zQG*&>t{98do(N@JS7m-~ZKLA^jm6%Kt4m)e3d5&2<78gGjHQj2Y+Lvkl*`CJq~NhX zrt9+bD_6~OQr*(q!rskV7Ut|@8yhbu8-o%-j~4qTLrhMgt3!6peSv>CfGH!WB^S%5 z(AclIy11^zBNm215j66g_EWt$N&R`8dt;UVfFM5R{1t!2~hoqi2jyLd{jjU#>4 zusR)4-C`+h0tj>B1RpUsStNyMwZZhsOkP;*u$Fa=MJf-I7$a5RsVepjD)vns)0oh^ zrmi`YtS)9><;PS8bQ|5;A0t$+Fh5mxy+Xm{Mj*SD`WP{X;0zln(7RI+@=rmt8=I3>O4|fe?5leb79@~U6mgp(Wb{Hvfp3c`>9IJOQiAdEs5|1T^IkO|A(WA_6p&Ty%=ct^hZ z#0uUXPJGtYmm*QN#>4M2wX~%S*zCV7R&_(PY#y%j(6wo7bs7b?#qadRQQUTsxSSxL zeQ(g95Ky@hNnH%kOWBDs2#O@`Bbl z?ivl;RYVD_-#qNu;OC3+kIfBWHJ!H*uG66CX|(WF}}lVt+5bnAzfjOaw^n0&OEeoqkj&oAJ` z1$o!-?6?JMue5aa9fMdFi^TEqz@T;px+iJ@5C3!FZ?A6Vpz@tt#^qHWt7@`h74=5% zQ9?4#+tS{m`B5?3{CDopHbOa4UV>1V?9R1}*?p|A!K%RWrdb|;IobCqPXhj6V*dN# z;tHs3dh7FH_D8pAQ^geut*hyawvD&yOO)dX)|fW?$k<={W5O_8LjT~vQ7a!}%uE!{%NSU2Z#@Ewi&6dWbs*l#s3i2%TeKg^XFK{L zKLf2BICmvufAt)NP%w(sRfvN zvRn02viJHPfG}A#&hvkID9$jgxL2!^%o~!IFCSmI<&8*0lb-AIP!%?-DX+6l=Yb4| 
zqxmH>{S&DzQKWwV1QTYLZabmRbg?Rjv_+bC-WgDke@pv+%bMWq4`IS8x1#22STYut z7HAus&4_2SGjIC+gEFIj1|Cf*fe#wukKB4p->g!MBxgD{j&w+JFHCo|+0F^o`-5@u zZwdY07o1}p^+ZAnEOIBA&eRITu^{p6DuUhT?mr%sFW!B=mz1wZSQ2EG0(#`qxEO!$R%(HP_pU6_Sws#<@XeXpcIP$NwC z-{q-z3QCfFwzZeChDlDMwF}A7bzZ+%eK7*Qs^~zl`$zFFK8$sCnxFbw0#qCV2LM&1 zchlxkiYFteZTqApM@z>97gP|{nGXlDPDYmR_f1Zj#?frFV`66J@(gHW_!$-#MX%5! zPwAa0Sj0~%`4BAE+m#&^#3EnCw@o+U2@NLh4}qbi!JlO7dlB!_Fp1-5Ey7y$&%zgP z--+I{k@WhTG{f5fv4w8>vJf;A(H|U*R(H zHGEqhi*LGuy27&aM8q`Np{ak8#hi48k=?!~yNgR&$?#~=a}&d+t!KAax+@jpO{7he zfC#SRW$nUfV`D|<7v{h09H*C;e>r0!<<5$PD^-4s)hmmw?dnRepA5s? zoqgeAkrNRW8UNyESh`~jPi9L%ff82112pgNgecgs0hmy6j@yh@Uja4;#Zdaw`7WU| z1WK@GXLyD(x+k68ZXp7WsBt6~GM}p)+UYzfY7F6@(P0&88iT^g*fJ!Cc#y-EDe|?( z3F&lSJCh357YhzaI;MNB(BLv5YILY-ju7@49)ap=ALuN_bQtlTxI?oNC#9p^GQ4r<1ucpp+Ig9VI*;;|Hn#JXE9qvSQ3$KpT^1_+14K+(I<9m0ID|sfJ z^CpPW_McqE(&QFnh2aUp>B$t66)O5?+pL|d<4VC)1A6mv1l_qy-5L2VVl_{9j>AGJ z7?NL~70P?s++(6wCVF9ilIsIufS@=e z``1xMD(qv!wDGst*c2(j9`_X8$zl0Jv8=Zy5hFW{RZcXC%|1Fml7_omy?v{}WO!AD zE8UZy`<;5d%l#1s7Xi1$66vIfttWPjmYJx%;rWn(%I)?PGG_ct8p2`+B?Glg;eB;ZTuKqX)fS})>)!rr_uZQMSsA0z(Y ztBb!OWaRhuo6K)0HJ^5my&S_9*qoGGN-gu1Mb3FV#-Q-)%_hKiU4;Ac^vi_5;^unD z5Bd2tH*_#smmPXiwTlVcia<|{A5$~XYP8cj920ig+E-dctXoHhwmUm@^WTwY#{|6a zUKyoqy@0UrD`9|#CqWdv-jw(dhWsrDk%cjOXlOvUJbIx1A02)3JtunEKy}eGh=wzp z26t-O>Hy{OY4QadnTJx;dXazuQDA8K0zM#&X2mV0>fw?hEts%tH{78&#Ev2y z?r>?-D^W1Kq;LNDU5qk1I7|PYMxuyJn8IRe^?fDN`XBi(n4YuDG+2f4D99CR#!Tv_ z<)l7iX69C1Py>aX0?vuJ#*@r%>ZMWZ@!sH;-Hf;9h zE47z6D~jTiSis0zmRdLuKF)HxLZQb_#>Qk7M?Cu@rX=upi)+{nJyG`gWz@HwJv^I0 z9xIMkUrGi&!OGiAEFrwfp6mwJT@i9cajaQM1A1Knc1y<$Nf8^1`gZ#!X>=k2T5_$rDv}_3CC1u6T4KY?bgnv8S&d1~`OyBKL&0@wVC4tkl+QB@E zktK>f%*4{Ub6~eHq+4;@h=lTnp6Y4-*=dOchk_#D-js`9Nm%+d=)ulWvZMY+bTux5 zzy=j&3J^ECnav(Dey1?m@)4T%INpzQ6*nPSnt>QB=A^l67t|U?m2=~S;wi4^E2nQ1 z?c}=T0!>B_tV$i`o@rH?6df@?=?Ez|-CUf+>x=UCsFl%{OVb8l$@pSDDk9$y%*t#)XfM_=6=NvN`-;0nJ6Xoy# zIge*;SzYhBzjmz>w!>XzRrmRW(tz|3hyE3FcfLZg?ZG_7PR;`FbhMnJVj+)Z z)IBDqOurBK;56Z=q-1A5x_0eax`|vFq{VnvdvTd(&^N9!JQk#EjhFa+ 
z?*O#+d#7UUpoAA~v&Wt!EZ(g{{|yxSh8HHG z1|`M7$rg91Q6iv#nU8E+JRg}g^wt)=4FK<2@wU4Z@1#d&aT+KDEE!VJEjG;p_yfcJ zPCX?(s7Eh9O^#RisG_*{{8zA5WXSWCUFnUEIl6UPM{^s0CBDn2DNS4$DgiVDKd|=P z9tttDyhgV=0)u)0EvIn6#5paGoeob|t)?t%^&BkkulG`Bs7_A(tj{h6VOs(k{vc35 zaXen!BzClwjW@VDGFa=#;#N?(n)D8wTwtu-LggSC)Hj{E-LP;?F|Y1l34bV1qxxK5 z0o#TURS4qEj3K!9eWfh;Y^T#%>NYjeP0(7y%f!=pbsGwbh9B&NpxjGLqT&q|iW45& zwo9|4b7%+=!1t6NdtKJ4^ZI{_00H?J6)Is&;=DsYr+Z%F4uC$x-bGGN*3=?VnjgR` z0L=-%2~tFpV+e>+amJrOIc6%TPH1aB_iohA16e@*&HxIsy_r6!WQ7(tfU-ob%daTAXp79c@)<wg~br$G7&qbBm7ddll8ykM=G(3bZS5?#$Zm_^OGyq(M^-CHQTq15Sw{md}BZ z`wm!Wu7;uI;=vE&m(GcvJPBM|-cX@A%F(k6N0*`c(~A|v;apOGQ*d0D(2DK&ootcm z^0+m;!|E#QtW9r3g3(10>f`#xBBcBDf_IyOucSeVk;)10SN#diV zea9G)AJaubPcWIO@ca!}T{gFYl18CAPF&lUNyE7~A`rE6ZdBo=povnYIm@s@SJ)ELAVtk1r$}%xE}Iimo1a zH`PNb_F3JQuu%fWLbvl}{4HYp^SUT_58r{TIZ^{&C{Ld$e z3nyJDSAxd@h%)?h0(%d@u4))yEXw;FU&I|?g)yO@_iwelX#vw0)r~B!1E)8_OHm~< z(L)*N^sZZvIUUW+uVa+3rCh-X2n@`Fv=B~ch_C;WO?LxP?(ufH6YI@m0>VIWRBo!L z6c=~ACX!8V9%&OrNiYD#>>Y+Y^xN5B%H`rhJYX@e~ykBR;4N|);vb5$clcPp^b);t8*7qrQ zGc84@a)q`;GF=+LCH2TSr7FYrJUQpJoa}phyA}f5G`5N&fJH~h3)XGNZ!5YDvuQ6S zxV~w?ex4Oh(sh14OMlyO#IeS3UjW(1E3p00qrj>rZtdff2HwOfPKbt4gZ}x2x%?=k ztDlT==HkL*d6Bhc3qovDM7G)(;2Z=E7G!tAGMXxl)0cN zdN~@`g(bA5;C%4#=gXaws#ce6k*$qU$A*@*yx!yTph~?eBdww-wB6{TsI_t8?8KI? 
zt8zCSntZ;kmop8#T!|UdUvWQKqfpa7FUpKMwC?*kK0=(~bZRy>!`-_)QOUtiQ$F`{0i_Eju7yblG#51=}c@TAvuR{S{>fwDUXWWY5?Q>)^H z9=$}YQ$?^p1EsKBj6EI5V{1jgzMKGXh5A}!EBq5mGyrborBnZaf%KGCKv*x%z zQnm)emW9Cs@`37j`|9eE-@QkSrM!aPwy*gOl zuq|~`zCL2HNwJsY{;>W&)B4C!dj0iVHJhV?;QU80E4-+x`h0o!D4A!^e7^iVCmh7b zHKoW#m@nZw6q;wggnV1~T~PB-a0H^X600lE!X(3lC^>{07x$;*O!H04>%*0Xl4+6r z-;Y1TKR%iGoWq}RFd{i^J5HiFU2CpWzJ{|R2o_gxAeE<|+|RJIv~+uh>Sz1bDO7t@ zy5ZNB?dzZbh%-<0!NXdtHP#=?`&*;i6H4oZ;Ib6JsKkHkby-D|}Z z2$qzCfhg9OR4;6CIa%4>ZRpQ1P}!j0WR+1=FCW#L49#=C)%Dh&R5so;XNJ=F20G^5 z2Csex@wG!~_h+e~FVWVa-?69SQ|rw7RQ@Ct=;8=zH48~P8TNU5M&qW6!DtvI_flH= z6FlmSwW#NYfl^YZ8*1`1;_;lm5$gC+cW`%r~UqS>k649lgbZK*6)3Pwd-eo>Oz>qfPm{qW^koZ^=QU?~uNY z!1imHBWAy4=LMUk@kvjOYQzA@Wqb~m^eZ)4$V^0>e&Mt`ZcC#Tp?k1X>eRJxcNPYM zAz!aYq`wXDWAPbXx3btj1!hmTi&2f0_m((p6JK3_=-mS?f)jTR`E7?{#K5=8bkX_OKrBl=Y{le8b%-6JigGfZ9J$C$M!N z#f0_X5Dc{|TPHQLva?5iS-izv@V2ku20c*H^xl&q@g{R!dUgn=)^scTp-+a9odL(& z9eW@7x-v#}p-^l`UUWn*I>9+4BftESY&R<$uE2+`w*1vvKAF7mHu~+3?lI^(|9136 zJATMsIV}?hQ^XB@7kHF4OIV(D9L<|rzC4OqSSwh?s>MbagAS19s6lX|O2<(o&yGTxv=zM4CWACNIM+{TY3bpmS{flJzY}P9BM_rm z(CoC=2l(C(jCX#ei~FPoA-#yKuV{Wq|BxaL*2s@h)=`6`N+B&%I+SH??c`@4<}e7~ zKSn3cX5`QFYmY~JhYjPkeQDUPeS8|u4dUm-OOq?a_1*5(uKIv1I^>J8SJ(SyIhxUC*96U2%x z7$IMsF$!2dGT_dAgN9V8d60nVeTR5phHkmnRgWZJgwZT%M2Cd+OqRWNzcK}#pqo{& z(PPSN=fdX$*R#o3u5wRO%D#XSH(7hs&;5#u{%u=xtG3>F3(zxVOK;^X6c20pBd6j= zIxh7+G-?TibPB6yY2nMb8`^bhH@$snQkudyF4nkR<}*GnCcrhiR-ZQHVZsD?6hiR3 zNjvhW@*~PP>QoiUCnI@1?si@ES{F|t!5BwsK9_QUbd53C$j7tS_(PWB*6C3|PvjK} zBWxM13-s6vgk0(9sA6H$0g=TFO(H>;c8nUP_X)eG&dE{x#QB(Wv1Ph1hguliq_Q@ zrF(0`y6#E5u&wHKQ>L*p#iaOy(CcD{L;{z4EvBw@0fSV7FUU~+`(9jIy%h^aHOog8 zey0YFf1-m38jL?@tjK)~X6wmT=Kc^_hV2LHaD3(xjpypPO*%6rYL>s|W!Pky%G;2} zS4BR#chdx&JB-Dw=)xgF7+Oac^2f%T_iDB>^U~ewGE1$#Nw^#r za2TJl-BwdarhcpKIPy$NhNgWrp!FN^pvgO6==`8o7*c2z&t+Hs)cscVHE?WG^znGs z(4)LsCPJj^V(Gcf5o4lj&SyXqKmwvo7V z1XYrgjn-GKwuxROjqaWuYVUq)?<*_&48!v9(eAV|KSl|oVIzkFg}Ma z`z$%=SMqVAmz2zI97gv2ET!knI8Rs9B;Y#oTU=H#d4Kym7ReNe=AF&}Nvh7%1C|uTJngqp#Qe*uf2}a7}vAqZmG6QzJ3X 
zBj?yH@fR@MTym4T`6cVH05IfuUZHQA1Ji(`fl()4bZ1EDtDLNliy&f8cx`A*C)$Jh z(%mE{LF8#^f}6J3Sd&SG@ZpS>-MoNC$Gfb4)Q+aG!o$xTl7v`V@8 z;nqie6^J4^mQ%Nfmx$(f0NrkSUL@a$q$it9i+sT|!@$whq*54Tmv(&%&R~M36?W5n z79)ycvPy%tAyS025nBK_m93|I9Zgi83gZN145RvjJ2GAiOzv0PKD9nXcDqz^gwL(wj}`Xn4l*TmF4G8x|_j}rt0l$Z9C&aEQ|F_{GB0v3Uo|N zDhIJlxfkaftYi*f-*L_Le6llK(RP{7;m=4W!*W87JpkK%u%dc?&XNY|@4G&9*U>0#b3<-*W2{r#L8f}WO+^*J9 z+!&MS3f$c|wh=p`(Xp=g2-qtlJx!1QH0YUtX0DFmSJo72!O=iZ?zGhTgS)Ss^!(d9 z?_Tb)u&`-)*Mn77_{*i83rmj9`TZKZ6_8!sR}dGIV@}Xro})&`{V8wKwe{6d3)sfA zlUdu65Be$f>Ef9pr}Y~gHuOec6IdbIyy3dLbsbr|E1mS*c*{pDg*VGoHm1L&%Cf##cs z0?Z?`N6LNm^LNR3tHXhxaJ+Yp+}SE^up9fP(KT>!dq{^DG*m=I1jT2+gH$V+(?J5j zO7Et7De=7)RWfz|@$ME$G~7ZfC-%mKgCacxTtbz&=46=(k>wHh;&R@|#dYL)&3vZL z(vfv9ZQpgmez9ulOi>|)I0rJCQTe+J7N!rLg6O=tFh+cIBZx|rNy+x`5O%`hAUwXU z?=E$kWVic)INc?nL)xa1S&o0PtH61h7NVUVtMYWI)ML|ZrztiN^ni@+bjdmhOAqLH zuag9_Zq2JPqGHFbXZR;fTF#V=dzTxCY?qj7>T@*+X$FydD*7EHs^ zU*7GnRJU8oIYeL|tHdqf{t;18V|X5}bo2AX8{&TKU$RNI^e>+$looT@>o~**}?qg4E(4mu=n#{wIuE0ULdv3wKyl+C*$BGnZAa9 z>I*AQ{RGoz->gmc)ch*S7hnUTU@$BL7{Y%<3Q+jM`#6A^We=)y{&AImt^AQ3h>VsS z=aBw+X}@lsic0NxUatO6$y0-<*a4f?+CODVWrNfA{w~|?e@enE2gu4JxhU7)aARtY+1ZYGW}^S~@V_rILBA?ZrkV6V7xwpO;6(=>47QUQ@IT-BpLY`1YHm@% vj$-(4>)XK~2(Hhe`5(Xf|G1Mf$q}0FYb$E0dEUop;166<@lm0Kq0j#VM=o{r literal 0 HcmV?d00001 diff --git a/docs/conf.py b/docs/conf.py index 60bcecf32f..a7a932160e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -98,3 +98,54 @@ }, } html_extra_path = ["project.json", "versions1.json"] + +# -- Supporting rendering GitHub alerts correctly ---------------------------- +# https://github.com/executablebooks/MyST-Parser/issues/845 + +_GITHUB_ADMONITIONS = { + "> [!NOTE]": "note", + "> [!TIP]": "tip", + "> [!IMPORTANT]": "important", + "> [!WARNING]": "warning", + "> [!CAUTION]": "caution", +} + + +def convert_gh_admonitions(app, relative_path, parent_docname, contents): + # loop through content lines, replace github admonitions + for i, orig_content in enumerate(contents): + orig_line_splits = 
orig_content.split("\n") + replacing = False + for j, line in enumerate(orig_line_splits): + # look for admonition key + line_roi = line.lstrip() + for admonition_key in _GITHUB_ADMONITIONS: + if line_roi.startswith(admonition_key): + line = line.replace( + admonition_key, + "```{" + _GITHUB_ADMONITIONS[admonition_key] + "}", + ) + # start replacing quotes in subsequent lines + replacing = True + break + else: # no break + if not replacing: + continue + # remove GH directive to match MyST directive + # since we are replacing on the original line, this will preserve the right indent, if any + if line_roi.startswith("> "): + line = line.replace("> ", "", 1) + elif line_roi.rstrip() == ">": + line = line.replace(">", "", 1) + else: + # missing "> ", so stop replacing and terminate directive + line = f"```\n{line}" + replacing = False + # swap line back in splits + orig_line_splits[j] = line + # swap line back in original + contents[i] = "\n".join(orig_line_splits) + + +def setup(app): + app.connect("include-read", convert_gh_admonitions) diff --git a/docs/design-docs/generation.md b/docs/design-docs/generation.md index 275625f371..13d7b3dc65 100644 --- a/docs/design-docs/generation.md +++ b/docs/design-docs/generation.md @@ -62,7 +62,7 @@ A key design principle for generation backends is that they process tokens direc ## VLLM Backend -The VLLM backend (`models/generation/vllm.py`) implements the {py:class}`GenerationInterface ` to provide efficient text generation using the VLLM library, which is optimized for large language models. +The VLLM backend (`models/generation/vllm/vllm_generation.py`) implements the {py:class}`GenerationInterface ` to provide efficient text generation using the VLLM library, which is optimized for large language models. 
### VllmGeneration Class diff --git a/docs/docker.md b/docs/docker.md index 1157e92ebc..f6f93fc1b8 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -1,39 +1,50 @@ # Build Docker Images -This guide provides three methods for building Docker images: +This guide provides two methods for building Docker images: * **release**: Contains everything from the hermetic image, plus the nemo-rl source code and pre-fetched virtual environments for isolated workers. * **hermetic**: Includes the base image plus pre-fetched NeMo RL python packages in the `uv` cache. -* **base**: A minimal image with CUDA, `ray`, and `uv` installed, ideal for specifying Python dependencies at runtime. Use the: * **release** (recommended): if you want to pre-fetch the NeMo RL [worker virtual environments](./design-docs/uv.md#worker-configuration) and copy in the project source code. * **hermetic**: if you want to pre-fetch NeMo RL python packages into the `uv` cache to eliminate the initial overhead of program start. -* **base**: if you just need a minimal image with CUDA, `ray`, and `uv` installed and are okay with dynamically downloading your requirements at runtime. This option trades off fast container download/startup with slower initial overhead to download python packages. ## Release Image The release image is our recommended option as it provides the most complete environment. It includes everything from the hermetic image, plus the nemo-rl source code and pre-fetched virtual environments for isolated workers. This is the ideal choice for production deployments. ```sh -cd docker/ -docker buildx build --target release -t nemo_rl -f Dockerfile .. +# Self-contained build (default: builds from main): +docker buildx build --target release -f docker/Dockerfile --tag /nemo-rl:latest --push . + +# Self-contained build (specific git ref): +docker buildx build --target release -f docker/Dockerfile --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push . 
+ +# Self-contained build (remote NeMo RL source; no need for a local clone of NeMo RL): +docker buildx build --target release -f docker/Dockerfile --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push https://github.com/NVIDIA-NeMo/RL.git + +# Local NeMo RL source override: +docker buildx build --target release --build-context nemo-rl=. -f docker/Dockerfile --tag /nemo-rl:latest --push . ``` +**Note:** The `--tag /nemo-rl:latest --push` flags are not necessary if you just want to build locally. + ## Hermetic Image The hermetic image includes all Python dependencies pre-downloaded in the `uv` cache, eliminating the initial overhead of downloading packages at runtime. This is useful when you need a more predictable environment or have limited network connectivity. ```sh -cd docker/ -docker buildx build --target hermetic -t nemo_rl -f Dockerfile .. -``` +# Self-contained build (default: builds from main): +docker buildx build --target hermetic -f docker/Dockerfile --tag /nemo-rl:latest --push . -## Base Image +# Self-contained build (specific git ref): +docker buildx build --target hermetic -f docker/Dockerfile --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push . -The base image provides a minimal environment with CUDA, `ray`, and `uv` installed. While it's the smallest image, it requires downloading Python dependencies at runtime, which may not be ideal for all use cases. +# Self-contained build (remote NeMo RL source; no need for a local clone of NeMo RL): +docker buildx build --target hermetic -f docker/Dockerfile --build-arg NRL_GIT_REF=r0.3.0 --tag /nemo-rl:r0.3.0 --push https://github.com/NVIDIA-NeMo/RL.git -```sh -cd docker/ -docker buildx build --target base -t nemo_rl -f Dockerfile .. +# Local NeMo RL source override: +docker buildx build --target hermetic --build-context nemo-rl=. -f docker/Dockerfile --tag /nemo-rl:latest --push . ``` + +**Note:** The `--tag /nemo-rl:latest --push` flags are not necessary if you just want to build locally. 
diff --git a/docs/fp8.md b/docs/fp8.md new file mode 100644 index 0000000000..8d3fe2b82d --- /dev/null +++ b/docs/fp8.md @@ -0,0 +1,39 @@ +# FP8 for NeMo-RL + +This module provides a suite of tools to enable FP8 quantization for large language models. This module is still in developement. Currently we support FP8 generation, using Deepseek style FP8 (sub channel scaling). + +NeMo-RL monkey patches several vLLM functions to enable FP8 generations for reinforcement learning. The `init_fp8` function patches key `vLLM` components when initialized: +1. **`RayDistributedExecutor`**: For multi-GPU inference, the executor is patched to ensure that every worker process applies the same FP8 patches before model initialization. +2. **Quantization Utilities**: Functions within `vllm.model_executor.layers.quantization` are replaced with versions that support power-of-2 scaling and other custom features. +3. **Weight Loading**: A custom `load_weights` function handles the on-the-fly quantization of model weights from a higher-precision format to FP8 with the correct scaling factors. + +--- + +## Usage + +FP8 generations are recommended to be configured with the following settings: + + ``` + loss_fn: + # importance sampling helps improve stability + use_importance_sampling_correction: true + + policy: + generation: + vllm_cfg: + precision: 'fp8' + # DeepGemm is much more performant than vLLM's default cutlass fp8 subchannel scaling kernels + use_deep_gemm: true + # Keeping the first and last three layers in bf16 reduces the multi-token error without + # a signficant effect to performance + num_last_layers_in_bf16: 3 + num_first_layers_in_bf16: 1 + # Use FP32 scaling factors. Rounding scaling factors to the nearest pow2 may improve quantization + # fidelity however this feature is still under research. + use_weight_pow2_scale: False + use_activation_pow2_scale: False +``` + +## Accuracy + +We observe on the Llama 8b recipe a ~5% accuracy loss is incurred with FP8 generations. 
Convergence is still under active research and FP8 generations should be used with caution. We are investigating ways to close the accuracy gap and further improve performance. diff --git a/docs/guides/eval.md b/docs/guides/eval.md index 089ac31f5b..f5737913b7 100644 --- a/docs/guides/eval.md +++ b/docs/guides/eval.md @@ -79,7 +79,7 @@ When you complete the evaluation, you will receive a summary similar to the foll ``` ============================================================ model_name='Qwen2.5-Math-1.5B-Instruct' dataset_name='aime2024' -max_new_tokens=2048 temperature=0.0 top_p=1.0 top_k=-1 +max_new_tokens=2048 temperature=0.0 top_p=1.0 top_k=-1 seed=42 metric=pass@1 num_tests_per_prompt=1 diff --git a/docs/guides/grpo-deepscaler.md b/docs/guides/grpo-deepscaler.md index 42f9029230..7b62025783 100644 --- a/docs/guides/grpo-deepscaler.md +++ b/docs/guides/grpo-deepscaler.md @@ -5,12 +5,12 @@ This guide explains how to use NeMo RL to train long Chain of Thought (CoT) reas ## Train the Model We follow the DeepScaleR recipe and train the model in three stages. In the first stage, we train with an 8K context window. In the second stage, we train with a 16K context window. In the third stage, we train with a 24K context window. -To train the model using NeMo RL, use the `examples/configs/grpo-deepscaler-1.5b-8K.yaml` config file. This file closely matches the experiment settings in the original DeepScaleR recipe. We then train with `examples/configs/grpo-deepscaler-1.5b-16K.yaml` and `examples/configs/grpo-deepscaler-1.5b-24K.yaml` for the second and third stages, respectively. +To train the model using NeMo RL, use the `examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml` config file. This file closely matches the experiment settings in the original DeepScaleR recipe. We then train with `examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml` and `examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml` for the second and third stages, respectively. 
```sh -uv run examples/run_grpo_math.py --config=examples/configs/grpo-deepscaler-1.5b-8K.yaml -uv run examples/run_grpo_math.py --config=examples/configs/grpo-deepscaler-1.5b-16K.yaml policy.model_name=/path/to/8K/checkpoint/hf -uv run examples/run_grpo_math.py --config=examples/configs/grpo-deepscaler-1.5b-24K.yaml policy.model_name=/path/to/16K/checkpoint/hf +uv run examples/run_grpo_math.py --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml +uv run examples/run_grpo_math.py --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml policy.model_name=/path/to/8K/checkpoint/hf +uv run examples/run_grpo_math.py --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml policy.model_name=/path/to/16K/checkpoint/hf ``` At the end of each stage, you need to specify the Hugging Face checkpoint to continue training with. To get this checkpoint, we convert a model checkpoint to a Hugging Face checkpoint with the following command: @@ -19,7 +19,7 @@ At the end of each stage, you need to specify the Hugging Face checkpoint to con uv run examples/converters/convert_dcp_to_hf.py --config=results/grpo-deepscaler-1.5b-8K/step_240/config.yaml --dcp-ckpt-path=results/grpo-deepscaler-1.5b-8K/step_240/policy/weights --hf-ckpt-path=results/grpo-deepscaler-1.5b-8K/step_240/hf ``` -When running the next command, we use the Hugging Face checkpoint as the initial checkpoint. We train with an 8K context window for 240 steps, a 16K context window for 290 steps, and a 24K context window for 50 steps. The 8K and 16K steps can be run on a single 8XH100 80GB node, while the 24K step requires four nodes. If you're running on 8XA100 80GB, you will need at least 1 node for 8K training and four nodes for 16-24k training. +When running the next command, we use the Hugging Face checkpoint as the initial checkpoint. We train with an 8K context window for 240 steps, a 16K context window for 290 steps, and a 24K context window for 50 steps. 
We run all experiments on a single 8XH100 80GB node. If you're running on 8XA100 80GB, you will need at least 1 node for 8K training and 2 nodes for 16-24k training. ## Training Curve When using the above commands, we get the following training curve: @@ -35,11 +35,17 @@ Throughout training, the checkpoints of the model will be saved to the `results` uv run examples/run_eval.py \ generation.model_name=results/grpo-deepscaler-1.5b-8K/step_240/hf \ data.prompt_file=examples/prompts/cot.txt \ - generation.vllm_cfg.max_model_len=32768 + generation.vllm_cfg.max_model_len=32768 \ + generation.vllm_cfg.enforce_eager=True \ + generation.temperature=1.0 ``` Use `generation.model_name` to specify the path to the Hugging Face checkpoint. In addition, we use AIME24 as the validation dataset and calculate pass@1 on it throughout training. +> [!NOTE] +> AIME24 only has 30 examples so the accuracy can be very noisy. +> To reduce the variance consider runing `run_eval.py` with `eval.num_tests_per_prompt=16`. + ## Evaluation Results Using the above instructions to train DeepSeek-R1-Distill-Qwen-1.5B on the DeepScaleR dataset, we can track the model's performance on the AIME24 benchmark throughout training. The following plot shows the evaluation metrics as training progresses: diff --git a/docs/guides/grpo.md b/docs/guides/grpo.md index b137d45921..9b92fc6b71 100644 --- a/docs/guides/grpo.md +++ b/docs/guides/grpo.md @@ -107,7 +107,7 @@ This Policy object holds a [RayWorkerGroup](../../nemo_rl/distributed/worker_gro ## Fast Generation -We support vLLM through the [VllmGeneration](../../nemo_rl/models/generation/vllm.py) class right now. +We support vLLM through the [VllmGeneration](../../nemo_rl/models/generation/vllm/vllm_generation.py) class right now. The function [grpo_train](../../nemo_rl/algorithms/grpo.py) contains the core GRPO training loop. 
diff --git a/docs/index.md b/docs/index.md index e39423a23c..75c85cbfde 100644 --- a/docs/index.md +++ b/docs/index.md @@ -49,6 +49,7 @@ testing.md documentation.md debugging.md nsys-profiling.md +fp8.md guides/use-custom-vllm.md apidocs/index.rst ``` diff --git a/docs/model-quirks.md b/docs/model-quirks.md index 6ba7f12f55..52869bf04d 100644 --- a/docs/model-quirks.md +++ b/docs/model-quirks.md @@ -4,14 +4,6 @@ This document outlines special cases and model-specific behaviors that require c ## Gemma-3 -### Tied Weights - -Weight tying between the embedding layer (`model.embed_tokens`) and output layer (`lm_head`) is currently not respected when using the DTensor policy when TP > 1 (See [this issue](https://github.com/NVIDIA-NeMo/RL/issues/227)). To avoid errors when training these models, we only allow training models with tied weights using the DTensor policy with TP=1. For Llama-3 and Qwen2.5 models, weight-tying is only enabled for the smaller models (< 2B), which can typically be trained without tensor parallelism. For Gemma-3, all model sizes have weight-tying enabled, including the larger models which require tensor parallelism. To support training of these models, we specially handle the Gemma-3 models by allowing training using the DTensor policy with TP > 1. - -**Special Handling:** -- We skip the tied weights check for all Gemma-3 models when using the DTensor policy, allowing training using TP > 1. -- We exclude `model.embed_tokens` and `lm_head` from the DTensor tensor parallel plan to maintain weight tying correctly. - ### vLLM Initialization Gemma-3 models have a specific issue with vLLM dummy weight initialization due to a vLLM bug where [a `normalizer` buffer is created](https://github.com/vllm-project/vllm/blob/964472b9667508b1d4a7ed92068ff81740ae0036/vllm/model_executor/models/gemma3.py#L372) that is not present in the Hugging Face model. 
This causes the `normalizer` buffer to be set to dummy weights at initialization and then never updated with the correct values during model refit. As a workaround for this issue, we do not use dummy weight initialization for vLLM with Gemma-3 models and instead use the `load_format="auto"` setting to load the full weights at initialization. @@ -39,6 +31,13 @@ Whether model level support CP only depends on arguments passed to `torch.nn.fun - It's a known issue that context parallel can't be used together with sequence parallel. Refer to [here](https://github.com/NVIDIA-NeMo/RL/issues/659) for more details. +## DeepScaleR Recipe Convergence Issues + +The DeepScaleR recipe (e.g., `examples/configs/grpo-deepscaler-1.5b-8K.yaml`) has been found to experience convergence issues when CUDA graphs are enabled in vLLM. + +**Special Handling:** +- CUDA graphs must be disabled by setting `enforce_eager: True` in the vLLM configuration (https://github.com/NVIDIA-NeMo/RL/pull/857 forces eager execution by default). + ## vLLM Async Rollout Timeout vLLM async generation has a configurable timeout for waiting for individual sample results. This is particularly important for longer sequences on large models. diff --git a/docs/nsys-profiling.md b/docs/nsys-profiling.md index db3f6a768a..951251420c 100644 --- a/docs/nsys-profiling.md +++ b/docs/nsys-profiling.md @@ -17,7 +17,7 @@ NeMo RL supports Nsight profiling for Ray workers through environment variable p Set the `NRL_NSYS_WORKER_PATTERNS` environment variable with a comma-separated list of patterns to match worker names: ```bash -export NRL_NSYS_WORKER_PATTERNS="*policy*,*other-worker*" +export NRL_NSYS_WORKER_PATTERNS="*policy*,*vllm*" ``` Set the `NRL_NSYS_PROFILE_STEP_RANGE` environment variable to control which training steps the profiler captures. 
Its @@ -40,7 +40,7 @@ export NRL_NSYS_PROFILE_STEP_RANGE=3:5 The supported worker types are: - **DTensorPolicyWorker**: Pattern matched against `"dtensor_policy_worker"` -- **MegatronPolicyWorker**: Pattern matched against `"megatron_policy_worker"` +- **VllmGenerationWorker**: Pattern matched against `"vllm_generation_worker"` ## Example Usage @@ -49,10 +49,16 @@ The supported worker types are: NRL_NSYS_PROFILE_STEP_RANGE=2:3 NRL_NSYS_WORKER_PATTERNS="*policy*" uv run examples/run_grpo_math.py grpo.max_num_steps=5 ``` +### Profile Multiple Worker Types + +```bash +NRL_NSYS_PROFILE_STEP_RANGE=1:2 NRL_NSYS_WORKER_PATTERNS="*policy*,*vllm*" uv run examples/run_grpo_math.py grpo.max_num_steps=5 +``` + ### Profile Workers with Exact Names ```bash -NRL_NSYS_PROFILE_STEP_RANGE=3:10 NRL_NSYS_WORKER_PATTERNS="dtensor_policy_worker" uv run examples/run_grpo_math.py grpo.max_num_steps=5 +NRL_NSYS_PROFILE_STEP_RANGE=3:10 NRL_NSYS_WORKER_PATTERNS="dtensor_policy_worker,vllm_generation_worker" uv run examples/run_grpo_math.py grpo.max_num_steps=5 ``` ### Profile Megatron Workers @@ -63,7 +69,7 @@ To profile a Megatron worker, you should set `LD_LIBRARY_PATH` as follows, other ```bash LD_LIBRARY_PATH="/usr/local/cuda/targets/x86_64-linux/lib:/usr/local/cuda/lib64:/usr/local/cuda/lib:/usr/local/nvidia/lib64:/usr/local/nvidia/lib:/usr/lib/x86_64-linux-gnu" \ -NRL_NSYS_PROFILE_STEP_RANGE=2:3 NRL_NSYS_WORKER_PATTERNS="megatron_policy_worker" uv run examples/run_grpo_math.py --config examples/configs/grpo_math_1B_megatron.yaml grpo.max_num_steps=5 +NRL_NSYS_PROFILE_STEP_RANGE=2:3 NRL_NSYS_WORKER_PATTERNS="megatron_policy_worker,vllm_generation_worker" uv run examples/run_grpo_math.py --config examples/configs/grpo_math_1B_megatron.yaml grpo.max_num_steps=5 ``` ## Profile Output @@ -78,7 +84,10 @@ When profiling is enabled, it generates the following logs and files: 2. 
**Profile Files**: Each profiled worker generates a `.nsys-rep` file with naming pattern: ``` dtensor_policy_worker__.nsys-rep + vllm_generation_worker__.nsys-rep + worker_process_.nsys-rep ``` +If you are not using model parallelism in Vllm, you should directly refer to `vllm_generation_worker__.nsys-rep` for nsight reports; If you are using model parallelism, the `vllm_generation_worker__.nsys-rep` will be empty, and the `worker_process_.nsys-rep` are nsight profiles from vllm's ray distributed executors (refer to https://github.com/vllm-project/vllm/blob/7e3a8dc90670fd312ce1e0d4eba9bf11c571e3ad/vllm/executor/ray_distributed_executor.py#L136 for more information). 3. **File Location**: Profile files are saved in `/tmp/ray/session*/logs/nsight/` directory on each worker node. Ensure you check both `ls /tmp/ray/session_[0-9]*/logs/nsight` and `ls /tmp/ray/session_latest/logs/nsight` for the profiles, since the "latest" pointer may be stale. diff --git a/docs/testing.md b/docs/testing.md index 8ce97346b9..5a24452813 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -4,9 +4,15 @@ This guide outlines how to test NeMo RL using unit and functional tests, detaili ## Unit Tests -:::{important} -Unit tests require 2 GPUs to test the full suite. -::: +> [!IMPORTANT] +> Unit tests require 2 GPUs to test the full suite. 
+ +> [!TIP] +> Some unit tests require setting up test assets which you can download with +> ```sh +> uv run tests/unit/prepare_unit_test_assets.py +> ``` + ```sh # Run the unit tests using local GPUs diff --git a/examples/configs/dpo.yaml b/examples/configs/dpo.yaml index e6a2b7d5f9..11a2ed1acf 100755 --- a/examples/configs/dpo.yaml +++ b/examples/configs/dpo.yaml @@ -26,6 +26,7 @@ checkpointing: higher_is_better: false keep_top_k: 3 save_period: 50 + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.2-1B-Instruct" @@ -44,6 +45,8 @@ policy: precision: "bfloat16" dtensor_cfg: + env_vars: + PYTORCH_CUDA_ALLOC_CONF: "" # Refers to https://docs.pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf enabled: true cpu_offload: False sequence_parallel: false @@ -154,14 +157,16 @@ data: # dataset_name: PreferenceDataset # train_data_path: # val_data_path: + shuffle: true logger: log_dir: "logs" # Base directory for all logs wandb_enabled: false # Make sure you do a ``wandb login [Your API key]'' before running + tensorboard_enabled: false mlflow_enabled: false # Disable MLflow logging monitor_gpus: true # If true, will monitor GPU usage and log to wandb and/or tensorboard - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: "dpo-dev" name: "dpo" diff --git a/examples/configs/grpo_math_1B.yaml b/examples/configs/grpo_math_1B.yaml index b9be32bdda..5b49db2b38 100644 --- a/examples/configs/grpo_math_1B.yaml +++ b/examples/configs/grpo_math_1B.yaml @@ -10,6 +10,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 @@ -19,6 +20,7 @@ loss_fn: # (default off) loss formulation improvements (docs/guides/grpo.md#loss) use_on_policy_kl_approximation: false use_importance_sampling_correction: false + 
sequence_level_importance_ratios: false token_level_loss: true checkpointing: @@ -28,9 +30,9 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: - # Qwen/Qwen2.5-1.5B has tied weights which are only supported with dtensor policy with tp size 1 (https://github.com/NVIDIA-NeMo/RL/issues/227) model_name: "Qwen/Qwen2.5-1.5B" tokenizer: name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default @@ -40,8 +42,10 @@ policy: logprob_batch_size: 4 max_total_sequence_length: 512 precision: "bfloat16" + logprob_chunk_size: null dtensor_cfg: + _v2: true enabled: true cpu_offload: False sequence_parallel: false @@ -52,6 +56,67 @@ policy: megatron_cfg: enabled: false + empty_unused_memory_level: 0 + activation_checkpointing: false + converter_type: "Qwen2ForCausalLM" + tensor_model_parallel_size: 1 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 1 + pipeline_model_parallel_size: 1 + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + context_parallel_size: 1 + pipeline_dtype: ${policy.precision} + sequence_parallel: false + freeze_moe_router: true + moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + #gives ~20% training perf speedup with sequence packing + apply_rope_fusion: True + defer_fp32_logits: null + + optimizer: + optimizer: "adam" + lr: 5.0e-6 + min_lr: 5.0e-7 + weight_decay: 0.01 + bf16: true + fp16: false + params_dtype: "float32" + + #adam + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + + #sgd + sgd_momentum: 0.9 + + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: 
${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 13 + lr_warmup_init: 5.0e-7 + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: true + average_in_collective: true + use_custom_fsdp: false + data_parallel_sharding_strategy: "optim_grads_params" + + env_vars: null # See docs/design-docs/sequence-packing-and-dynamic-batching.md # for more details on dynamic batching and sequence packing. @@ -113,6 +178,9 @@ policy: gpu_memory_utilization: 0.6 max_model_len: ${policy.max_total_sequence_length} enforce_eager: False + use_deep_gemm: False + num_last_layers_in_bf16: 0 + num_first_layers_in_bf16: 0 colocated: # true: generation shares training GPUs # false: uses dedicated generation resources @@ -127,6 +195,7 @@ data: prompt_file: "examples/prompts/cot.txt" system_prompt_file: null dataset_name: "OpenMathInstruct-2" + shuffle: true env: math: diff --git a/examples/configs/grpo_math_1B_megatron.yaml b/examples/configs/grpo_math_1B_megatron.yaml index cf6ba44d75..a8ce18e220 100644 --- a/examples/configs/grpo_math_1B_megatron.yaml +++ b/examples/configs/grpo_math_1B_megatron.yaml @@ -29,6 +29,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: "Qwen/Qwen2.5-1.5B" @@ -146,6 +147,7 @@ data: prompt_file: "examples/prompts/cot.txt" system_prompt_file: null dataset_name: "OpenMathInstruct-2" + shuffle: true env: math: diff --git a/examples/configs/grpo_math_8B_megatron.yaml b/examples/configs/grpo_math_8B_megatron.yaml index 3f68344417..90cfb49e6b 100644 --- a/examples/configs/grpo_math_8B_megatron.yaml +++ b/examples/configs/grpo_math_8B_megatron.yaml @@ -8,6 +8,7 @@ grpo: checkpointing: enabled: false checkpoint_dir: "results/grpo_8b_megatron" + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.1-8B-Instruct" diff 
--git a/examples/configs/grpo_math_8B_megatron_fp8.yaml b/examples/configs/grpo_math_8B_megatron_fp8.yaml new file mode 100644 index 0000000000..c171bba100 --- /dev/null +++ b/examples/configs/grpo_math_8B_megatron_fp8.yaml @@ -0,0 +1,13 @@ +# GRPO Algorithm Configuration +defaults: "grpo_math_8B_megatron.yaml" + +loss_fn: + use_importance_sampling_correction: true + +policy: + generation: + vllm_cfg: + precision: 'fp8' + use_deep_gemm: true + num_last_layers_in_bf16: 0 + num_first_layers_in_bf16: 0 diff --git a/examples/configs/grpo_math_qwen30ba3b_megatron.yaml b/examples/configs/grpo_math_qwen30ba3b_megatron.yaml index e7cae09858..1a0cc651c7 100644 --- a/examples/configs/grpo_math_qwen30ba3b_megatron.yaml +++ b/examples/configs/grpo_math_qwen30ba3b_megatron.yaml @@ -55,7 +55,7 @@ policy: lr_decay_iters: null lr_warmup_iters: 13 lr_warmup_init: 3.0e-8 - + env_vars: PYTORCH_CUDA_ALLOC_CONF: "expandable_segments:False" diff --git a/examples/configs/grpo_sliding_puzzle.yaml b/examples/configs/grpo_sliding_puzzle.yaml index 97f54cc67a..db6f2c1e64 100644 --- a/examples/configs/grpo_sliding_puzzle.yaml +++ b/examples/configs/grpo_sliding_puzzle.yaml @@ -14,6 +14,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: "Qwen/Qwen2.5-1.5B-Instruct" @@ -44,6 +45,7 @@ policy: data: add_system_prompt: false + shuffle: false # disable dataloader shuffle, shuffle is handled within the dataset env: sliding_puzzle_game: diff --git a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.yaml b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.yaml index 4906550001..72dcb9ad1e 100644 --- a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.yaml +++ b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.yaml @@ -21,6 +21,7 @@ checkpointing: higher_is_better: false keep_top_k: 3 save_period: 10000 + 
checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.1-8B-Instruct" @@ -73,6 +74,7 @@ policy: data: dataset_name: "HelpSteer3" max_input_seq_length: ${policy.max_total_sequence_length} + shuffle: true logger: log_dir: "logs" @@ -80,7 +82,6 @@ logger: tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl name: dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1 diff --git a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.yaml b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.yaml similarity index 89% rename from examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.yaml rename to examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.yaml index e7eaef706a..22851b368c 100644 --- a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.yaml +++ b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.yaml @@ -20,7 +20,8 @@ checkpointing: metric_name: "val_loss" higher_is_better: false keep_top_k: 3 - save_period: 10000 + save_period: 50 + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.1-8B-Instruct" @@ -28,14 +29,14 @@ policy: name: ${policy.model_name} train_global_batch_size: 256 train_micro_batch_size: 1 - max_total_sequence_length: 2048 + max_total_sequence_length: 8192 precision: "bfloat16" dtensor_cfg: enabled: true cpu_offload: False sequence_parallel: false activation_checkpointing: false - tensor_parallel_size: 1 + tensor_parallel_size: 4 context_parallel_size: 1 custom_parallel_plan: null @@ -73,6 +74,7 @@ policy: data: dataset_name: "HelpSteer3" max_input_seq_length: ${policy.max_total_sequence_length} + shuffle: true logger: log_dir: "logs" @@ -80,10 +82,9 @@ logger: tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty 
print on terminal wandb: project: nemo-rl - name: dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1 + name: dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4 tensorboard: {} gpu_monitoring: collection_interval: 10 diff --git a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.yaml b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.v2.yaml similarity index 92% rename from examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.yaml rename to examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.v2.yaml index 789f4fcbdf..1960502a09 100644 --- a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.yaml +++ b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.v2.yaml @@ -20,7 +20,8 @@ checkpointing: metric_name: "val_loss" higher_is_better: false keep_top_k: 3 - save_period: 10000 + save_period: 50 + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.1-8B-Instruct" @@ -28,7 +29,7 @@ policy: name: ${policy.model_name} train_global_batch_size: 256 train_micro_batch_size: 1 - max_total_sequence_length: 2048 + max_total_sequence_length: 8192 precision: "bfloat16" dtensor_cfg: enabled: false @@ -48,7 +49,7 @@ policy: enabled: true empty_unused_memory_level: 1 activation_checkpointing: false - tensor_model_parallel_size: 2 + tensor_model_parallel_size: 4 expert_tensor_parallel_size: 1 expert_model_parallel_size: 1 pipeline_model_parallel_size: 1 @@ -106,6 +107,7 @@ policy: data: dataset_name: "HelpSteer3" max_input_seq_length: ${policy.max_total_sequence_length} + shuffle: true logger: log_dir: "logs" @@ -113,10 +115,9 @@ logger: tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl - name: dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1 + name: dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1-megatron tensorboard: {} gpu_monitoring: collection_interval: 10 diff --git 
a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.yaml b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.yaml index 7d480f58a3..987e70dc88 100644 --- a/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.yaml +++ b/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.yaml @@ -21,6 +21,7 @@ checkpointing: higher_is_better: false keep_top_k: 3 save_period: 10000 + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.1-8B-Instruct" @@ -106,6 +107,7 @@ policy: data: dataset_name: "HelpSteer3" max_input_seq_length: ${policy.max_total_sequence_length} + shuffle: true logger: log_dir: "logs" @@ -113,7 +115,6 @@ logger: tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl name: dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1 diff --git a/examples/configs/recipes/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.yaml b/examples/configs/recipes/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.yaml new file mode 100644 index 0000000000..c3398a6cd5 --- /dev/null +++ b/examples/configs/recipes/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.yaml @@ -0,0 +1,51 @@ +defaults: "../../dpo.yaml" + +cluster: + num_nodes: 1 + gpus_per_node: 8 + +policy: + model_name: "allenai/Llama-3.1-Tulu-3-8B-SFT" + tokenizer: + name: "allenai/Llama-3.1-Tulu-3-8B-SFT" + train_micro_batch_size: 1 + train_global_batch_size: 128 + max_total_sequence_length: 2048 + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 5.0e-7 + weight_decay: 0.0 + scheduler: + - name: "torch.optim.lr_scheduler.LinearLR" + kwargs: + start_factor: 1.0e-6 + end_factor: 1.0 + total_iters: 211 + - name: "torch.optim.lr_scheduler.LinearLR" + kwargs: + start_factor: 1.0 + end_factor: 0.0 + total_iters: 1899 + - milestones: [211] + +data: + dataset_name: "Tulu3Preference" + +dpo: + max_num_steps: 2110 + 
val_period: -1 + val_at_start: false + preference_average_log_probs: True + reference_policy_kl_penalty: 5 + val_micro_batch_size: ${policy.train_micro_batch_size} + val_global_batch_size: ${policy.train_global_batch_size} + +checkpointing: + metric_name: null + save_period: 250 + +logger: + wandb_enabled: True + wandb: + name: "dpo-tulu3-8b" diff --git a/examples/configs/recipes/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.yaml b/examples/configs/recipes/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.yaml index 8863fad45f..22870f0e66 100644 --- a/examples/configs/recipes/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.yaml +++ b/examples/configs/recipes/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.yaml @@ -21,6 +21,7 @@ checkpointing: higher_is_better: false keep_top_k: 3 save_period: 50 + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.2-1B-Instruct" @@ -74,13 +75,14 @@ policy: data: dataset_name: "HelpSteer3" max_input_seq_length: ${policy.max_total_sequence_length} + shuffle: true + logger: log_dir: "logs" wandb_enabled: true tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl name: dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1 diff --git a/examples/configs/recipes/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.yaml b/examples/configs/recipes/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.yaml new file mode 100644 index 0000000000..084ea843f2 --- /dev/null +++ b/examples/configs/recipes/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.yaml @@ -0,0 +1,106 @@ +# DPO Algorithm Configuration +dpo: + max_num_epochs: 1 + max_num_steps: 100 + val_period: 10 + val_batches: 1 + val_global_batch_size: 16 + val_micro_batch_size: 1 + val_at_start: true + seed: 42 + + reference_policy_kl_penalty: 0.1 + preference_average_log_probs: False # whether normalizing log probs according to the 
sequence length in preference_loss + sft_average_log_probs: ${.preference_average_log_probs} # whether normalizing log probs according to the sequence length in sft_loss + + preference_loss_weight: 1 # the coefficient of the preference loss + sft_loss_weight: 0 # the coefficient of the SFT loss + +checkpointing: + enabled: true + checkpoint_dir: "results/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long" + metric_name: "val_loss" + higher_is_better: false + keep_top_k: null + save_period: 50 + checkpoint_must_save_by: null + +policy: + model_name: "mistralai/Mistral-Nemo-Instruct-2407" + tokenizer: + name: ${policy.model_name} + + # number of preference samples per batch + # each preference sample corresponds to a pair of chosen and rejected responses + # so the actual batch size processed by the model is train_global_batch_size * 2 + train_global_batch_size: 8 + train_micro_batch_size: 1 + + + #logprob_batch_size: ${policy.train_micro_batch_size} + max_total_sequence_length: 12288 + precision: "bfloat16" + + dtensor_cfg: + enabled: true + cpu_offload: false + sequence_parallel: false + activation_checkpointing: true + tensor_parallel_size: 8 + context_parallel_size: 1 + custom_parallel_plan: null + env_vars: + PYTORCH_CUDA_ALLOC_CONF: "max_split_size_mb:64" + + dynamic_batching: + enabled: false + + sequence_packing: + enabled: false + + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size} + max_grad_norm: 1.0 + + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 1.0e-6 + weight_decay: 0.01 + betas: [0.9, 0.999] + eps: 1e-8 + # when using Dtensor, we need to set foreach + # and fused to False + foreach: False + fused: False + + scheduler: + - name: "torch.optim.lr_scheduler.ConstantLR" + kwargs: + factor: 1.0 + total_iters: 10000000000 + - milestones: [] + +data: + dataset_name: "HelpSteer3" + shuffle: 
False + max_input_seq_length: ${policy.max_total_sequence_length} + +logger: + log_dir: "logs/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long" # Base directory for all logs + wandb_enabled: false # Make sure you do a ``wandb login [Your API key]'' before running + tensorboard_enabled: false + mlflow_enabled: false + monitor_gpus: true # If true, will monitor GPU usage and log to wandb and/or tensorboard + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + wandb: + project: "nemo-rl" + name: "dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long" + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 8 + num_nodes: 1 diff --git a/examples/configs/grpo-deepscaler-1.5b-16K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml similarity index 94% rename from examples/configs/grpo-deepscaler-1.5b-16K.yaml rename to examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml index 866b365da4..570fecb1b9 100644 --- a/examples/configs/grpo-deepscaler-1.5b-16K.yaml +++ b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml @@ -8,6 +8,7 @@ loss_fn: policy: max_total_sequence_length: 16384 + logprob_batch_size: 2 dtensor_cfg: enabled: true diff --git a/examples/configs/grpo-deepscaler-1.5b-24K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml similarity index 79% rename from examples/configs/grpo-deepscaler-1.5b-24K.yaml rename to examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml index b8ab06496f..ab4918df73 100644 --- a/examples/configs/grpo-deepscaler-1.5b-24K.yaml +++ b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml @@ -8,6 +8,7 @@ loss_fn: policy: max_total_sequence_length: 24576 + logprob_batch_size: 2 dtensor_cfg: enabled: true @@ -42,11 +43,5 @@ policy: tensor_parallel_size: 1 pipeline_parallel_size: 1 
gpu_memory_utilization: 0.8 + enforce_eager: True max_model_len: ${policy.max_total_sequence_length} - # For most cases, use "dummy" to load the initial weights, since they will be overwritten during refit - # For Gemma models, we need to use "auto" due to a vllm bug - load_format: dummy - -cluster: - gpus_per_node: 8 - num_nodes: 4 diff --git a/examples/configs/grpo-deepscaler-1.5b-8K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml similarity index 91% rename from examples/configs/grpo-deepscaler-1.5b-8K.yaml rename to examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml index e742480739..7513390aaa 100644 --- a/examples/configs/grpo-deepscaler-1.5b-8K.yaml +++ b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml @@ -10,6 +10,7 @@ grpo: val_at_start: false max_val_samples: 480 val_batch_size: 32 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.0 @@ -28,9 +29,9 @@ checkpointing: higher_is_better: true keep_top_k: 10 save_period: 10 + checkpoint_must_save_by: null policy: - # Qwen/Qwen2.5-1.5B has tied weights which are only supported with dtensor policy with tp size 1 (https://github.com/NVIDIA-NeMo/RL/issues/227) model_name: "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B" tokenizer: name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default @@ -100,10 +101,7 @@ policy: pipeline_parallel_size: 1 gpu_memory_utilization: 0.6 max_model_len: ${policy.max_total_sequence_length} - enforce_eager: False - # For most cases, use "dummy" to load the initial weights, since they will be overwritten during refit - # For Gemma models, we need to use "auto" due to a vllm bug - load_format: dummy + enforce_eager: True colocated: # true: generation shares training GPUs # false: uses dedicated generation resources @@ -118,6 +116,7 @@ data: prompt_file: "examples/prompts/cot.txt" system_prompt_file: null dataset_name: "DeepScaler" + shuffle: true env: math: diff --git 
a/examples/configs/recipes/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.yaml b/examples/configs/recipes/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.yaml index 102c274bd6..cade2ee778 100644 --- a/examples/configs/recipes/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.yaml +++ b/examples/configs/recipes/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: google/gemma-3-1b-it tokenizer: @@ -104,6 +106,7 @@ data: prompt_file: examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-gemma3-27b-it-16n8g-fsdp2tp8sp-actckpt-long.yaml b/examples/configs/recipes/llm/grpo-gemma3-27b-it-16n8g-fsdp2tp8sp-actckpt-long.yaml index ff89e45881..9f37e6edce 100644 --- a/examples/configs/recipes/llm/grpo-gemma3-27b-it-16n8g-fsdp2tp8sp-actckpt-long.yaml +++ b/examples/configs/recipes/llm/grpo-gemma3-27b-it-16n8g-fsdp2tp8sp-actckpt-long.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: google/gemma-3-27b-it tokenizer: @@ -105,6 +107,7 @@ data: prompt_file: examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-gspo-deepscaler-1.5b-8K.yaml b/examples/configs/recipes/llm/grpo-gspo-deepscaler-1.5b-8K.yaml new file mode 100644 index 0000000000..35bd140c1a --- /dev/null +++ b/examples/configs/recipes/llm/grpo-gspo-deepscaler-1.5b-8K.yaml @@ -0,0 +1,146 @@ +# GRPO 
Algorithm Configuration +grpo: + num_prompts_per_step: 128 + num_generations_per_prompt: 8 + max_rollout_turns: 1 # for multi-turn rollouts. Math Environments just have 1 turn (answering the question) + max_num_steps: 1000000 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 480 + val_batch_size: 32 + seed: 42 + +loss_fn: + reference_policy_kl_penalty: 0.0 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + ratio_clip_c: null + # (default off) loss formulation improvements (docs/guides/grpo.md#loss) + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + sequence_level_importance_ratios: true + token_level_loss: false + +checkpointing: + enabled: true + checkpoint_dir: "results/grpo" + metric_name: "val_reward" + higher_is_better: true + keep_top_k: 10 + save_period: 10 + checkpoint_must_save_by: null + +policy: + model_name: "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 64 + train_micro_batch_size: 1 + generation_batch_size: 32 # Only used when generating using HF backend + logprob_batch_size: 4 + max_total_sequence_length: 8192 + precision: "bfloat16" + + dtensor_cfg: + enabled: true + cpu_offload: true + sequence_parallel: true + activation_checkpointing: true + tensor_parallel_size: 1 + context_parallel_size: 1 + custom_parallel_plan: null + + dynamic_batching: + enabled: False + + sequence_packing: + enabled: False + + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size} + max_grad_norm: 1.0 + + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 2.0e-6 + weight_decay: 0.01 + betas: [0.9, 0.999] + eps: 1e-8 + # when using Dtensor, we need to set foreach + # and fused to 
False + foreach: False + fused: False + + scheduler: + - name: "torch.optim.lr_scheduler.LinearLR" + kwargs: + start_factor: 0.1 + end_factor: 1.0 + total_iters: 50 + - name: "torch.optim.lr_scheduler.ConstantLR" + kwargs: + factor: 1.0 + total_iters: 10000000000 + - milestones: [50] + + generation: + backend: "vllm" + max_new_tokens: ${policy.max_total_sequence_length} + temperature: 1.0 + top_p: 1.0 + top_k: null + stop_token_ids: null + stop_strings: null + vllm_cfg: + async_engine: false + precision: ${policy.precision} + tensor_parallel_size: 1 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: ${policy.max_total_sequence_length} + enforce_eager: True + colocated: + # true: generation shares training GPUs + # false: uses dedicated generation resources + enabled: true + # only relevant when enabled is false + resources: + gpus_per_node: null # Decides num gpus to be dedicated to generation when there is one node in the cluster i.e cluster.num_nodes == 1 + num_nodes: null # Decides number of nodes to be dedicated to generation + +data: + max_input_seq_length: ${policy.max_total_sequence_length} # upper bound, real truncation occurs at vllm.max_model_len + prompt_file: "examples/prompts/cot.txt" + system_prompt_file: null + dataset_name: "DeepScaler" + shuffle: true + +env: + math: + num_workers: 16 + +logger: + log_dir: "logs" # Base directory for all logs + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + wandb_enabled: false + tensorboard_enabled: false + mlflow_enabled: false + monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "grpo-dev" + name: "grpo-dev-logger" + tensorboard: {} + mlflow: + experiment_name: "grpo-dev" + run_name: "grpo-dev-logger" + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + 
+cluster: + gpus_per_node: 8 + num_nodes: 1 diff --git a/examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.yaml b/examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.yaml new file mode 100644 index 0000000000..9a62f9008a --- /dev/null +++ b/examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.yaml @@ -0,0 +1,161 @@ +grpo: + num_prompts_per_step: 64 + num_generations_per_prompt: 32 + max_rollout_turns: 1 + max_num_steps: 500 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 + seed: 42 +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + ratio_clip_c: null + use_on_policy_kl_approximation: false + use_importance_sampling_correction: True + token_level_loss: true +checkpointing: + enabled: true + checkpoint_dir: results/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8 + metric_name: val_reward + higher_is_better: true + keep_top_k: 3 + save_period: 10 + checkpoint_must_save_by: null +policy: + model_name: meta-llama/Llama-3.1-8B-Instruct + tokenizer: + name: meta-llama/Llama-3.1-8B-Instruct + train_global_batch_size: 512 + train_micro_batch_size: 1 + generation_batch_size: 32 + logprob_batch_size: 2 + max_total_sequence_length: 4096 + precision: bfloat16 + make_sequence_length_divisible_by: 1 + max_grad_norm: 1 + + dtensor_cfg: + enabled: False + + dynamic_batching: + enabled: False + + sequence_packing: + enabled: True + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + algorithm: "modified_first_fit_decreasing" + sequence_length_round: 64 + + megatron_cfg: + enabled: True + empty_unused_memory_level: 1 + converter_type: "LlamaForCausalLM" + tensor_model_parallel_size: 1 + pipeline_model_parallel_size: 2 + context_parallel_size: 1 + 
expert_tensor_parallel_size: 1 + expert_model_parallel_size: 1 + sequence_parallel: False + pipeline_dtype: ${policy.precision} + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + freeze_moe_router: True + moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + apply_rope_fusion: True + activation_checkpointing: True + defer_fp32_logits: True + + optimizer: + optimizer: "adam" + lr: 5.0e-7 + min_lr: 5.0e-8 + weight_decay: 0.0 + bf16: True + fp16: False + params_dtype: "float32" + + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + + use_distributed_optimizer: True + use_precision_aware_optimizer: True + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 2 + lr_warmup_init: 5.0e-8 + + distributed_data_parallel_config: + grad_reduce_in_fp32: False + overlap_grad_reduce: True + overlap_param_gather: True + average_in_collective: True + use_custom_fsdp: False + data_parallel_sharding_strategy: "optim_grads_params" + + generation: + backend: vllm + max_new_tokens: 4096 + temperature: 1 + top_p: 1 + top_k: null + stop_token_ids: + - 128009 + stop_strings: null + vllm_cfg: + async_engine: false + precision: 'fp8' + tensor_parallel_size: 1 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: 4096 + enforce_eager: False + use_deep_gemm: true + num_last_layers_in_bf16: 0 + num_first_layers_in_bf16: 0 + colocated: + enabled: true + resources: + gpus_per_node: null + num_nodes: null +data: + max_input_seq_length: 4096 + prompt_file: examples/prompts/cot.txt + system_prompt_file: null + dataset_name: OpenMathInstruct-2 + shuffle: 
true +env: + math: + num_workers: 8 +logger: + log_dir: logs/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8 + num_val_samples_to_print: 0 + wandb_enabled: true + tensorboard_enabled: true + mlflow_enabled: false + monitor_gpus: true + wandb: + project: nemo-rl + name: grpo-llama3.1-8b-instruct-1n8g-megatron-fp8 + tensorboard: {} + gpu_monitoring: + collection_interval: 10 + flush_interval: 10 +cluster: + gpus_per_node: 8 + num_nodes: 4 diff --git a/examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-4n8g-fsdp2tp1-long.v3.yaml b/examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-4n8g-fsdp2tp1-long.v3.yaml index d778674238..5f9e983920 100644 --- a/examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-4n8g-fsdp2tp1-long.v3.yaml +++ b/examples/configs/recipes/llm/grpo-llama3.1-8b-instruct-4n8g-fsdp2tp1-long.v3.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: meta-llama/Llama-3.1-8B-Instruct tokenizer: @@ -105,6 +107,7 @@ data: prompt_file: examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.yaml b/examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.yaml index ea4f5e66e0..db36e1b59c 100644 --- a/examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.yaml +++ b/examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: 
meta-llama/Llama-3.2-1B-Instruct tokenizer: @@ -105,6 +107,7 @@ data: prompt_file: examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.yaml b/examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.yaml new file mode 100755 index 0000000000..8f17d32819 --- /dev/null +++ b/examples/configs/recipes/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.yaml @@ -0,0 +1,159 @@ +grpo: + num_prompts_per_step: 32 + num_generations_per_prompt: 16 + max_rollout_turns: 1 + max_num_steps: 500 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 + seed: 42 +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + ratio_clip_c: null + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + token_level_loss: true +checkpointing: + enabled: false + checkpoint_dir: results/grpo-llama3.2-1b-instruct-1n8g-megatron + metric_name: val_reward + higher_is_better: true + keep_top_k: 3 + save_period: 100 + checkpoint_must_save_by: null +policy: + model_name: meta-llama/Llama-3.2-1B-Instruct + tokenizer: + name: meta-llama/Llama-3.2-1B-Instruct + train_global_batch_size: 512 + train_micro_batch_size: 4 + generation_batch_size: 32 + logprob_batch_size: 4 + max_total_sequence_length: 512 + precision: bfloat16 + optimizer: null + megatron_cfg: + enabled: true + empty_unused_memory_level: 0 + activation_checkpointing: false + tensor_model_parallel_size: 1 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 1 + pipeline_model_parallel_size: 1 + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + context_parallel_size: 1 + pipeline_dtype: ${policy.precision} + sequence_parallel: false + freeze_moe_router: true + moe_router_dtype: "fp64" + 
moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + #gives ~20% training perf speedup with sequence packing + apply_rope_fusion: True + + optimizer: + optimizer: "adam" + lr: 5.0e-6 + min_lr: 5.0e-7 + weight_decay: 0.01 + bf16: true + fp16: false + params_dtype: "float32" + + #adam + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + + #sgd + sgd_momentum: 0.9 + + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 50 + lr_warmup_init: 5.0e-7 + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: true + average_in_collective: true + use_custom_fsdp: false + data_parallel_sharding_strategy: "optim_grads_params" + + dtensor_cfg: + enabled: false + dynamic_batching: + enabled: False + sequence_packing: + enabled: True + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + algorithm: "modified_first_fit_decreasing" + sequence_length_round: 64 + make_sequence_length_divisible_by: 1 + max_grad_norm: 1 + generation: + backend: vllm + max_new_tokens: 512 + temperature: 1 + top_p: 1 + top_k: null + stop_token_ids: + - 128009 + stop_strings: null + vllm_cfg: + async_engine: false + precision: ${policy.precision} + tensor_parallel_size: 1 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: 512 + enforce_eager: False + colocated: + enabled: true + resources: + gpus_per_node: null + num_nodes: 
null +data: + max_input_seq_length: 512 + prompt_file: examples/prompts/cot.txt + system_prompt_file: null + dataset_name: OpenMathInstruct-2 + shuffle: true +env: + math: + num_workers: 8 +logger: + log_dir: logs/grpo-llama3.2-1b-instruct-1n8g-megatron + num_val_samples_to_print: 0 + wandb_enabled: true + tensorboard_enabled: true + mlflow_enabled: False + monitor_gpus: true + wandb: + project: nemo-rl + name: grpo-llama3.2-1b-instruct-1n8g-megatron + tensorboard: {} + gpu_monitoring: + collection_interval: 10 + flush_interval: 10 +cluster: + gpus_per_node: 8 + num_nodes: 1 diff --git a/examples/configs/recipes/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.yaml b/examples/configs/recipes/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.yaml new file mode 100644 index 0000000000..fddd7726c1 --- /dev/null +++ b/examples/configs/recipes/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.yaml @@ -0,0 +1,168 @@ +checkpointing: + enabled: True + checkpoint_dir: results/grpo-math-qwen3-30ba3b-megatron-tp4-32k + save_period: 3 + keep_top_k: 1 + metric_name: val_reward + higher_is_better: True + checkpoint_must_save_by: null + +grpo: + normalize_rewards: True + use_leave_one_out_baseline: True + max_num_steps: 3 + num_prompts_per_step: 64 + num_generations_per_prompt: 16 + max_rollout_turns: 1 + val_period: 3 + val_at_start: False + max_val_samples: 256 + val_batch_size: 256 + seed: 42 + +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + # (default off) loss formulation improvements (docs/guides/grpo.md#loss) + use_on_policy_kl_approximation: False + use_importance_sampling_correction: False + token_level_loss: True + ratio_clip_c: null + +policy: + model_name: "Qwen/Qwen3-30B-A3B" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 512 + train_micro_batch_size: 1 + generation_batch_size: 32 # Only used when generating using HF backend + 
logprob_batch_size: 1 + max_total_sequence_length: 32768 + precision: "bfloat16" + logprob_chunk_size: 2048 + + dtensor_cfg: + enabled: False + + dynamic_batching: + enabled: False + + sequence_packing: + enabled: False + + max_grad_norm: 1.0 + make_sequence_length_divisible_by: ${policy.megatron_cfg.tensor_model_parallel_size} + + optimizer: null # remove default FSDP optimizer + + scheduler: null # remove default FSDP scheduler + + megatron_cfg: + enabled: True + empty_unused_memory_level: 1 + converter_type: "LlamaForCausalLM" + tensor_model_parallel_size: 4 + pipeline_model_parallel_size: 1 + context_parallel_size: 1 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 8 + sequence_parallel: True + pipeline_dtype: ${policy.precision} + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + freeze_moe_router: True + moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + apply_rope_fusion: True + activation_checkpointing: True + defer_fp32_logits: True + + optimizer: + optimizer: "adam" + lr: 5.0e-7 + min_lr: 5.0e-8 + weight_decay: 0.0 + bf16: True + fp16: False + params_dtype: "float32" + + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + + use_distributed_optimizer: True + use_precision_aware_optimizer: True + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 2 + lr_warmup_init: 5.0e-8 + + distributed_data_parallel_config: + grad_reduce_in_fp32: False + overlap_grad_reduce: True + overlap_param_gather: True + average_in_collective: True + use_custom_fsdp: False + data_parallel_sharding_strategy: "optim_grads_params" + + 
generation: + backend: "vllm" + max_new_tokens: ${policy.max_total_sequence_length} + temperature: 1.0 + top_p: 1.0 + top_k: null + stop_token_ids: null + stop_strings: null + vllm_cfg: + async_engine: False + precision: ${policy.precision} + tensor_parallel_size: 4 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: ${policy.max_total_sequence_length} + # NB(pjin): https://github.com/NVIDIA-NeMo/RL/pull/857 + enforce_eager: True + colocated: + enabled: true + resources: + gpus_per_node: null + num_nodes: null + +data: + dataset_name: "OpenMathInstruct-2" + shuffle: true + max_input_seq_length: ${policy.max_total_sequence_length} # upper bound, real truncation occurs at vllm.max_model_len + prompt_file: "examples/prompts/cot.txt" + system_prompt_file: null + +env: + math: + num_workers: 8 + +logger: + log_dir: logs/grpo-math-qwen3-30ba3b-megatron-tp4-32k + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + wandb_enabled: True + tensorboard_enabled: True + mlflow_enabled: False # Disable MLflow logging + monitor_gpus: False # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: nemo-rl + name: "grpo-math-qwen3-30ba3b-megatron-tp4-32k" + tensorboard: {} + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 8 + num_nodes: 4 diff --git a/examples/configs/recipes/llm/grpo-moonlight-16ba3b-4n8g-megatron.yaml b/examples/configs/recipes/llm/grpo-moonlight-16ba3b-4n8g-megatron.yaml new file mode 100644 index 0000000000..42b365f351 --- /dev/null +++ b/examples/configs/recipes/llm/grpo-moonlight-16ba3b-4n8g-megatron.yaml @@ -0,0 +1,169 @@ +# GRPO Algorithm Configuration +defaults: "../../grpo_math_1B.yaml" + +grpo: + num_prompts_per_step: 32 + num_generations_per_prompt: 16 + max_num_steps: 1000000 + normalize_rewards: true 
+ use_leave_one_out_baseline: true + val_period: -1 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 + +loss_fn: + reference_policy_kl_penalty: 0.04 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + # (default off) loss formulation improvements (docs/guides/grpo.md#loss) + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + token_level_loss: true + ratio_clip_c: null + +checkpointing: + enabled: false + checkpoint_dir: "results/grpo_megatron" + metric_name: "val_reward" + higher_is_better: true + keep_top_k: 3 + save_period: 10000 + +policy: + model_name: "moonshotai/Moonlight-16B-A3B-Instruct" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 512 + train_micro_batch_size: 1 + generation_batch_size: 64 # Only used when generating using megatron backend + logprob_batch_size: 1 + max_total_sequence_length: 8192 + precision: "bfloat16" + + dtensor_cfg: + enabled: false + + # dynamic_batching improves performance by ensuring logprob and training microbatches + # have a sufficient number of tokens to maximize GPU utilization. Specifically, variable length + # responses are sorted by sequence length and bucketed into microbatches with a total + # amount of tokens approximately equal to 'train_mb_tokens' and 'logprob_mb_tokens' for the + # training and logprob stages respectively. 
+ dynamic_batching: + enabled: False + + sequence_packing: + enabled: False # coming soon + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + algorithm: "modified_ffd" + sequence_length_round: 64 + + max_grad_norm: 1.0 + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.megatron_cfg.tensor_model_parallel_size} + + optimizer: null # remove default FSDP optimizer + + megatron_cfg: + enabled: true + empty_unused_memory_level: 0 + activation_checkpointing: false + converter_type: "Qwen2ForCausalLM" + tensor_model_parallel_size: 1 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 4 + pipeline_model_parallel_size: 4 + num_layers_in_first_pipeline_stage: 7 + num_layers_in_last_pipeline_stage: 6 + context_parallel_size: 1 + pipeline_dtype: ${policy.precision} + sequence_parallel: false + freeze_moe_router: true + moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + #gives ~20% training perf speedup with sequence packing + # Causes logprob error divergence for moonlight + apply_rope_fusion: False + + optimizer: + optimizer: "adam" + lr: 1.0e-6 + min_lr: 5.0e-7 + weight_decay: 0.01 + bf16: true + fp16: false + params_dtype: "float32" + + #adam + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + + #sgd + sgd_momentum: 0.9 + + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + 
lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 50 + lr_warmup_init: 5.0e-7 + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: true + average_in_collective: true + use_custom_fsdp: false + data_parallel_sharding_strategy: "optim_grads_params" + + generation: + backend: "vllm" + max_new_tokens: ${policy.max_total_sequence_length} + temperature: 1.0 + top_p: 1.0 + top_k: null + vllm_cfg: + tensor_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: ${policy.max_total_sequence_length} + +data: + max_input_seq_length: ${policy.max_total_sequence_length} # upper bound, real truncation occurs at vllm.max_model_len + prompt_file: "examples/prompts/cot.txt" + system_prompt_file: null + dataset_name: "OpenMathInstruct-2" + +env: + math: + num_workers: 8 + +logger: + log_dir: "logs" # Base directory for all logs + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + wandb_enabled: false + tensorboard_enabled: false + mlflow_enabled: False + monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "grpo-dev" + name: "grpo-moonlight-16B-A3B-Instruct" + tensorboard: {} + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 8 + num_nodes: 4 diff --git a/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt-long.v3.yaml b/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt-long.v3.yaml index 9b8ecb47b9..1c937b37b9 100644 --- a/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt-long.v3.yaml +++ b/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt-long.v3.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: 
reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: Qwen/Qwen2.5-32B tokenizer: @@ -105,6 +107,7 @@ data: prompt_file: examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt.v3.yaml b/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt.v3.yaml index 4a21332a07..01a4166c5c 100644 --- a/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt.v3.yaml +++ b/examples/configs/recipes/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt.v3.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: Qwen/Qwen2.5-32B tokenizer: @@ -105,6 +107,7 @@ data: prompt_file: examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-fsdp2tp4sp.v3.yaml b/examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-fsdp2tp4sp.v3.yaml index 54b60a3cfb..8b15700eaf 100644 --- a/examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-fsdp2tp4sp.v3.yaml +++ b/examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-fsdp2tp4sp.v3.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: Qwen/Qwen2.5-7B-Instruct tokenizer: @@ -105,6 +107,7 @@ data: prompt_file: 
examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.yaml b/examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.yaml new file mode 100755 index 0000000000..13689e6ddc --- /dev/null +++ b/examples/configs/recipes/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.yaml @@ -0,0 +1,181 @@ +grpo: + num_prompts_per_step: 64 + num_generations_per_prompt: 32 + max_rollout_turns: 1 + max_num_steps: 30 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 + seed: 42 +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + ratio_clip_c: null + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + token_level_loss: true +checkpointing: + enabled: false + checkpoint_dir: results/grpo-qwen2.5-7b-instruct-4n8g-megatron + metric_name: val_reward + higher_is_better: true + keep_top_k: 3 + save_period: 100 + checkpoint_must_save_by: null +policy: + model_name: Qwen/Qwen2.5-7B-Instruct + tokenizer: + name: ${policy.model_name} + train_global_batch_size: 512 + train_micro_batch_size: 1 + generation_batch_size: 32 + logprob_batch_size: 2 + max_total_sequence_length: 4096 + precision: bfloat16 + dtensor_cfg: + enabled: false + megatron_cfg: + enabled: true + empty_unused_memory_level: 0 + activation_checkpointing: false + converter_type: "Qwen2ForCausalLM" + tensor_model_parallel_size: 2 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 1 + pipeline_model_parallel_size: 1 + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + context_parallel_size: 1 + pipeline_dtype: ${policy.precision} + sequence_parallel: false + freeze_moe_router: true + moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes 
logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + #gives ~20% training perf speedup with sequence packing + apply_rope_fusion: True + + optimizer: + optimizer: "adam" + lr: 5.0e-6 + min_lr: 5.0e-7 + weight_decay: 0.01 + bf16: true + fp16: false + params_dtype: "float32" + + #adam + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + + #sgd + sgd_momentum: 0.9 + + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 50 + lr_warmup_init: 5.0e-7 + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: true + average_in_collective: true + use_custom_fsdp: false + data_parallel_sharding_strategy: "optim_grads_params" + dynamic_batching: + enabled: false + sequence_packing: + enabled: true + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + algorithm: "modified_first_fit_decreasing" + sequence_length_round: 64 + make_sequence_length_divisible_by: 4 + max_grad_norm: 1 + optimizer: + name: torch.optim.AdamW + kwargs: + lr: 3e-07 + weight_decay: 0.01 + betas: + - 0.9 + - 0.999 + eps: 1e-08 + foreach: false + fused: false + scheduler: + - name: torch.optim.lr_scheduler.LinearLR + kwargs: + start_factor: 0.1 + end_factor: 1 + total_iters: 13 + - name: torch.optim.lr_scheduler.ConstantLR + kwargs: + factor: 1 + total_iters: 10000000000 + - milestones: + - 13 + generation: + backend: vllm + max_new_tokens: 4096 + temperature: 1 + top_p: 1 + top_k: null + stop_token_ids: + - 151645 
+ stop_strings: null + vllm_cfg: + async_engine: false + precision: ${policy.precision} + tensor_parallel_size: 4 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: 4096 + enforce_eager: False + colocated: + enabled: true + resources: + gpus_per_node: null + num_nodes: null +data: + max_input_seq_length: 4096 + prompt_file: examples/prompts/cot.txt + system_prompt_file: null + dataset_name: OpenMathInstruct-2 + shuffle: true +env: + math: + num_workers: 8 +logger: + log_dir: logs/grpo-qwen2.5-7b-instruct-4n8g-megatron + num_val_samples_to_print: 0 + wandb_enabled: true + tensorboard_enabled: true + mlflow_enabled: False + monitor_gpus: true + wandb: + project: nemo-rl + name: grpo-qwen2.5-7b-instruct-4n8g-megatron + tensorboard: {} + gpu_monitoring: + collection_interval: 10 + flush_interval: 10 +cluster: + gpus_per_node: 8 + num_nodes: 4 diff --git a/examples/configs/recipes/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.yaml b/examples/configs/recipes/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.yaml index b0930e76c2..f2cb817aa8 100644 --- a/examples/configs/recipes/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.yaml +++ b/examples/configs/recipes/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.yaml @@ -9,6 +9,7 @@ grpo: val_at_start: false max_val_samples: 256 val_batch_size: 256 + seed: 42 loss_fn: reference_policy_kl_penalty: 0.01 ratio_clip_min: 0.2 @@ -24,6 +25,7 @@ checkpointing: higher_is_better: true keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: Qwen/Qwen2.5-Math-1.5B-Instruct tokenizer: @@ -105,6 +107,7 @@ data: prompt_file: examples/prompts/cot.txt system_prompt_file: null dataset_name: OpenMathInstruct-2 + shuffle: true env: math: num_workers: 8 diff --git a/examples/configs/recipes/llm/grpo-qwen3-30ba3b-8n8g-megatron.yaml b/examples/configs/recipes/llm/grpo-qwen3-30ba3b-8n8g-megatron.yaml new file mode 100755 index 0000000000..048ed32782 --- /dev/null +++ 
b/examples/configs/recipes/llm/grpo-qwen3-30ba3b-8n8g-megatron.yaml @@ -0,0 +1,154 @@ +# GRPO Algorithm Configuration +defaults: "../../grpo_math_1B.yaml" + +grpo: + num_prompts_per_step: 64 + num_generations_per_prompt: 32 + max_num_steps: 1000000 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + # (default off) loss formulation improvements (docs/guides/grpo.md#loss) + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + token_level_loss: true + ratio_clip_c: null +checkpointing: + enabled: false + checkpoint_dir: results/grpo-qwen3-30ba3b-8n8g-megatron + metric_name: val_reward + higher_is_better: true + keep_top_k: 3 + save_period: 10 + checkpoint_must_save_by: null +policy: + model_name: "Qwen/Qwen3-30B-A3B" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 512 + train_micro_batch_size: 1 + generation_batch_size: 32 # Only used when generating using HF backend + logprob_batch_size: 4 + max_total_sequence_length: 4096 + precision: "bfloat16" + + dtensor_cfg: + enabled: false + + optimizer: null # remove default FSDP optimizer + + scheduler: null # remove default FSDP scheduler + + dynamic_batching: + enabled: False + sequence_packing: + enabled: False # coming soon + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + algorithm: "modified_ffd" + sequence_length_round: 64 + max_grad_norm: 1.0 + make_sequence_length_divisible_by: ${policy.megatron_cfg.tensor_model_parallel_size} + megatron_cfg: + enabled: true + empty_unused_memory_level: 1 + activation_checkpointing: false + tensor_model_parallel_size: 4 + 
pipeline_model_parallel_size: 4 + context_parallel_size: 1 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 4 + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + sequence_parallel: True + pipeline_dtype: ${policy.precision} + freeze_moe_router: true + moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + #gives ~20% training perf speedup with sequence packing + apply_rope_fusion: True + + optimizer: + optimizer: "adam" + lr: 3.0e-7 + min_lr: 3.0e-8 + weight_decay: 0.01 + bf16: true + fp16: false + params_dtype: "float32" + clip_grad: ${policy.max_grad_norm} + #adam + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + #sgd + sgd_momentum: 0.9 + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 50 + lr_warmup_init: 3.0e-8 + + env_vars: + PYTORCH_CUDA_ALLOC_CONF: "expandable_segments:False" + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: true + average_in_collective: true + use_custom_fsdp: false + data_parallel_sharding_strategy: "optim_grads_params" + + generation: + backend: "vllm" + max_new_tokens: ${policy.max_total_sequence_length} + temperature: 1.0 + top_p: 1.0 + top_k: null + stop_token_ids: null + stop_strings: null + vllm_cfg: + tensor_parallel_size: 4 + gpu_memory_utilization: 0.7 + max_model_len: ${policy.max_total_sequence_length} +data: + max_input_seq_length: ${policy.max_total_sequence_length} # upper bound, real truncation occurs at vllm.max_model_len + prompt_file: 
"examples/prompts/cot.txt" + system_prompt_file: null + dataset_name: "OpenMathInstruct-2" +env: + math: + num_workers: 8 +logger: + log_dir: logs/grpo-qwen3-30ba3b-8n8g-megatron + num_val_samples_to_print: 0 + wandb_enabled: true + tensorboard_enabled: true + mlflow_enabled: False + monitor_gpus: true + wandb: + project: nemo-rl + name: grpo-qwen3-30ba3b-8n8g-megatron + tensorboard: {} + gpu_monitoring: + collection_interval: 10 + flush_interval: 10 +cluster: + gpus_per_node: 8 + num_nodes: 8 diff --git a/examples/configs/recipes/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.yaml b/examples/configs/recipes/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.yaml new file mode 100644 index 0000000000..cd5751f523 --- /dev/null +++ b/examples/configs/recipes/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.yaml @@ -0,0 +1,133 @@ +sft: + max_num_epochs: 1 + max_num_steps: 1000000 + val_period: 500 + val_batches: 4 + val_global_batch_size: 128 + val_micro_batch_size: 1 + val_at_start: false + seed: 42 +checkpointing: + enabled: true + checkpoint_dir: results/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron + metric_name: val_loss + higher_is_better: false + keep_top_k: 3 + save_period: 100 + checkpoint_must_save_by: null +policy: + model_name: "meta-llama/Llama-3.1-70B" + tokenizer: + name: meta-llama/Llama-3.1-8B-Instruct ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 512 + train_micro_batch_size: 1 + max_total_sequence_length: 4096 + precision: "bfloat16" + dtensor_cfg: + enabled: false + megatron_cfg: + enabled: true + empty_unused_memory_level: 1 + activation_checkpointing: false + tensor_model_parallel_size: 4 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 1 + pipeline_model_parallel_size: 2 + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + context_parallel_size: 1 + pipeline_dtype: ${policy.precision} + sequence_parallel: false + freeze_moe_router: true + 
moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + #gives ~20% training perf speedup with sequence packing + apply_rope_fusion: True + + optimizer: + optimizer: "adam" + lr: 2e-5 + min_lr: 2e-5 + weight_decay: 0.01 + bf16: true + fp16: false + params_dtype: "float32" + + #adam + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_eps: 1e-8 + + #sgd + sgd_momentum: 0.9 + + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + clip_grad: 0.0 + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 1 + lr_warmup_init: 2e-5 + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: true + average_in_collective: true + use_custom_fsdp: false + data_parallel_sharding_strategy: "optim_grads_params" + dynamic_batching: + enabled: false + sequence_packing: + enabled: false + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.megatron_cfg.tensor_model_parallel_size} + max_grad_norm: null + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 2e-5 + weight_decay: 0.01 + betas: [0.9, 0.98] + eps: 1e-8 + # when using Dtensor, we need to set foreach + # and fused to False + foreach: False + fused: False +data: + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" + add_bos: true + add_eos: true + add_generation_prompt: true + output_key: 'generated_solution' + shuffle: true + seed: 42 +logger: + log_dir: "logs" # 
Base directory for all logs + wandb_enabled: true # Make sure you do a ``wandb login [Your API key]'' before running + tensorboard_enabled: true + mlflow_enabled: False + monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "sft-dev" + name: "openmathinstruct-nemorl-1M_train" + tensorboard: + log_dir: "tb_logs-openmathinstruct-nemorl-1M_train" + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) +cluster: + gpus_per_node: 8 + num_nodes: 8 diff --git a/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.yaml b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.yaml new file mode 100644 index 0000000000..d7906b82e0 --- /dev/null +++ b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.yaml @@ -0,0 +1,80 @@ +sft: + max_num_epochs: 1 + max_num_steps: 10000 + val_period: 500 + val_batches: 4 + val_global_batch_size: 128 + val_micro_batch_size: 2 + val_at_start: false + seed: 42 +checkpointing: + enabled: true + checkpoint_dir: results/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long + metric_name: val_loss + higher_is_better: false + keep_top_k: 3 + save_period: 50 + checkpoint_must_save_by: null +policy: + model_name: meta-llama/Llama-3.1-8B + tokenizer: + name: meta-llama/Llama-3.1-8B-Instruct + train_global_batch_size: 512 + train_micro_batch_size: 2 + max_total_sequence_length: 4096 + precision: bfloat16 + dtensor_cfg: + enabled: true + cpu_offload: false + sequence_parallel: false + activation_checkpointing: false + tensor_parallel_size: 4 + context_parallel_size: 1 + custom_parallel_plan: null + dynamic_batching: + enabled: true + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + sequence_length_round: 64 + sequence_packing: + enabled: false + 
make_sequence_length_divisible_by: 1 + max_grad_norm: 1 + optimizer: + name: torch.optim.AdamW + kwargs: + lr: 2e-5 + weight_decay: 0.01 + betas: + - 0.9 + - 0.98 + eps: 1e-08 + foreach: false + fused: false +data: + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" + add_bos: true + add_eos: true + add_generation_prompt: true + output_key: 'generated_solution' + seed: 42 + shuffle: true +logger: + log_dir: logs/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long + wandb_enabled: true + tensorboard_enabled: true + mlflow_enabled: false + monitor_gpus: true + wandb: + project: nemo-rl + name: sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long + tensorboard: + log_dir: tb_logs-sft-dev-squad + gpu_monitoring: + collection_interval: 10 + flush_interval: 10 +cluster: + gpus_per_node: 8 + num_nodes: 1 diff --git a/examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.yaml b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.yaml similarity index 61% rename from examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.yaml rename to examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.yaml index 8535855965..1fc0ccec7c 100644 --- a/examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.yaml +++ b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.yaml @@ -1,11 +1,11 @@ sft: max_num_epochs: 1 - max_num_steps: 2730 - val_period: 10 - val_batches: 8 - val_global_batch_size: 32 - val_micro_batch_size: 1 - val_at_start: true + max_num_steps: 10000 + val_period: 500 + val_batches: 4 + val_global_batch_size: 128 + val_micro_batch_size: 2 + val_at_start: false seed: 42 checkpointing: enabled: true @@ -13,15 +13,15 @@ checkpointing: metric_name: val_loss higher_is_better: false keep_top_k: 3 - save_period: 10 + save_period: 100 + checkpoint_must_save_by: null policy: - model_name: 
meta-llama/Llama-3.1-8B-Instruct + model_name: meta-llama/Llama-3.1-8B tokenizer: name: meta-llama/Llama-3.1-8B-Instruct - chat_template: '{% for message in messages %}{%- if message[''role''] == ''system'' %}{{''Context: '' + message[''content''].strip()}}{%- elif message[''role''] == ''user'' %}{{'' Question: '' + message[''content''].strip() + '' Answer:''}}{%- elif message[''role''] == ''assistant'' %}{{'' '' + message[''content''].strip()}}{%- endif %}{% endfor %}' - train_global_batch_size: 32 - train_micro_batch_size: 1 - max_total_sequence_length: 1024 + train_global_batch_size: 512 + train_micro_batch_size: 2 + max_total_sequence_length: 4096 precision: bfloat16 dtensor_cfg: enabled: true @@ -33,6 +33,8 @@ policy: custom_parallel_plan: null dynamic_batching: enabled: false + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + sequence_length_round: 64 sequence_packing: enabled: false train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} @@ -43,27 +45,31 @@ policy: optimizer: name: torch.optim.AdamW kwargs: - lr: 5e-06 - weight_decay: 0.1 + lr: 2e-5 + weight_decay: 0.01 betas: - 0.9 - 0.98 - eps: 1e-05 + eps: 1e-08 foreach: false fused: false data: - max_input_seq_length: 1024 - dataset_name: squad + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" add_bos: true add_eos: true - add_generation_prompt: false + add_generation_prompt: true + output_key: 'generated_solution' + shuffle: true + seed: 42 logger: log_dir: logs/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long wandb_enabled: true tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl name: sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long @@ -75,3 +81,4 @@ logger: cluster: gpus_per_node: 8 num_nodes: 1 
+ diff --git a/examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.yaml b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp2sp.yaml similarity index 64% rename from examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.yaml rename to examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp2sp.yaml index 2eff0aabf6..8c3f14b531 100644 --- a/examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.yaml +++ b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-fsdp2tp2sp.yaml @@ -1,7 +1,7 @@ sft: max_num_epochs: 1 max_num_steps: 350 - val_period: 10 + val_period: 500 val_batches: 8 val_global_batch_size: 32 val_micro_batch_size: 1 @@ -13,15 +13,15 @@ checkpointing: metric_name: val_loss higher_is_better: false keep_top_k: 3 - save_period: 10 + save_period: 20 + checkpoint_must_save_by: null policy: - model_name: meta-llama/Llama-3.1-8B-Instruct + model_name: meta-llama/Llama-3.1-8B tokenizer: name: meta-llama/Llama-3.1-8B-Instruct - chat_template: '{% for message in messages %}{%- if message[''role''] == ''system'' %}{{''Context: '' + message[''content''].strip()}}{%- elif message[''role''] == ''user'' %}{{'' Question: '' + message[''content''].strip() + '' Answer:''}}{%- elif message[''role''] == ''assistant'' %}{{'' '' + message[''content''].strip()}}{%- endif %}{% endfor %}' - train_global_batch_size: 32 - train_micro_batch_size: 1 - max_total_sequence_length: 1024 + train_global_batch_size: 512 + train_micro_batch_size: 2 + max_total_sequence_length: 4096 precision: bfloat16 dtensor_cfg: enabled: true @@ -43,32 +43,36 @@ policy: optimizer: name: torch.optim.AdamW kwargs: - lr: 5e-06 - weight_decay: 0.1 + lr: 2e-5 + weight_decay: 0.01 betas: - 0.9 - 0.98 - eps: 1e-05 + eps: 1e-08 foreach: false fused: false data: - max_input_seq_length: 1024 - dataset_name: squad + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + 
split: "train_1M" add_bos: true add_eos: true - add_generation_prompt: false + add_generation_prompt: true + output_key: 'generated_solution' + shuffle: true + seed: 42 logger: log_dir: logs/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp wandb_enabled: true tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl name: sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp tensorboard: - log_dir: tb_logs-sft-dev-squad + log_dir: tb_logs-sft-dev-openmathinstruct2 gpu_monitoring: collection_interval: 10 flush_interval: 10 diff --git a/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.yaml b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.yaml new file mode 100644 index 0000000000..4ad9355446 --- /dev/null +++ b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.yaml @@ -0,0 +1,125 @@ +sft: + max_num_epochs: 1 + max_num_steps: 250 + val_period: 500 + val_batches: 8 + val_global_batch_size: 32 + val_micro_batch_size: 1 + val_at_start: true + seed: 42 +checkpointing: + enabled: true + checkpoint_dir: results/sft-llama3.1-8b-instruct-1n8g-megatron + metric_name: val_loss + higher_is_better: false + keep_top_k: 3 + save_period: 50 + checkpoint_must_save_by: null +policy: + model_name: meta-llama/Llama-3.1-8B + tokenizer: + name: meta-llama/Llama-3.1-8B-Instruct + train_global_batch_size: 512 + train_micro_batch_size: 2 + max_total_sequence_length: 4096 + precision: bfloat16 + dtensor_cfg: + enabled: false + dynamic_batching: + enabled: false + sequence_packing: + enabled: true + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + algorithm: "modified_first_fit_decreasing" + sequence_length_round: 64 + make_sequence_length_divisible_by: ${policy.megatron_cfg.tensor_model_parallel_size} + max_grad_norm: 1 + optimizer: null + megatron_cfg: + enabled: true + 
empty_unused_memory_level: 1 + activation_checkpointing: false + tensor_model_parallel_size: 2 + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 1 + pipeline_model_parallel_size: 2 + context_parallel_size: 1 + pipeline_dtype: ${policy.precision} + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + sequence_parallel: false + freeze_moe_router: false + moe_router_dtype: null + moe_router_load_balancing_type: "aux_loss" + moe_router_bias_update_rate: 1e-3 + #gives ~20% training perf speedup with sequence packing + apply_rope_fusion: True + + optimizer: + optimizer: "adam" + lr: 2.0e-5 + min_lr: 1.99999e-5 + weight_decay: 0.01 + bf16: true + fp16: false + params_dtype: "float32" + + #adam + adam_beta1: 0.9 + adam_beta2: 0.98 + adam_eps: 1e-5 + + #sgd + sgd_momentum: 0.9 + + #distributed optimizer + use_distributed_optimizer: true + use_precision_aware_optimizer: true + + clip_grad: ${policy.max_grad_norm} + + scheduler: + start_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + end_weight_decay: ${policy.megatron_cfg.optimizer.weight_decay} + weight_decay_incr_style: "constant" + lr_decay_style: "constant" + lr_decay_iters: null + lr_warmup_iters: 50 + lr_warmup_init: 1.9999e-5 + + distributed_data_parallel_config: + grad_reduce_in_fp32: false + overlap_grad_reduce: true + overlap_param_gather: true + average_in_collective: true + data_parallel_sharding_strategy: "optim_grads_params" + + +data: + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" + add_bos: true + add_eos: true + add_generation_prompt: true + output_key: 'generated_solution' + seed: 42 + shuffle: true +logger: + log_dir: logs/sft-llama3.1-8b-1n8g-megatron + wandb_enabled: true + tensorboard_enabled: true + mlflow_enabled: false + monitor_gpus: true + wandb: + project: nemo-rl + name: sft-llama3.1-8b-1n8g-megatron + tensorboard: + log_dir: 
tb_logs-sft-dev-openmathinstruct2 + gpu_monitoring: + collection_interval: 10 + flush_interval: 10 +cluster: + gpus_per_node: 8 + num_nodes: 1 diff --git a/examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-megatron.yaml b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron.yaml similarity index 71% rename from examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-megatron.yaml rename to examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron.yaml index 07f5524000..e5e86dd302 100644 --- a/examples/configs/recipes/llm/sft-llama3.1-8b-instruct-1n8g-megatron.yaml +++ b/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron.yaml @@ -1,27 +1,27 @@ sft: max_num_epochs: 1 max_num_steps: 250 - val_period: 10 + val_period: 500 val_batches: 8 val_global_batch_size: 32 val_micro_batch_size: 1 val_at_start: true seed: 42 checkpointing: - enabled: false #true - checkpoint_dir: results/sft-llama3.1-8b-instruct-1n8g-fsdp1 + enabled: true + checkpoint_dir: results/sft-llama3.1-8b-instruct-1n8g-megatron metric_name: val_loss higher_is_better: false keep_top_k: 3 - save_period: 10 + save_period: 100 + checkpoint_must_save_by: null policy: - model_name: meta-llama/Llama-3.1-8B-Instruct + model_name: meta-llama/Llama-3.1-8B tokenizer: name: meta-llama/Llama-3.1-8B-Instruct - chat_template: '{% for message in messages %}{%- if message[''role''] == ''system'' %}{{''Context: '' + message[''content''].strip()}}{%- elif message[''role''] == ''user'' %}{{'' Question: '' + message[''content''].strip() + '' Answer:''}}{%- elif message[''role''] == ''assistant'' %}{{'' '' + message[''content''].strip()}}{%- endif %}{% endfor %}' - train_global_batch_size: 32 + train_global_batch_size: 512 train_micro_batch_size: 2 - max_total_sequence_length: 1024 + max_total_sequence_length: 4096 precision: bfloat16 dtensor_cfg: enabled: false @@ -57,10 +57,10 @@ policy: optimizer: optimizer: "adam" - lr: 5.0e-6 - min_lr: 4.9999e-6 - weight_decay: 0.1 - bf16: false + lr: 2.0e-5 + 
min_lr: 1.99999e-5 + weight_decay: 0.01 + bf16: true fp16: false params_dtype: "float32" @@ -85,7 +85,7 @@ policy: lr_decay_style: "constant" lr_decay_iters: null lr_warmup_iters: 50 - lr_warmup_init: 4.9999e-6 + lr_warmup_init: 1.9999e-5 distributed_data_parallel_config: grad_reduce_in_fp32: false @@ -96,23 +96,27 @@ policy: data: - add_generation_prompt: false - max_input_seq_length: 1024 - dataset_name: squad + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" add_bos: true add_eos: true + add_generation_prompt: true + output_key: 'generated_solution' + shuffle: true + seed: 42 logger: - log_dir: logs/sft-llama3.1-8b-instruct-1n8g-fsdp1 + log_dir: logs/sft-llama3.1-8b-1n8g-megatron wandb_enabled: true tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl - name: sft-llama3.1-8b-instruct-1n8g-fsdp1 + name: sft-llama3.1-8b-1n8g-megatron tensorboard: - log_dir: tb_logs-sft-dev-squad + log_dir: tb_logs-sft-dev-openmathinstruct2 gpu_monitoring: collection_interval: 10 flush_interval: 10 diff --git a/examples/configs/recipes/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v2.yaml b/examples/configs/recipes/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v3.yaml similarity index 85% rename from examples/configs/recipes/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v2.yaml rename to examples/configs/recipes/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v3.yaml index c6311cf357..165e2fa9a3 100644 --- a/examples/configs/recipes/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v2.yaml +++ b/examples/configs/recipes/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v3.yaml @@ -13,7 +13,8 @@ checkpointing: metric_name: val_loss higher_is_better: false keep_top_k: 3 - save_period: 10 + save_period: 100 + checkpoint_must_save_by: null policy: model_name: meta-llama/Llama-3.2-1B tokenizer: @@ -52,23 +53,27 @@ policy: foreach: false 
fused: false data: - max_input_seq_length: 1024 - dataset_name: squad + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" add_bos: true add_eos: true - add_generation_prompt: false + add_generation_prompt: true + output_key: 'generated_solution' + shuffle: true + seed: 42 logger: log_dir: logs/sft-llama3.2-1b-1n8g-fsdp2tp1 wandb_enabled: true tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl name: sft-llama3.2-1b-1n8g-fsdp2tp1 tensorboard: - log_dir: tb_logs-sft-dev-squad + log_dir: tb_logs-sft-dev-openmathinstruct2 gpu_monitoring: collection_interval: 10 flush_interval: 10 diff --git a/examples/configs/recipes/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.yaml b/examples/configs/recipes/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.yaml similarity index 85% rename from examples/configs/recipes/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.yaml rename to examples/configs/recipes/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.yaml index 54d30dd80b..800d94711e 100644 --- a/examples/configs/recipes/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.yaml +++ b/examples/configs/recipes/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.yaml @@ -13,7 +13,8 @@ checkpointing: metric_name: val_loss higher_is_better: false keep_top_k: 3 - save_period: 10 + save_period: 100 + checkpoint_must_save_by: null policy: model_name: Qwen/Qwen2.5-32B tokenizer: @@ -52,23 +53,26 @@ policy: foreach: false fused: false data: - max_input_seq_length: 16000 - dataset_name: squad + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" add_bos: true add_eos: true - add_generation_prompt: false + add_generation_prompt: true + output_key: 'generated_solution' + shuffle: 
true logger: log_dir: logs/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt wandb_enabled: true tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: nemo-rl name: sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt tensorboard: - log_dir: tb_logs-sft-dev-squad + log_dir: tb_logs-sft-dev-openmathinstruct2 gpu_monitoring: collection_interval: 10 flush_interval: 10 diff --git a/examples/configs/recipes/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml b/examples/configs/recipes/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml new file mode 100644 index 0000000000..feec358487 --- /dev/null +++ b/examples/configs/recipes/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml @@ -0,0 +1,173 @@ +# GRPO Algorithm Configuration +grpo: + num_prompts_per_step: 8 + num_generations_per_prompt: 16 + max_rollout_turns: 1 # for multi-turn rollouts. Math Environments just have 1 turn (answering the question) + max_num_steps: 1000000 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 + seed: 42 + +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + ratio_clip_c: null + # (default off) loss formulation improvements (docs/guides/grpo.md#loss) + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + token_level_loss: true + +checkpointing: + enabled: true + checkpoint_dir: "results/clevr_grpo" + metric_name: "val_reward" + higher_is_better: true + keep_top_k: 3 + save_period: 10 + checkpoint_must_save_by: null + +policy: + model_name: "Qwen/Qwen2.5-VL-3B-Instruct" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 128 + train_micro_batch_size: 1 + generation_batch_size: 32 # Only used when 
generating using HF backend + logprob_batch_size: 4 + max_total_sequence_length: 3072 + precision: "bfloat16" + + dtensor_cfg: + enabled: true + cpu_offload: False + sequence_parallel: false + activation_checkpointing: false + tensor_parallel_size: 1 + context_parallel_size: 1 + custom_parallel_plan: null + + # dynamic_batching improves performance by ensuring logprob and training microbatches + # have a sufficent number of tokens to maximize GPU utilization. Specifically, variable length + # responses are sorted by sequence length and bucketed into microbatches with a total + # amount of tokens is approximately close to 'train_mb_tokens' and 'logprob_mb_tokens' for the + # training and logprob stages respectively. + dynamic_batching: + enabled: True + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + sequence_length_round: 64 + + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size} + max_grad_norm: 1.0 + + sequence_packing: + enabled: False + + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 5.0e-7 + weight_decay: 0.01 + betas: [0.9, 0.999] + eps: 1e-8 + # when using Dtensor, we need to set foreach + # and fused to False + foreach: False + fused: False + + scheduler: + - name: "torch.optim.lr_scheduler.LinearLR" + kwargs: + start_factor: 0.1 + end_factor: 1.0 + total_iters: 50 + - name: "torch.optim.lr_scheduler.ConstantLR" + kwargs: + factor: 1.0 + total_iters: 10000000000 + - milestones: [50] + + generation: + backend: "vllm" + # max_new_tokens: ${policy.max_total_sequence_length} + max_new_tokens: 1024 + temperature: 1.0 + top_p: 1.0 + top_k: null + stop_token_ids: null + stop_strings: null + vllm_cfg: + async_engine: false # Only for internal testing, will be enabled 
by https://github.com/NVIDIA/NeMo-RL/issues/447. + precision: ${policy.precision} + tensor_parallel_size: 1 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: ${policy.max_total_sequence_length} + enforce_eager: False + colocated: + # true: generation shares training GPUs + # false: uses dedicated generation resources + enabled: true + # only relevant when enabled is false + resources: + gpus_per_node: null # Decides num gpus to be dedicated to generation when there is one node in the cluster i.e cluster.num_nodes == 1 + num_nodes: null # Decides number of nodes to be dedicated to generation + +data: + max_input_seq_length: ${policy.max_total_sequence_length} # upper bound, real truncation occurs at vllm.max_model_len + prompt_file: "examples/prompts/clevr_cogent_cot.txt" + system_prompt_file: null + dataset_name: "clevr-cogent" + split: "trainA" + shuffle: true + +env: + clevr-cogent: + num_workers: 8 + reward_functions: + - name: format + weight: 0.2 + - name: exact_alnum + weight: 0.8 + geometry3k: + num_workers: 8 + reward_functions: + - name: format + weight: 0.1 + - name: math_expr + weight: 0.9 + refcoco: + num_workers: 8 + reward_functions: + - name: format + weight: 0.1 + - name: bbox_giou + weight: 0.9 + kwargs: + giou_penalty_thres: 1.0 # (apply giou penalty if iou < giou_penalty_thres; anything less than 0 means use iou only (since the condition iou < 0 is not possible)) + +logger: + log_dir: "logs" # Base directory for all logs + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + wandb_enabled: false + tensorboard_enabled: true + mlflow_enabled: false # Disable MLflow logging + monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "grpo-dev" + name: "grpo-dev-logger" + tensorboard: {} + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage 
metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 2 + num_nodes: 1 diff --git a/examples/configs/recipes/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml b/examples/configs/recipes/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml new file mode 100644 index 0000000000..a80feaca4e --- /dev/null +++ b/examples/configs/recipes/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.yaml @@ -0,0 +1,173 @@ +# GRPO Algorithm Configuration +grpo: + num_prompts_per_step: 8 + num_generations_per_prompt: 16 + max_rollout_turns: 1 # for multi-turn rollouts. Math Environments just have 1 turn (answering the question) + max_num_steps: 1000000 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 + seed: 42 + +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + ratio_clip_c: null + # (default off) loss formulation improvements (docs/guides/grpo.md#loss) + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + token_level_loss: true + +checkpointing: + enabled: true + checkpoint_dir: "results/clevr_grpo" + metric_name: "val_reward" + higher_is_better: true + keep_top_k: 3 + save_period: 10 + checkpoint_must_save_by: null + +policy: + model_name: "HuggingFaceTB/SmolVLM2-2.2B-Instruct" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 128 + train_micro_batch_size: 1 + generation_batch_size: 32 # Only used when generating using HF backend + logprob_batch_size: 4 + max_total_sequence_length: 3072 + precision: "bfloat16" + + dtensor_cfg: + enabled: true + cpu_offload: False + sequence_parallel: false + activation_checkpointing: false + tensor_parallel_size: 1 + context_parallel_size: 1 + custom_parallel_plan: null + + # dynamic_batching improves performance by ensuring logprob 
and training microbatches + # have a sufficent number of tokens to maximize GPU utilization. Specifically, variable length + # responses are sorted by sequence length and bucketed into microbatches with a total + # amount of tokens is approximately close to 'train_mb_tokens' and 'logprob_mb_tokens' for the + # training and logprob stages respectively. + dynamic_batching: + enabled: True + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + sequence_length_round: 64 + + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size} + max_grad_norm: 1.0 + + sequence_packing: + enabled: False + + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 5.0e-7 + weight_decay: 0.01 + betas: [0.9, 0.999] + eps: 1e-8 + # when using Dtensor, we need to set foreach + # and fused to False + foreach: False + fused: False + + scheduler: + - name: "torch.optim.lr_scheduler.LinearLR" + kwargs: + start_factor: 0.1 + end_factor: 1.0 + total_iters: 50 + - name: "torch.optim.lr_scheduler.ConstantLR" + kwargs: + factor: 1.0 + total_iters: 10000000000 + - milestones: [50] + + generation: + backend: "vllm" + # max_new_tokens: ${policy.max_total_sequence_length} + max_new_tokens: 1024 + temperature: 1.0 + top_p: 1.0 + top_k: null + stop_token_ids: null + stop_strings: null + vllm_cfg: + async_engine: false # Only for internal testing, will be enabled by https://github.com/NVIDIA/NeMo-RL/issues/447. 
+ precision: ${policy.precision} + tensor_parallel_size: 1 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: ${policy.max_total_sequence_length} + enforce_eager: False + colocated: + # true: generation shares training GPUs + # false: uses dedicated generation resources + enabled: true + # only relevant when enabled is false + resources: + gpus_per_node: null # Decides num gpus to be dedicated to generation when there is one node in the cluster i.e cluster.num_nodes == 1 + num_nodes: null # Decides number of nodes to be dedicated to generation + +data: + max_input_seq_length: ${policy.max_total_sequence_length} # upper bound, real truncation occurs at vllm.max_model_len + prompt_file: "examples/prompts/clevr_cogent_cot.txt" + system_prompt_file: null + dataset_name: "clevr-cogent" + split: "trainA" + shuffle: true + +env: + clevr-cogent: + num_workers: 8 + reward_functions: + - name: format + weight: 0.2 + - name: exact_alnum + weight: 0.8 + geometry3k: + num_workers: 8 + reward_functions: + - name: format + weight: 0.1 + - name: math_expr + weight: 0.9 + refcoco: + num_workers: 8 + reward_functions: + - name: format + weight: 0.1 + - name: bbox_giou + weight: 0.9 + kwargs: + giou_penalty_thres: 0.5 + +logger: + log_dir: "logs" # Base directory for all logs + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + wandb_enabled: false + tensorboard_enabled: true + mlflow_enabled: false # Disable MLflow logging + monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "grpo-dev" + name: "grpo-dev-logger" + tensorboard: {} + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 2 + num_nodes: 1 diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index f1c66514ca..6ff1f4b0ac 100644 --- 
a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -31,7 +31,6 @@ policy: train_micro_batch_size: 1 max_total_sequence_length: 8192 precision: "bfloat16" - fsdp_offload_enabled: false activation_checkpointing_enabled: false reward_model_cfg: @@ -129,6 +128,7 @@ data: # dataset_name: PreferenceDataset # train_data_path: # val_data_path: + shuffle: true logger: log_dir: "logs" # Base directory for all logs diff --git a/examples/configs/sft.yaml b/examples/configs/sft.yaml index a592321cfe..2319568475 100644 --- a/examples/configs/sft.yaml +++ b/examples/configs/sft.yaml @@ -19,6 +19,7 @@ checkpointing: higher_is_better: false keep_top_k: 3 save_period: 10 + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.2-1B" @@ -41,6 +42,8 @@ policy: dynamic_batching: enabled: false + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + sequence_length_round: 64 sequence_packing: enabled: False @@ -124,7 +127,7 @@ policy: overlap_param_gather: true average_in_collective: true data_parallel_sharding_strategy: "optim_grads_params" - + use_custom_fsdp: false data: max_input_seq_length: ${policy.max_total_sequence_length} @@ -132,6 +135,13 @@ data: add_bos: true add_eos: true add_generation_prompt: false + shuffle: true + + ## unused with squad dataset + prompt_file: null + split: null + output_key: null + seed: null logger: log_dir: "logs" # Base directory for all logs @@ -139,7 +149,6 @@ logger: tensorboard_enabled: true mlflow_enabled: false monitor_gpus: true # If true, will monitor GPU usage and log to wandb and/or tensorboard - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: "sft-dev" name: "sft-dev-${data.dataset_name}" diff --git a/examples/configs/sft_openmathinstruct2.yaml b/examples/configs/sft_openmathinstruct2.yaml index 1f1b88a8a9..09354a2039 100644 --- a/examples/configs/sft_openmathinstruct2.yaml +++ 
b/examples/configs/sft_openmathinstruct2.yaml @@ -16,6 +16,7 @@ checkpointing: higher_is_better: false keep_top_k: 100 save_period: 500 + checkpoint_must_save_by: null policy: model_name: "meta-llama/Llama-3.1-8B" @@ -35,11 +36,17 @@ policy: context_parallel_size: 1 custom_parallel_plan: null + megatron_cfg: + enabled: false + dynamic_batching: enabled: false sequence_packing: enabled: false + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + algorithm: "modified_first_fit_decreasing" + sequence_length_round: 64 # makes the training sequence length divisible by the tensor parallel size # this is useful for sequence parallel training @@ -67,6 +74,7 @@ data: add_eos: true add_generation_prompt: true output_key: 'generated_solution' + shuffle: true logger: log_dir: "logs" # Base directory for all logs @@ -74,7 +82,6 @@ logger: tensorboard_enabled: true mlflow_enabled: false monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: "sft-dev" name: "openmathinstruct-nemorl-1M_train" diff --git a/examples/configs/sft_openmathinstruct2_megatron.yaml b/examples/configs/sft_openmathinstruct2_megatron.yaml new file mode 100644 index 0000000000..17b7ddeaee --- /dev/null +++ b/examples/configs/sft_openmathinstruct2_megatron.yaml @@ -0,0 +1,149 @@ +# SFT Algorithm Configuration +defaults: sft_openmathinstruct2.yaml + +sft: + max_num_epochs: 1 + max_num_steps: 1000000 + val_period: 500 + val_batches: 4 + val_global_batch_size: 128 + val_micro_batch_size: 1 + val_at_start: true + seed: 42 + +checkpointing: + enabled: true + checkpoint_dir: "results/sft_openmathinstruct2" + metric_name: "val_loss" + higher_is_better: false + keep_top_k: 100 + save_period: 500 + +policy: + model_name: "meta-llama/Llama-3.1-8B" + tokenizer: + name: meta-llama/Llama-3.1-8B-Instruct + train_global_batch_size: 512 + 
train_micro_batch_size: 1 + max_total_sequence_length: 4096 + precision: "bfloat16" + + dtensor_cfg: + enabled: false + + megatron_cfg: + activation_checkpointing: false + context_parallel_size: 1 + distributed_data_parallel_config: + average_in_collective: true + data_parallel_sharding_strategy: optim_grads_params + grad_reduce_in_fp32: true + overlap_grad_reduce: true + overlap_param_gather: true + empty_unused_memory_level: 1 + enabled: true + expert_tensor_parallel_size: 1 + expert_model_parallel_size: 1 + num_layers_in_first_pipeline_stage: null + num_layers_in_last_pipeline_stage: null + optimizer: + adam_beta1: 0.9 + adam_beta2: 0.98 + adam_eps: 1.0e-8 + bf16: true + clip_grad: 0 + fp16: false + lr: 0.00002 + min_lr: 0.00002 + optimizer: adam + params_dtype: bfloat16 + sgd_momentum: 0.9 + use_distributed_optimizer: true + use_precision_aware_optimizer: false #true ## TODO: precision aware optim not working with fp8. Is this expected? + weight_decay: 0.01 + + ## recently introduced, our current mcore commit doesn't have this + #fp8_recipe: delayed + + pipeline_dtype: bfloat16 + pipeline_model_parallel_size: 1 + scheduler: + end_weight_decay: 0.01 + lr_decay_iters: null + lr_decay_style: constant + lr_warmup_init: 0.00001999999 + lr_warmup_iters: 1 + start_weight_decay: 0.01 + weight_decay_incr_style: constant + sequence_parallel: false + tensor_model_parallel_size: 4 ## TODO: should not need this large TP size + + freeze_moe_router: true + moe_router_dtype: "fp64" + moe_router_load_balancing_type: "none" # "seq_aux_loss" causes logprob error divergence for grpo + moe_router_bias_update_rate: 0.0 # by default, disable bias updates for grpo + #gives ~20% training perf speedup with sequence packing + apply_rope_fusion: True + + env_vars: + PYTORCH_CUDA_ALLOC_CONF: "expandable_segments:False" + + fp8_cfg: + enabled: true + fp8: hybrid + fp8_recipe: delayed + fp8_param: true # false gives the following error: "RuntimeError: 
/TransformerEngine/transformer_engine/common/gemm/cublaslt_gemm.cu:116 in function CanonicalizeGemmInput: Assertion failed: !is_fp8_dtype(ret.Atype). Input A is missing column-wise usage" + fp8_dot_product_attention: false #true + fp8_multi_head_attention: false #true + + dynamic_batching: + enabled: false + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + sequence_length_round: 64 + + + sequence_packing: + enabled: True + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + algorithm: "modified_first_fit_decreasing" + sequence_length_round: 64 + + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${mul:16, ${policy.megatron_cfg.tensor_model_parallel_size}} + max_grad_norm: null + + optimizer: null + +data: + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "openmathinstruct2" + prompt_file: examples/prompts/math.txt + split: "train_1M" + add_bos: true + add_eos: true + add_generation_prompt: true + output_key: 'generated_solution' + +logger: + log_dir: "logs" # Base directory for all logs + wandb_enabled: true # Make sure you do a ``wandb login [Your API key]'' before running + tensorboard_enabled: true + mlflow_enabled: false + monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "sft-openmathinstruct-megatron" + name: "llama8b" + tensorboard: + log_dir: "tb_logs-openmathinstruct-nemorl-1M_train" + mlflow: + experiment_name: "sft-dev" + run_name: "openmathinstruct-nemorl-1M_train" + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 8 + num_nodes: 2 + diff --git a/examples/configs/sft_vlm_3B.yaml 
b/examples/configs/sft_vlm_3B.yaml new file mode 100644 index 0000000000..185dced165 --- /dev/null +++ b/examples/configs/sft_vlm_3B.yaml @@ -0,0 +1,49 @@ +defaults: + - sft.yaml + +policy: + model_name: "Qwen/Qwen2.5-VL-3B-Instruct" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 16 + train_micro_batch_size: 1 + max_total_sequence_length: 1024 + precision: "bfloat16" + + sequence_packing: + enabled: False + +checkpointing: + enabled: true + checkpoint_dir: "results/sft_${policy.model_name}" + metric_name: "val_loss" ## set to null to save most recent k checkpoints + higher_is_better: false + keep_top_k: 1 + save_period: 10 + +data: + max_input_seq_length: ${policy.max_total_sequence_length} + dataset_name: "clevr_cogent" + add_bos: true + add_eos: true + add_generation_prompt: false + split: trainA + prompt_file: null + +logger: + log_dir: "logs" # Base directory for all logs + wandb_enabled: false # Make sure you do a ``wandb login [Your API key]'' before running + tensorboard_enabled: true + monitor_gpus: true # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "sft-dev" + name: "sft-dev-${data.dataset_name}" + tensorboard: + log_dir: "tb_logs-sft-dev-${data.dataset_name}" + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 2 + num_nodes: 1 diff --git a/examples/configs/vlm_grpo_3B.yaml b/examples/configs/vlm_grpo_3B.yaml new file mode 100644 index 0000000000..ecc711170f --- /dev/null +++ b/examples/configs/vlm_grpo_3B.yaml @@ -0,0 +1,174 @@ +# GRPO Algorithm Configuration +# Examplar script for running GRPO on Qwen2.5-VL-3B-Instruct +grpo: + num_prompts_per_step: 8 + num_generations_per_prompt: 16 + max_rollout_turns: 1 # for multi-turn rollouts. 
Math Environments just have 1 turn (answering the question) + max_num_steps: 1000000 + normalize_rewards: true + use_leave_one_out_baseline: true + val_period: 10 + val_at_start: false + max_val_samples: 256 + val_batch_size: 256 + seed: 42 + +loss_fn: + reference_policy_kl_penalty: 0.01 + ratio_clip_min: 0.2 + ratio_clip_max: 0.2 + ratio_clip_c: null + # (default off) loss formulation improvements (docs/guides/grpo.md#loss) + use_on_policy_kl_approximation: false + use_importance_sampling_correction: false + token_level_loss: true + +checkpointing: + enabled: true + checkpoint_dir: "results/clevr_grpo_${policy.model_name}" + metric_name: "val_reward" + higher_is_better: true + keep_top_k: 3 + save_period: 10 + checkpoint_must_save_by: null + +policy: + model_name: "Qwen/Qwen2.5-VL-3B-Instruct" + tokenizer: + name: ${policy.model_name} ## specify if you'd like to use a tokenizer different from the model's default + train_global_batch_size: 128 + train_micro_batch_size: 1 + generation_batch_size: 32 # Only used when generating using HF backend + logprob_batch_size: 4 + max_total_sequence_length: 2048 + precision: "bfloat16" + + dtensor_cfg: + enabled: true + cpu_offload: False + sequence_parallel: false + activation_checkpointing: false + tensor_parallel_size: 1 + context_parallel_size: 1 + custom_parallel_plan: null + + # dynamic_batching improves performance by ensuring logprob and training microbatches + # have a sufficient number of tokens to maximize GPU utilization. Specifically, variable length + # responses are sorted by sequence length and bucketed into microbatches with a total + # amount of tokens is approximately close to 'train_mb_tokens' and 'logprob_mb_tokens' for the + # training and logprob stages respectively.
+ dynamic_batching: + enabled: True + train_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.train_micro_batch_size}} + logprob_mb_tokens: ${mul:${policy.max_total_sequence_length}, ${policy.logprob_batch_size}} + sequence_length_round: 64 + + # makes the training sequence length divisible by the tensor parallel size + # this is useful for sequence parallel training + make_sequence_length_divisible_by: ${policy.dtensor_cfg.tensor_parallel_size} + max_grad_norm: 1.0 + + sequence_packing: + enabled: False + + optimizer: + name: "torch.optim.AdamW" + kwargs: + lr: 5e-7 + weight_decay: 0.01 + betas: [0.9, 0.999] + eps: 1e-8 + # when using Dtensor, we need to set foreach + # and fused to False + foreach: False + fused: False + + scheduler: + - name: "torch.optim.lr_scheduler.LinearLR" + kwargs: + start_factor: 0.1 + end_factor: 1.0 + total_iters: 50 + - name: "torch.optim.lr_scheduler.ConstantLR" + kwargs: + factor: 1.0 + total_iters: 10000000000 + - milestones: [50] + + generation: + backend: "vllm" + # max_new_tokens: ${policy.max_total_sequence_length} + max_new_tokens: 1024 + temperature: 1.0 + top_p: 1.0 + top_k: null + stop_token_ids: null + stop_strings: null + vllm_cfg: + async_engine: false # Only for internal testing, will be enabled by https://github.com/NVIDIA/NeMo-RL/issues/447. 
+ precision: ${policy.precision} + tensor_parallel_size: 1 + pipeline_parallel_size: 1 + gpu_memory_utilization: 0.6 + max_model_len: ${policy.max_total_sequence_length} + enforce_eager: False + colocated: + # true: generation shares training GPUs + # false: uses dedicated generation resources + enabled: true + # only relevant when enabled is false + resources: + gpus_per_node: null # Decides num gpus to be dedicated to generation when there is one node in the cluster i.e cluster.num_nodes == 1 + num_nodes: null # Decides number of nodes to be dedicated to generation + +data: + max_input_seq_length: ${policy.max_total_sequence_length} # upper bound, real truncation occurs at vllm.max_model_len + prompt_file: "examples/prompts/clevr_cogent_cot.txt" + system_prompt_file: null + dataset_name: "clevr-cogent" + split: "trainA" + shuffle: true + +env: + clevr-cogent: + num_workers: 8 + reward_functions: + - name: format + weight: 0.2 + - name: exact_alnum + weight: 0.8 + geometry3k: + num_workers: 8 + reward_functions: + - name: format + weight: 0.1 + - name: math_expr + weight: 0.9 + refcoco: + num_workers: 8 + reward_functions: + - name: format + weight: 0.1 + - name: bbox_giou + weight: 0.9 + kwargs: + giou_penalty_thres: 0.5 + +logger: + log_dir: "logs" # Base directory for all logs + num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal + wandb_enabled: false + tensorboard_enabled: true + mlflow_enabled: false # Disable MLflow logging + monitor_gpus: false # If true, will monitor GPU usage and log to wandb and/or tensorboard + wandb: + project: "grpo-dev" + name: "grpo-dev-logger" + tensorboard: {} + gpu_monitoring: + collection_interval: 10 # How often to collect GPU usage metrics (in seconds) + flush_interval: 10 # How often to flush GPU usage metrics to the loggers (in seconds) + +cluster: + gpus_per_node: 2 + num_nodes: 1 diff --git a/examples/prompts/clevr_cogent_cot.txt b/examples/prompts/clevr_cogent_cot.txt new file mode 
100644 index 0000000000..0139bd3374 --- /dev/null +++ b/examples/prompts/clevr_cogent_cot.txt @@ -0,0 +1,5 @@ +Think step-by-step to solve the following problem, and answer in the following format: step-by-step thought process final answer +Note that your final answer must only contain a single numerical output (e.g. 2, 12, 45) for numerical solutions, and only the strings "yes" or "no" for yes/no type questions. + +Let's think step-by-step: +{} diff --git a/examples/prompts/geo3k.txt b/examples/prompts/geo3k.txt new file mode 100644 index 0000000000..25e57bb46b --- /dev/null +++ b/examples/prompts/geo3k.txt @@ -0,0 +1,5 @@ +Think step-by-step to solve the following math problem, and answer in the following format: step-by-step thought process only answer here . +Note that your final answer must only contain a number or mathematical expression + +Let's think step-by-step: +{} diff --git a/examples/prompts/refcoco.txt b/examples/prompts/refcoco.txt new file mode 100644 index 0000000000..d6353a003b --- /dev/null +++ b/examples/prompts/refcoco.txt @@ -0,0 +1,5 @@ +Think step-by-step to solve the following problem, and answer in the following format: step-by-step thought process [x1, y1, x2, y2] +Your answer must contain a bounding box with the following format: [x1, y1, x2, y2] where x1, y1 are the top left coordinates, and x2, y2 are the bottom right coordinates. The coordinates should be normalized from [0, 1000]. + +Let's think step-by-step: +{} diff --git a/examples/run_dpo.py b/examples/run_dpo.py index acbb6f6810..69fa38a3a9 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -176,6 +176,13 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): print( f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." 
) + elif data_config["dataset_name"] == "Tulu3Preference": + data = hf_datasets.Tulu3PreferenceDataset() + train_dataset = data.formatted_ds["train"] + val_dataset = None + print( + f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." + ) elif data_cls == "DPODataset": data = hf_datasets.DPODataset( train_data_path=data_config["train_data_path"], diff --git a/examples/run_grpo_math.py b/examples/run_grpo_math.py index 006ad36a16..f31c2c212c 100644 --- a/examples/run_grpo_math.py +++ b/examples/run_grpo_math.py @@ -124,6 +124,7 @@ def setup_data( tokenizer: TokenizerType, data_config: DataConfig, env_configs: dict[str, Any], + seed: int, ) -> tuple[ AllTaskProcessedDataset, Optional[AllTaskProcessedDataset], @@ -140,12 +141,12 @@ def setup_data( # Load OpenMathInstruct2Dataset using nemo rl datasets if data_config["dataset_name"] == "OpenMathInstruct-2": print("Loading nvidia/OpenMathInstruct2Dataset for training and validation") - data: Any = OpenMathInstruct2Dataset() + data: Any = OpenMathInstruct2Dataset(seed=seed) elif data_config["dataset_name"] == "DeepScaler": print( "Loading agentica-org/DeepScaleR-Preview-Dataset for training and validation" ) - data: Any = DeepScalerDataset() + data: Any = DeepScalerDataset(seed=seed) else: raise ValueError(f"No processor for dataset {data_config['dataset_name']}.") @@ -236,7 +237,7 @@ def main() -> None: val_dataset, task_to_env, val_task_to_env, - ) = setup_data(tokenizer, config["data"], config["env"]) + ) = setup_data(tokenizer, config["data"], config["env"], config["grpo"]["seed"]) ( policy, diff --git a/examples/run_grpo_sliding_puzzle.py b/examples/run_grpo_sliding_puzzle.py index c5ccc65524..ca2359d0d2 100644 --- a/examples/run_grpo_sliding_puzzle.py +++ b/examples/run_grpo_sliding_puzzle.py @@ -24,7 +24,7 @@ from transformers import AutoTokenizer from nemo_rl.algorithms.grpo import MasterConfig, grpo_train, setup -from nemo_rl.algorithms.utils import get_tokenizer +from 
nemo_rl.algorithms.utils import get_tokenizer, set_seed from nemo_rl.data.interfaces import DatumSpec, LLMMessageLogType from nemo_rl.distributed.virtual_cluster import init_ray from nemo_rl.environments.games.sliding_puzzle import ( @@ -223,6 +223,8 @@ def main(): init_ray() + set_seed(config["grpo"]["seed"]) + # setup tokenizer tokenizer = get_tokenizer(config["policy"]["tokenizer"]) config["policy"]["generation"] = configure_generation_config( diff --git a/examples/run_sft.py b/examples/run_sft.py index df0d7ce3f7..392dfad645 100644 --- a/examples/run_sft.py +++ b/examples/run_sft.py @@ -16,7 +16,7 @@ import os import pprint from functools import partial -from typing import Any +from typing import Any, Callable, Optional from omegaconf import OmegaConf from transformers import AutoTokenizer @@ -59,8 +59,13 @@ def sft_preprocessor( add_bos: bool = True, add_eos: bool = True, add_generation_prompt: bool = False, + datum_preprocessor: Optional[Callable] = None, ) -> DatumSpec: """Process a datum dictionary for SFT training.""" + # optional preprocessor + if datum_preprocessor is not None: + datum_dict = datum_preprocessor(datum_dict) + message_log = get_formatted_message_log( datum_dict["messages"], tokenizer, @@ -91,11 +96,16 @@ def sft_preprocessor( return output -def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): +def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig, seed: int): print("\nā–¶ Setting up data...") data_cls = data_config["dataset_name"] + + datum_preprocessor = None if data_cls == "open_assistant": - data = hf_datasets.OasstDataset(output_dir="/tmp/open_assistant") + data = hf_datasets.OasstDataset( + output_dir="/tmp/open_assistant", + seed=seed, + ) elif data_cls == "squad": data = hf_datasets.SquadDataset() elif data_cls == "prompt_response_dataset": @@ -110,6 +120,7 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): split=data_config["split"], output_key=data_config["output_key"], 
prompt_file=data_config["prompt_file"], + seed=seed, ) elif data_cls == "openai_format": data = hf_datasets.OpenAIFormatDataset( @@ -119,6 +130,14 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): data_config["system_key"], data_config["system_prompt"], ) + elif data_cls == "clevr_cogent": + from nemo_rl.data.hf_datasets.clevr import format_clevr_cogent_dataset + + data = hf_datasets.CLEVRCoGenTDataset( + split=data_config["split"], + prompt_file=data_config["prompt_file"], + ) + datum_preprocessor = partial(format_clevr_cogent_dataset, return_pil=True) else: raise ValueError(f"Unknown dataset class: {data_cls}") print( @@ -138,6 +157,7 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): add_bos=data_config["add_bos"], add_eos=data_config["add_eos"], add_generation_prompt=data_config["add_generation_prompt"], + datum_preprocessor=datum_preprocessor, ), max_seq_length=data_config["max_input_seq_length"], ) @@ -151,6 +171,7 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): add_bos=data_config.get("add_bos", True), add_eos=data_config.get("add_eos", True), add_generation_prompt=data_config["add_generation_prompt"], + datum_preprocessor=datum_preprocessor, ), max_seq_length=data_config["max_input_seq_length"], ) @@ -158,7 +179,7 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): return train_dataset, val_dataset, sft_task_spec -def main(): +def main(is_vlm: bool = False): """Main entry point.""" # Parse arguments args, overrides = parse_args() @@ -189,15 +210,14 @@ def main(): init_ray() - # setup tokenizer - tokenizer = get_tokenizer(config["policy"]["tokenizer"]) - + # setup tokenizer (or processor) + tokenizer = get_tokenizer(config["policy"]["tokenizer"], get_processor=is_vlm) # setup data ( dataset, val_dataset, sft_task_spec, - ) = setup_data(tokenizer, config["data"]) + ) = setup_data(tokenizer, config["data"], config["sft"]["seed"]) ( policy, diff --git a/examples/run_vlm_grpo.py 
b/examples/run_vlm_grpo.py new file mode 100644 index 0000000000..fdb5fa26e9 --- /dev/null +++ b/examples/run_vlm_grpo.py @@ -0,0 +1,393 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import base64 +import os +import pprint +from collections import defaultdict +from io import BytesIO +from typing import Any, Optional + +import requests +from omegaconf import OmegaConf +from PIL import Image +from transformers import AutoProcessor + +from nemo_rl.algorithms.grpo import MasterConfig, grpo_train, setup +from nemo_rl.algorithms.utils import get_tokenizer +from nemo_rl.data import DataConfig +from nemo_rl.data.datasets import AllTaskProcessedDataset +from nemo_rl.data.hf_datasets.clevr import ( + CLEVRCoGenTDataset, + format_clevr_cogent_dataset, +) +from nemo_rl.data.hf_datasets.geometry3k import ( + Geometry3KDataset, + format_geometry3k_dataset, +) +from nemo_rl.data.hf_datasets.refcoco import RefCOCODataset, format_refcoco_dataset +from nemo_rl.data.interfaces import ( + DatumSpec, + LLMMessageLogType, + TaskDataProcessFnCallable, + TaskDataSpec, +) +from nemo_rl.data.multimodal_utils import ( + PackedTensor, + get_dim_to_pack_along, + get_multimodal_keys_from_processor, +) +from nemo_rl.distributed.ray_actor_environment_registry import ( + get_actor_python_env, +) +from nemo_rl.distributed.virtual_cluster import init_ray +from nemo_rl.environments.interfaces import 
EnvironmentInterface +from nemo_rl.environments.vlm_environment import VLMEnvironment +from nemo_rl.models.generation import configure_generation_config +from nemo_rl.utils.config import load_config, parse_hydra_overrides +from nemo_rl.utils.logger import get_next_experiment_dir + +OmegaConf.register_new_resolver("mul", lambda a, b: a * b) + + +def parse_args() -> tuple[argparse.Namespace, list[str]]: + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description="Run GRPO training with configuration") + parser.add_argument( + "--config", type=str, default=None, help="Path to YAML config file" + ) + # Parse known args for the script + args, overrides = parser.parse_known_args() + return args, overrides + + +# =============================================================================== +# VLM Data Processor +# =============================================================================== + + +def resolve_to_image(image_path_or_image: str | Image.Image) -> Image.Image: + """Resolve the image path to a PIL.Image object. + + image_path can be either: + - path to local file + - url to image + - base64 encoded image + """ + if isinstance(image_path_or_image, Image.Image): + return image_path_or_image + + if image_path_or_image.startswith(("http://", "https://")): + # Handle URL + response = requests.get(image_path_or_image) + response.raise_for_status() + return Image.open(BytesIO(response.content)).convert("RGB") + elif image_path_or_image.startswith("data:"): + # Handle base64 encoded image + # Format: data:image/jpeg;base64,/9j/4AAQSkZJRg... 
+ header, encoded = image_path_or_image.split(",", 1) + image_data = base64.b64decode(encoded) + return Image.open(BytesIO(image_data)).convert("RGB") + else: + # Handle local file path + return Image.open(image_path_or_image).convert("RGB") + + +def hf_data_processor( + datum_dict: dict[str, Any], + task_data_spec: TaskDataSpec, + processor: AutoProcessor, + max_seq_length: int, + idx: int, +) -> DatumSpec: + """Process a datum dictionary (directly loaded from data/hf_datasets/.py) into a DatumSpec for the VLM Environment.""" + # depending on the task, format the data differently + if task_data_spec.task_name == "clevr-cogent": + datum_dict = format_clevr_cogent_dataset(datum_dict) + elif task_data_spec.task_name == "refcoco": + datum_dict = format_refcoco_dataset(datum_dict) + elif task_data_spec.task_name == "geometry3k": + datum_dict = format_geometry3k_dataset(datum_dict) + else: + raise ValueError(f"No data processor for task {task_data_spec.task_name}") + + user_message = datum_dict["messages"] + problem = user_message[0]["content"] + extra_env_info = {"ground_truth": user_message[1]["content"]} + + message_log: LLMMessageLogType = [] + ### only one round of interaction is assumed, this can easily be extended to a conversational setting + user_message = {"role": "user", "content": []} + # + images = [] + if isinstance(problem, list): + for content in problem: + # for image, video, just append it + # for text, format the prompt to the problem + if content["type"] != "text": + user_message["content"].append(content) + if content["type"] == "image": + images.append(content["image"]) + else: + raise ValueError(f"Unsupported content type: {content['type']}") + elif content["type"] == "text": + user_message["content"].append( + { + "type": "text", + "text": task_data_spec.prompt.format(content["text"]) + if task_data_spec.prompt + else content["text"], + } + ) + else: + # conversation consists of a text-only message + user_message["content"] = 
task_data_spec.prompt.format(problem) + + images = [resolve_to_image(image) for image in images] + + # get formatted user message + if hasattr(processor, "conversation_preprocessor"): + user_message_for_chat_template = processor.conversation_preprocessor( + user_message + ) + else: + user_message_for_chat_template = user_message + + # this is the string-tokenized conversation template for the generation policy (for vllm) + string_formatted_dialog = processor.apply_chat_template( + [user_message_for_chat_template], + tokenize=False, + add_generation_prompt=True, + ) + + # this is the id-tokenized and image processed conversation template for the policy + message: dict = processor.apply_chat_template( + [user_message], + tokenize=True, + add_generation_prompt=True, + return_tensors="pt", + return_dict=True, + ) + + # add this for backward compatibility + user_message["token_ids"] = message["input_ids"][0] + # add all keys and values to the user message, and the list of keys + multimodal_keys = get_multimodal_keys_from_processor(processor) + for key in multimodal_keys: + if key in message: + user_message[key] = PackedTensor( + message[key], dim_to_pack=get_dim_to_pack_along(processor, key) + ) + + # specifically for gemma, we need to add token_type_ids to the user message as a sequence-type value + if "token_type_ids" in message: + user_message["token_type_ids"] = message["token_type_ids"][0] + + ### append to user message + message_log.append(user_message) + + length = sum(len(m["token_ids"]) for m in message_log) + loss_multiplier = 1.0 + if length > max_seq_length: + # make smaller and mask out + for chat_message in message_log: + chat_message["token_ids"] = chat_message["token_ids"][ + : min(4, max_seq_length // len(message_log)) + ] + loss_multiplier = 0.0 + raise NotImplementedError( + "Sequence length is too long, please use a shorter sequence length" + ) + + output: DatumSpec = { + "message_log": message_log, + "length": length, + "extra_env_info": 
extra_env_info, + "loss_multiplier": loss_multiplier, + "idx": idx, + "task_name": task_data_spec.task_name, + # get the prompt content! (use this for vllm-backend that needs formatted dialog and list of images) for the entire conversation + # add images for vllm serving + "vllm_content": string_formatted_dialog, + "vllm_images": images, + } + return output + + +def setup_data( + processor: AutoProcessor, + data_config: DataConfig, + env_configs: dict[str, Any], +) -> tuple[ + AllTaskProcessedDataset, + Optional[AllTaskProcessedDataset], + dict[str, EnvironmentInterface], + dict[str, EnvironmentInterface], +]: + """This function will create a TaskSpec, DatumSpec, and connect the two. + + task_spec contains the task name as well as prompt and system prompt modifiers that can be used by data processor + """ + print("\nā–¶ Setting up data...") + # Load CLEVR-CoGenT dataset using nemo rl datasets + # other VLM datasets can be added here + if data_config["dataset_name"] == "clevr-cogent": + data: Any = CLEVRCoGenTDataset( + split=data_config["split"], + ) + elif data_config["dataset_name"] == "refcoco": + data: Any = RefCOCODataset( + split=data_config["split"], + download_dir=data_config["download_dir"], + ) + elif data_config["dataset_name"] == "geometry3k": + data: Any = Geometry3KDataset( + split=data_config["split"], + ) + else: + raise ValueError(f"No processor for dataset {data_config['dataset_name']}.") + + task_name = data.task_name + vlm_task_spec = TaskDataSpec( + task_name=task_name, + prompt_file=data_config["prompt_file"], + system_prompt_file=data_config["system_prompt_file"], + ) + + # add data processor for different tasks + task_data_processors: dict[str, tuple[TaskDataSpec, TaskDataProcessFnCallable]] = ( + defaultdict(lambda: (vlm_task_spec, hf_data_processor)) + ) + task_data_processors[task_name] = (vlm_task_spec, hf_data_processor) + + vlm_env = VLMEnvironment.options( # type: ignore # it's wrapped with ray.remote + runtime_env={ + 
"py_executable": get_actor_python_env( + "nemo_rl.environments.vlm_environment.VLMEnvironment" + ), + "env_vars": dict(os.environ), # Pass thru all user environment variables + } + ).remote(env_configs[task_name]) + + dataset = AllTaskProcessedDataset( + data.formatted_ds["train"], + processor, + vlm_task_spec, + task_data_processors, + max_seq_length=data_config["max_input_seq_length"], + ) + + val_dataset: Optional[AllTaskProcessedDataset] = None + if data.formatted_ds["validation"]: + val_dataset = AllTaskProcessedDataset( + data.formatted_ds["validation"], + processor, + vlm_task_spec, + task_data_processors, + max_seq_length=data_config["max_input_seq_length"], + ) + else: + val_dataset = None + + task_to_env: dict[str, EnvironmentInterface] = defaultdict(lambda: vlm_env) + task_to_env[task_name] = vlm_env + return dataset, val_dataset, task_to_env, task_to_env + + +def main() -> None: + """Main entry point.""" + args, overrides = parse_args() + + if not args.config: + args.config = os.path.join( + os.path.dirname(__file__), "configs", "vlm_grpo_3B.yaml" + ) + + config = load_config(args.config) + print(f"Loaded configuration from: {args.config}") + + if overrides: + print(f"Overrides: {overrides}") + config = parse_hydra_overrides(config, overrides) + + config: MasterConfig = OmegaConf.to_container(config, resolve=True) + print("Applied CLI overrides") + + # Print config + print("Final config:") + pprint.pprint(config) + + # Get the next experiment directory with incremented ID + config["logger"]["log_dir"] = get_next_experiment_dir(config["logger"]["log_dir"]) + print(f"šŸ“Š Using log directory: {config['logger']['log_dir']}") + if config["checkpointing"]["enabled"]: + print( + f"šŸ“Š Using checkpoint directory: {config['checkpointing']['checkpoint_dir']}" + ) + + init_ray() + + # init processor + processor = get_tokenizer(config["policy"]["tokenizer"], get_processor=True) + tokenizer = processor.tokenizer + + assert config["policy"]["generation"] is not 
None, ( + "A generation config is required for GRPO" + ) + config["policy"]["generation"] = configure_generation_config( + config["policy"]["generation"], processor.tokenizer + ) + + # setup data + # this function is local to this script, and can be extended to other VLM datasets + ( + dataset, + val_dataset, + task_to_env, + val_task_to_env, + ) = setup_data(processor, config["data"], config["env"]) + + ( + policy, + policy_generation, + cluster, + dataloader, + val_dataloader, + loss_fn, + logger, + checkpointer, + grpo_state, + master_config, + ) = setup(config, tokenizer, dataset, val_dataset, processor=processor) + + grpo_train( + policy, + policy_generation, + dataloader, + val_dataloader, + tokenizer, + loss_fn, + task_to_env, + val_task_to_env, + logger, + checkpointer, + grpo_state, + master_config, + processor, + ) + + +if __name__ == "__main__": + main() diff --git a/examples/run_vlm_sft.py b/examples/run_vlm_sft.py new file mode 100644 index 0000000000..c97be905f0 --- /dev/null +++ b/examples/run_vlm_sft.py @@ -0,0 +1,18 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from run_sft import main as sft_main + +if __name__ == "__main__": + sft_main(is_vlm=True) diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 143ad391a5..c00185578f 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -36,7 +36,7 @@ from nemo_rl.utils.checkpoint import CheckpointingConfig, CheckpointManager from nemo_rl.utils.logger import Logger, LoggerConfig from nemo_rl.utils.nsys import maybe_gpu_profile_step -from nemo_rl.utils.timer import Timer +from nemo_rl.utils.timer import TimeoutChecker, Timer class DPOSaveState(TypedDict): @@ -152,7 +152,7 @@ def setup( train_dataloader = StatefulDataLoader( train_dataset, batch_size=policy_config["train_global_batch_size"], - shuffle=True, + shuffle=data_config["shuffle"], collate_fn=partial( preference_collate_fn, tokenizer=tokenizer, @@ -400,6 +400,11 @@ def dpo_train( ) -> None: # Run dpo training timer = Timer() + timeout = TimeoutChecker( + timeout=master_config["checkpointing"]["checkpoint_must_save_by"], + fit_last_save_time=True, + ) + timeout.start_iterations() if dpo_save_state is None: dpo_save_state = _default_dpo_save_state() @@ -454,15 +459,16 @@ def dpo_train( with timer.time("total_step_time"): print("ā–¶ Taking a training step...") - train_results = policy.train( - batch, - loss_fn, - eval_mode=False, - ## NOTE: we double the batch size here because each preference example corresponds to a pair of - ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. - gbs=master_config["policy"]["train_global_batch_size"] * 2, - mbs=master_config["policy"]["train_micro_batch_size"] * 2, - ) + with timer.time("policy_training"): + train_results = policy.train( + batch, + loss_fn, + eval_mode=False, + ## NOTE: we double the batch size here because each preference example corresponds to a pair of + ## examples, chosen and rejected, and the pair needs to be processed as part of the same microbatch. 
+ gbs=master_config["policy"]["train_global_batch_size"] * 2, + mbs=master_config["policy"]["train_micro_batch_size"] * 2, + ) is_last_step = total_steps + 1 >= master_config["dpo"][ "max_num_steps" @@ -494,11 +500,20 @@ def dpo_train( dpo_save_state["consumed_samples"] += master_config["policy"][ "train_global_batch_size" ] - if master_config["checkpointing"]["enabled"] and ( + timeout.mark_iteration() + + should_save_by_step = ( is_last_step or (total_steps + 1) % master_config["checkpointing"]["save_period"] == 0 - ): # +1 because step is 0-indexed + ) + # +1 because step is 0-indexed + # Check if timeout-based checkpointing is enabled in config. + should_save_by_timeout = timeout.check_save() + + if master_config["checkpointing"]["enabled"] and ( + should_save_by_step or should_save_by_timeout + ): dpo_save_state["step"] = (current_step + 1) % len(train_dataloader) dpo_save_state["total_steps"] = total_steps + 1 dpo_save_state["epoch"] = current_epoch @@ -555,6 +570,22 @@ def dpo_train( print("\nšŸ“Š Training Results:") print(f" • Loss: {float(metrics['loss']):.4f}") + if "total_flops" in train_results: + total_tflops = ( + train_results["total_flops"] + / timing_metrics["policy_training"] + / 1e12 + ) + num_ranks = train_results["num_ranks"] + print( + f" • Training FLOPS: {total_tflops:.2f} TFLOPS ({total_tflops / num_ranks:.2f} TFLOPS per rank)" + ) + if "theoretical_tflops" in train_results: + theoretical_tflops = train_results["theoretical_tflops"] + print( + f" • Training Model Floating Point Utilization: {100 * total_tflops / theoretical_tflops:.2f}%" + ) + metrics["train_fp_utilization"] = total_tflops / theoretical_tflops print("\nā±ļø Timing:") # Display total time first, separately total_time = timing_metrics.get("total_step_time", 0) diff --git a/nemo_rl/algorithms/grpo.py b/nemo_rl/algorithms/grpo.py index fceb2173c6..90454bbab9 100644 --- a/nemo_rl/algorithms/grpo.py +++ b/nemo_rl/algorithms/grpo.py @@ -21,6 +21,7 @@ import ray import torch from 
torchdata.stateful_dataloader import StatefulDataLoader +from transformers import AutoProcessor from transformers.tokenization_utils_base import PreTrainedTokenizerBase from nemo_rl.algorithms.interfaces import LossFunction @@ -29,7 +30,7 @@ ClippedPGLossDataDict, ClippedPGLossFn, ) -from nemo_rl.algorithms.utils import calculate_baseline_and_std_per_prompt +from nemo_rl.algorithms.utils import calculate_baseline_and_std_per_prompt, set_seed from nemo_rl.data import DataConfig from nemo_rl.data.datasets import AllTaskProcessedDataset, rl_collate_fn from nemo_rl.data.interfaces import ( @@ -65,7 +66,7 @@ print_message_log_samples, ) from nemo_rl.utils.nsys import maybe_gpu_profile_step -from nemo_rl.utils.timer import Timer +from nemo_rl.utils.timer import TimeoutChecker, Timer # =============================================================================== # Configuration @@ -84,6 +85,7 @@ class GRPOConfig(TypedDict): val_batch_size: int val_at_start: bool max_val_samples: int + seed: int class GRPOSaveState(TypedDict): @@ -127,6 +129,7 @@ def setup( tokenizer: TokenizerType, dataset: AllTaskProcessedDataset, val_dataset: Optional[AllTaskProcessedDataset], + processor: Optional[AutoProcessor] = None, ) -> tuple[ ColocatablePolicyInterface, Optional[GenerationInterface], @@ -149,6 +152,7 @@ def setup( generation_config = master_config["policy"]["generation"] loss_config = master_config["loss_fn"] grpo_config = master_config["grpo"] + data_config = master_config["data"] logger_config = master_config["logger"] cluster_config = master_config["cluster"] @@ -156,6 +160,9 @@ def setup( "A generation config in the PolicyConfig is required for GRPO" ) + # Set seed for all random number generators + set_seed(grpo_config["seed"]) + # ========================== # Logger # ========================== @@ -179,7 +186,7 @@ def setup( dataloader = StatefulDataLoader( dataset, batch_size=grpo_config["num_prompts_per_step"], - shuffle=False, + shuffle=data_config["shuffle"], 
collate_fn=rl_collate_fn, drop_last=True, ) @@ -312,6 +319,11 @@ def setup( ) elif backend == "vllm": generation_config = cast(VllmConfig, generation_config) + if generation_config["vllm_cfg"]["precision"] == "fp8": + assert loss_config["use_importance_sampling_correction"] is True, ( + "Importance sampling must be enabled for vLLM FP8 generation for good convergence!" + ) + policy_generation = VllmGeneration( cluster=inference_cluster, config=generation_config ) @@ -333,6 +345,7 @@ def setup( cluster=train_cluster, config=policy_config, tokenizer=tokenizer, + processor=processor, weights_path=weights_path, optimizer_path=optimizer_path, init_optimizer=True, @@ -484,9 +497,16 @@ def grpo_train( checkpointer: CheckpointManager, grpo_save_state: GRPOSaveState, master_config: MasterConfig, + processor: Optional[AutoProcessor] = None, ) -> None: """Run GRPO training algorithm.""" timer = Timer() + timeout = TimeoutChecker( + timeout=master_config["checkpointing"]["checkpoint_must_save_by"], + fit_last_save_time=True, + ) + timeout.start_iterations() + NEED_REFIT = True # If policy_generation is None, use the policy as the generation interface (megatron framework backend) if policy_generation is None: @@ -654,6 +674,8 @@ def grpo_train( "sample_mask": repeated_batch["loss_multiplier"], } ) + # this will be mini-batched inside the policy, so maintain the packed multimodal structure + train_data.update(flat_messages.get_multimodal_dict(as_tensors=False)) train_data.to("cpu") print("ā–¶ Preparing for logprob inference...") @@ -707,10 +729,19 @@ def grpo_train( ## Checkpointing consumed_samples += master_config["grpo"]["num_prompts_per_step"] - if master_config["checkpointing"]["enabled"] and ( + timeout.mark_iteration() + + should_save_by_step = ( is_last_step or (step + 1) % master_config["checkpointing"]["save_period"] == 0 - ): # +1 because step is 0-indexed + ) + # +1 because step is 0-indexed + # Check if timeout-based checkpointing is enabled in config. 
+ should_save_by_timeout = timeout.check_save() + + if master_config["checkpointing"]["enabled"] and ( + should_save_by_step or should_save_by_timeout + ): policy.prepare_for_training() grpo_save_state["step"] = step + 1 @@ -750,7 +781,6 @@ def grpo_train( os.path.join(checkpoint_path, "train_dataloader.pt"), ) checkpointer.finalize_checkpoint(checkpoint_path) - policy.offload_after_refit() # Logging # Log training data @@ -806,6 +836,20 @@ def grpo_train( print( f" • Mean Generation Length: {rollout_metrics['mean_gen_tokens_per_sample']:.4f}" ) + if "total_flops" in train_results: + total_tflops = ( + train_results["total_flops"] / timing_metrics["policy_training"] / 1e12 + ) + num_ranks = train_results["num_ranks"] + print( + f" • Training FLOPS: {total_tflops:.2f} TFLOPS ({total_tflops / num_ranks:.2f} TFLOPS per rank)" + ) + if "theoretical_tflops" in train_results: + theoretical_tflops = train_results["theoretical_tflops"] + print( + f" • Training Model Floating Point Utilization: {100 * total_tflops / theoretical_tflops:.2f}%" + ) + metrics["train_fp_utilization"] = total_tflops / theoretical_tflops print("\nā±ļø Timing:") # Display total time first, separately diff --git a/nemo_rl/algorithms/loss_functions.py b/nemo_rl/algorithms/loss_functions.py index ed778dc392..eb907a61de 100644 --- a/nemo_rl/algorithms/loss_functions.py +++ b/nemo_rl/algorithms/loss_functions.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Optional, TypedDict, TypeVar +from typing import Any, NotRequired, Optional, TypedDict, TypeVar import torch @@ -21,8 +21,8 @@ masked_mean, ) from nemo_rl.distributed.batched_data_dict import BatchedDataDict -from nemo_rl.distributed.model_utils import from_parallel_logits_to_logprobs -from nemo_rl.models.dtensor.parallelize import ( +from nemo_rl.distributed.model_utils import ( + from_parallel_logits_to_logprobs, get_logprobs_from_vocab_parallel_logits, ) @@ -37,6 +37,11 @@ class ClippedPGLossConfig(TypedDict): use_on_policy_kl_approximation: bool use_importance_sampling_correction: bool token_level_loss: bool + # If True, apply the off-policy importance-sampling correction at the + # sequence level (one weight per generated sample), as in GSPO. + # If False (default), correction is applied at the token level as in the + # original GRPO paper. + sequence_level_importance_ratios: NotRequired[bool] class ClippedPGLossDataDict(TypedDict): @@ -60,6 +65,7 @@ class ClippedPGLossFn(LossFunction): - PPO (Clipped) - https://arxiv.org/abs/1707.06347 - GRPO - https://arxiv.org/abs/2402.03300 - REINFORCE/RLOO (set disable_ppo_ratio = True and ignores ratio_clip_min/ratio_clip_max) - https://arxiv.org/abs/2402.14740 + - GSPO (set sequence_level_importance_ratios = True and token_level_loss = False) - https://arxiv.org/abs/2507.18071 Formula: L(Īø) = E_t [ min(r_t(Īø) * A_t, clip(r_t(Īø), 1-ε, 1+ε) * A_t) ] - β * KL(Ļ€_Īø || Ļ€_ref) @@ -101,10 +107,18 @@ def __init__(self, cfg: ClippedPGLossConfig): self.use_importance_sampling_correction = cfg[ "use_importance_sampling_correction" ] - + # Whether to compute importance weights per-sequence instead of per-token. 
+ self.sequence_level_importance_ratios = cfg.get( + "sequence_level_importance_ratios", + False, + ) self.loss_type = ( LossType.TOKEN_LEVEL if cfg["token_level_loss"] else LossType.SEQUENCE_LEVEL ) + if self.sequence_level_importance_ratios: + assert self.loss_type == LossType.SEQUENCE_LEVEL, ( + "sequence-level importance sampling (e.g. GSPO) is mutually exclusive with token-level loss" + ) def __call__( self, @@ -137,8 +151,6 @@ def __call__( global_normalization_factor=global_valid_toks, ).item() - next_token_logits = next_token_logits.to(torch.float32) - if vocab_parallel_group is not None: assert vocab_parallel_rank is not None, ( "vocab_parallel_rank must be provided when vocab_parallel_group is provided" @@ -159,6 +171,7 @@ def __call__( next_token_logits, data["input_ids"], seq_index=seq_index ) else: + next_token_logits = next_token_logits.to(torch.float32) next_token_logits_wo_last = next_token_logits[ :, :-1 ] # Remove last position's logits @@ -205,7 +218,17 @@ def __call__( # Calculate clipped loss function if ppo ratio is enabled. 
if not self.disable_ppo_ratio: - ratios = (curr_logprobs - prev_logprobs).exp() + log_ratios = curr_logprobs - prev_logprobs + if self.sequence_level_importance_ratios: + seq_log_ratio_mean = masked_mean( + log_ratios, + token_mask, + dim=-1, + ).unsqueeze(-1) + seq_ratio = seq_log_ratio_mean.exp() + ratios = seq_ratio.repeat(1, advantages.shape[1]) + else: + ratios = log_ratios.exp() ratios_clamped = ratios.clamp( 1.0 - self.ratio_clip_min, 1.0 + self.ratio_clip_max ) @@ -229,11 +252,29 @@ def __call__( advantages < 0, torch.min(clip_loss, loss3), clip_loss ) + # ------------------------------------------------------------- + # Off-policy (actor) importance-sampling correction + # ------------------------------------------------------------- # See: docs/guides/grpo.md#importance-sampling-correction - actor_importance_weights = torch.exp(prev_logprobs - generation_logprobs) - actor_importance_weights = torch.nan_to_num( - actor_importance_weights, nan=0.0, posinf=0.0, neginf=0.0 - ) + if self.sequence_level_importance_ratios: + # importance weight w_i = exp(Ī£_t (log Ļ€_actor āˆ’ log Ļ€_behaviour)) + seq_lp_diff = ((prev_logprobs - generation_logprobs) * mask).sum(dim=-1) + actor_importance_weights = torch.exp(seq_lp_diff).detach() + actor_importance_weights = torch.nan_to_num( + actor_importance_weights, nan=0.0, posinf=0.0, neginf=0.0 + ) + # Broadcast to token dimension so we can reuse existing reduction + actor_importance_weights_expanded = actor_importance_weights.unsqueeze(-1) + else: + # Token-level correction + actor_importance_weights_expanded = torch.exp( + prev_logprobs - generation_logprobs + ) + actor_importance_weights_expanded = torch.nan_to_num( + actor_importance_weights_expanded, nan=0.0, posinf=0.0, neginf=0.0 + ) + actor_importance_weights = actor_importance_weights_expanded + del actor_importance_weights_expanded if self.use_importance_sampling_correction: importance_weights_to_use = actor_importance_weights else: @@ -256,12 +297,20 @@ def 
__call__( global_normalization_factor=global_valid_seqs, ) + # Metric: sampling importance ratio (mean over samples) # See: docs/guides/grpo.md#sampling-importance-ratio - sample_importance_ratio = masked_mean( - actor_importance_weights, - mask, - global_normalization_factor=global_valid_toks, - ) + if self.sequence_level_importance_ratios: + sample_importance_ratio = masked_mean( + actor_importance_weights, + sample_mask, + global_normalization_factor=global_valid_seqs, + ) + else: + sample_importance_ratio = masked_mean( + actor_importance_weights, + mask, + global_normalization_factor=global_valid_toks, + ) # Approximating entropy as E_{s ~ \pi_{gen}(s)}[-(\pi_{curr}/\pi_{gen})log(\pi_{curr}(s))] # See more details and other metrics in docs/guides/grpo.md#metrics @@ -325,8 +374,7 @@ def __call__( token_mask = data["token_mask"][:, 1:] sample_mask = data["sample_mask"] mask = token_mask * sample_mask.unsqueeze(-1) - - next_token_logits = next_token_logits.to(torch.float32) + seq_index = data.get("seq_index", None) # Gather the logprobs for the actual next tokens if vocab_parallel_group is not None: @@ -346,10 +394,11 @@ def __call__( token_logprobs = token_logprobs[:, : data["input_ids"].shape[1] - 1] elif isinstance(next_token_logits, torch.distributed.tensor.DTensor): token_logprobs = get_logprobs_from_vocab_parallel_logits( - next_token_logits, data["input_ids"] + next_token_logits, data["input_ids"], seq_index=seq_index ) else: next_tokens = data["input_ids"][:, 1:].cuda() # Skip first token + next_token_logits = next_token_logits.to(torch.float32) next_token_logprobs = torch.nn.functional.log_softmax( next_token_logits, dim=-1 ) @@ -580,8 +629,8 @@ def _dpo_loss( ## TODO(@ashors): there's some duplicate code here with the NLLLoss function. 
We should refactor token_mask = data["token_mask"][:, 1:] sample_mask = data["sample_mask"] + seq_index = data.get("seq_index", None) - next_token_logits = next_token_logits.to(torch.float32) if vocab_parallel_group is not None: assert vocab_parallel_rank is not None, ( "vocab_parallel_rank must be provided when vocab_parallel_group is provided" @@ -599,10 +648,11 @@ def _dpo_loss( token_logprobs = token_logprobs[:, : data["input_ids"].shape[1] - 1] elif isinstance(next_token_logits, torch.distributed.tensor.DTensor): token_logprobs = get_logprobs_from_vocab_parallel_logits( - next_token_logits, data["input_ids"] + next_token_logits, data["input_ids"], seq_index=seq_index ) else: next_tokens = data["input_ids"][:, 1:].cuda() # Skip first token + next_token_logits = next_token_logits.to(torch.float32) next_token_logprobs = torch.nn.functional.log_softmax( next_token_logits, dim=-1 ) diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 05b2dcb1ec..5a7610873b 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -142,7 +142,7 @@ def setup( train_dataloader = StatefulDataLoader( train_dataset, batch_size=policy_config["train_global_batch_size"], - shuffle=True, + shuffle=data_config["shuffle"], collate_fn=partial( preference_collate_fn, tokenizer=tokenizer, diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index bb62028a32..adc33d02c4 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -19,7 +19,7 @@ import numpy as np import torch from torchdata.stateful_dataloader import StatefulDataLoader -from transformers import AutoTokenizer +from transformers import AutoTokenizer, PreTrainedTokenizerBase from nemo_rl.algorithms.loss_functions import ( NLLLoss, @@ -40,7 +40,7 @@ from nemo_rl.utils.checkpoint import CheckpointingConfig, CheckpointManager from nemo_rl.utils.logger import Logger, LoggerConfig from nemo_rl.utils.nsys import maybe_gpu_profile_step -from nemo_rl.utils.timer import Timer +from 
nemo_rl.utils.timer import TimeoutChecker, Timer class SFTSaveState(TypedDict): @@ -134,7 +134,7 @@ def setup( train_dataloader = StatefulDataLoader( train_dataset, batch_size=policy_config["train_global_batch_size"], - shuffle=True, + shuffle=data_config["shuffle"], collate_fn=rl_collate_fn, drop_last=True, ) @@ -171,10 +171,17 @@ def setup( # Training # ========================== print("\nā–¶ Setting up model...") + # check if tokenizer is a processor (e.g. for VLMs) + processor = None + if not isinstance(tokenizer, PreTrainedTokenizerBase): + processor = tokenizer + tokenizer = processor.tokenizer + policy = Policy( cluster=cluster, config=policy_config, tokenizer=tokenizer, + processor=processor, weights_path=Path(last_checkpoint_path) / "policy" / "weights" if last_checkpoint_path else None, @@ -260,6 +267,9 @@ def validate( } ) + # update multimodal data + val_data.update(cat_and_padded.get_multimodal_dict(as_tensors=False)) + ## just run model fwd val_results = policy.train( val_data, @@ -326,6 +336,11 @@ def sft_train( ) -> None: # Run basic sft training timer = Timer() + timeout = TimeoutChecker( + timeout=master_config["checkpointing"]["checkpoint_must_save_by"], + fit_last_save_time=True, + ) + timeout.start_iterations() if sft_save_state is None: sft_save_state = _default_sft_save_state() @@ -403,9 +418,13 @@ def sft_train( "sample_mask": batch["loss_multiplier"], } ) + train_data.update( + cat_and_padded.get_multimodal_dict(as_tensors=False) + ) print("ā–¶ Taking a training step...") - train_results = policy.train(train_data, loss_fn) + with timer.time("policy_training"): + train_results = policy.train(train_data, loss_fn) is_last_step = total_steps + 1 >= master_config["sft"][ "max_num_steps" @@ -439,12 +458,19 @@ def sft_train( sft_save_state["consumed_samples"] += master_config["policy"][ "train_global_batch_size" ] - if master_config["checkpointing"]["enabled"] and ( + timeout.mark_iteration() + should_save_by_step = ( is_last_step or (total_steps 
+ 1) % master_config["checkpointing"]["save_period"] == 0 + ) + # +1 because step is 0-indexed + # Check if timeout-based checkpointing is enabled in config. + should_save_by_timeout = timeout.check_save() + + if master_config["checkpointing"]["enabled"] and ( + should_save_by_step or should_save_by_timeout ): - ## +1 because step is 0-indexed sft_save_state["step"] = (current_step + 1) % len(train_dataloader) sft_save_state["total_steps"] = total_steps + 1 sft_save_state["epoch"] = current_epoch @@ -502,6 +528,22 @@ def sft_train( print("\nšŸ“Š Training Results:") print(f" • Loss: {float(metrics['loss']):.4f}") + if "total_flops" in train_results: + total_tflops = ( + train_results["total_flops"] + / timing_metrics["policy_training"] + / 1e12 + ) + num_ranks = train_results["num_ranks"] + print( + f" • Training FLOPS: {total_tflops:.2f} TFLOPS ({total_tflops / num_ranks:.2f} TFLOPS per rank)" + ) + if "theoretical_tflops" in train_results: + theoretical_tflops = train_results["theoretical_tflops"] + print( + f" • Training Model Floating Point Utilization: {100 * total_tflops / theoretical_tflops:.2f}%" + ) + metrics["train_fp_utilization"] = total_tflops / theoretical_tflops print("\nā±ļø Timing:") # Display total time first, separately total_time = timing_metrics.get("total_step_time", 0) diff --git a/nemo_rl/algorithms/utils.py b/nemo_rl/algorithms/utils.py index 6d634e3ceb..b4d353be89 100644 --- a/nemo_rl/algorithms/utils.py +++ b/nemo_rl/algorithms/utils.py @@ -18,7 +18,11 @@ import numpy as np import torch -from transformers import AutoTokenizer, PreTrainedTokenizerBase +from transformers import ( + AutoProcessor, + AutoTokenizer, + PreTrainedTokenizerBase, +) from nemo_rl.data import hf_datasets from nemo_rl.models.policy import TokenizerConfig @@ -144,7 +148,9 @@ def set_seed(seed: int) -> None: torch.cuda.manual_seed_all(seed) -def get_tokenizer(tokenizer_config: TokenizerConfig) -> PreTrainedTokenizerBase: +def get_tokenizer( + tokenizer_config: 
TokenizerConfig, get_processor: bool = False +) -> PreTrainedTokenizerBase: """Get the tokenizer and set pad token to eos token if it is not already set. This function initializes a tokenizer from the Hugging Face transformers library @@ -160,6 +166,7 @@ def get_tokenizer(tokenizer_config: TokenizerConfig) -> PreTrainedTokenizerBase: - "default": Uses the tokenizer's default template - A custom jinja2 template string If not specified, the tokenizer's default template will be used. + get_processor: Whether to return a processor (via AutoProcessor) instead of a tokenizer. Returns: PreTrainedTokenizerBase: The configured tokenizer instance @@ -198,13 +205,38 @@ def get_tokenizer(tokenizer_config: TokenizerConfig) -> PreTrainedTokenizerBase: Using custom chat template >>> formatted = tokenizer.apply_chat_template(messages, tokenize=False) >>> assert formatted == " START: You are a helpful AI assistant. END. START: Hello! END." + + >>> # Requesting a processor (for multimodal models like Qwen-VL) + >>> config = {"name": "Qwen/Qwen2.5-VL-3B-Instruct"} + >>> processor = get_tokenizer(config, get_processor=True) + No chat template provided, using tokenizer's default + >>> messages = [ + ... {"role": "system", "content": "You are a helpful AI assistant."}, + ... {"role": "user", "content": "Hello!"} + ... ] + >>> formatted = processor.tokenizer.apply_chat_template(messages, tokenize=False) + >>> assert formatted == AutoTokenizer.from_pretrained( + ... "Qwen/Qwen2.5-VL-3B-Instruct", trust_remote_code=True + ... 
).apply_chat_template(messages, tokenize=False) + >>> assert processor.pad_token_id == processor.tokenizer.pad_token_id + >>> ``` """ - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_config["name"], trust_remote_code=True - ) + processor = None + + if get_processor: + processor = AutoProcessor.from_pretrained( + tokenizer_config["name"], trust_remote_code=True, use_fast=True + ) + tokenizer = processor.tokenizer + else: + tokenizer = AutoTokenizer.from_pretrained( + tokenizer_config["name"], trust_remote_code=True + ) + if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token + if "chat_template" in tokenizer_config: if tokenizer_config["chat_template"] is None: print("Using passthrough chat template") @@ -219,4 +251,17 @@ def get_tokenizer(tokenizer_config: TokenizerConfig) -> PreTrainedTokenizerBase: else: print("No chat template provided, using tokenizer's default") - return tokenizer + # The "tokenizer" is passed to the policy workers only to use the pad/eos/bos tokens for extra padding and processing of the tokenized messages. That is the only reason it is needed. + # However, the dataloader needs the processor for multimodal data preprocessing, so the processor is needed for the dataloader (only tokenizer is NOT enough). 
+ # Inheriting special keys from the tokenizer is a minimal change that doesn't disturb the rest of the SFT pipeline + if processor is not None: + processor.pad_token = tokenizer.pad_token + processor.eos_token = tokenizer.eos_token + processor.bos_token = tokenizer.bos_token + processor.pad_token_id = tokenizer.pad_token_id + processor.eos_token_id = tokenizer.eos_token_id + processor.bos_token_id = tokenizer.bos_token_id + # copy name_or_path from tokenizer to processor for logging + processor.name_or_path = tokenizer.name_or_path + + return tokenizer if processor is None else processor diff --git a/nemo_rl/data/__init__.py b/nemo_rl/data/__init__.py index 9a9ce4b23a..ee0600bf47 100644 --- a/nemo_rl/data/__init__.py +++ b/nemo_rl/data/__init__.py @@ -28,6 +28,9 @@ class DataConfig(TypedDict): add_generation_prompt: NotRequired[bool] add_system_prompt: NotRequired[bool] split: NotRequired[str] + shuffle: NotRequired[bool] + seed: NotRequired[int] + download_dir: NotRequired[str] class MathDataConfig(DataConfig): diff --git a/nemo_rl/data/datasets.py b/nemo_rl/data/datasets.py index 7c1dd32719..172b73ecd0 100644 --- a/nemo_rl/data/datasets.py +++ b/nemo_rl/data/datasets.py @@ -15,7 +15,7 @@ import torch from datasets import Dataset -from transformers import PreTrainedTokenizerBase +from transformers import AutoProcessor, PreTrainedTokenizerBase from nemo_rl.data.interfaces import ( DatumSpec, @@ -29,7 +29,7 @@ ) from nemo_rl.distributed.batched_data_dict import BatchedDataDict -TokenizerType = PreTrainedTokenizerBase +TokenizerType = Union[PreTrainedTokenizerBase, AutoProcessor] # TODO @sahilj handle too-long prompts and masking them out throughout the whole process and renormalizing on loss @@ -150,6 +150,20 @@ def rl_collate_fn(data_batch: list[DatumSpec]) -> BatchedDataDict[Any]: # Extract stop_strings if present stop_strings = [datum.get("stop_strings", None) for datum in data_batch] + # check if any of the data batch has vllm content and images + extra_args = 
{} + if any( + [datum_spec.get("vllm_content", None) is not None for datum_spec in data_batch] + ): + vllm_content = [ + datum_spec.get("vllm_content", None) for datum_spec in data_batch + ] + vllm_images = [datum_spec.get("vllm_images", []) for datum_spec in data_batch] + vllm_videos = [datum_spec.get("vllm_videos", []) for datum_spec in data_batch] + extra_args["vllm_content"] = vllm_content + extra_args["vllm_images"] = vllm_images + extra_args["vllm_videos"] = vllm_videos + output: BatchedDataDict[Any] = BatchedDataDict( message_log=message_log, length=length, @@ -159,6 +173,7 @@ def rl_collate_fn(data_batch: list[DatumSpec]) -> BatchedDataDict[Any]: idx=idx, batch_max_length=batch_max_length, stop_strings=stop_strings, + **extra_args, ) return output @@ -299,6 +314,7 @@ def assert_no_double_bos(token_ids: torch.Tensor, tokenizer: TokenizerType) -> N and token_ids_list[1] == tokenizer.bos_token_id ), "Found double BOS token in the first two positions of the message." else: + # `name_or_path` is not available for AutoProcessor, temp fix in get_tokenizer print( f"skip assert_start_single_bos since Tokenizer {tokenizer.name_or_path} has no BOS token" ) diff --git a/nemo_rl/data/hf_datasets/__init__.py b/nemo_rl/data/hf_datasets/__init__.py index 107769494f..3c3d7c91fa 100644 --- a/nemo_rl/data/hf_datasets/__init__.py +++ b/nemo_rl/data/hf_datasets/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. 
from nemo_rl.data.hf_datasets.chat_templates import COMMON_CHAT_TEMPLATES +from nemo_rl.data.hf_datasets.clevr import CLEVRCoGenTDataset from nemo_rl.data.hf_datasets.dpo import DPODataset from nemo_rl.data.hf_datasets.helpsteer3 import HelpSteer3Dataset from nemo_rl.data.hf_datasets.oai_format_dataset import OpenAIFormatDataset @@ -23,6 +24,7 @@ PromptResponseDataset, ) from nemo_rl.data.hf_datasets.squad import SquadDataset +from nemo_rl.data.hf_datasets.tulu3 import Tulu3PreferenceDataset __all__ = [ "DPODataset", @@ -33,5 +35,7 @@ "PreferenceDataset", "PromptResponseDataset", "SquadDataset", + "Tulu3PreferenceDataset", "COMMON_CHAT_TEMPLATES", + "CLEVRCoGenTDataset", ] diff --git a/nemo_rl/data/hf_datasets/clevr.py b/nemo_rl/data/hf_datasets/clevr.py new file mode 100644 index 0000000000..b3281f586f --- /dev/null +++ b/nemo_rl/data/hf_datasets/clevr.py @@ -0,0 +1,141 @@ +## Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import io +from typing import Any, Optional + +from datasets import load_dataset +from PIL import Image + +from nemo_rl.data.interfaces import TaskDataSpec + + +def pil_to_base64(image: Image.Image, format: str = "PNG") -> str: + """Converts a PIL Image object to a base64 encoded string. + + Args: + image: The PIL Image object to convert. + format: The image format (e.g., "PNG", "JPEG"). Defaults to "PNG". 
+ + Returns: + A base64 encoded string representation of the image. + """ + buffered = io.BytesIO() + image.save(buffered, format=format) + img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") + return f"data:image/png;base64,{img_str}" + + +def format_answer_fromtags(answer: str) -> str: + """Extract content between tags and strip whitespace.""" + import re + + pattern = r"(.*?)" + match = re.search(pattern, answer) + ret = match.group(1).strip() if match else answer.strip() + return ret + + +def format_clevr_cogent_dataset( + example: dict[str, Any], return_pil: bool = False +) -> dict[str, Any]: + """Format the CLEVR-CoGenT dataset into an OpenAI-API-like message log.""" + user_content = [ + { + "type": "image", + "image": pil_to_base64(example["image"]) + if not return_pil + else example["image"], + }, + { + "type": "text", + "text": str(example["problem"]), + }, + ] + + assistant_content = format_answer_fromtags(str(example["solution"])) + + ret = { + "messages": [ + {"role": "user", "content": user_content}, + { + "role": "assistant", + "content": assistant_content, + }, + ], + "task_name": "clevr-cogent", + } + return ret + + +# contain different variants of the CLEVR dataset +def prepare_clevr_cogent_dataset( + split: str = "trainA", task_name: Optional[str] = None +): + if task_name is None: + task_name = "clevr-cogent" + + if split == "trainA": + tr_dataset = load_dataset("MMInstruction/Clevr_CoGenT_TrainA_70K_Complex")[ + "train" + ] + val_dataset = load_dataset("MMInstruction/Clevr_CoGenT_ValA")["train"] + elif split == "trainB": + tr_dataset = load_dataset("MMInstruction/Clevr_CoGenT_TrainA_70K_Complex")[ + "train" + ] + val_dataset = load_dataset("MMInstruction/Clevr_CoGenT_ValB")["train"] + elif split == "valA": + tr_dataset = load_dataset("MMInstruction/Clevr_CoGenT_ValA")["train"] + val_dataset = load_dataset("MMInstruction/Clevr_CoGenT_ValA")["train"] + elif split == "valB": + tr_dataset = 
load_dataset("MMInstruction/Clevr_CoGenT_ValB")["train"] + val_dataset = load_dataset("MMInstruction/Clevr_CoGenT_ValB")["train"] + + # format - disable features to avoid schema conflicts + tr_dataset = tr_dataset.add_column("task_name", [task_name] * len(tr_dataset)) + val_dataset = val_dataset.add_column("task_name", [task_name] * len(val_dataset)) + + return { + "train": tr_dataset, + "validation": val_dataset, + } + + +class CLEVRCoGenTDataset: + def __init__( + self, + split: str = "trainA", + prompt_file: Optional[str] = None, + ): + """Simple wrapper around the CLEVR-CoGenT dataset. + + Args: + split: The split of the dataset to use. + prompt_file: The file containing the prompt for the dataset. + """ + if split not in ["trainA", "trainB", "valA", "valB"]: + raise ValueError( + f"Invalid split: {split}. Please use 'trainA', 'trainB', 'valA', or 'valB'." + ) + self.task_name = "clevr-cogent" + + self.formatted_ds = prepare_clevr_cogent_dataset( + split=split, task_name=self.task_name + ) + self.task_spec = TaskDataSpec( + task_name="CLEVR", + prompt_file=prompt_file, + ) diff --git a/nemo_rl/data/hf_datasets/geometry3k.py b/nemo_rl/data/hf_datasets/geometry3k.py new file mode 100644 index 0000000000..b150147798 --- /dev/null +++ b/nemo_rl/data/hf_datasets/geometry3k.py @@ -0,0 +1,101 @@ +## Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any, Optional + +from datasets import load_dataset + +from nemo_rl.data.hf_datasets.clevr import pil_to_base64 +from nemo_rl.data.interfaces import TaskDataSpec + + +def format_geometry3k_dataset( + example: dict[str, Any], return_pil: bool = False +) -> dict[str, Any]: + """Format the Geometry3K dataset into an OpenAI-API-like message log.""" + # isolate single image + example["image"] = ( + example["images"][0] + if isinstance(example["images"], list) + else example["images"] + ) + + user_content = [ + { + "type": "image", + "image": pil_to_base64(example["image"]) + if not return_pil + else example["image"], + }, + { + "type": "text", + "text": str(example["problem"]).replace("", ""), + }, + ] + + assistant_content = str(example["answer"]) + + ret = { + "messages": [ + {"role": "user", "content": user_content}, + { + "role": "assistant", + "content": assistant_content, + }, + ], + "task_name": "geometry3k", + } + return ret + + +def prepare_geometry3k_dataset(split: str = "train", task_name: str = "geometry3k"): + if split == "train": + tr_dataset = load_dataset("hiyouga/geometry3k")["train"] + val_dataset = load_dataset("hiyouga/geometry3k")["validation"] + else: + tr_dataset = load_dataset("hiyouga/geometry3k")[split] + val_dataset = load_dataset("hiyouga/geometry3k")[split] + + # format - disable features to avoid schema conflicts + tr_dataset = tr_dataset.add_column("task_name", [task_name] * len(tr_dataset)) + val_dataset = val_dataset.add_column("task_name", [task_name] * len(val_dataset)) + return { + "train": tr_dataset, + "validation": val_dataset, + } + + +class Geometry3KDataset: + def __init__( + self, + split: str = "train", + prompt_file: Optional[str] = None, + ): + """Simple wrapper around the Geometry3K dataset. + + Args: + split: The split of the dataset to use. + prompt_file: The file containing the prompt for the dataset. + """ + assert split in ["train", "validation", "test"], ( + f"Invalid split: {split}. 
Please use 'train' or 'validation' or 'test'." + ) + self.task_name = "geometry3k" + + self.formatted_ds = prepare_geometry3k_dataset( + split=split, task_name=self.task_name + ) + self.task_spec = TaskDataSpec( + task_name="Geometry3K", + prompt_file=prompt_file, + ) diff --git a/nemo_rl/data/hf_datasets/oasst.py b/nemo_rl/data/hf_datasets/oasst.py index a0c19b6909..3ba044e452 100644 --- a/nemo_rl/data/hf_datasets/oasst.py +++ b/nemo_rl/data/hf_datasets/oasst.py @@ -123,8 +123,8 @@ def download_and_process_oasst( class OasstDataset: - def __init__(self, output_dir: str = ".") -> None: - self.formatted_ds = download_and_process_oasst(output_dir) + def __init__(self, output_dir: str = ".", seed: int = 42) -> None: + self.formatted_ds = download_and_process_oasst(output_dir, seed) self.task_spec = TaskDataSpec( task_name="OASST", ) diff --git a/nemo_rl/data/hf_datasets/refcoco.py b/nemo_rl/data/hf_datasets/refcoco.py new file mode 100644 index 0000000000..b5de9f6891 --- /dev/null +++ b/nemo_rl/data/hf_datasets/refcoco.py @@ -0,0 +1,262 @@ +## Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+import random
+import zipfile
+from pathlib import Path
+from typing import Any, Optional, Union
+
+import requests
+from datasets import load_dataset
+from PIL import Image
+from tqdm import tqdm  # Using tqdm for progress bar, install with: pip install tqdm
+
+from nemo_rl.data.hf_datasets.clevr import pil_to_base64
+from nemo_rl.data.interfaces import TaskDataSpec
+
+
+def download_and_unzip(url: str, target_directory: str, subdir_name: str = "."):
+    """Downloads a zip file from a given URL to a target directory and unzips it into a specified subdirectory within the target directory, showing download progress.
+
+    Args:
+        url (str): The URL of the zip file to download.
+        target_directory (str): The directory where the zip file will be downloaded
+            and unzipped.
+        subdir_name (str): The name of the subdirectory within the target_directory
+            where the contents of the zip file will be unzipped.
+            Defaults to ".".
+    """
+    if not os.path.exists(target_directory):
+        os.makedirs(target_directory)
+        print(f"Created target directory: {target_directory}")
+
+    # Extract filename from URL
+    filename = url.split("/")[-1]
+    filepath = os.path.join(target_directory, filename)
+
+    # Download the file with progress
+    if not os.path.exists(filepath):
+        print(f"Downloading {filename} from {url} to {filepath}...")
+        try:
+            with requests.get(url, stream=True) as r:
+                r.raise_for_status()
+                total_size_in_bytes = int(r.headers.get("content-length", 0))
+                block_size = 8192  # 8 Kibibytes
+
+                # Initialize tqdm progress bar
+                progress_bar = tqdm(
+                    total=total_size_in_bytes, unit="iB", unit_scale=True
+                )
+
+                with open(filepath, "wb") as f:
+                    for chunk in r.iter_content(chunk_size=block_size):
+                        progress_bar.update(len(chunk))
+                        f.write(chunk)
+                progress_bar.close()  # Close the progress bar
+
+            print(f"Download complete: {filepath}")
+        except requests.exceptions.RequestException as e:
+            raise requests.exceptions.RequestException(f"Error downloading file: {e}")
+    else:
+        print(f"File {filepath} already exists, skipping download.")
+
+    # Define the unzipping directory
+    unzip_dir = os.path.join(target_directory, subdir_name)
+    if not os.path.exists(unzip_dir):
+        os.makedirs(unzip_dir)
+        print(f"Created unzip directory: {unzip_dir}")
+
+    # Unzip the file
+    print(f"Unzipping {filepath} to {unzip_dir}...")
+    try:
+        with zipfile.ZipFile(filepath, "r") as zip_ref:
+            # You can add a progress bar for unzipping as well, but it's more complex
+            # as zipfile doesn't directly expose progress for extractall.
+            # For large files, consider iterating through namelist and extracting one by one.
+            zip_ref.extractall(unzip_dir)
+        print("Unzipping complete.")
+    except zipfile.BadZipFile:
+        raise zipfile.BadZipFile(f"Error: {filepath} is not a valid zip file.")
+    except Exception as e:
+        raise Exception(f"Error unzipping file: {e}")
+
+
+def format_refcoco_dataset(
+    example: dict[str, Any],
+    width: int = 256,
+    height: int = 256,
+    caption_type: str = "random",
+    prompt_file: Optional[str] = None,
+) -> dict[str, Any]:
+    """Format the RefCOCO dataset from huggingface.
+
+    This should be replaced with our own curated RefCOCO/+/g dataset soon
+
+    Args:
+        example: The example to format.
+        width: The width of the resized image.
+        height: The height of the resized image.
+        caption_type: The type of caption to use.
+ """ + split = example["split"] + if "val" in split: + caption_type = "descriptive" + + # resize image for easy image processing across batches + image = Image.open(example["image_path"]) + orig_width, orig_height = image.size + resized_image = image.resize((width, height)) + + # get caption from many types + if caption_type == "random": + caption = random.choice(example["captions"]) + elif caption_type == "first": + caption = example["captions"][0] + elif caption_type == "descriptive": # choose the most descriptive caption + caption = max(example["captions"], key=lambda x: len(x)) + elif caption_type == "brief": # choose the briefest caption + caption = min(example["captions"], key=lambda x: len(x)) + elif caption_type == "all": + caption = " or ".join(example["captions"]) + else: + raise ValueError(f"Invalid caption type: {caption_type}") + + # get normalized bounding box coordinates (top-left, bottom-right) + bbox = example["bbox"] + bbox = [ + bbox[0] / orig_width * 1000, + bbox[1] / orig_height * 1000, + bbox[2] / orig_width * 1000, + bbox[3] / orig_height * 1000, + ] + bbox = [int(round(coord)) for coord in bbox] + solution = f"[{bbox[0]}, {bbox[1]}, {bbox[2]}, {bbox[3]}]" + + user_content = [ + { + "type": "image", + "image": pil_to_base64(resized_image), + }, + { + "type": "text", + "text": f"Please provide the bounding box coordinate of the region described by the following phrase: {caption}", + }, + ] + + ret = { + "messages": [ + {"role": "user", "content": user_content}, + { + "role": "assistant", + "content": solution, + }, + ], + "task_name": "refcoco", + } + return ret + + +# contain different variants of the CLEVR dataset +def prepare_refcoco_dataset( + split: str = "default", + task_name: Optional[str] = None, + path_to_coco_images: Optional[Union[str, Path]] = None, +): + if task_name is None: + task_name = "refcoco" + + tr_dataset = load_dataset("jxu124/refcoco")["train"] + val_dataset = load_dataset("jxu124/refcoco")["validation"] + + # format 
- disable features to avoid schema conflicts + tr_dataset = tr_dataset.add_column("task_name", [task_name] * len(tr_dataset)) + val_dataset = val_dataset.add_column("task_name", [task_name] * len(val_dataset)) + + if path_to_coco_images is None: + print("No path to coco images provided, downloading images to ./coco_images") + path_to_coco_images = Path("./coco_images") + os.makedirs(path_to_coco_images, exist_ok=True) + else: + path_to_coco_images = Path(path_to_coco_images) + + # check for images + if not os.path.exists(str(path_to_coco_images / "train2014")): + print(f"Downloading train2014 images to {path_to_coco_images}") + download_and_unzip( + "http://images.cocodataset.org/zips/train2014.zip", str(path_to_coco_images) + ) + if not os.path.exists(str(path_to_coco_images / "val2014")): + print(f"Downloading val2014 images to {path_to_coco_images}") + download_and_unzip( + "http://images.cocodataset.org/zips/val2014.zip", str(path_to_coco_images) + ) + + # add image column + tr_dataset = tr_dataset.map( + lambda example: { + **example, + "image_path": str(example["image_path"]).replace( + "coco/", str(path_to_coco_images) + "/" + ) + if "image_path" in example + else example.get("image_path"), + } + ) + val_dataset = val_dataset.map( + lambda example: { + **example, + "image_path": str(example["image_path"]).replace( + "coco/", str(path_to_coco_images) + "/" + ) + if "image_path" in example + else example.get("image_path"), + } + ) + + return { + "train": tr_dataset, + "validation": val_dataset, + } + + +class RefCOCODataset: + def __init__( + self, + split: str = "default", + prompt_file: Optional[str] = None, + download_dir: Optional[str] = None, + ): + """Simple wrapper around the RefCOCO dataset. + + Args: + split: The split of the dataset to use (currently only 'default' is supported) + prompt_file: The file containing the prompt for the dataset. 
+ """ + VALID_SPLITS = ["default"] + if split not in VALID_SPLITS: + raise ValueError( + f"Invalid split: {split}. Please use one of {VALID_SPLITS}." + ) + self.task_name = "refcoco" + + self.formatted_ds = prepare_refcoco_dataset( + split=split, + task_name=self.task_name, + path_to_coco_images=download_dir, + ) + self.task_spec = TaskDataSpec( + task_name="RefCOCO", + prompt_file=prompt_file, + ) diff --git a/nemo_rl/data/hf_datasets/tulu3.py b/nemo_rl/data/hf_datasets/tulu3.py new file mode 100644 index 0000000000..ab3fa62623 --- /dev/null +++ b/nemo_rl/data/hf_datasets/tulu3.py @@ -0,0 +1,67 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from typing import Any + +from datasets import load_dataset + +from nemo_rl.data.interfaces import TaskDataSpec + + +def format_tulu3_preference(data: dict[str, Any]) -> dict[str, str | dict[str, str]]: + chosen_conversation = data["chosen"] + rejected_conversation = data["rejected"] + + context = chosen_conversation[:-1] + + # We assume that except last assistant response, all messages in + # chosen and rejected conversations are similar. Validating this... + assert json.dumps(context, ensure_ascii=False) == json.dumps( + rejected_conversation[:-1], ensure_ascii=False + ), ( + f"Context mismatch.\n\nchosen: {chosen_conversation}\n\n rejected: {rejected_conversation}" + ) + + # We assume that last response is always from the assistant. 
Validating this... + assert chosen_conversation[-1]["role"] == "assistant", ( + f"The last chosen response ({chosen_conversation[-1]}) is not from assistant!" + ) + assert rejected_conversation[-1]["role"] == "assistant", ( + f"The last rejected response ({rejected_conversation[-1]}) is not from assistant!" + ) + + chosen_response = chosen_conversation[-1]["content"] + rejected_response = rejected_conversation[-1]["content"] + + return { + "prompt": context, + "chosen_response": chosen_response, + "rejected_response": rejected_response, + } + + +class Tulu3PreferenceDataset: + """Tulu3 preference dataset for DPO training.""" + + def __init__(self) -> None: + ds = load_dataset( + path="allenai/llama-3.1-tulu-3-8b-preference-mixture", + trust_remote_code=True, + ) + self.formatted_ds = ds.map(format_tulu3_preference) + + self.task_spec = TaskDataSpec( + task_name="Tulu3Preference", + ) diff --git a/nemo_rl/data/llm_message_utils.py b/nemo_rl/data/llm_message_utils.py index c9563f1afd..ee3fdb9bbf 100644 --- a/nemo_rl/data/llm_message_utils.py +++ b/nemo_rl/data/llm_message_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import warnings -from typing import Any, Optional, cast +from typing import Any, Optional, Union, cast import torch from datasets import Dataset @@ -23,6 +23,10 @@ LLMMessageLogType, TaskDataSpec, ) +from nemo_rl.data.multimodal_utils import ( + PackedTensor, + get_multimodal_keys_from_processor, +) from nemo_rl.distributed.batched_data_dict import BatchedDataDict Tensor = torch.Tensor @@ -60,6 +64,19 @@ def message_log_to_flat_messages( ['Hello', 'Hi there'] >>> flat_msgs['token_ids'] tensor([1, 2, 3, 4, 5, 6, 7]) + >>> + >>> # Multimodal example: + >>> from nemo_rl.data.multimodal_utils import PackedTensor + >>> img1 = torch.randn(2, 3, 4, 4) + >>> img2 = torch.randn(3, 3, 4, 4) + >>> mm_log = [ + ... 
{'role': 'user', 'content': 'see', 'token_ids': torch.tensor([1]), 'images': PackedTensor(img1, dim_to_pack=0)}, + ... {'role': 'assistant', 'content': 'ok', 'token_ids': torch.tensor([2, 3]), 'images': PackedTensor(img2, dim_to_pack=0)}, + ... ] + >>> flat_mm = message_log_to_flat_messages(mm_log) + >>> tuple(flat_mm['images'].as_tensor().shape) + (5, 3, 4, 4) + >>> ``` """ result: dict[str, list[Any]] = {} @@ -94,6 +111,14 @@ def message_log_to_flat_messages( f"tensors for {key=} must have same number of dimensions: {[t.shape for t in result[key]]}" ) from e raise + elif result[key] and isinstance(result[key][0], PackedTensor): + try: + concat[key] = PackedTensor.concat(result[key]) + except Exception as e: + raise RuntimeError( + f"Error concatenating packed multimodal data for {key=}" + ) from e + output: FlatMessagesType = {**result, **concat} return output @@ -264,6 +289,26 @@ def batched_message_log_to_flat_message( >>> input_lengths tensor([7, 9], dtype=torch.int32) >>> + >>> # Multimodal example: include images on both conversations and verify packing + >>> from nemo_rl.data.multimodal_utils import PackedTensor + >>> mm_batch = [ + ... [ + ... {'role': 'user', 'content': 'look', 'token_ids': torch.tensor([1, 2, 3]), 'images': PackedTensor(torch.randn(2, 3, 4, 4), dim_to_pack=0)}, + ... {'role': 'assistant', 'content': 'ok', 'token_ids': torch.tensor([4])} + ... ], + ... [ + ... {'role': 'user', 'content': 'again', 'token_ids': torch.tensor([5, 6]), 'images': PackedTensor(torch.randn(1, 3, 4, 4), dim_to_pack=0)}, + ... {'role': 'assistant', 'content': 'fine', 'token_ids': torch.tensor([7, 8])} + ... ] + ... 
] + >>> mm_flat, mm_lengths = batched_message_log_to_flat_message(mm_batch, pad_value_dict={'token_ids': 0}) + >>> isinstance(mm_flat['images'], PackedTensor) + True + >>> tuple(mm_flat['images'].as_tensor().shape) # 2 + 1 images + (3, 3, 4, 4) + >>> mm_lengths + tensor([4, 4], dtype=torch.int32) + >>> ``` """ if not message_log_batch: @@ -276,6 +321,7 @@ def batched_message_log_to_flat_message( # Find max length and identify tensor keys max_len = 0 tensor_keys = [] + multimodal_keys = [] for seq in sequenced_lists: for key, value in seq.items(): if isinstance(value, Tensor): @@ -313,6 +359,10 @@ def batched_message_log_to_flat_message( result = BatchedDataDict() for key in all_keys: values = [seq.get(key) for seq in sequenced_lists] + # if the values are PackedTensors, create a new PackedTensor from the list of values + if values and isinstance(values[0], PackedTensor): + result[key] = PackedTensor.flattened_concat(values) + continue if not values or not isinstance(values[0], Tensor): result[key] = values continue @@ -372,6 +422,20 @@ def get_first_index_that_differs(str1: str, str2: str) -> int: return min(len(str1), len(str2)) +def get_images_from_message(message: dict[str, Any]) -> list[Any]: + """Get all images from a message log item.""" + if isinstance(message["content"], str): + return [] + # iterate over the content list + images = [] + for item in message["content"]: + if item["type"] == "image": + images.extend(list(item["image"])) if isinstance( + item["image"], (list, tuple) + ) else images.append(item["image"]) + return images + + def get_formatted_message_log( message_log: LLMMessageLogType, tokenizer: TokenizerType, @@ -399,13 +463,61 @@ def get_formatted_message_log( list[dict[str, str]], message_log ) # we just use the str:str parts here + multimodal_keys = get_multimodal_keys_from_processor(tokenizer) + + def _format_content_helper( + content: Union[str, list[dict[str, Any]]], + ) -> Union[str, list[dict[str, Any]]]: + """This function formats 
the text portion of the first user message with the task prompt. + + The `content` argument could either be a string (user text prompt) or a dict (user text prompt + multimodal data). + + Examples of `content` argument include strings or dicts from the following conversation turns: + - {"role": "user", "content": "What is the capital of France?"} + - {"role": "user", "content": [{"type": "text", "text": "What is the capital of the city in the image?"}, {"type": "image", "image": "path/to/image.jpg"}]} + - {"role": "user", "content": [{"type": "text", "text": "Does the animal in the image match the sound it makes in the audio?"}, {"type": "image", "image": "path/to/image.jpg"}, {"type": "audio", "audio": "path/to/audio.mp3"}]} + + In all cases, the text portion of the message is formatted with the task prompt. + + Previously, the `content` argument was modified using + >>> message_log_strs = [ + ... { + ... "role": "user", + ... "content": task_data_spec.prompt.format(message_log_strs[0]["content"]), + ... } + ... ] + message_log_strs[1:] + >>> + + which assumes that the first message is a string (not true for multimodal data). This helper function correctly handles all cases. 
+        """
+        if isinstance(content, str):
+            return task_data_spec.prompt.format(content)
+        # this is a list of dicts, format only the text ones
+        for item in content:
+            if item["type"] == "text":
+                item["text"] = task_data_spec.prompt.format(item["text"])
+        return content
+
+    # ignore any system prompts
+    first_user_msg_id = 0
+    for i, msg in enumerate(message_log_strs):
+        if msg["role"] == "user":
+            first_user_msg_id = i
+            break
+
     if task_data_spec.prompt:
-        message_log_strs = [
-            {
-                "role": "user",
-                "content": task_data_spec.prompt.format(message_log_strs[0]["content"]),
-            }
-        ] + message_log_strs[1:]
+        message_log_strs = (
+            message_log_strs[:first_user_msg_id]
+            + [
+                {
+                    "role": "user",
+                    "content": _format_content_helper(
+                        message_log_strs[first_user_msg_id]["content"]
+                    ),
+                }
+            ]
+            + message_log_strs[first_user_msg_id + 1 :]
+        )
 
     for i, message in enumerate(message_log_strs):
         # If enabled, add_generation_prompt is only used on user messages to include
@@ -436,28 +548,70 @@
             message_chunk = tokenizer.bos_token + message_chunk
 
         if i == len(message_log_strs) - 1:
-            message_chunk = message_chunk.rstrip("\n")
+            r"""
+            This is an attempt to robustly append the eos token. The origin is Qwen
+            chat templates always append <eos>\n and some models like gemma do not
+            use the <eos> at all in the chat template. Adding a <eos> if the <eos> is
+            already at the end, is likely a user error, and since we know Qwen likes to
+            have <eos>\n we'll check for that case.
+
+            This makes the logic slightly more robust to the model family's chat template
+            so users don't need to know whether they need to add add_eos or not.
+            """
+            stripped_message_chunk = message_chunk.rstrip("\n")
             if add_eos_token:
                 if tokenizer.eos_token is None:
                     warnings.warn(
                         "add_eos_token is True but the tokenizer does not have an EOS token. Skipping EOS token addition."
) - elif not message_chunk.endswith(tokenizer.eos_token): + elif not stripped_message_chunk.endswith(tokenizer.eos_token): message_chunk += tokenizer.eos_token + # get images too (extend this for other modalities) + images_cur_message = get_images_from_message(message) + new_message = message.copy() - new_message["token_ids"] = tokenizer( - message_chunk, return_tensors="pt", add_special_tokens=False - )["input_ids"][0] + # extend this if statement to check for all(len(modality)) == 0 when adding other modalities + if len(images_cur_message) == 0: + new_message["token_ids"] = tokenizer( + text=message_chunk, return_tensors="pt", add_special_tokens=False + )["input_ids"][0] + else: + # extend the else statement to add other modalities (in this case, tokenizer will be a processor) + processed_chunk = tokenizer( + text=[message_chunk], + images=images_cur_message, + return_tensors="pt", + add_special_tokens=False, + ) + new_message["token_ids"] = processed_chunk["input_ids"][0] + + # add all vlm keys to the message + for key in multimodal_keys: + if key in processed_chunk: + new_message[key] = PackedTensor(processed_chunk[key], dim_to_pack=0) + if len(new_message["token_ids"]) == 0: # if there is an empty message, the empty `token_ids` tensor ends up being in fp32, # which causes `_validate_tensor_consistency` to fail. To fix this, we convert the # empty tensor to int64. 
new_message["token_ids"] = new_message["token_ids"].to(torch.int64) # type: ignore - new_message["content"] = message_chunk - new_message_log.append(new_message) + # format content correctly + if isinstance(message["content"], str): + new_message["content"] = message_chunk + else: + # format the content list of new message the same way as the original message but replace the text with the new message chunk + new_message["content"] = [] + for item in message["content"]: + if item["type"] == "text": + new_message["content"].append( + {"type": "text", "text": message_chunk} + ) + else: + new_message["content"].append(item) + new_message_log.append(new_message) prev_formatted_message = formatted_message return new_message_log diff --git a/nemo_rl/data/multimodal_utils.py b/nemo_rl/data/multimodal_utils.py new file mode 100644 index 0000000000..74e5a73a8c --- /dev/null +++ b/nemo_rl/data/multimodal_utils.py @@ -0,0 +1,163 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Union + +import torch +from transformers import PreTrainedTokenizerBase + + +class PackedTensor: + """Wrapper around a list of torch tensors and a dimension along which to pack the tensors. + + This class is used to wrap a list of tensors along with a `dim_to_pack` parameter. + It can be used for data that can be packed along different dimensions (such as multimodal data). 
+ + `dim_to_pack` is used to specify the dimension along which to pack the tensors. + + The list of tensors can be returned as a single packed tensor by calling `as_tensor` which will concatenate the tensors along the `dim_to_pack` dimension. + """ + + def __init__( + self, tensors: Union[torch.Tensor, list[torch.Tensor]], dim_to_pack: int + ) -> None: + assert tensors is not None, "Input tensors to PackedTensor cannot be None" + + if isinstance(tensors, torch.Tensor): + self.tensors: list[torch.Tensor] = [tensors] + elif isinstance(tensors, list): + assert len(tensors) > 0, ( + "Input tensors to PackedTensor must be a non-empty list" + ) + self.tensors: list[torch.Tensor] = tensors + else: + raise ValueError( + f"Unsupported type for input tensors to PackedTensor: {type(tensors)}" + ) + self.dim_to_pack = dim_to_pack + + def as_tensor(self, device: Optional[torch.device] = None) -> torch.Tensor: + if device is not None: + self.tensors = [item.to(device) for item in self.tensors] + return torch.cat(self.tensors, dim=self.dim_to_pack).to(device) + + def __len__(self) -> int: + # this is the number of tensors in this data wrapper + return len(self.tensors) + + def to(self, device: str | torch.device) -> "PackedTensor": + self.tensors = [item.to(device) for item in self.tensors] + return self + + def slice(self, indices: Union[list[int], torch.Tensor]) -> "PackedTensor": + idx = indices.tolist() if isinstance(indices, torch.Tensor) else indices + tensors = [self.tensors[i] for i in idx] + return PackedTensor(tensors, self.dim_to_pack) + + @classmethod + def concat(cls, from_packed_tensors: list["PackedTensor"]) -> "PackedTensor": + """Concatenate a list of PackedTensor objects into a single PackedTensor. + + The underlying tensors from the PackedTensors are combined into a single list of tensors and used to create a new PackedTensor. + + Each batch must have the same dim_to_pack. 
+ + Example: + ```{doctest} + >>> import torch + >>> from nemo_rl.data.multimodal_utils import PackedTensor + >>> p1 = PackedTensor([torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6])], dim_to_pack=0) + >>> p2 = PackedTensor([torch.tensor([7, 8, 9])], dim_to_pack=0) + >>> p3 = PackedTensor.concat([p1, p2]) + >>> p3.tensors + [tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])] + >>> p3.as_tensor() + tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> + ``` + """ + dim_to_packs = [batch.dim_to_pack for batch in from_packed_tensors] + assert len(set(dim_to_packs)) == 1, ( + "All packed tensors must have the same dim_to_pack" + ) + # concatenate the tensors + tensors = [] + for packed_tensor in from_packed_tensors: + tensors.extend(packed_tensor.tensors) + dim_to_pack = dim_to_packs[0] + return cls(tensors, dim_to_pack) + + @classmethod + def flattened_concat( + cls, from_packed_tensors: list["PackedTensor"] + ) -> "PackedTensor": + """Given a list of PackedTensor objects, flattens each PackedTensor and then concatenates them into a single PackedTensor. + + Each PackedTensor is first flattened by packing along the PackedTensor's `dim_to_pack` dimension. Then, the resulting flattened tensors are used to create a new PackedTensor. + + This is different from `PackedTensor.concat` which simply extends the underlying list of tensors. This is important because the `slice` and `__len__` methods operate on the underlying list of tensors. Note, however, that calling `as_tensor` on the resulting PackedTensor will result in the same tensor as `concat`. + + Each batch must have the same dim_to_pack. 
+ + Example: + ```{doctest} + >>> import torch + >>> from nemo_rl.data.multimodal_utils import PackedTensor + >>> p1 = PackedTensor([torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6])], dim_to_pack=0) + >>> p2 = PackedTensor([torch.tensor([7, 8, 9])], dim_to_pack=0) + >>> p3 = PackedTensor.flattened_concat([p1, p2]) + >>> p3.tensors + [tensor([1, 2, 3, 4, 5, 6]), tensor([7, 8, 9])] + >>> p3.as_tensor() + tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> + ``` + """ + dim_to_packs = [batch.dim_to_pack for batch in from_packed_tensors] + assert len(set(dim_to_packs)) == 1, ( + "All packed tensors must have the same dim_to_pack" + ) + tensors = [p.as_tensor() for p in from_packed_tensors] + return cls(tensors, from_packed_tensors[0].dim_to_pack) + + +def get_multimodal_keys_from_processor(processor) -> list[str]: + """Get keys of the multimodal data that can be used as model inputs. + + This will be used in the data_processor function to determine which keys to use as model inputs. + """ + if isinstance(processor, PreTrainedTokenizerBase): + return [] + + all_keys = set() + if hasattr(processor, "image_processor"): + all_keys.update(processor.image_processor.model_input_names) + if hasattr(processor, "video_processor"): + all_keys.update(processor.video_processor.model_input_names) + if hasattr(processor, "feature_extractor"): + all_keys.update(processor.feature_extractor.model_input_names) + # all_keys.update(processor.model_input_names) + all_keys.difference_update(set(processor.tokenizer.model_input_names)) + return list(all_keys) + + +def get_dim_to_pack_along(processor, key: str) -> int: + """Special considerations for packing certain keys from certain processors. 
+ + In most cases, the packed items are along dim 0 + """ + if processor.__class__.__name__ == "SmolVLMProcessor": + return 1 + # return zero by default + return 0 diff --git a/nemo_rl/data/processors.py b/nemo_rl/data/processors.py index 67e3658882..0e1c811cf7 100644 --- a/nemo_rl/data/processors.py +++ b/nemo_rl/data/processors.py @@ -64,7 +64,9 @@ def math_data_processor( add_generation_prompt=True, add_special_tokens=False, ) - user_message["token_ids"] = tokenizer(message, return_tensors="pt")["input_ids"][0] + user_message["token_ids"] = tokenizer( + message, return_tensors="pt", add_special_tokens=False + )["input_ids"][0] user_message["content"] = message message_log.append(user_message) diff --git a/nemo_rl/distributed/batched_data_dict.py b/nemo_rl/distributed/batched_data_dict.py index dc75b39cf0..4d4187a46b 100644 --- a/nemo_rl/distributed/batched_data_dict.py +++ b/nemo_rl/distributed/batched_data_dict.py @@ -29,6 +29,9 @@ import torch from typing_extensions import Self +from nemo_rl.data.multimodal_utils import ( + PackedTensor, +) from nemo_rl.data.packing import get_packer from nemo_rl.distributed.collectives import ( gather_jagged_object_lists, @@ -70,6 +73,11 @@ class DynamicBatchingArgs(TypedDict): class BatchedDataDict(UserDict, Generic[DictT]): + # keys that are model specific, but not part of the PackedTensor + ADDITIONAL_OPTIONAL_KEY_TENSORS = [ + "token_type_ids", # specific to gemma3 that tells where the image tokens are in the sequence, not required for llm-only inference/training + ] + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -77,6 +85,19 @@ def __init__(self, *args, **kwargs): self.micro_batch_lengths = None self.elem_counts_per_gb = None + def get_multimodal_dict( + self, as_tensors: bool = False, device: Optional[torch.device] = None + ) -> dict[str, Any]: + """Return a regular dict of tensors or packed multimodal data items.""" + multimodal_dict = {} + for k, v in self.data.items(): + if isinstance(v, 
PackedTensor): + multimodal_dict[k] = v.as_tensor(device=device) if as_tensors else v + elif k in self.ADDITIONAL_OPTIONAL_KEY_TENSORS: + multimodal_dict[k] = v + + return multimodal_dict + @classmethod def from_batches( cls: Type[Self], @@ -104,6 +125,8 @@ def from_batches( tensor_or_list: list[Any] | torch.Tensor = [ item for sublist in list_of_tensors for item in sublist ] + elif isinstance(list_of_tensors[0], PackedTensor): + tensor_or_list = PackedTensor.flattened_concat(list_of_tensors) elif all(x.ndim == 1 for x in list_of_tensors): tensor_or_list = torch.cat(list_of_tensors) elif isinstance(list_of_tensors[0], torch.Tensor): @@ -183,6 +206,8 @@ def chunk(self, rank: int, chunks: int) -> "SlicedDataDict": for k in self.data: if torch.is_tensor(self.data[k]): chunked_batch[k] = self.data[k][indices].clone() + elif isinstance(self.data[k], PackedTensor): + chunked_batch[k] = self.data[k].slice(indices) else: chunked_batch[k] = [self.data[k][i] for i in indices] @@ -212,6 +237,8 @@ def reorder_data(self, reorded_indices: list[int]): sorted_v = v.index_select( dim=0, index=torch.IntTensor(reordered_indices) ) + elif isinstance(v, PackedTensor): + sorted_v = v.slice(reordered_indices) else: sorted_v = [v[i] for i in reordered_indices] self.data[k] = sorted_v @@ -315,7 +342,8 @@ def shard_by_batch_size( batch_sizes.add(len(val)) assert len(batch_sizes) == 1, ( - "Batch sizes are not the same across the rollout batch" + "Batch sizes are not the same across the rollout batch, found sizes: " + + f"[{','.join(str(size) for size in batch_sizes)}]" ) total_batch_size = batch_sizes.pop() if batch_size is None: @@ -365,11 +393,13 @@ def shard_by_batch_size( # finally reorder the data along the sorted sequence len indices for k, v in self.data.items(): - sorted_v: torch.Tensor | list[Any] + sorted_v: torch.Tensor | list[Any] | PackedTensor if torch.is_tensor(v): sorted_v = v.index_select( dim=0, index=torch.IntTensor(batch_sorted_indices) ) + elif isinstance(v, 
PackedTensor): + sorted_v = v.slice(batch_sorted_indices) else: sorted_v = [v[i] for i in batch_sorted_indices] data[k] = sorted_v @@ -505,6 +535,10 @@ def _get_padded_seqlen(seqlen: int) -> int: # First time seeing this key for this shard, initialize it if torch.is_tensor(data[k]): aggregated_shards[shard_idx][k] = data[k][indices].clone() + elif isinstance(data[k], PackedTensor): + aggregated_shards[shard_idx][k] = data[k].slice( + indices.tolist() + ) else: aggregated_shards[shard_idx][k] = [ data[k][i] for i in indices @@ -518,6 +552,13 @@ def _get_padded_seqlen(seqlen: int) -> int: data[k][indices].clone(), ] ) + elif isinstance(data[k], PackedTensor): + aggregated_shards[shard_idx][k] = PackedTensor.concat( + [ + aggregated_shards[shard_idx][k], + data[k].slice(indices.tolist()), + ] + ) else: aggregated_shards[shard_idx][k].extend( [data[k][i] for i in indices] @@ -648,6 +689,10 @@ def slice(self, start: int, end: int) -> "SlicedDataDict": """ sliced_batch = SlicedDataDict() for k in self.data: + if isinstance(self.data[k], PackedTensor): + sliced_batch[k] = self.data[k].slice(list(range(start, end))) + continue + if isinstance(self.data[k], torch.Tensor): assert end <= self.data[k].shape[0], ( f"end: {end} is greater than the shape of the tensor: {self.data[k].shape[0]} for key: {k}" @@ -667,6 +712,10 @@ def repeat_interleave(self, num_repeats: int) -> Self: if torch.is_tensor(v): # For tensors, use repeat_interleave to repeat each element repeated_batch[k] = v.repeat_interleave(num_repeats, dim=0) + elif isinstance(v, PackedTensor): + raise NotImplementedError( + "PackedTensor does not currently support repeat_interleave" + ) else: # For lists or other sequences, use a list comprehension to repeat each element repeated_batch[k] = [ @@ -757,6 +806,8 @@ def to(self, device: str | torch.device) -> Self: for k, v in self.data.items(): if torch.is_tensor(v): self.data[k] = v.to(device) + elif isinstance(v, PackedTensor): + self.data[k] = v.to(device) return 
self def select_indices(self, indices: Union[list[int], torch.Tensor]) -> Self: @@ -772,6 +823,8 @@ def select_indices(self, indices: Union[list[int], torch.Tensor]) -> Self: for k, v in self.data.items(): if torch.is_tensor(v): selected_batch[k] = v[indices] + elif isinstance(v, PackedTensor): + selected_batch[k] = v.slice(indices) elif isinstance(v, list): selected_batch[k] = [v[i] for i in indices] else: diff --git a/nemo_rl/distributed/model_utils.py b/nemo_rl/distributed/model_utils.py index 5b6a2d57f2..8d915eeaa9 100644 --- a/nemo_rl/distributed/model_utils.py +++ b/nemo_rl/distributed/model_utils.py @@ -77,11 +77,10 @@ def forward( # pyrefly: ignore[bad-override] Always ignore torch.autograd.Func masked_target = target - vocab_start_index masked_target[target_mask] = 0 - log_softmax_output = _compute_distributed_log_softmax( - vocab_parallel_logits, group=group - ) - log_probs = log_softmax_output.clone() - softmax_output = log_softmax_output.exp_() + vocab_parallel_logits = vocab_parallel_logits.to(dtype=torch.float32) + + log_probs = _compute_distributed_log_softmax(vocab_parallel_logits, group=group) + softmax_output = log_probs.exp() log_probs = torch.gather(log_probs, -1, masked_target.unsqueeze(-1)).squeeze(-1) log_probs[target_mask] = 0.0 @@ -141,6 +140,121 @@ def backward( return grad_input, None, None, None, None, None, None +class ChunkedDistributedLogprob(torch.autograd.Function): + """Custom autograd function for computing log probabilities in a distributed setting. + + The log probabilities computation is chunked in the sequence dimension + to mitigate GPU OOM (especially during backward pass). + In addition, logits casting from float16 or bfloat16 -> float32 is performed + inside the chunk loop to avoid materializing a whole float32 logits tensor. 
+ + Adapted from https://github.com/NVIDIA/NeMo-Aligner/blob/9faab404f21994a7eb1d6ed5890b76152b941636/nemo_aligner/utils/distributed.py#L286 + """ + + @staticmethod + def forward( # pyrefly: ignore[bad-override] Always ignore torch.autograd.Function.forward's type since it's always more specific than the base class + ctx: Any, + vocab_parallel_logits: torch.Tensor, + target: torch.Tensor, + vocab_start_index: int, + vocab_end_index: int, + chunk_size: int, + tp_group: torch.distributed.ProcessGroup, + inference_only: bool = False, + ) -> torch.Tensor: + # Create a mask of valid vocab ids (1 means it needs to be masked). + target_mask = (target < vocab_start_index) | (target >= vocab_end_index) + masked_target = target - vocab_start_index + masked_target[target_mask] = 0 + + seq_size = int(vocab_parallel_logits.shape[1]) + num_chunks = (seq_size + chunk_size - 1) // chunk_size + all_log_probs = [] + + for chunk_idx in range(num_chunks): + chunk_start = chunk_idx * chunk_size + chunk_end = min(seq_size, (chunk_idx + 1) * chunk_size) + + logits = vocab_parallel_logits[:, chunk_start:chunk_end, :] + logits = logits.to(dtype=torch.float32) + + log_probs = _compute_distributed_log_softmax( + logits, + group=tp_group, + ) + + log_probs = torch.gather( + log_probs, -1, masked_target[:, chunk_start:chunk_end].unsqueeze(-1) + ).squeeze(-1) + log_probs[target_mask[:, chunk_start:chunk_end]] = 0.0 + + torch.distributed.all_reduce( + log_probs, + op=torch.distributed.ReduceOp.SUM, + group=tp_group, + ) + + all_log_probs.append(log_probs) + + log_probs = torch.cat(all_log_probs, dim=1) + + if not inference_only: + # only save for backward when we have inference only=False + ctx.save_for_backward(vocab_parallel_logits, target_mask, masked_target) + ctx.chunk_size = chunk_size + ctx.tp_group = tp_group + + return log_probs + + @staticmethod + def backward( + ctx: Any, + *grad_outputs: torch.Tensor, + ) -> tuple[torch.Tensor, None, None, None, None, None, None]: + grad_output = 
grad_outputs[0] + vocab_parallel_logits, target_mask, masked_target = ctx.saved_tensors + chunk_size = ctx.chunk_size + tp_group = ctx.tp_group + + partition_vocab_size = int(vocab_parallel_logits.shape[-1]) + seq_size = int(vocab_parallel_logits.shape[1]) + num_chunks = (seq_size + chunk_size - 1) // chunk_size + + all_grad_input = [] + + for chunk_idx in range(num_chunks): + chunk_start = chunk_idx * chunk_size + chunk_end = min(seq_size, (chunk_idx + 1) * chunk_size) + + logits = vocab_parallel_logits[:, chunk_start:chunk_end, :] + logits = logits.to(dtype=torch.float32) + + softmax_output = _compute_distributed_log_softmax( + logits, + group=tp_group, + ) + softmax_output = softmax_output.exp() + + # 1 if it's the chosen log prob, 0 otherwise + is_chosen = (~(target_mask[:, chunk_start:chunk_end])).unsqueeze( + -1 + ) * torch.nn.functional.one_hot( + masked_target[:, chunk_start:chunk_end], + num_classes=partition_vocab_size, + ) + + grad_input = is_chosen.float().sub_(softmax_output) + + grad_input.mul_(grad_output[:, chunk_start:chunk_end].unsqueeze(dim=-1)) + + all_grad_input.append(grad_input) + + grad_input = torch.cat(all_grad_input, dim=1) + + # if you add an argument to the forward method, then you must add a corresponding None here + return grad_input, None, None, None, None, None, None + + def dtensor_from_parallel_logits_to_logprobs( vocab_parallel_logits: torch.Tensor, target: DTensor | torch.Tensor, @@ -149,6 +263,7 @@ def dtensor_from_parallel_logits_to_logprobs( tp_group: torch.distributed.ProcessGroup, inference_only: bool = False, seq_index: Optional[torch.Tensor] = None, + chunk_size: Optional[int] = None, ) -> torch.Tensor: """Get log probabilities from TP+CP sharded vocab logits. @@ -163,6 +278,7 @@ def dtensor_from_parallel_logits_to_logprobs( inference_only (bool, optional): If True, tensors won't be saved for backward pass. Defaults to False. seq_index (Optional[torch.Tensor]): Sequence index tensor with shape [seq_len]. 
It is only provided for cp sharded logits. It represents how tensor is sharded across the sequence dimension. + chunk_size (Optional[int]): Sequence dimension chunk size for computing the log probabilities. Returns: torch.Tensor: Log probabilities tensor with shape [batch_size, seq_len-1]. @@ -194,23 +310,34 @@ def dtensor_from_parallel_logits_to_logprobs( else: target = target.roll(shifts=-1, dims=-1) - probs: torch.Tensor = DistributedLogprob.apply( # type: ignore - vocab_parallel_logits, - target, - vocab_start_index, - vocab_end_index, - tp_group, - inference_only, - ).contiguous() + if chunk_size is not None: + logprobs: torch.Tensor = ChunkedDistributedLogprob.apply( # type: ignore + vocab_parallel_logits, + target, + vocab_start_index, + vocab_end_index, + chunk_size, + tp_group, + inference_only, + ).contiguous() + else: + logprobs: torch.Tensor = DistributedLogprob.apply( # type: ignore + vocab_parallel_logits, + target, + vocab_start_index, + vocab_end_index, + tp_group, + inference_only, + ).contiguous() if cp_size > 1: - # probs is sharded on the sequence dimension. + # logprobs is sharded on the sequence dimension. # Get full sequence tensor, vocab dim has been reduced already. - probs_dtensor = DTensor.from_local(probs, cp_mesh, cp_placements) - probs = probs_dtensor.full_tensor()[:, sorted_indices] - assert probs.shape == target_shape + logprobs_dtensor = DTensor.from_local(logprobs, cp_mesh, cp_placements) + logprobs = logprobs_dtensor.full_tensor()[:, sorted_indices] + assert logprobs.shape == target_shape - return probs[:, :-1] + return logprobs[:, :-1] def from_parallel_logits_to_logprobs( @@ -221,6 +348,7 @@ def from_parallel_logits_to_logprobs( tp_group: torch.distributed.ProcessGroup, inference_only: bool = False, cp_group: Optional[torch.distributed.ProcessGroup] = None, + chunk_size: Optional[int] = None, ) -> torch.Tensor: """Get log probabilities from TP+CP sharded vocab logits. 
@@ -234,6 +362,7 @@ def from_parallel_logits_to_logprobs( tp_group (torch.distributed.ProcessGroup): Process group for distributed communication. inference_only (bool, optional): If True, tensors won't be saved for backward pass. Defaults to False. cp_group (torch.distributed.ProcessGroup, optional): Context parallelism process group. Defaults to None. + chunk_size (int, optional): Sequence dimension chunk size for computing the log probabilities. Returns: torch.Tensor: Log probabilities tensor with shape [batch_size, seq_len-1]. @@ -254,25 +383,36 @@ def from_parallel_logits_to_logprobs( cp_rank = torch.distributed.get_rank(cp_group) target = _get_tokens_on_this_cp_rank(target, cp_rank, cp_size, seq_dim=1) - probs: torch.Tensor = DistributedLogprob.apply( # type: ignore - vocab_parallel_logits, - target, - vocab_start_index, - vocab_end_index, - tp_group, - inference_only, - ).contiguous() + if chunk_size is not None: + logprobs: torch.Tensor = ChunkedDistributedLogprob.apply( # type: ignore + vocab_parallel_logits, + target, + vocab_start_index, + vocab_end_index, + chunk_size, + tp_group, + inference_only, + ).contiguous() + else: + logprobs: torch.Tensor = DistributedLogprob.apply( # type: ignore + vocab_parallel_logits, + target, + vocab_start_index, + vocab_end_index, + tp_group, + inference_only, + ).contiguous() if cp_size > 1: # we need to gather the logits by context parallelism - probs = allgather_cp_sharded_tensor( - probs, cp_group, seq_dim=1 + logprobs = allgather_cp_sharded_tensor( + logprobs, cp_group, seq_dim=1 ) # , unpadded_seqlen=target.shape[1]) if pad_len > 0: - probs = probs[:, :-pad_len] + logprobs = logprobs[:, :-pad_len] - return probs[:, :-1] + return logprobs[:, :-1] def from_parallel_logits_to_logprobs_packed_sequences( @@ -285,6 +425,7 @@ def from_parallel_logits_to_logprobs_packed_sequences( group: torch.distributed.ProcessGroup, inference_only: bool = False, cp_group: Optional[torch.distributed.ProcessGroup] = None, + chunk_size: 
Optional[int] = None, ) -> torch.Tensor: """Get log probabilities from TP sharded vocab logits for packed sequences. @@ -301,6 +442,7 @@ def from_parallel_logits_to_logprobs_packed_sequences( group (torch.distributed.ProcessGroup): Process group for distributed communication. inference_only (bool, optional): If True, tensors won't be saved for backward pass. Defaults to False. cp_group (torch.distributed.ProcessGroup, optional): Context parallelism process group. Defaults to None. + chunk_size (int, optional): Sequence dimension chunk size for computing the log probabilities. Returns: torch.Tensor: Unpacked log probabilities tensor with shape [batch_size, unpacked_seqlen-1]. @@ -334,14 +476,25 @@ def from_parallel_logits_to_logprobs_packed_sequences( vocab_parallel_logits = vocab_parallel_logits.unsqueeze(0) # Apply distributed log probability computation - probs: torch.Tensor = DistributedLogprob.apply( # type: ignore - vocab_parallel_logits, - rolled_targets, - vocab_start_index, - vocab_end_index, - group, - inference_only, - ).contiguous() + if chunk_size is not None: + probs: torch.Tensor = ChunkedDistributedLogprob.apply( # type: ignore + vocab_parallel_logits, + rolled_targets, + vocab_start_index, + vocab_end_index, + chunk_size, + group, + inference_only, + ).contiguous() + else: + probs: torch.Tensor = DistributedLogprob.apply( # type: ignore + vocab_parallel_logits, + rolled_targets, + vocab_start_index, + vocab_end_index, + group, + inference_only, + ).contiguous() # Remove batch dimension for filtering probs = probs.squeeze(0) @@ -494,3 +647,53 @@ def backward(ctx, grad_output): ) return grad_input, None, None # , None + + +def get_logprobs_from_vocab_parallel_logits( + vocab_parallel_logits: DTensor, + input_ids: torch.Tensor | DTensor, + seq_index: Optional[torch.Tensor] = None, + chunk_size: Optional[int] = None, +): + """Computes log probabilities from vocabulary-parallel logits. 
+ + This function takes logits that are sharded across the vocabulary dimension (tensor parallel) + and computes the log probabilities for the given input IDs. + + Args: + vocab_parallel_logits (DTensor): Logits distributed across tensor parallel workers, + with shape [batch_size, seq_len, vocab_size/tp_size]. + input_ids (torch.Tensor | DTensor): Input token IDs for which to compute log probabilities, + with shape [batch_size, seq_len]. + seq_index (Optional[torch.Tensor]): Sequence index for the input IDs, + with shape [sequence_length]. + chunk_size (Optional[int]): Sequence dimension chunk size for computing log probabilities. + + Returns: + torch.Tensor: Log probabilities for the given input IDs. + """ + device_mesh = vocab_parallel_logits.device_mesh + if seq_index is not None: + assert ( + device_mesh.mesh_dim_names is not None + and "cp" in device_mesh.mesh_dim_names + ), "seq_index must be provided for cp sharded logits" + + tp_size = 1 + + tp_group = device_mesh.get_group("tp") + tp_rank = tp_group.rank() + tp_size = tp_group.size() + + vocab_interval_per_rank = vocab_parallel_logits.shape[-1] // tp_size + + return dtensor_from_parallel_logits_to_logprobs( + vocab_parallel_logits.to_local(), + input_ids, + vocab_interval_per_rank * tp_rank, + (tp_rank + 1) * vocab_interval_per_rank, + tp_group, + inference_only=not torch.is_grad_enabled(), + seq_index=seq_index, + chunk_size=chunk_size, + ) diff --git a/nemo_rl/distributed/ray_actor_environment_registry.py b/nemo_rl/distributed/ray_actor_environment_registry.py index e300aec54b..c51e7ba5fa 100644 --- a/nemo_rl/distributed/ray_actor_environment_registry.py +++ b/nemo_rl/distributed/ray_actor_environment_registry.py @@ -12,15 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os + from nemo_rl.distributed.virtual_cluster import PY_EXECUTABLES +USE_SYSTEM_EXECUTABLE = os.environ.get("NEMO_RL_PY_EXECUTABLES_SYSTEM", "0") == "1" +VLLM_EXECUTABLE = ( + PY_EXECUTABLES.SYSTEM if USE_SYSTEM_EXECUTABLE else PY_EXECUTABLES.VLLM +) +MCORE_EXECUTABLE = ( + PY_EXECUTABLES.SYSTEM if USE_SYSTEM_EXECUTABLE else PY_EXECUTABLES.MCORE +) + ACTOR_ENVIRONMENT_REGISTRY: dict[str, str] = { - "nemo_rl.models.generation.vllm.VllmGenerationWorker": PY_EXECUTABLES.VLLM, + "nemo_rl.models.generation.vllm.vllm_worker.VllmGenerationWorker": VLLM_EXECUTABLE, + "nemo_rl.models.generation.vllm.vllm_worker_async.VllmAsyncGenerationWorker": VLLM_EXECUTABLE, # Temporary workaround for the coupled implementation of DTensorPolicyWorker and vLLM. # This will be reverted to PY_EXECUTABLES.BASE once https://github.com/NVIDIA-NeMo/RL/issues/501 is resolved. - "nemo_rl.models.policy.dtensor_policy_worker.DTensorPolicyWorker": PY_EXECUTABLES.VLLM, - "nemo_rl.models.policy.megatron_policy_worker.MegatronPolicyWorker": PY_EXECUTABLES.MCORE, + "nemo_rl.models.policy.dtensor_policy_worker.DTensorPolicyWorker": VLLM_EXECUTABLE, + "nemo_rl.models.policy.dtensor_policy_worker_v2.DTensorPolicyWorkerV2": PY_EXECUTABLES.AUTOMODEL, + "nemo_rl.models.policy.megatron_policy_worker.MegatronPolicyWorker": MCORE_EXECUTABLE, "nemo_rl.environments.math_environment.MathEnvironment": PY_EXECUTABLES.SYSTEM, + "nemo_rl.environments.vlm_environment.VLMEnvironment": PY_EXECUTABLES.SYSTEM, "nemo_rl.environments.code_environment.CodeEnvironment": PY_EXECUTABLES.SYSTEM, "nemo_rl.environments.games.sliding_puzzle.SlidingPuzzleEnv": PY_EXECUTABLES.SYSTEM, "nemo_rl.environments.tools.retriever.RAGEnvironment": PY_EXECUTABLES.SYSTEM, diff --git a/nemo_rl/distributed/virtual_cluster.py b/nemo_rl/distributed/virtual_cluster.py index 6e0a75b880..5d5e0bd7d9 100644 --- a/nemo_rl/distributed/virtual_cluster.py +++ b/nemo_rl/distributed/virtual_cluster.py @@ -48,6 +48,9 @@ class PY_EXECUTABLES: # Use NeMo-RL 
direct dependencies and vllm. VLLM = "uv run --locked --extra vllm" + # Use NeMo-RL direct dependencies and nemo-automodel. + AUTOMODEL = "uv run --locked --extra automodel" + # Megatron-core (and nemo dependencies) # We always run with --reinstall to avoid issues where someone runs "uv run ... --extra mcore ..." # but the submodules are not downloaded yet. This results in errors where it appears Megatron/Nemo diff --git a/nemo_rl/distributed/worker_group_utils.py b/nemo_rl/distributed/worker_group_utils.py index fe2a9a03be..c51d3b8a7f 100644 --- a/nemo_rl/distributed/worker_group_utils.py +++ b/nemo_rl/distributed/worker_group_utils.py @@ -57,6 +57,7 @@ def get_nsight_config_if_pattern_matches(worker_name: str) -> dict[str, Any]: # Profile will only start/stop when torch.cuda.profiler.start()/stop() is called "capture-range": "cudaProfilerApi", "capture-range-end": "stop", + "cuda-graph-trace": "node", } } diff --git a/nemo_rl/environments/rewards.py b/nemo_rl/environments/rewards.py new file mode 100644 index 0000000000..3372796968 --- /dev/null +++ b/nemo_rl/environments/rewards.py @@ -0,0 +1,173 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import re +from typing import Callable, Optional + +import numpy as np +from math_verify.errors import TimeoutException +from math_verify.metric import math_metric +from math_verify.parser import ExprExtractionConfig, LatexExtractionConfig + +# initialize math_verify_func once +math_verify_func = math_metric( + gold_extraction_target=(LatexExtractionConfig(),), + pred_extraction_target=( + ExprExtractionConfig(), + LatexExtractionConfig(), + ), +) + +boxed = lambda x: "\\boxed{" + x + "}" if not x.startswith("\\boxed{") else x + + +def math_expression_reward( + ground_truth: str, response: str, tag: str = "answer" +) -> tuple[float, bool]: + """Reward the agent when the answer within the <{tag}> tags is the same expression as the ground truth. + + The `tag` is customizable and must be specified as part of the user COT prompt text file. + """ + match = re.search(rf"<{tag}>([\s\S]*)", response) + if match: + answer = match.group(1) + try: + score, _ = math_verify_func([boxed(ground_truth)], [boxed(answer)]) + return float(score), score > 0.1 + except (Exception, TimeoutException) as e: + return 0.0, False + return 0.0, False + + +def format_reward( + ground_truth: str, + response: str, + think_tag: str = "think", + answer_tag: str = "answer", +) -> tuple[float, Optional[bool]]: + """Reward the agent when the response follows the format: (.*) (.*) (.*) . + + The `think_tag` and `answer_tag` are customizable and must be specified as part of the user COT prompt text file. + """ + rew = 0.0 + if re.search(rf"<{think_tag}>[\s\S]*", response): + rew += 0.25 # 0.25 points for having think tags + if re.search(rf"<{answer_tag}>[\s\S]*", response): + rew += 0.75 # 0.75 points for having answer tags + return rew, None + + +def exact_answer_alphanumeric_reward( + ground_truth: str, response: str, answer_tag: str = "answer" +) -> tuple[float, bool]: + """Reward the agent when the answer within the <{answer_tag}> tags is the same as the ground truth (case-insensitive). 
+ + The `answer_tag` is customizable and must be specified as part of the user COT prompt text file. + """ + match = re.search(rf"<{answer_tag}>([\s\S]*)", response) + if match: + answer = match.group(1) + # Remove all non-alphanumeric characters (including whitespace, punctuation, etc.) + answer_clean = "".join(c for c in answer if c.isalnum()).lower() + ground_truth_clean = "".join(c for c in ground_truth if c.isalnum()).lower() + if answer_clean == ground_truth_clean: + return 1.0, True + return 0.0, False + + +def bbox_giou_reward( + ground_truth: str, + response: str, + giou_penalty_thres: float = 10.0, + answer_tag: str = "answer", +) -> tuple[float, bool]: + """Given [x1, y1, x2, y2] normalized bounding box coordinates within the <{answer_tag}> tags, compute the GIoU between the ground truth and the response. + + The `answer_tag` is customizable and must be specified as part of the user COT prompt text file. + """ + match = re.search(rf"<{answer_tag}>([\s\S]*)", response) + if match: + answer = match.group(1) + else: + return 0.0, False + + try: + x1g, y1g, x2g, y2g = [ + float(x) for x in ground_truth.replace("[", "").replace("]", "").split(",") + ] + x1r, y1r, x2r, y2r = [ + float(x) for x in answer.replace("[", "").replace("]", "").split(",") + ] + except ValueError: + return 0.0, False + + # compute iou function + # compute the area of the ground truth and response bounding boxes + area_g = (x2g - x1g) * (y2g - y1g) + area_r = (x2r - x1r) * (y2r - y1r) + # compute the intersection of the ground truth and response bounding boxes + x1i = max(x1g, x1r) + y1i = max(y1g, y1r) + x2i = min(x2g, x2r) + y2i = min(y2g, y2r) + # compute the area of the intersection + area_i = max(0.0, x2i - x1i) * max(0.0, y2i - y1i) + # compute the area of the union + area_u = max(1e-3, area_g + area_r - area_i) + # compute the iou + iou = area_i / area_u + # if iou is too low, introduce a giou term to compensate + if iou < giou_penalty_thres: + # compute convex hull as min + x1c 
= min(x1g, x1r) + y1c = min(y1g, y1r) + x2c = max(x2g, x2r) + y2c = max(y2g, y2r) + # compute the area of the convex hull + area_c = max(1e-3, (x2c - x1c) * (y2c - y1c)) + # compute the giou + giou = iou - (area_c - area_u) / area_c + else: + giou = iou + return giou, giou > 0.5 + + +def combine_reward_functions( + reward_functions: list[tuple[Callable[[str, str], tuple[float, bool]], float]], +) -> Callable[[str, str], tuple[float, bool]]: + """Returns a callable function that takes (ground_truth, response) and collects multiple reward functions in sequence. + + The reward functions are weighted by the second element of the tuple. + This information can be provided in the YAML config file and resolved in the VLMEnvironment class. + + Args: + reward_functions: list[tuple[Callable[[str, str], tuple[float, bool]], float]]. A list of reward functions and their weights. + + Returns: + Callable[[str, str], tuple[float, bool]]: A callable function that takes (ground_truth, response) and collects multiple reward functions in sequence + """ + weights = [weight for _, weight in reward_functions] + weights = np.array(weights) / np.sum(weights) # renormalize weights to 1 + + def combined_reward_func(ground_truth: str, response: str) -> tuple[float, bool]: + reward_env_score = [ + reward_func(ground_truth, response) for reward_func, _ in reward_functions + ] + rewards = [x[0] for x in reward_env_score] + is_correct = [ + x[1] for x in reward_env_score if x[1] is not None + ] # skip None values, because they do not contribute to the "correctness" of the response (e.g. 
format_reward, because the answer can still be correct without tags) + is_correct = all(is_correct) + return np.sum(np.array(rewards) * weights), is_correct + + return combined_reward_func diff --git a/nemo_rl/environments/vlm_environment.py b/nemo_rl/environments/vlm_environment.py new file mode 100644 index 0000000000..7e4943c3b2 --- /dev/null +++ b/nemo_rl/environments/vlm_environment.py @@ -0,0 +1,252 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import contextlib +import io +import logging +from functools import partial +from typing import Any, Callable, List, Optional, TypedDict + +import ray +import torch + +from nemo_rl.distributed.batched_data_dict import BatchedDataDict +from nemo_rl.distributed.virtual_cluster import PY_EXECUTABLES +from nemo_rl.environments.interfaces import ( + EnvironmentInterface, + EnvironmentReturn, +) +from nemo_rl.environments.metrics import ( + calculate_pass_rate_per_prompt, +) +from nemo_rl.environments.rewards import ( + bbox_giou_reward, + combine_reward_functions, + exact_answer_alphanumeric_reward, + format_reward, + math_expression_reward, +) +from nemo_rl.environments.utils import chunk_list_to_workers + + +class VLMEnvConfig(TypedDict): + num_workers: int + stop_strings: Optional[list[str]] # Default stop strings for this env + reward_functions: List[dict[str, Any]] # list of reward functions and their weights + + +@contextlib.contextmanager +def _mute_output(): + devnull_out, devnull_err = io.StringIO(), io.StringIO() + with ( + contextlib.redirect_stdout(devnull_out), + contextlib.redirect_stderr(devnull_err), + ): + yield + + +@ray.remote +class VLMVerifyWorker: + def __init__(self, cfg: VLMEnvConfig) -> None: + logging.getLogger("vlm_worker").setLevel(logging.CRITICAL) + # this is a simple reward function that rewards the agent for correct answer and correct format + reward_functions = [] + # loop over all configs + for reward_func_cfg in cfg["reward_functions"]: + # get name and weight + reward_func_name: str = reward_func_cfg["name"] + reward_func_weight: float = reward_func_cfg["weight"] + reward_func_kwargs: Optional[dict] = reward_func_cfg.get("kwargs", None) + reward_func: Callable[[str, str], tuple[float, Optional[bool]]] + if reward_func_name == "format": + reward_func = format_reward + elif reward_func_name == "exact_alnum": + reward_func = exact_answer_alphanumeric_reward + elif reward_func_name == "math_expr": + reward_func = math_expression_reward + 
elif reward_func_name == "bbox_giou": + reward_func = bbox_giou_reward + else: + raise ValueError(f"Invalid reward function: {reward_func_name}") + + # check for additional kwargs + if reward_func_kwargs is not None: + reward_func = partial(reward_func, **reward_func_kwargs) + + reward_functions.append((reward_func, reward_func_weight)) + + if len(reward_functions) == 0: + raise ValueError("No reward functions provided") + + # combine the reward functions + self.verify_func = combine_reward_functions(reward_functions) + + def verify( + self, pred_responses: list[str], ground_truths: list[str] + ) -> list[float]: + """Verify the correctness of the predicted responses against the ground truth. + + Args: + pred_responses: list[str]. The predicted responses from the LLM. + ground_truths: list[str]. The ground truth responses. + + Returns: + list[float]. The rewards for each predicted response. + """ + results = [] + for response, ground_truth in zip(pred_responses, ground_truths): + try: + with _mute_output(): + try: + ret_score, _ = self.verify_func(ground_truth, response) + except Exception as e: + ret_score = 0.0 + print(f"Error in verify_func: {e}") + results.append(float(ret_score)) + except Exception as e: + print(f"Error in verify: {e}") + results.append(0.0) + return results + + +class VLMEnvironmentMetadata(TypedDict): + ground_truth: str + + +@ray.remote(max_restarts=-1, max_task_retries=-1) +class VLMEnvironment(EnvironmentInterface): + def __init__(self, cfg: VLMEnvConfig): + self.cfg = cfg + self.num_workers = cfg["num_workers"] + self.workers = [ + VLMVerifyWorker.options( # type: ignore # (decorated with @ray.remote) + runtime_env={"py_executable": PY_EXECUTABLES.SYSTEM} + ).remote(cfg) + for _ in range(self.num_workers) + ] + + def shutdown(self) -> None: + # shutdown all workers + for worker in self.workers: + ray.kill(worker) + + def step( # type: ignore[override] + self, + message_log_batch: list[list[dict[str, str]]], + metadata: 
list[VLMEnvironmentMetadata], + ) -> EnvironmentReturn: + """Runs a step in the vlm environment. + + Args: + message_log: list[list[dict[str, str]]]. A batch of OpenAI-API-like message logs that represent interactions with the VLM. + metadata: list[VLMEnvironmentMetadata]. The grader will use the 'ground_truth' key to evaluate correctness. + + Returns: + EnvironmentReturn: A tuple containing: + - list[dict[str, str]]: Observations/responses batch + - list[dict]: Updated metadata + - list[str]: Next stop strings for the next turn + - Tensor: Rewards tensor + - Tensor: Done flags tensor + """ + # Extract the assistant's responses from the message history + # Each message list should have at least one assistant response + assistant_response_batch = [] + for conversation in message_log_batch: + assistant_responses = [ + interaction["content"] + for interaction in conversation + if interaction["role"] == "assistant" + ] + assistant_response_batch.append("".join(assistant_responses)) + + ground_truths = [g["ground_truth"] for g in metadata] + + chunked_assistant_response_batch = chunk_list_to_workers( + assistant_response_batch, self.num_workers + ) + chunked_ground_truths = chunk_list_to_workers(ground_truths, self.num_workers) + + # # Process each chunk in parallel + futures = [ + self.workers[i].verify.remote(chunk, ground_truth_chunk) + for i, (chunk, ground_truth_chunk) in enumerate( + zip(chunked_assistant_response_batch, chunked_ground_truths) + ) + ] + + results = ray.get(futures) + + # flatten the results + results = [item for sublist in results for item in sublist] + observations = [ + { + "role": "environment", + "content": "Environment: correct" + if result + else "Environment: incorrect", + } + for result in results + ] + + # create a tensor of rewards and done flags + rewards = torch.tensor(results).cpu() + done = torch.ones_like(rewards).cpu() + + next_stop_strings = [None] * len(message_log_batch) + + return EnvironmentReturn( + observations=observations, 
+ metadata=metadata, + next_stop_strings=next_stop_strings, + rewards=rewards, + terminateds=done, + answers=None, + ) + + def global_post_process_and_metrics( + self, batch: BatchedDataDict[Any] + ) -> tuple[BatchedDataDict[Any], dict[str, float | int]]: + """Computes metrics for this environment given a global rollout batch. + + Every rank will run this function, so you're free to use distributed + calculations if you'd prefer for heavy metrics. + """ + batch["rewards"] = ( + batch["rewards"] * batch["is_end"] + ) # set a reward of 0 for any incorrectly ended sequences + if (batch["rewards"] == 1).float().sum() > 0: + correct_solution_generation_lengths = ( + (batch["generation_lengths"] - batch["prompt_lengths"])[ + batch["rewards"] == 1 + ] + .float() + .mean() + .item() + ) + else: + correct_solution_generation_lengths = 0 + + metrics = { + "accuracy": batch["rewards"].mean().item(), + "pass@samples_per_prompt": calculate_pass_rate_per_prompt( + batch["text"], batch["rewards"] + ), + "fraction_of_samples_properly_ended": batch["is_end"].float().mean().item(), + "num_problems_in_batch": batch["is_end"].shape[0], + "generation_lengths": batch["generation_lengths"].float().mean().item(), + "prompt_lengths": batch["prompt_lengths"].float().mean().item(), + "correct_solution_generation_lengths": correct_solution_generation_lengths, + } + + return batch, metrics diff --git a/nemo_rl/evals/eval.py b/nemo_rl/evals/eval.py index 9f5be0dbf7..32881d7706 100644 --- a/nemo_rl/evals/eval.py +++ b/nemo_rl/evals/eval.py @@ -427,7 +427,7 @@ def _save_evaluation_data_to_json(evaluation_data, master_config, save_path): "model_name": master_config["generation"]["model_name"], "dataset_name": master_config["data"]["dataset_name"], "metric": master_config["eval"]["metric"], - "pass_k_value": master_config["eval"]["pass_k_value"], + "k_value": master_config["eval"]["k_value"], "num_tests_per_prompt": master_config["eval"]["num_tests_per_prompt"], "temperature": 
master_config["generation"]["temperature"], "top_p": master_config["generation"]["top_p"], @@ -485,6 +485,7 @@ def _print_results( dataset_name = os.path.basename(master_config["data"]["dataset_name"]) model_name = os.path.basename(generation_config["model_name"]) max_new_tokens = generation_config["vllm_cfg"]["max_model_len"] + seed = master_config["eval"]["seed"] temperature = generation_config["temperature"] top_p = generation_config["top_p"] top_k = generation_config["top_k"] @@ -492,7 +493,7 @@ def _print_results( print("\n" + "=" * 60) print(f"{model_name=} {dataset_name=}") - print(f"{max_new_tokens=} {temperature=} {top_p=} {top_k=}\n") + print(f"{max_new_tokens=} {temperature=} {top_p=} {top_k=} {seed=}\n") print(f"metric={metric[:-1]}{k_value} {num_tests_per_prompt=}\n") print(f"score={average_score:.4f} ({score}/{dataset_size})") print("=" * 60 + "\n") diff --git a/nemo_rl/experience/rollouts.py b/nemo_rl/experience/rollouts.py index ed8fa5b890..e0534d38f1 100644 --- a/nemo_rl/experience/rollouts.py +++ b/nemo_rl/experience/rollouts.py @@ -380,6 +380,7 @@ def run_multi_turn_rollout( # Extract input_ids and lengths from the flat messages active_input_ids = active_flat_messages["token_ids"] + # Prepare generation input data generation_input_data = BatchedDataDict[GenerationDatumSpec]( { "input_ids": active_input_ids, @@ -387,6 +388,17 @@ def run_multi_turn_rollout( "stop_strings": active_stop_strings, } ) + # add the multimodal data to the generation input data + multimodal_data = active_flat_messages.get_multimodal_dict(as_tensors=False) + generation_input_data.update(multimodal_data) + + # keep message log for generation + if "vllm_content" in active_batch: + generation_input_data["vllm_content"] = active_batch["vllm_content"] + if "vllm_images" in active_batch: + generation_input_data["vllm_images"] = active_batch["vllm_images"] + if "vllm_videos" in active_batch: + generation_input_data["vllm_videos"] = active_batch["vllm_videos"] # generate_responses 
updates active_batch["message_log"] in-place active_batch, generated_ids, gen_metrics = generate_responses( diff --git a/nemo_rl/models/dtensor/parallelize.py b/nemo_rl/models/dtensor/parallelize.py index e2af748d71..e5469e3e04 100644 --- a/nemo_rl/models/dtensor/parallelize.py +++ b/nemo_rl/models/dtensor/parallelize.py @@ -31,8 +31,6 @@ from torch.distributed.tensor.parallel import ( ColwiseParallel, ParallelStyle, - PrepareModuleInput, - PrepareModuleOutput, RowwiseParallel, SequenceParallel, parallelize_module, @@ -44,10 +42,30 @@ Gemma3ForConditionalGeneration, ) from transformers.models.llama.modeling_llama import LlamaForCausalLM +from transformers.models.llama4.modeling_llama4 import Llama4ForConditionalGeneration +from transformers.models.llava.modeling_llava import LlavaForConditionalGeneration +from transformers.models.llava_next.modeling_llava_next import ( + LlavaNextForConditionalGeneration, +) +from transformers.models.llava_next_video.modeling_llava_next_video import ( + LlavaNextVideoForConditionalGeneration, +) +from transformers.models.llava_onevision.modeling_llava_onevision import ( + LlavaOnevisionForConditionalGeneration, +) +from transformers.models.mistral3.modeling_mistral3 import ( + Mistral3ForConditionalGeneration, +) from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM +from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( + Qwen2_5_VLForConditionalGeneration, +) +from transformers.models.qwen2_vl.modeling_qwen2_vl import ( + Qwen2VLForConditionalGeneration, +) from transformers.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM +from transformers.models.smolvlm.modeling_smolvlm import SmolVLMForConditionalGeneration -from nemo_rl.distributed.model_utils import dtensor_from_parallel_logits_to_logprobs from nemo_rl.models.policy.utils import import_class_from_path @@ -93,18 +111,14 @@ def _parallelize_gemma3( model: Union[Gemma3ForCausalLM, Gemma3ForConditionalGeneration], sequence_parallel: bool = False, ) 
-> dict[str, ParallelStyle]: - """Parallelizes a Gemma3ForCausalLM model across data parallel dimensions. - - Tensor parallelism is not supported for Gemma3 models because of tied word embeddings. - """ + """Parallelizes a Gemma3ForCausalLM model across data and tensor parallel dimensions.""" if isinstance(model, Gemma3ForConditionalGeneration): model_prefix = "model.language_model" else: model_prefix = "model" - # For gemma3 models, we don't include the model.embed_tokens and lm_head in the - # parallelization plans because they have tied weights. base_model_tp_plan: dict[str, ParallelStyle] = { + f"{model_prefix}.embed_tokens": RowwiseParallel(input_layouts=Replicate()), f"{model_prefix}.layers.*.self_attn.q_proj": ColwiseParallel(), f"{model_prefix}.layers.*.self_attn.k_proj": ColwiseParallel(), f"{model_prefix}.layers.*.self_attn.v_proj": ColwiseParallel(), @@ -112,13 +126,12 @@ def _parallelize_gemma3( f"{model_prefix}.layers.*.mlp.up_proj": ColwiseParallel(), f"{model_prefix}.layers.*.mlp.gate_proj": ColwiseParallel(), f"{model_prefix}.layers.*.mlp.down_proj": RowwiseParallel(), + "lm_head": ColwiseParallel(output_layouts=Shard(-1), use_local_output=False), } base_model_sp_plan = { - f"{model_prefix}.embed_tokens": PrepareModuleOutput( - output_layouts=Replicate(), - desired_output_layouts=Shard(1), - use_local_output=False, + f"{model_prefix}.embed_tokens": RowwiseParallel( + input_layouts=Replicate(), output_layouts=Shard(1) ), f"{model_prefix}.rotary_emb": RotaryEmbedParallel(use_local_output=True), f"{model_prefix}.rotary_emb_local": RotaryEmbedParallel(use_local_output=True), @@ -133,10 +146,8 @@ def _parallelize_gemma3( ), f"{model_prefix}.layers.*.post_feedforward_layernorm": SequenceParallel(), f"{model_prefix}.norm": SequenceParallel(), - "lm_head": PrepareModuleInput( - input_layouts=(Shard(1),), - desired_input_layouts=(Replicate(),), - use_local_output=True, + "lm_head": ColwiseParallel( + input_layouts=Shard(1), output_layouts=Shard(-1), 
use_local_output=False ), } @@ -312,12 +323,45 @@ def get_hf_tp_plan(model: PreTrainedModel): AssertionError: If no TP plan is found """ model_cls = type(model) - if model_cls == Gemma3ForConditionalGeneration: + + # Handle VL models structure + if model_cls in [ + Qwen2VLForConditionalGeneration, + Qwen2_5_VLForConditionalGeneration, + ]: + inner_model = model.model.language_model + model_prefix = "model.language_model" + config = model.model.language_model.config + + elif model_cls == Gemma3ForConditionalGeneration: inner_model = model.language_model model_prefix = "language_model" + config = model.config.text_config + + elif model_cls == Llama4ForConditionalGeneration: + inner_model = model.language_model.model + model_prefix = "language_model.model" + config = model.language_model.model.config + + elif model_cls in [ + LlavaForConditionalGeneration, + LlavaNextForConditionalGeneration, + LlavaNextVideoForConditionalGeneration, + LlavaOnevisionForConditionalGeneration, + ]: + inner_model = model.model.language_model + model_prefix = "model.language_model" + config = model.model.language_model.config + + elif model_cls == Mistral3ForConditionalGeneration: + inner_model = model.model.language_model + model_prefix = "model.language_model" + config = model.model.language_model.config + else: inner_model = model.model model_prefix = "model" + config = model.config hf_tp_plan = {} @@ -342,19 +386,12 @@ def get_hf_tp_plan(model: PreTrainedModel): ) # hf tp plan not contain embed_tokens, we add it and set to rowwise_rep - if ( - f"{model_prefix}.embed_tokens" not in hf_tp_plan - and not model.config.tie_word_embeddings - ): + if f"{model_prefix}.embed_tokens" not in hf_tp_plan: hf_tp_plan[f"{model_prefix}.embed_tokens"] = "rowwise_rep" for k, v in hf_tp_plan.items(): # speed up the tp plan for lm_head - if ( - k == "lm_head" - and v == "colwise_rep" - and not model.config.tie_word_embeddings - ): + if (k == "lm_head" or k == "language_model.lm_head") and v == 
"colwise_rep": hf_tp_plan[k] = ColwiseParallel( output_layouts=Shard(-1), use_local_output=False ) @@ -364,9 +401,80 @@ def get_hf_tp_plan(model: PreTrainedModel): return hf_tp_plan +def _parallelize_nm5_h( + model, + dp_mesh: DeviceMesh, + tp_mesh: DeviceMesh, + param_dtype: torch.dtype, + sequence_parallel: bool = False, + activation_checkpointing: bool = False, + cpu_offload: bool = False, + custom_parallel_plan: Optional[Union[dict, str]] = None, +) -> torch.distributed.fsdp.FSDPModule: + """Parallelize a NemotronHForCausalLM model across data and tensor parallel dimensions.""" + assert not sequence_parallel, ( + "Sequence parallelism is not supported for NemotronHForCausalLM" + ) + assert custom_parallel_plan is None, ( + "Custom parallel plan is not supported for NemotronHForCausalLM" + ) + + model_tp_plan: dict[str, ParallelStyle] = { + "lm_head": ColwiseParallel(output_layouts=Shard(-1), use_local_output=False), + } + + mlp_tp_plan: dict[str, ParallelStyle] = { + "mixer.up_proj": ColwiseParallel(), + "mixer.down_proj": RowwiseParallel(), + } + + layers: torch.nn.ModuleList = model.backbone.layers + parallelize_module(model, tp_mesh, model_tp_plan) + + for layer in model.backbone.layers: + if layer.block_type == "mlp": + parallelize_module(layer, tp_mesh, mlp_tp_plan) + + if activation_checkpointing: + for i in range(len(layers)): + if layers[i].block_type == "mlp": + layers[i] = checkpoint_wrapper(layers[i]) + + if layers[i].block_type == "mamba": + layers[i] = checkpoint_wrapper(layers[i]) + + mp_policy = MixedPrecisionPolicy( + param_dtype=param_dtype, + reduce_dtype=torch.float32, + output_dtype=torch.float32, + ) + + offload_policy = ( + CPUOffloadPolicy(pin_memory=False) + if cpu_offload + else torch.distributed.fsdp.OffloadPolicy + ) + + for layer in layers: + fully_shard( + layer, mesh=dp_mesh, mp_policy=mp_policy, offload_policy=offload_policy + ) + + # do not reshard after forward for root model + # because its parameters will be used in backward 
immediately + return fully_shard( + model, + mesh=dp_mesh, + mp_policy=mp_policy, + offload_policy=offload_policy, + reshard_after_forward=False, + ) + + def _parallelize_model( model: Union[ Qwen2ForCausalLM, + Qwen3ForCausalLM, LlamaForCausalLM, Gemma3ForCausalLM, Gemma3ForConditionalGeneration, @@ -401,11 +509,93 @@ def _parallelize_model( ValueError: If the model type is not supported for parallelization. """ model_cls = type(model) + + # Handle different model structures if model_cls == Gemma3ForConditionalGeneration: + # layers: torch.nn.ModuleList = model.language_model.layers # type: ignore + layers: list = [] + for layer in model.language_model.layers: + layers.append(layer) + # siglip encoder also has the same structure as clip encoder (being the same model after all) + for layer in model.vision_tower.vision_model.encoder.layers: + layers.append(layer) layers: torch.nn.ModuleList = model.language_model.layers # type: ignore num_attention_heads = model.config.text_config.num_attention_heads num_key_value_heads = model.config.text_config.num_key_value_heads + + elif model_cls.__name__ == "NemotronHForCausalLM": + # need to do something special for nm5, since it's harder to shard the mamba layers + # nm5 is not importable, so we check the __name__ attribute + return _parallelize_nm5_h( + model, + dp_mesh, + tp_mesh, + param_dtype, + sequence_parallel, + activation_checkpointing, + cpu_offload, + custom_parallel_plan, + ) + + elif model_cls in [ + Qwen2_5_VLForConditionalGeneration, + Qwen2VLForConditionalGeneration, + ]: + # VL models have the language model at model.language_model + layers: list = [] + # append language model layers + for layer in model.language_model.layers: + layers.append(layer) + # append visual model layers + for layer in model.visual.blocks: + layers.append(layer) + + num_attention_heads = model.language_model.config.num_attention_heads + num_key_value_heads = model.language_model.config.num_key_value_heads + + elif model_cls == 
SmolVLMForConditionalGeneration: + layers: list = [] + for layer in model.model.text_model.layers: + layers.append(layer) + for layer in model.model.vision_model.encoder.layers: + layers.append(layer) + num_attention_heads = model.model.text_model.config.num_attention_heads + num_key_value_heads = model.model.text_model.config.num_key_value_heads + + elif model_cls in [ + LlavaForConditionalGeneration, + LlavaNextForConditionalGeneration, + LlavaNextVideoForConditionalGeneration, + LlavaOnevisionForConditionalGeneration, + ]: + layers: list = [] + for layer in model.model.language_model.layers: + layers.append(layer) + for layer in model.vision_tower.vision_model.encoder.layers: + layers.append(layer) + num_attention_heads = model.language_model.config.num_attention_heads + num_key_value_heads = model.language_model.config.num_key_value_heads + + elif model_cls == Mistral3ForConditionalGeneration: + layers: list = [] + for layer in model.model.language_model.layers: + layers.append(layer) + for layer in model.model.vision_tower.transformer.layers: + layers.append(layer) + num_attention_heads = model.model.language_model.config.num_attention_heads + num_key_value_heads = model.model.language_model.config.num_key_value_heads + + elif model_cls == Llama4ForConditionalGeneration: + layers: list = [] + for layer in model.language_model.model.layers: + layers.append(layer) + for layer in model.vision_model.model.layers: + layers.append(layer) + num_attention_heads = model.language_model.model.config.num_attention_heads + num_key_value_heads = model.language_model.model.config.num_key_value_heads + else: + # this is the default case for all other models (assumed to be a causal LM) layers: torch.nn.ModuleList = model.model.layers # type: ignore num_attention_heads = model.config.num_attention_heads num_key_value_heads = model.config.num_key_value_heads @@ -471,6 +661,25 @@ def _parallelize_model( for i in range(len(layers)): layers[i].mlp = 
checkpoint_wrapper(layers[i].mlp) # type: ignore + """ + the extra memory overhead for layer norm seems to be only present + in mistral models, where some intermediate state is converted to float32 + + need to find a better solution for checkpointing + """ + if hasattr(layers[i], "self_attn"): + layers[i].self_attn = checkpoint_wrapper(layers[i].self_attn) # type: ignore + + if hasattr(layers[i], "input_layernorm"): + layers[i].input_layernorm = checkpoint_wrapper( + layers[i].input_layernorm # type: ignore + ) + + if hasattr(layers[i], "post_attention_layernorm"): + layers[i].post_attention_layernorm = checkpoint_wrapper( + layers[i].post_attention_layernorm # type: ignore + ) + mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=torch.float32, @@ -614,50 +823,3 @@ def get_grad_norm( total_norm = total_norm.item() ** (1.0 / norm_type) # type: ignore return total_norm - - -def get_logprobs_from_vocab_parallel_logits( - vocab_parallel_logits: DTensor, - input_ids: torch.Tensor | DTensor, - seq_index: Optional[torch.Tensor] = None, -): - """Computes log probabilities from vocabulary-parallel logits. - - This function takes logits that are sharded across the vocabulary dimension (tensor parallel) - and computes the log probabilities for the given input IDs. - - Args: - vocab_parallel_logits (DTensor): Logits distributed across tensor parallel workers, - with shape [batch_size, seq_len, vocab_size/tp_size]. - input_ids (torch.Tensor | DTensor): Input token IDs for which to compute log probabilities, - with shape [batch_size, seq_len]. - seq_index (Optional[torch.Tensor]): Sequence index for the input IDs, - with shape [sequence_length]. - - Returns: - torch.Tensor: Log probabilities for the given input IDs. 
- """ - device_mesh = vocab_parallel_logits.device_mesh - if seq_index is not None: - assert ( - device_mesh.mesh_dim_names is not None - and "cp" in device_mesh.mesh_dim_names - ), "seq_index must be provided for cp sharded logits" - - tp_size = 1 - - tp_group = device_mesh.get_group("tp") - tp_rank = tp_group.rank() - tp_size = tp_group.size() - - vocab_interval_per_rank = vocab_parallel_logits.shape[-1] // tp_size - - return dtensor_from_parallel_logits_to_logprobs( - vocab_parallel_logits.to_local(), - input_ids, - vocab_interval_per_rank * tp_rank, - (tp_rank + 1) * vocab_interval_per_rank, - tp_group, - inference_only=not torch.is_grad_enabled(), - seq_index=seq_index, - ) diff --git a/nemo_rl/models/generation/fp8.py b/nemo_rl/models/generation/fp8.py new file mode 100644 index 0000000000..cb983617d6 --- /dev/null +++ b/nemo_rl/models/generation/fp8.py @@ -0,0 +1,574 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from dataclasses import dataclass, field +from unittest.mock import patch + +import ray +import torch +from accelerate import init_empty_weights +from transformers import AutoConfig, AutoModel +from vllm.model_executor.layers.linear import LinearBase +from vllm.triton_utils import tl, triton +from vllm.v1.engine.core import EngineCoreProc +from vllm.v1.engine.utils import CoreEngineProcManager + +FP8_BLOCK_QUANT_KWARGS = { + "activation_scheme": "dynamic", + "fmt": "e4m3", + "quant_method": "fp8", + "weight_block_size": [128, 128], +} + + +@dataclass(frozen=True) +class FP8Config: + use_weight_pow2_scale: bool = False + use_activation_pow2_scale: bool = False + num_first_layers_in_bf16: int = 0 + num_last_layers_in_bf16: int = 0 + model_parallel_size: int = None + + +@dataclass() +class FP8State: + # A cache of fp8 parameter names, we can check this cache to see if a + # param name corresponds to a fp8 weight + seen_params: set = field(default_factory=lambda: set()) + fp8_param_names: set = field(default_factory=lambda: set()) + vllm_patches: list = field(default_factory=lambda: []) + + +# Global FP8 config that can be accessed by patched vLLM functions +# initialized by 'init_fp8_cfg()' +global_fp8_config: FP8Config = None +# Global FP8 state that holds runtime fp8 objects +fp8_state: FP8State = FP8State() + +fp8_patches_applied = False + +original_run_engine_core = EngineCoreProc.run_engine_core +original_init = CoreEngineProcManager.__init__ + + +def my_init(*args, **kwargs): + kwargs["vllm_config"].nrl_fp8_cfg = global_fp8_config + return original_init(*args, **kwargs) + + +def my_run_engine_core(*args, **kwargs): + fp8_cfg = kwargs["vllm_config"].nrl_fp8_cfg + del kwargs["vllm_config"].nrl_fp8_cfg + monkey_patch_vllm_ray_executor(fp8_cfg) + return original_run_engine_core(*args, **kwargs) + + +def monkey_patch_vllm_ray_executor(fp8_config): + if fp8_config.model_parallel_size > 1: + # we patch vllm's _run_workers so that before vllm initalizes 
the model on each rank, we execute + # a ray remote that patches each worker with the required fp8 vllm patches + from vllm.v1.executor.ray_distributed_executor import RayDistributedExecutor + + original_run_workers = RayDistributedExecutor._run_workers + + def patched_run_workers(self, *args, **kwargs): + global fp8_patches_applied + if not fp8_patches_applied: + futures = [ + worker.execute_method.remote(apply_fp8_patches, fp8_config) + for worker in self.workers + ] + [ray.get(future) for future in futures] + fp8_patches_applied = True + + return original_run_workers(self, *args, **kwargs) + + RayDistributedExecutor._run_workers = patched_run_workers + else: + # for single gpu there is no ray, so just call the patches + apply_fp8_patches(None, fp8_config) + + global fp8_patches_applied + fp8_patches_applied = True + + +def apply_fp8_patches(self, fp8_config): + global global_fp8_config, fp8_patches_applied + assert not fp8_patches_applied + + global_fp8_config = fp8_config + + # This patch is used to support torch.compile with vllm parameter subclasses, such as + # PerTensorScaleParameter. Because we need weight loaders to update fp8 weights each + # refit, we patch fp8 parameters to have a reference to their weight loader. Eventually + # with pytorch 2.8, parameter subclassing with torch.compile will be natively supported, in + # which this patch can be removed. + func1_path = "vllm.model_executor.layers.quantization.fp8.Fp8LinearMethod.process_weights_after_loading" + patcher1 = patch(func1_path, process_weights_after_loading) + fp8_state.vllm_patches.append(patcher1) + # These patches add support for pow2, e8 dynamic activation scalings factors which are believed to have higher + # SNR compared to plain fp32 scaling factors. This feature is still under active research. 
+    if global_fp8_config.use_activation_pow2_scale:
+        func2_path = "vllm.model_executor.layers.quantization.utils.fp8_utils.per_token_group_quant_fp8"
+        func3_path = "vllm.model_executor.layers.quantization.utils.fp8_utils._per_token_group_quant_fp8"
+        func4_path = "vllm.model_executor.layers.quantization.utils.fp8_utils._per_token_group_quant_fp8_colmajor"
+        patcher2 = patch(func2_path, per_token_group_quant_fp8)
+        patcher3 = patch(func3_path, _per_token_group_quant_fp8)
+        patcher4 = patch(func4_path, _per_token_group_quant_fp8_colmajor)
+        fp8_state.vllm_patches.extend([patcher2, patcher3, patcher4])
+
+    for p in fp8_state.vllm_patches:
+        p.start()
+
+    fp8_patches_applied = True
+
+
+def init_fp8(vllm_cfg, model_name, model_parallel_size):
+    config = AutoConfig.from_pretrained(model_name)
+    if hasattr(config, "num_experts"):
+        assert config.num_experts == 0, (
+            "FP8 generation for MoE models is currently not supported"
+        )
+
+    global global_fp8_config
+    global_fp8_config = FP8Config(
+        use_weight_pow2_scale=vllm_cfg.get("pow2_weight_scaling_factors", False),
+        use_activation_pow2_scale=vllm_cfg.get(
+            "pow2_activation_scaling_factors", False
+        ),
+        num_first_layers_in_bf16=vllm_cfg.get("num_first_layers_in_bf16", 0),
+        num_last_layers_in_bf16=vllm_cfg.get("num_last_layers_in_bf16", 0),
+        model_parallel_size=model_parallel_size,
+    )
+
+    if vllm_cfg.get("use_deep_gemm", False):
+        os.environ["VLLM_USE_DEEP_GEMM"] = "1"
+
+    if vllm_cfg["async_engine"]:
+        # for async engine, vllm spawns a process for each DP, so we patch
+        # vllm so that upon spawning the thread it applies our FP8 patches
+        EngineCoreProc.run_engine_core = my_run_engine_core
+        CoreEngineProcManager.__init__ = my_init
+    else:
+        # if not async, just directly monkey patch the ray executor
+        monkey_patch_vllm_ray_executor(global_fp8_config)
+
+    # create fp8 kwargs for vllm's LLM(...)
+    num_first_layers_in_bf16 = vllm_cfg.get("num_first_layers_in_bf16", 0)
+    num_last_layers_in_bf16 = vllm_cfg.get("num_last_layers_in_bf16", 0)
+    fp8_block_quant_kwargs = dict(FP8_BLOCK_QUANT_KWARGS)
+
+    if num_first_layers_in_bf16 > 0 or num_last_layers_in_bf16 > 0:
+        with init_empty_weights():
+            model = AutoModel.from_config(config)
+        param_names = [name for name, _ in model.named_parameters()]
+
+        bf16_params = []
+        if num_first_layers_in_bf16 > 0:
+            layers = [l for l in range(num_first_layers_in_bf16)]
+            bf16_params.extend(_get_params_in_layers(param_names, layers))
+
+        if num_last_layers_in_bf16 > 0:
+            layers = [
+                l
+                for l in range(
+                    config.num_hidden_layers - num_last_layers_in_bf16,
+                    config.num_hidden_layers,
+                )
+            ]
+            bf16_params.extend(_get_params_in_layers(param_names, layers))
+
+        fp8_block_quant_kwargs["ignored_layers"] = bf16_params
+
+    vllm_kwargs = {
+        "quantization": "fp8",
+        "hf_overrides": {"quantization_config": fp8_block_quant_kwargs},
+    }
+    return vllm_kwargs
+
+
+def is_fp8_model(vllm_config):
+    from vllm.model_executor.layers.quantization.fp8 import Fp8Config
+
+    if hasattr(vllm_config, "quant_config") and isinstance(
+        vllm_config.quant_config, Fp8Config
+    ):
+        assert vllm_config.quant_config.weight_block_size is not None, (
+            "Only block scaling is currently supported in NeMo-RL!"
+        )
+        return True
+
+    return False
+
+
+def _get_params_in_layers(param_names, layers):
+    layer_templates = []
+    for i in layers:
+        # Prefixes used by huggingface model transformer layers.
+        # We'll use these to match against the parameter names to determine
+        # which layer the parameter is in.
+ layer_templates.extend( + [ + f"transformer.h.{i}.", + f"layers.{i}.", + f"layer.{i}.", + ] + ) + prefixes = [p for p in layer_templates if any(p in n for n in param_names)] + if len(prefixes) == 0: + raise ValueError(f"Could not identify layers {layers} for model.") + + params = [] + for name in param_names: + if ( + any(p in name for p in prefixes) + and "bias" not in name + and "layernorm" not in name + ): + # Convert the param name into vllm's module name + # Vllm wraps the model with an extra 'model' + params.append(f"model.{name}".removesuffix(".weight")) + return params + + +def _get_module_from_param_name(model, name: str): + # Split the name into parts (e.g., 'layers', '0', 'self_attn', 'q_proj', 'weight') + # The module path is all but the last part (the parameter's own name) + path_parts = name.split(".") + module_path = path_parts[:-1] + # Replace with the fused model name + packed_modules_mapping = model.packed_modules_mapping + reversed_mapping = { + original_name: fused_name + for fused_name, original_names_list in packed_modules_mapping.items() + for original_name in original_names_list + } + if module_path[-1] in reversed_mapping.keys(): + module_path[-1] = reversed_mapping[module_path[-1]] + + current_module = model + try: + # Traverse the model hierarchy + for part in module_path: + if isinstance(current_module, torch.nn.ModuleList): + current_module = current_module[int(part)] + else: + current_module = getattr(current_module, part) + except (AttributeError, IndexError, ValueError) as e: + print(f"Warning: Could not find module for parameter '{name}'. 
Error: {e}") + return current_module + + +def _is_fp8_weight(name, model): + if name not in fp8_state.seen_params: + fp8_state.seen_params.add(name) + # Filter out bias params + if name.endswith("weight"): + module = _get_module_from_param_name(model, name) + # We currently only quantize linear layers + if ( + isinstance(module, LinearBase) + and module.weight.dtype == torch.float8_e4m3fn + ): + fp8_state.fp8_param_names.add(name) + return name in fp8_state.fp8_param_names + + +def load_weights(weights, model_runner): + weights_quantized = [] + model = model_runner.model + + for k, v in weights: + if not _is_fp8_weight(k, model): + weights_quantized.append((k, v)) + continue + # Cast the weight into fp8 and its scale factor + param_lp, param_scale = cast_tensor_to_fp8_blockwise( + v.to(torch.float), + weight_block_size=FP8_BLOCK_QUANT_KWARGS["weight_block_size"], + ) + param_scale = torch.squeeze(param_scale, dim=-1) + weights_quantized.append([k, param_lp]) + weights_quantized.append([k + "_scale_inv", param_scale]) + # Monkey patch the param class to their subclass, as certain models + # will check the param type to call the proper weightloader + for name, param in model.named_parameters(): + if hasattr(param, "subclass_type"): + param.orig_type = param.__class__ + param.__class__ = param.subclass_type + # Finally load the weights into vllm + model.load_weights(weights_quantized) + # Undo the type change above to the original type + for name, param in model.named_parameters(): + if hasattr(param, "subclass_type"): + param.__class__ = param.orig_type + + +def cast_tensor_to_fp8_blockwise( + data_hp, + weight_block_size, +): + assert len(data_hp.shape) == 2, "Only 2d input tensor is supported" + + block_size1 = weight_block_size[1] + block_size0 = weight_block_size[0] + assert data_hp.shape[1] % block_size1 == 0, ( + f"data_hp.shape[1] {data_hp.shape[1]} must be a multiple of block_size1: {block_size1}." 
+ ) + assert data_hp.shape[0] % block_size0 == 0, ( + f"data_hp.shape[0] {data_hp.shape[0]} must be a multiple of block_size0: {block_size0}." + ) + + # FP8 + max_dtype = torch.finfo(torch.float8_e4m3fn).max + + original_shape = data_hp.shape + blk_m, blk_n = data_hp.shape[0] // block_size0, data_hp.shape[1] // block_size1 + + assert block_size1 == block_size0 + data_hp = data_hp.reshape(blk_m, block_size0, blk_n, block_size1) + + # Permute to (BLK_M, BLK_N, BLOCK_SIZE_M, BLOCK_SIZE_N) + data_hp = data_hp.permute(0, 2, 1, 3) + # Flatten to (BLK_M, BLK_N, BLOCK_SIZE_M * BLOCK_SIZE_N) + data_hp = data_hp.to(torch.float32).contiguous().flatten(start_dim=2) + + # Calculate max absolute value per block + max_abs = torch.amax(torch.abs(data_hp), dim=-1, keepdim=True) + # Calculate descale factor + descale = max_abs / max_dtype + + global global_fp8_config + if global_fp8_config.use_weight_pow2_scale: + exponent = torch.ceil(torch.log2(descale)) + # Post process exponent to be in range of -127 to 127 and to be E8M0 biased + exponent = torch.clamp(exponent, min=-127, max=127) + 127 + # Convert to uint8 container + exponent = exponent.to(torch.uint8) + # Calculate descale_fp to apply to data_hp + scale_fp = torch.where( + # If exponent is 0, descale_fp is 1.0 rather than 2^127 + exponent == 0, + 1.0, + torch.exp2(127 - exponent.to(torch.float32)), + ) + descale_fp = torch.reciprocal(scale_fp) + else: + scale_fp = max_dtype / max_abs + scale_fp = torch.where(max_abs == 0, 1.0, scale_fp) + # preserve the behavior for 0 amax case + scale_fp = torch.where(max_abs == torch.inf, 1.0, scale_fp) + + descale_fp = torch.reciprocal(scale_fp) + + # Scale and saturate cast the data elements to max of target dtype + data_lp = torch.clamp(data_hp * scale_fp, min=-1 * max_dtype, max=max_dtype) + + fp_data = data_lp.to(torch.float8_e4m3fn) + + # (BLK_M, BLK_N, BLOCK_SIZE_M * BLOCK_SIZE_N) to (M, N) + fp_data = ( + fp_data.reshape(blk_m, blk_n, block_size0, block_size1) + .permute(0, 2, 1, 
3) + .reshape(original_shape) + ) + + # Convert to target format, but still in original precision container + return fp_data, descale_fp + + +def process_weights_after_loading(self, layer) -> None: + from torch.nn import Parameter + from vllm.model_executor.parameter import ( + BlockQuantScaleParameter, + ModelWeightParameter, + ) + + assert self.block_quant and self.quant_config.is_checkpoint_fp8_serialized + assert self.quant_config.activation_scheme == "dynamic" + + def _create_param_from_subclass_attributes(custom_param): + param = Parameter(custom_param.data, requires_grad=False) + base_param_dir = dir(torch.nn.Parameter) + custom_param_dir = dir(custom_param) + # Find the attributes that are unique to the custom parameter + custom_attributes = [ + attr + for attr in custom_param_dir + if attr not in base_param_dir and not attr.startswith("__") + ] + # Set the custom attributes into the base parameter object + for attr in custom_attributes: + setattr(param, attr, getattr(custom_param, attr)) + + param.subclass_type = type(custom_param) + return param + + weight = layer.weight.data + weight_scale_inv = layer.weight_scale_inv.data + weight = self._maybe_pad_weight(weight) + + layer.weight = _create_param_from_subclass_attributes( + ModelWeightParameter( + data=weight, + output_dim=0, + input_dim=1, + weight_loader=layer.weight.weight_loader, + ) + ) + layer.weight_scale_inv = _create_param_from_subclass_attributes( + BlockQuantScaleParameter( + data=weight_scale_inv, + output_dim=0, + input_dim=1, + weight_loader=layer.weight_scale_inv.weight_loader, + ) + ) + + +@triton.jit +def _per_token_group_quant_fp8( + # Pointers to inputs and output + y_ptr, + y_q_ptr, + y_s_ptr, + group_size, + # Num columns of y + y_num_columns, + y_row_stride, + # Avoid to divide zero + eps, + # Information for float8 + fp8_min, + fp8_max, + # Meta-parameters + BLOCK: tl.constexpr, +): + groups_per_row = y_num_columns // group_size + + # Map the program id to the row of X and Y it 
should compute. + g_id = tl.program_id(0) + row = g_id // groups_per_row + row_g_id = g_id % groups_per_row + + y_ptr += (row * y_row_stride) + (row_g_id * group_size) + y_q_ptr += g_id * group_size + y_s_ptr += g_id + + cols = tl.arange(0, BLOCK) # N <= BLOCK + mask = cols < group_size + + y = tl.load(y_ptr + cols, mask=mask, other=0.0).to(tl.float32) + # Quant + _absmax = tl.maximum(tl.max(tl.abs(y)), eps) + + # pow2_scale + inv_scale = fp8_max / _absmax + exponent = tl.floor(tl.log2(inv_scale)) + # exponent is an integer + exponent = tl.minimum(exponent, 126.0) + + # after rounding to exponent, round back to floating + inv_scale_pow2 = tl.exp2(exponent) + + is_nan = inv_scale_pow2 != inv_scale_pow2 + is_inf = (inv_scale_pow2 == 1.0 / 0.0) | (inv_scale_pow2 == -1.0 / 0.0) + + # If the value is NaN or infinity, default it to 1.0, + # otherwise keep its original value. + inv_scale_pow2 = tl.where(is_nan | is_inf, 1.0, inv_scale_pow2) + # finally uninverse + y_s = 1.0 / inv_scale_pow2 + + y_q = tl.clamp(y / y_s, fp8_min, fp8_max).to(y_q_ptr.dtype.element_ty) + + tl.store(y_q_ptr + cols, y_q, mask=mask) + tl.store(y_s_ptr, y_s) + + +@triton.jit +def _per_token_group_quant_fp8_colmajor( + # Pointers to inputs and output + y_ptr, + y_q_ptr, + y_s_ptr, + group_size, + # Num columns of y + y_num_columns, + y_row_stride, + # Stride from one column to the next of y_s + y_s_col_stride, + # Avoid to divide zero + eps, + # Information for float8 + fp8_min, + fp8_max, + # Meta-parameters + BLOCK: tl.constexpr, +): + groups_per_row = y_num_columns // group_size + + # Map the program id to the row of X and Y it should compute. 
+ g_id = tl.program_id(0) + row = g_id // groups_per_row + row_g_id = g_id % groups_per_row + + y_ptr += (row * y_row_stride) + (row_g_id * group_size) + y_q_ptr += g_id * group_size + + # Convert g_id the flattened block coordinate to 2D so we can index + # into the output y_scales matrix + blocks_per_row = y_num_columns // group_size + scale_col = g_id % blocks_per_row + scale_row = g_id // blocks_per_row + y_s_ptr += scale_col * y_s_col_stride + scale_row + + cols = tl.arange(0, BLOCK) # group_size <= BLOCK + mask = cols < group_size + + y = tl.load(y_ptr + cols, mask=mask, other=0.0).to(tl.float32) + _absmax = tl.maximum(tl.max(tl.abs(y)), eps) + + # Quant pow2_scale: + inv_scale = fp8_max / _absmax + # calculate the nearest pow2 integer + exponent = tl.floor(tl.log2(inv_scale)) + exponent = tl.minimum(exponent, 126.0) + # round inv_scale to the nearest pow2 with the exp we just calculated + inv_scale_pow2 = tl.exp2(exponent) + # If the value is NaN or infinity, default it to 1.0, + # otherwise keep its original value. 
+ is_nan = inv_scale_pow2 != inv_scale_pow2 + is_inf = (inv_scale_pow2 == float("inf")) | (inv_scale_pow2 == float("-inf")) + inv_scale_pow2 = tl.where(is_nan | is_inf, 1.0, inv_scale_pow2) + # finally uninverse + y_s = 1.0 / inv_scale_pow2 + + y_q = tl.clamp(y / y_s, fp8_min, fp8_max).to(y_q_ptr.dtype.element_ty) + + tl.store(y_q_ptr + cols, y_q, mask=mask) + tl.store(y_s_ptr, y_s) + + +def per_token_group_quant_fp8( + *args, + **kwargs, +) -> tuple[torch.Tensor, torch.Tensor]: + assert global_fp8_config.use_activation_pow2_scale + from vllm.model_executor.layers.quantization.utils.fp8_utils import ( + per_token_group_quant_fp8 as vllm_per_token_group_quant_fp8, + ) + + return vllm_per_token_group_quant_fp8(*args, **kwargs) diff --git a/nemo_rl/models/generation/vllm.py b/nemo_rl/models/generation/vllm.py deleted file mode 100644 index cbdd264d69..0000000000 --- a/nemo_rl/models/generation/vllm.py +++ /dev/null @@ -1,2023 +0,0 @@ -# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import asyncio -import copy -import gc -import os -import sys -import uuid -from collections import defaultdict -from typing import ( - Any, - AsyncGenerator, - NotRequired, - Optional, - TypedDict, - Union, - cast, -) - -import numpy as np -import ray -import torch -from ray.util.placement_group import PlacementGroup - -from nemo_rl.distributed.batched_data_dict import BatchedDataDict, SlicedDataDict -from nemo_rl.distributed.named_sharding import NamedSharding -from nemo_rl.distributed.virtual_cluster import ( - RayVirtualCluster, -) -from nemo_rl.distributed.worker_group_utils import get_nsight_config_if_pattern_matches -from nemo_rl.distributed.worker_groups import ( - RayWorkerBuilder, - RayWorkerGroup, -) -from nemo_rl.models.generation.interfaces import ( - GenerationConfig, - GenerationDatumSpec, - GenerationInterface, - GenerationOutputSpec, - verify_right_padding, -) -from nemo_rl.models.huggingface.common import ModelFlag -from nemo_rl.models.policy.utils import is_vllm_v1_engine_enabled - - -class VllmSpecificArgs(TypedDict): - tensor_parallel_size: int - pipeline_parallel_size: int - gpu_memory_utilization: float - max_model_len: int - # Additional arguments for vLLM inserted by nemo rl based on the context of when vllm is used - skip_tokenizer_init: bool - async_engine: bool - load_format: NotRequired[str] - precision: NotRequired[str] - enforce_eager: NotRequired[bool] - - -class VllmConfig(GenerationConfig): - vllm_cfg: VllmSpecificArgs - vllm_kwargs: NotRequired[dict[str, Any]] - - -@ray.remote( - runtime_env={**get_nsight_config_if_pattern_matches("vllm_generation_worker")} -) # pragma: no cover -class VllmGenerationWorker: - def __repr__(self) -> str: - """Customizes the actor's prefix in the Ray logs. - - This makes it easier to identify which worker is producing specific log messages. 
- """ - return f"{self.__class__.__name__}" - - @staticmethod - def configure_worker( - num_gpus: int | float, bundle_indices: Optional[tuple[int, list[int]]] = None - ) -> tuple[dict[str, Any], dict[str, str], dict[str, Any]]: - """Provides complete worker configuration for vLLM tensor and pipeline parallelism. - - This method configures the worker based on its role in tensor and pipeline parallelism, - which is determined directly from the bundle_indices parameter. - - Args: - num_gpus: Original GPU allocation for this worker based on the placement group - bundle_indices: Tuple of (node_idx, local_bundle_indices) for parallelism (if applicable) - - Returns: - tuple with complete worker configuration: - - 'resources': Resource allocation (e.g., num_gpus) - - 'env_vars': Environment variables for this worker - - 'init_kwargs': Parameters to pass to __init__ of the worker - """ - # Initialize configuration - resources: dict[str, Any] = {"num_gpus": num_gpus} - init_kwargs: dict[str, Any] = {} - env_vars: dict[str, str] = {} - - local_bundle_indices = None - if bundle_indices is not None: - node_idx = bundle_indices[0] - local_bundle_indices = bundle_indices[1] - init_kwargs["bundle_indices"] = local_bundle_indices - - """ - compute a unique seed from the node_idx and bundle_indices: - node_idx = 0, bundle_indices = [0, 1, 2, 3] -> seed = 0*1024 + 0 - node_idx = 0, bundle_indices = [4, 5, 6, 7] -> seed = 0*1024 + 1 - node_idx = 1, bundle_indices = [0, 1, 2, 3] -> seed = 1*1024 + 0 - node_idx = 1, bundle_indices = [4, 5, 6, 7] -> seed = 1*1024 + 1 - """ - # For single worker groups, use a simpler seed calculation - if len(local_bundle_indices) == 1: - seed = node_idx * 1024 + local_bundle_indices[0] - else: - # For parallel groups, use the original calculation - bundle_id = local_bundle_indices[0] // len(local_bundle_indices) - seed = node_idx * 1024 + bundle_id - - init_kwargs["seed"] = seed - # Need to give each DP group its own vllm cache to address: - # 
https://github.com/vllm-project/vllm/issues/18851 - env_vars["VLLM_CACHE_ROOT"] = os.path.expanduser(f"~/.cache/vllm_{seed}") - - # Check if this worker is part of a parallel group (TP or TP+PP). - # A worker is part of a parallel group if it's a secondary member (local_bundle_indices is None) - # or if it's a primary member of a group with multiple workers. - is_part_of_parallel_workers = ( - local_bundle_indices is not None and len(local_bundle_indices) > 1 - ) or local_bundle_indices is None - - if is_part_of_parallel_workers: - # Ray + vllm likes to manage GPU assignment internally for parallel groups - resources["num_gpus"] = 0 - env_vars["RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES"] = "1" - init_kwargs["fraction_of_gpus"] = num_gpus - - env_vars["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" - # Skip vllm P2P check and rely on driver to report peer to peer capability. - env_vars["VLLM_SKIP_P2P_CHECK"] = "1" - - return resources, env_vars, init_kwargs - - def __init__( - self, - config: VllmConfig, - bundle_indices: Optional[list[int]] = None, - fraction_of_gpus: float = 1.0, - seed: Optional[int] = None, - ): - """Initialize a vLLM worker for distributed inference. - - Args: - config: Configuration dictionary for the policy - bundle_indices: List of local bundle indices within a node for parallelism. - Only needed for the first worker in each tied worker group. 
- fraction_of_gpus: Fraction of GPUs to use for this worker - seed: Random seed for initialization - """ - self.cfg = config - - self.model_name = self.cfg["model_name"] - self.tensor_parallel_size = self.cfg["vllm_cfg"]["tensor_parallel_size"] - self.pipeline_parallel_size = self.cfg["vllm_cfg"]["pipeline_parallel_size"] - self.gpu_memory_utilization = self.cfg["vllm_cfg"]["gpu_memory_utilization"] - self.fraction_of_gpus = fraction_of_gpus - self.is_model_owner = bundle_indices is not None - - # Store the Python executable being used by this worker - self.py_executable = sys.executable - - # Skip model loading if we're not the model owner - if not self.is_model_owner: - self.llm = None - self.tokenizer = None - self.rank = 0 - self.world_size = 1 - return - - # In Ray+vLLM setup, each worker process considers itself rank 0 - # vLLM handles the parallelism internally through Ray - self.rank = 0 - self.world_size = 1 - - # Monkey patch for vLLM to ensure RAY_ADDRESS is set in Ray actors. - try: - import vllm.utils - from vllm.logger import init_logger - from vllm.utils import cuda_is_initialized, is_in_ray_actor - - logger = init_logger("vllm_patch") - - def _patched_maybe_force_spawn(): - """Patched version of vllm.utils._maybe_force_spawn. - - This patch changes an `elif is_in_ray_actor()` to an `if` statement. - This ensures that `os.environ["RAY_ADDRESS"]` is set when running - within a Ray actor, even if CUDA has already been initialized. - This is crucial for vLLM workers to connect back to the Ray cluster. - """ - if os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") == "spawn": - return - - reason = None - if cuda_is_initialized(): - reason = "CUDA is initialized" - - if is_in_ray_actor(): - # even if we choose to spawn, we need to pass the ray address - # to the subprocess so that it knows how to connect to the ray cluster. - # env vars are inherited by subprocesses, even if we use spawn. 
- import ray - - os.environ["RAY_ADDRESS"] = ray.get_runtime_context().gcs_address - if reason is None: - reason = "In a Ray actor and can only be spawned" - - if reason is not None: - logger.warning( - "We must use the `spawn` multiprocessing start method. " - "Overriding VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. " - "See https://docs.vllm.ai/en/latest/getting_started/" - "troubleshooting.html#python-multiprocessing " - "for more information. Reason: %s", - reason, - ) - os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" - - vllm.utils._maybe_force_spawn = _patched_maybe_force_spawn - logger.info("Successfully patched vllm.utils._maybe_force_spawn.") - - def _patch_vllm_init_workers_ray(): - # Patch the vLLM ray_distributed_executor.py file to pass custom runtime_env in _init_workers_ray call. - # This allows passing custom py_executable to worker initialization. - - try: - import vllm.executor.ray_distributed_executor as ray_executor_module - - file_to_patch = ray_executor_module.__file__ - - with open(file_to_patch, "r") as f: - content = f.read() - - old_line = "self._init_workers_ray(placement_group)" - new_line = f'self._init_workers_ray(placement_group, runtime_env={{"py_executable": "{self.py_executable}"}})' - - if new_line in content: - return - - if old_line not in content: - return - - patched_content = content.replace(old_line, new_line) - - # Write back the patched content - with open(file_to_patch, "w") as f: - f.write(patched_content) - - except (ImportError, FileNotFoundError, PermissionError): - # Allow failures gracefully - pass - - _patch_vllm_init_workers_ray() - - except (ImportError, AttributeError): - # vllm not installed or has a different structure, skipping patch. - pass - - try: - import vllm - - self.SamplingParams = vllm.SamplingParams - except ImportError: - raise ImportError( - "vLLM is not installed. Please check that the py_executable in the runtime_env of VllmGenerationWorker " - "covers the vllm dependency. 
You may have to update nemo_rl/distributed/ray_actor_environment_registry.py. " - "If you are working interactively, you can install by running `uv sync --extra vllm` anywhere in the repo." - ) - vllm_kwargs: dict[str, Any] = copy.deepcopy(self.cfg.get("vllm_kwargs", {})) - - # Calculate total parallel size (TP * PP) - model_parallel_size = self.tensor_parallel_size * self.pipeline_parallel_size - - # Special handling for parallel case (either TP or PP or both) - if model_parallel_size > 1: - # Configure vLLM for tensor/pipeline parallelism within Ray - # Reset CUDA_VISIBLE_DEVICES to allow vLLM to manage GPU assignment - os.environ.pop("CUDA_VISIBLE_DEVICES", None) - os.environ["VLLM_RAY_PER_WORKER_GPUS"] = str( - self.fraction_of_gpus / model_parallel_size - ) - - # Set bundle indices for parallel workers - bundle_indices_str = ",".join(map(str, bundle_indices)) - os.environ["VLLM_RAY_BUNDLE_INDICES"] = bundle_indices_str - print( - f"VLLM_RAY_BUNDLE_INDICES environment variable set to: {os.environ.get('VLLM_RAY_BUNDLE_INDICES')}" - ) - - # Use Ray for distributed execution in parallel mode - vllm_kwargs["distributed_executor_backend"] = "ray" - else: - # For non-parallel mode, explicitly set executor to None to avoid Ray issues - vllm_kwargs["distributed_executor_backend"] = None - - os.environ["VLLM_USE_V1"] = "1" if is_vllm_v1_engine_enabled() else "0" - os.environ["VLLM_ALLOW_INSECURE_SERIALIZATION"] = "1" - - load_format = self.cfg["vllm_cfg"]["load_format"] - if ModelFlag.VLLM_LOAD_FORMAT_AUTO.matches(self.model_name): - load_format = "auto" - - llm_kwargs = dict( - model=self.model_name, - load_format=load_format, - # vllm==0.10.0 breaks skip_tokenizer_init=True. - # This will be reverted to `self.cfg["vllm_cfg"]["skip_tokenizer_init"]` once https://github.com/NVIDIA-NeMo/RL/issues/818 is resolved. 
- skip_tokenizer_init=False, - tensor_parallel_size=self.tensor_parallel_size, - pipeline_parallel_size=self.pipeline_parallel_size, - gpu_memory_utilization=self.gpu_memory_utilization, - enable_prefix_caching=torch.cuda.get_device_capability()[0] >= 8, - dtype=self.cfg["vllm_cfg"]["precision"], - seed=seed, - enforce_eager=self.cfg["vllm_cfg"]["enforce_eager"], - max_model_len=self.cfg["vllm_cfg"]["max_model_len"], - trust_remote_code=True, - worker_extension_cls="nemo_rl.models.generation.vllm_backend.VllmInternalWorkerExtension", - enable_sleep_mode=True, - disable_log_stats=True, - logprobs_mode="raw_logprobs", - **vllm_kwargs, - ) - - if self.cfg["vllm_cfg"]["async_engine"]: - from vllm.engine.arg_utils import AsyncEngineArgs - from vllm.v1.engine.async_llm import AsyncLLM - - self.llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**llm_kwargs)) - else: - self.llm = vllm.LLM(**llm_kwargs) - - # will be initialized in post_init - # used in update_weights_from_ipc_handles - self.vllm_device_ids = None - - def post_init(self): - self.vllm_device_ids = self.report_device_id() - - async def post_init_async(self): - self.vllm_device_ids = await self.report_device_id_async() - - def init_collective( - self, rank_prefix: int, ip: str, port: int, world_size: int - ) -> None: - self.llm.collective_rpc( - "init_collective", - args=( - rank_prefix, - ip, - port, - world_size, - ), - ) - - async def init_collective_async( - self, rank_prefix: int, ip: str, port: int, world_size: int - ) -> None: - await self.llm.collective_rpc( - "init_collective", - args=( - rank_prefix, - ip, - port, - world_size, - ), - ) - - def llm(self): - return self.llm - - def is_alive(self): - """Check if the worker is alive.""" - return True - - def _merge_stop_strings(self, batch_stop_strings): - stop_set: set[str] = set() - - if self.cfg.get("stop_strings"): - stop_set.update(self.cfg["stop_strings"]) - - if batch_stop_strings is not None: - for sample_ss in batch_stop_strings: - if sample_ss: 
- stop_set.update(sample_ss) - - return list(stop_set) if stop_set else None - - def _build_sampling_params( - self, - *, - greedy: bool, - stop_strings, - max_new_tokens: Optional[int] = None, - ): - top_k_cfg = self.cfg["top_k"] - top_k_val = 1 if greedy else (top_k_cfg if top_k_cfg is not None else -1) - - temperature = 0.0 if greedy else self.cfg["temperature"] - - max_tokens = ( - max_new_tokens if max_new_tokens is not None else self.cfg["max_new_tokens"] - ) - - return self.SamplingParams( - temperature=temperature, - top_p=self.cfg["top_p"], - top_k=top_k_val, - max_tokens=max_tokens, - logprobs=0, - stop_token_ids=self.cfg["stop_token_ids"], - stop=stop_strings, - include_stop_str_in_output=True, - ) - - def generate( - self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False - ) -> BatchedDataDict[GenerationOutputSpec]: - """Generate a batch of data using vLLM generation. - - Args: - data: BatchedDataDict containing input_ids and input_lengths tensors - greedy: Whether to use greedy decoding instead of sampling - - Returns: - BatchedDataDict conforming to GenerationOutputSpec: - - output_ids: input + generated token IDs with proper padding - - logprobs: Log probabilities for tokens - - generation_lengths: Lengths of each response - - unpadded_sequence_lengths: Lengths of each input + generated sequence - """ - # Handle empty input case - if len(data["input_ids"]) == 0: - # Return empty BatchedDataDict with all required fields - return BatchedDataDict[GenerationOutputSpec]( - { - "output_ids": torch.zeros((0, 0), dtype=torch.long), - "logprobs": torch.zeros((0, 0), dtype=torch.float), - "generation_lengths": torch.zeros(0, dtype=torch.long), - "unpadded_sequence_lengths": torch.zeros(0, dtype=torch.long), - } - ) - - input_ids = data["input_ids"] - input_lengths = data["input_lengths"] - batch_stop_strings: list[list[str]] = data.get("stop_strings", []) - stop_strings = self._merge_stop_strings(batch_stop_strings) - sampling_params = 
self._build_sampling_params( - greedy=greedy, - stop_strings=stop_strings, - ) - - # verify inputs have correct padding - verify_right_padding(data, pad_value=self.cfg["pad_token_id"]) - - # Convert inputs to vLLM format - batch_size = input_ids.shape[0] - # Original input length with padding - padded_input_length = input_ids.size(1) - - # Prepare prompts for vLLM (removing padding) - prompts = [] - - for i in range(batch_size): - # Use input_lengths to get only valid tokens (not padding) - valid_length = input_lengths[i].item() - valid_ids = ( - input_ids[i, :valid_length] if valid_length > 0 else input_ids[i, :0] - ) - token_ids = valid_ids.tolist() - - prompts.append({"prompt_token_ids": token_ids}) - - # Generate outputs - assert self.llm is not None, ( - "Attempting to generate with either an uninitialized vLLM or non-model-owner" - ) - outputs = self.llm.generate(prompts, sampling_params) - - # Process the outputs - but preserve the original input padding structure - output_ids_list = [] - logprobs_list = [] - generation_lengths = [] - unpadded_sequence_lengths = [] - max_length = 0 - for output in outputs: - max_length = max(max_length, len(output.outputs[0].token_ids)) - - for i, output in enumerate(outputs): - # Extract generated tokens - sequence_length = input_lengths[i] - generation = output.outputs[0] - generated_tokens = list(generation.token_ids) - - # Calculate total sequence length (original input length + generated tokens) - total_length = padded_input_length + max_length - - # Create a new tensor with the right size and fill with padding token - full_output = torch.full( - (total_length,), self.cfg["pad_token_id"], dtype=input_ids.dtype - ) - - # Copy original input (with padding) into the beginning - full_output[:sequence_length] = input_ids[i][:sequence_length] - - # Add generated tokens after the original input - full_output[sequence_length : sequence_length + len(generated_tokens)] = ( - torch.tensor(generated_tokens) - ) - - 
output_ids_list.append(full_output) - full_logprobs = torch.zeros(total_length, dtype=torch.float32) - if hasattr(generation, "logprobs") and generation.logprobs: - try: - for idx, logprob_dict in enumerate(generation.logprobs): - if logprob_dict: - position = sequence_length + idx - full_logprobs[position] = next(iter(logprob_dict.items()))[ - 1 - ].logprob - except Exception: - import traceback - - traceback.print_exc() - - logprobs_list.append(full_logprobs) - - response_length = sequence_length + len(generated_tokens) - generation_lengths.append(len(generated_tokens)) - unpadded_sequence_lengths.append(response_length) - assert response_length <= self.llm.llm_engine.model_config.max_model_len, ( - f"response_length={response_length} > max_model_len={self.llm.llm_engine.model_config.max_model_len}, which should not happen. Please check this behavior in isolation by running `uv run --extra vllm tools/model_diagnostics/1.max_model_len_respected.py {self.llm.llm_engine.model_config.model}` and raise this issue with the vllm team." - ) - - # Create return data conforming to GenerationOutputSpec - output_ids = torch.stack(output_ids_list) - logprobs = torch.stack(logprobs_list) - - return_data = BatchedDataDict[GenerationOutputSpec]( - { - "output_ids": output_ids, - "logprobs": logprobs, - "generation_lengths": torch.tensor( - generation_lengths, dtype=torch.long - ), - "unpadded_sequence_lengths": torch.tensor( - unpadded_sequence_lengths, dtype=torch.long - ), - } - ) - - return return_data - - async def generate_async( - self, - data: BatchedDataDict[GenerationDatumSpec], - greedy: bool = False, - ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: - """Generate a batch of data using vLLM's AsyncLLMEngine, yielding results as they are ready. 
- - Args: - data: BatchedDataDict with input_ids and input_lengths - greedy: Whether to use greedy decoding instead of sampling - - Yields: - Tuple of (original_index, BatchedDataDict conforming to GenerationOutputSpec for the single sequence) - """ - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "generate_async can only be used when async_engine is enabled in vLLM config." - ) - - # Handle empty input case - if len(data["input_ids"]) == 0: - return - - verify_right_padding(data, pad_value=self.cfg["pad_token_id"]) - - input_ids_batch = data["input_ids"] - input_lengths_batch = data["input_lengths"] - batch_size = input_ids_batch.shape[0] - - # Ensure generate_async only receives single samples (batch_size = 1) - assert batch_size == 1, ( - f"generate_async is restricted to handle only single samples, " - f"but received batch_size={batch_size}. Please handle batching outside this method." - ) - - batch_specific_stop_strings_list = data.get( - "stop_strings", [[] for _ in range(batch_size)] - ) - - # Create tasks for each sample in the batch - async def process_single_sample(sample_idx): - """Process a single sample and return the result.""" - current_input_actual_length = input_lengths_batch[sample_idx].item() - prompt_token_ids_list = ( - input_ids_batch[sample_idx, :current_input_actual_length].tolist() - if current_input_actual_length > 0 - else [] - ) - prompt = {"prompt_token_ids": prompt_token_ids_list} - - per_sample_stop_strings = None - if batch_specific_stop_strings_list and sample_idx < len( - batch_specific_stop_strings_list - ): - per_sample_stop_strings = batch_specific_stop_strings_list[sample_idx] - - final_stop_strings_for_sample = self._merge_stop_strings( - [per_sample_stop_strings] if per_sample_stop_strings else None - ) - - remaining_ctx = ( - self.cfg["vllm_cfg"]["max_model_len"] - current_input_actual_length - ) - allowed_new_tokens = max(0, min(self.cfg["max_new_tokens"], remaining_ctx)) - - # Handle case where no 
tokens can be generated due to length constraints - if allowed_new_tokens == 0: - # Access the input data directly from the function parameters - input_ids_single_row = input_ids_batch[sample_idx] - - # Create output tensors with just the input (no generated tokens) - output_ids_single_item_batched = input_ids_single_row[ - :current_input_actual_length - ].unsqueeze(0) - - logprobs_single_item = torch.zeros( - (1, current_input_actual_length), - dtype=torch.float32, - device=input_ids_single_row.device, - ) - - generation_lengths_tensor = torch.tensor( - [0], dtype=torch.long, device=input_ids_single_row.device - ) - - unpadded_sequence_lengths_tensor = torch.tensor( - [current_input_actual_length], - dtype=torch.long, - device=input_ids_single_row.device, - ) - - result_batch = BatchedDataDict[GenerationOutputSpec]( - { - "output_ids": output_ids_single_item_batched, - "logprobs": logprobs_single_item, - "generation_lengths": generation_lengths_tensor, - "unpadded_sequence_lengths": unpadded_sequence_lengths_tensor, - } - ) - - return (sample_idx, result_batch) - - sampling_params_for_request = self._build_sampling_params( - greedy=greedy, - stop_strings=final_stop_strings_for_sample, - max_new_tokens=allowed_new_tokens, - ) - - request_id = str(uuid.uuid4()) - - # Generate using vLLM async engine - vllm_request_generator = self.llm.generate( - prompt=prompt, - sampling_params=sampling_params_for_request, - request_id=request_id, - ) - - # Get the final result from the generator - final_request_output = None - async for req_output in vllm_request_generator: - final_request_output = req_output - - if final_request_output is None: - raise RuntimeError(f"No output received for request {request_id}") - - # Process the output - generation_details = final_request_output.outputs[0] - generated_token_ids = list(generation_details.token_ids) - num_generated_tokens = len(generated_token_ids) - - original_input_ids_single_row = input_ids_batch[sample_idx] - 
final_output_tensor_len = current_input_actual_length + num_generated_tokens - - # Create output_ids tensor for this single item - output_ids_single_item = torch.full( - (final_output_tensor_len,), - self.cfg["pad_token_id"], - dtype=original_input_ids_single_row.dtype, - device=original_input_ids_single_row.device, - ) - # Copy original input (up to its actual length) - output_ids_single_item[:current_input_actual_length] = ( - original_input_ids_single_row[:current_input_actual_length] - ) - # Add generated tokens after the actual input - output_ids_single_item[ - current_input_actual_length : current_input_actual_length - + num_generated_tokens - ] = torch.tensor( - generated_token_ids, - dtype=original_input_ids_single_row.dtype, - device=original_input_ids_single_row.device, - ) - - # Reshape to (1, seq_len) for BatchedDataDict - output_ids_single_item_batched = output_ids_single_item.unsqueeze(0) - - # Create logprobs tensor for this single item - logprobs_single_item = torch.zeros( - (1, final_output_tensor_len), - dtype=torch.float32, - device=original_input_ids_single_row.device, - ) - if hasattr(generation_details, "logprobs") and generation_details.logprobs: - for idx, logprob_dict_per_token in enumerate( - generation_details.logprobs - ): - if logprob_dict_per_token and idx < len(generated_token_ids): - token_id_at_idx = generated_token_ids[idx] - if token_id_at_idx in logprob_dict_per_token: - logprob_value = logprob_dict_per_token[ - token_id_at_idx - ].logprob - position_in_output_tensor = ( - current_input_actual_length + idx - ) - if position_in_output_tensor < final_output_tensor_len: - logprobs_single_item[0, position_in_output_tensor] = ( - logprob_value - ) - - # Generation lengths - generation_lengths_tensor = torch.tensor( - [num_generated_tokens], - dtype=torch.long, - device=original_input_ids_single_row.device, - ) - - # Unpadded sequence lengths (actual_input + actual_generated) - unpadded_total_length = current_input_actual_length + 
num_generated_tokens - unpadded_sequence_lengths_tensor = torch.tensor( - [unpadded_total_length], - dtype=torch.long, - device=original_input_ids_single_row.device, - ) - - result_batch = BatchedDataDict[GenerationOutputSpec]( - { - "output_ids": output_ids_single_item_batched, - "logprobs": logprobs_single_item, - "generation_lengths": generation_lengths_tensor, - "unpadded_sequence_lengths": unpadded_sequence_lengths_tensor, - } - ) - - return (sample_idx, result_batch) - - # Create tasks for all samples and yield results as they complete - sample_tasks = [ - asyncio.create_task(process_single_sample(i)) for i in range(batch_size) - ] - - # Yield results as they become available - for completed_task in asyncio.as_completed(sample_tasks): - try: - result = await completed_task - yield result - except Exception as e: - # Cancel remaining tasks - for task in sample_tasks: - if not task.done(): - task.cancel() - await asyncio.gather(*sample_tasks, return_exceptions=True) - raise e - - def generate_text( - self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False - ) -> BatchedDataDict[GenerationOutputSpec]: - """Generate text responses using vLLM generation. - - Args: - data: BatchedDataDict containing prompts with text strings - greedy: Whether to use greedy decoding instead of sampling - - Returns: - BatchedDataDict containing: - - texts: List of generated text responses - """ - # Check if async engine is enabled - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "generate_text cannot be used with async_engine=True. Use generate_text_async instead." 
- ) - - # Extract stop_strings if provided, else use default from config - batch_stop_strings: list[list[str] | None] = data.get( - "stop_strings", [self.cfg.get("stop_strings")] * len(data["prompts"]) - ) - - # This function requires all generations have the same stop strings, so we collect all here - stop_strings: set[str] = set() - for sample_stop_strings in batch_stop_strings: - if sample_stop_strings: - stop_strings.update(sample_stop_strings) - - # Add default stop strings from config - if self.cfg.get("stop_strings", None): - stop_strings.update(self.cfg["stop_strings"]) - - stop_strings = list(stop_strings) if len(stop_strings) > 0 else None - - # Read generation parameters from config - top_k = self.cfg["top_k"] if self.cfg["top_k"] is not None else -1 - sampling_params = self.SamplingParams( - temperature=self.cfg["temperature"] if not greedy else 0, - top_p=self.cfg["top_p"], - top_k=top_k if not greedy else 1, - max_tokens=self.cfg["max_new_tokens"], - stop_token_ids=self.cfg["stop_token_ids"], - stop=stop_strings, - include_stop_str_in_output=True, # returning stop strings like hf - ) - - # Generate outputs - assert self.llm is not None, ( - "Attempting to generate with either an uninitialized vLLM or non-model-owner" - ) - outputs = self.llm.generate(data["prompts"], sampling_params) - texts = [output.outputs[0].text for output in outputs] - - # Convert to BatchedDataDict - return_data: BatchedDataDict[GenerationOutputSpec] = BatchedDataDict( - {"texts": texts} - ) - return return_data - - async def generate_text_async( - self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False - ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: - """Generate text responses asynchronously, yielding results as they are ready. 
- - Args: - data: BatchedDataDict containing prompts with text strings - greedy: Whether to use greedy decoding instead of sampling - - Yields: - Tuple of (original_index, BatchedDataDict containing single text response) - """ - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "generate_text_async can only be used when async_engine is enabled in vLLM config." - ) - - # Handle empty input case - if len(data["prompts"]) == 0: - return - - prompts = data["prompts"] - batch_size = len(prompts) - - # Extract stop_strings if provided, else use default from config - batch_stop_strings: list[list[str] | None] = data.get( - "stop_strings", [self.cfg.get("stop_strings")] * batch_size - ) - - # Create tasks for each prompt - async def process_single_prompt(prompt_idx): - """Process a single prompt and return the result.""" - prompt = prompts[prompt_idx] - - # Get stop strings for this specific prompt - per_prompt_stop_strings = None - if batch_stop_strings and prompt_idx < len(batch_stop_strings): - per_prompt_stop_strings = batch_stop_strings[prompt_idx] - - # Merge stop strings - final_stop_strings = self._merge_stop_strings( - [per_prompt_stop_strings] if per_prompt_stop_strings else None - ) - - # Create sampling parameters - top_k = self.cfg["top_k"] if self.cfg["top_k"] is not None else -1 - sampling_params = self.SamplingParams( - temperature=self.cfg["temperature"] if not greedy else 0, - top_p=self.cfg["top_p"], - top_k=top_k if not greedy else 1, - max_tokens=self.cfg["max_new_tokens"], - stop_token_ids=self.cfg["stop_token_ids"], - stop=final_stop_strings, - include_stop_str_in_output=True, # returning stop strings like hf - ) - - request_id = str(uuid.uuid4()) - - # Generate using vLLM async engine - vllm_request_generator = self.llm.generate( - prompt=prompt, - sampling_params=sampling_params, - request_id=request_id, - ) - - # Get the final result from the generator - final_request_output = None - async for req_output in 
vllm_request_generator: - final_request_output = req_output - - if final_request_output is None: - raise RuntimeError(f"No output received for request {request_id}") - - # Extract the generated text - generated_text = final_request_output.outputs[0].text - - # Create result in BatchedDataDict format - result_batch = BatchedDataDict[GenerationOutputSpec]( - {"texts": [generated_text]} - ) - - return (prompt_idx, result_batch) - - # Create tasks for all prompts and yield results as they complete - prompt_tasks = [ - asyncio.create_task(process_single_prompt(i)) for i in range(batch_size) - ] - - # Yield results as they become available - for completed_task in asyncio.as_completed(prompt_tasks): - try: - result = await completed_task - yield result - except Exception as e: - # Cancel remaining tasks - for task in prompt_tasks: - if not task.done(): - task.cancel() - await asyncio.gather(*prompt_tasks, return_exceptions=True) - raise e - - def shutdown(self) -> bool: - """Clean up vLLM resources.""" - try: - if self.llm is not None: - is_async_engine = self.cfg.get("vllm_cfg", {}).get( - "async_engine", False - ) - - if is_async_engine: - try: - self.llm.shutdown() - except Exception as e_stop: - print(f"Error calling shutdown_background_loop: {e_stop}") - # Explicitly delete the engine. This may trigger its __del__ method. - del self.llm - - self.llm = None - self.tokenizer = None - - # Force garbage collection - gc.collect() - torch.cuda.empty_cache() - - return True - except Exception as e: - print(f"Error during vLLM shutdown: {e}") - return False - - def report_device_id(self) -> list[str]: - """Report device ID from the vLLM worker.""" - assert self.llm is not None, ( - "Attempting to report device id with either an uninitialized vLLM or non-model-owner" - ) - - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "report_device_id cannot be used with async_engine=True. Use report_device_id_async instead." 
- ) - - list_of_worker_results = self.llm.collective_rpc( - "report_device_id", args=tuple() - ) - return cast(list[str], list_of_worker_results) - - async def report_device_id_async(self) -> list[str]: - """Async version of report_device_id.""" - assert self.llm is not None, ( - "Attempting to report device id with either an uninitialized vLLM or non-model-owner" - ) - - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "report_device_id_async can only be used with async_engine=True. Use report_device_id instead." - ) - - result_or_coro = await self.llm.collective_rpc("report_device_id", args=tuple()) - - if asyncio.iscoroutine(result_or_coro): - list_of_worker_results = await result_or_coro - else: - list_of_worker_results = result_or_coro - - return cast(list[str], list_of_worker_results) - - def prepare_refit_info(self, state_dict_info: dict[str, Any]) -> None: - """Prepare the info for refit.""" - self.llm.collective_rpc("prepare_refit_info", args=(state_dict_info,)) - - async def prepare_refit_info_async(self, state_dict_info: dict[str, Any]) -> None: - """Async version of prepare_refit_info.""" - await self.llm.collective_rpc("prepare_refit_info", args=(state_dict_info,)) - - def update_weights_from_ipc_handles(self, ipc_handles: dict[str, Any]) -> bool: - """Update weights from IPC handles by delegating to the vLLM Worker implementation. - - Args: - ipc_handles (dict): Dictionary mapping device UUIDs (str) to parameter IPC handles. - - Returns: - bool: True if weights were successfully updated, False otherwise. - """ - try: - assert self.llm is not None, ( - "Attempting to update weights with either an uninitialized vLLM or non-model-owner" - ) - - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "update_weights_from_ipc_handles cannot be used with async_engine=True. Use update_weights_from_ipc_handles_async instead." 
- ) - - if self.tensor_parallel_size == 1: - # UniProcExecutor - assert len(self.vllm_device_ids) == 1 - result_or_coro = self.llm.collective_rpc( - "update_weights_from_local_ipc_handles", - args=(ipc_handles[self.vllm_device_ids[0]],), - ) - else: - """ - DO NOT USE VLLM's collective_rpc: This code causes duplicate IPC data transfer across Ray workers, - leading to unnecessary network serialization overhead and potential performance degradation. - - result_or_coro = self.llm.collective_rpc( - "update_weights_from_global_ipc_handles", args=(ipc_handles,) - ) - """ - ray_worker_outputs = [] - # MultiProcExecutor - for worker, device_id in zip( - self.llm.llm_engine.model_executor.workers, self.vllm_device_ids - ): - ray_worker_outputs.append( - worker.execute_method.remote( - "update_weights_from_local_ipc_handles", - ipc_handles[device_id], - ) - ) - - # Gather the results - result_or_coro = ray.get(ray_worker_outputs) - - worker_result = result_or_coro[0] - - if not worker_result: - print( - f"Error: Worker failed to update weights. Result: {worker_result}" - ) - return False - return True - except Exception as e: - print(f"Exception during collective_rpc for weight update: {e}") - import traceback - - traceback.print_exc() - return False - - async def update_weights_from_ipc_handles_async( - self, ipc_handles: dict[str, Any] - ) -> bool: - """Async version of update_weights_from_ipc_handles. - - Args: - ipc_handles (dict): Dictionary mapping device UUIDs (str) to parameter IPC handles. - - Returns: - bool: True if weights were successfully updated, False otherwise. - """ - try: - assert self.llm is not None, ( - "Attempting to update weights with either an uninitialized vLLM or non-model-owner" - ) - - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "update_weights_from_ipc_handles_async can only be used with async_engine=True. Use update_weights_from_ipc_handles instead." 
- ) - - # TODO: switch to update_weights_from_local_ipc_handles for better performance once collectively report_device_id is supported in asyncLLM initialization - result_or_coro = await self.llm.collective_rpc( - "update_weights_from_global_ipc_handles", args=(ipc_handles,) - ) - - if asyncio.iscoroutine(result_or_coro): - worker_results = await result_or_coro - else: - worker_results = result_or_coro - - worker_result = worker_results[0] - - if not worker_result: - print( - f"Error: Worker failed to update weights. Result: {worker_result}" - ) - return False - return True - except Exception as e: - print(f"Exception during collective_rpc for weight update: {e}") - import traceback - - traceback.print_exc() - return False - - def update_weights_from_collective(self) -> bool: - """Update the model weights from collective communication.""" - try: - assert self.llm is not None, ( - "Attempting to update weights with either an uninitialized vLLM or non-model-owner" - ) - - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "update_weights_from_collective can only be used with async_engine=False. Use update_weights_from_collective_async instead." - ) - - result_or_coro = self.llm.collective_rpc( - "update_weights_from_collective", args=tuple() - ) - worker_result = result_or_coro[0] - - if not worker_result: - print( - f"Error: Worker failed to update weights. 
Result: {worker_result}" - ) - return False - return True - except Exception as e: - print(f"Exception during collective_rpc for weight update: {e}") - import traceback - - traceback.print_exc() - return False - - async def update_weights_from_collective_async(self) -> bool: - """Async version of update_weights_from_collective.""" - try: - assert self.llm is not None, ( - "Attempting to update weights with either an uninitialized vLLM or non-model-owner" - ) - - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "update_weights_from_collective_async can only be used with async_engine=True. Use update_weights_from_collective instead." - ) - - result_or_coro = await self.llm.collective_rpc( - "update_weights_from_collective", args=tuple() - ) - - if asyncio.iscoroutine(result_or_coro): - worker_results = await result_or_coro - else: - worker_results = result_or_coro - - worker_result = worker_results[0] - - if not worker_result: - print( - f"Error: Worker failed to update weights. Result: {worker_result}" - ) - return False - return True - except Exception as e: - print(f"Exception during collective_rpc for weight update: {e}") - import traceback - - traceback.print_exc() - return False - - def reset_prefix_cache(self): - """Reset the prefix cache of vLLM engine.""" - assert self.llm is not None, ( - "Attempting to reset prefix cache with either an uninitialized vLLM or non-model-owner" - ) - - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "reset_prefix_cache can only be used with async_engine=False. Use reset_prefix_cache_async instead." 
- ) - - self.llm.llm_engine.reset_prefix_cache() - gc.collect() - torch.cuda.empty_cache() - - async def reset_prefix_cache_async(self): - """Async version of reset_prefix_cache.""" - assert self.llm is not None, ( - "Attempting to reset prefix cache with either an uninitialized vLLM or non-model-owner" - ) - - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "reset_prefix_cache_async can only be used with async_engine=True. Use reset_prefix_cache instead." - ) - - await self.llm.reset_prefix_cache() - gc.collect() - torch.cuda.empty_cache() - - def sleep(self): - """Put the vLLM engine to sleep.""" - assert self.llm is not None, ( - "Attempting to sleep with either an uninitialized vLLM or non-model-owner" - ) - - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "sleep cannot be used with async_engine=True. Use sleep_async instead." - ) - - # Reset the prefix cache to ensure that prefix cache is not reused after weights are updated - self.llm.llm_engine.reset_prefix_cache() - self.llm.sleep(level=1) - - gc.collect() - torch.cuda.empty_cache() - - async def sleep_async(self): - """Async version of sleep.""" - assert self.llm is not None, ( - "Attempting to sleep with either an uninitialized vLLM or non-model-owner" - ) - - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "sleep_async can only be used with async_engine=True. Use sleep instead." - ) - - # Reset the prefix cache to ensure that prefix cache is not reused after weights are updated - await self.llm.reset_prefix_cache() - await self.llm.sleep(level=1) - - gc.collect() - torch.cuda.empty_cache() - - def wake_up(self, **kwargs): - """Wake up the vLLM engine.""" - assert self.llm is not None, ( - "Attempting to wake up with either an uninitialized vLLM or non-model-owner" - ) - - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "wake_up cannot be used with async_engine=True. Use wake_up_async instead." 
- ) - - tags = kwargs.get("tags") - - wake_up_args = {} - if tags is not None: - wake_up_args["tags"] = tags - - self.llm.wake_up(**wake_up_args) - - async def wake_up_async(self, **kwargs): - """Async version of wake_up.""" - assert self.llm is not None, ( - "Attempting to wake up with either an uninitialized vLLM or non-model-owner" - ) - - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "wake_up_async can only be used with async_engine=True. Use wake_up instead." - ) - - tags = kwargs.get("tags") - - wake_up_args = {} - if tags is not None: - wake_up_args["tags"] = tags - - await self.llm.wake_up(**wake_up_args) - - def start_gpu_profiling(self) -> None: - """Start GPU profiling.""" - torch.cuda.profiler.start() - - def stop_gpu_profiling(self) -> None: - """Stop GPU profiling.""" - torch.cuda.profiler.stop() - - -class VllmGeneration(GenerationInterface): - def __init__( - self, - cluster: RayVirtualCluster, - config: VllmConfig, - name_prefix: str = "vllm_policy", - workers_per_node: Optional[Union[int, list[int]]] = None, - ): - """Initialize a vLLM policy with distributed workers.""" - # Store config - self.cfg = config - if self.cfg["vllm_cfg"]["pipeline_parallel_size"] > 1: - assert self.cfg["vllm_cfg"]["async_engine"], ( - "When pipeline_parallel_size > 1, async_engine must be set to True in the vLLM configuration. " - "You can enable it by adding `policy.generation.vllm_cfg.async_engine=true` to your command." - ) - - # Ensure all required VllmConfig fields are present - missing_keys = [ - key for key in VllmConfig.__required_keys__ if key not in self.cfg - ] - assert not missing_keys, ( - f"VLLM Configuration Error: Missing required keys in VllmConfig.\n" - f"Missing keys: {', '.join(missing_keys)}\n" - f"Provided keys: {', '.join(self.cfg.keys())}\n" - f"Please update your configuration to include all required VLLM parameters." 
- ) - - self.sharding_annotations = NamedSharding( - layout=np.arange(cluster.world_size()).reshape( - -1, # DP - config["vllm_cfg"]["pipeline_parallel_size"], # PP - config["vllm_cfg"]["tensor_parallel_size"], # TP - ), - names=["data_parallel", "pipeline_parallel", "tensor_parallel"], - ) - self.model_parallel_size = self.sharding_annotations.get_axis_size( - "tensor_parallel" - ) * self.sharding_annotations.get_axis_size("pipeline_parallel") - - # Determine if we need cross-node model parallelism - needs_cross_node_parallelism = ( - self.model_parallel_size > cluster.num_gpus_per_node - ) - - # Initialize placement groups with the appropriate mode - cluster._init_placement_groups(use_unified_pg=needs_cross_node_parallelism) - - # Create worker builder for VllmGenerationWorker - worker_builder = RayWorkerBuilder( - "nemo_rl.models.generation.vllm.VllmGenerationWorker", config - ) - - # It's necessary to set env_vars here to ensure that vllm non-leader workers also have these env_vars - # Explicitly set NCCL_CUMEM_ENABLE to 1 to avoid the P2P initialization error for PyNCCLCommunicator. - # See https://github.com/NVIDIA-NeMo/RL/issues/564 for more details. 
- env_vars = {} - if not self.cfg["colocated"]["enabled"]: - os.environ["NCCL_CUMEM_ENABLE"] = "1" - - # Check if we need parallelism-aware worker group creation - if self.model_parallel_size > 1: - # For parallelism, create node-aware worker groups - node_bundle_indices = self._get_tied_worker_bundle_indices(cluster) - - self.worker_group = RayWorkerGroup( - cluster, - worker_builder, - name_prefix=name_prefix, - bundle_indices_list=node_bundle_indices, - sharding_annotations=self.sharding_annotations, - env_vars=env_vars, - ) - else: - # Use standard worker group creation for non-parallel case - self.worker_group = RayWorkerGroup( - cluster, - worker_builder, - name_prefix=name_prefix, - workers_per_node=workers_per_node, - sharding_annotations=self.sharding_annotations, - env_vars=env_vars, - ) - - # Call some collective rpc functions in VllmGenerationWorker when initializing the vLLM engine - # This is necessary for async engine to work - self._post_init() - - # Number of data parallel groups is the number of tied worker groups - self.dp_size = self.worker_group.dp_size - - # Used to track the round-robin selection of worker groups for generate_async - self.current_generate_dp_shard_idx = 0 - - # Save the device uuids for the workers - self.device_uuids = self._report_device_id() - - def _get_tied_worker_bundle_indices( - self, cluster: RayVirtualCluster - ) -> list[tuple[int, list[int]]]: - """Calculate bundle indices for tensor and pipeline parallel workers. - - Handles both unified placement groups (for cross-node model parallelism) and - per-node placement groups (for node-local model parallelism). 
- """ - # Get the placement groups from the cluster - placement_groups = cluster.get_placement_groups() - - if not placement_groups: - raise ValueError("No placement groups available in the cluster") - - # Total parallel sizes - tp_size = self.sharding_annotations.get_axis_size("tensor_parallel") - pp_size = self.sharding_annotations.get_axis_size("pipeline_parallel") - model_parallel_size = tp_size * pp_size - - if len(placement_groups) == 1: - # Single unified placement group used when we need multiple nodes for model parallelism - unified_pg = placement_groups[0] - - def get_node_bundles( - pg: PlacementGroup, - ) -> dict[str, list[int]]: - # Retrieve mapping from node ID to bundle indices from a placement group. - try: - pg_table = ray.util.placement_group_table(pg) - bundle_to_node = pg_table["bundles_to_node_id"] - except Exception as e: - raise RuntimeError( - "Failed to retrieve bundle/node mapping from placement group" - ) from e - - node_bundles: dict[str, list[int]] = defaultdict(list) - for bundle_idx, node_id in bundle_to_node.items(): - node_bundles[node_id].append(bundle_idx) - for bundles in node_bundles.values(): - bundles.sort() - return dict(node_bundles) - - def allocate_worker_groups( - pg: PlacementGroup, tp_size: int, pp_size: int - ) -> list[tuple[int, list[int]]]: - # Allocate worker groups for TP and PP training, assuming all nodes have identical bundle counts. 
- - # Retrieve both bundle mapping and per-node bundles - pg_table = ray.util.placement_group_table(pg) - bundle_to_node = pg_table["bundles_to_node_id"] - node_bundles = get_node_bundles(pg) - - if not node_bundles: - raise ValueError("Placement group contains no bundles") - - # Ensure all nodes have the same number of bundles - counts = [len(b) for b in node_bundles.values()] - assert len(set(counts)) == 1, ( - "All nodes must have identical bundle counts" - ) - - total = sum(counts) - model_parallel_size = tp_size * pp_size - num_groups = total // model_parallel_size - if num_groups == 0: - raise ValueError( - "Unable to allocate any worker groups with the available resources." - ) - - # Create reproducible node indices - sorted_nodes = sorted(node_bundles) - node_idx = {nid: idx for idx, nid in enumerate(sorted_nodes)} - - # Flatten bundles in node order - flat: list[int] = [] - for nid in sorted_nodes: - flat.extend(node_bundles[nid]) - - # Slice into groups and assign logical index - groups: list[tuple[int, list[int]]] = [] - for i in range(num_groups): - slice_ = flat[ - i * model_parallel_size : (i + 1) * model_parallel_size - ] - first_node = bundle_to_node[slice_[0]] - groups.append((node_idx[first_node], slice_)) - - return groups - - tied_groups = allocate_worker_groups(unified_pg, tp_size, pp_size) - else: - tied_groups = [] - # For per-node PGs, each PG represents a node - for pg_idx, pg in enumerate(placement_groups): - if pg.bundle_count == 0: - continue - - # Check if this PG has enough bundles for at least one group - num_groups_in_pg = pg.bundle_count // model_parallel_size - - # Create groups within this PG - for group_idx in range(num_groups_in_pg): - start_idx = group_idx * model_parallel_size - end_idx = start_idx + model_parallel_size - bundle_indices = list(range(start_idx, end_idx)) - # Use pg_idx as the node identifier - tied_groups.append((pg_idx, bundle_indices)) - - if not tied_groups: - raise ValueError( - "Unable to allocate any 
worker groups with the available resources." - ) - - return tied_groups - - def _report_device_id(self) -> list[list[str]]: - """Report the device ID of vllm workers.""" - # Choose the appropriate method based on async_engine setting - method_name = ( - "report_device_id_async" - if self.cfg["vllm_cfg"]["async_engine"] - else "report_device_id" - ) - # Use run_all_workers_single_data for methods that don't need data - futures = self.worker_group.run_all_workers_single_data( - method_name, run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"] - ) - # Wait for all futures to complete - results = ray.get(futures) - return results - - def _post_init(self): - # Choose the appropriate method based on async_engine setting - method_name = ( - "post_init_async" if self.cfg["vllm_cfg"]["async_engine"] else "post_init" - ) - # Use run_all_workers_single_data for methods that don't need data - futures = self.worker_group.run_all_workers_single_data( - method_name, run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"] - ) - # Wait for all futures to complete - results = ray.get(futures) - return results - - def init_collective( - self, ip: str, port: int, world_size: int - ) -> list[ray.ObjectRef]: - """Initialize the collective communication.""" - if not self.worker_group or not self.worker_group.workers: - raise RuntimeError("Worker group is not initialized") - - # Choose the appropriate method based on async_engine setting - method_name = ( - "init_collective_async" - if self.cfg["vllm_cfg"]["async_engine"] - else "init_collective" - ) - - # Prepare rank - total_workers = len(self.worker_group.workers) - if self.dp_size == 0: - raise RuntimeError( - "Data parallel size is zero, cannot initialize collective." 
- ) - workers_per_group = total_workers // self.dp_size - rank_prefix_list = list(range(0, total_workers, workers_per_group)) - - # Send world_size and rank for init collective to all workers - futures = self.worker_group.run_all_workers_multiple_data( - method_name, - rank_prefix=rank_prefix_list, - run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], - common_kwargs={"ip": ip, "port": port, "world_size": world_size}, - ) - - # this function should co-work with lm_policy, so we should wait for all futures to complete outside - return futures - - def generate( - self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False - ) -> BatchedDataDict[GenerationOutputSpec]: - """Generate a batch of data using vLLM.""" - assert isinstance(data, BatchedDataDict), ( - f"data must be a BatchedDataDict, got type: {type(data)}" - ) - assert "input_ids" in data and "input_lengths" in data, ( - "input_ids and input_lengths are required in data for vLLM generation" - ) - - # Shard the data across the tied worker groups - dp_size = self.sharding_annotations.get_axis_size("data_parallel") - sharded_data: list[SlicedDataDict] = data.shard_by_batch_size( - dp_size, allow_uneven_shards=True - ) - future_bundle = self.worker_group.run_all_workers_sharded_data( - "generate", - data=sharded_data, - in_sharded_axes=["data_parallel"], - replicate_on_axes=None, # just run on tp rank 0 - output_is_replicated=None, - common_kwargs={"greedy": greedy}, - ) - - # Get results from the workers, respecting tied worker groups (only one result per tied worker group) - results = self.worker_group.get_all_worker_results(future_bundle) - - # Combine results from all tied worker groups - combined: BatchedDataDict[GenerationOutputSpec] = BatchedDataDict.from_batches( - results, pad_value_dict={"output_ids": self.cfg["pad_token_id"]} - ) - - # Verify the output has all required fields - required_keys = [ - "output_ids", - "generation_lengths", - "unpadded_sequence_lengths", - "logprobs", 
- ] - missing_keys = [key for key in required_keys if key not in combined] - if missing_keys: - raise ValueError( - f"Missing required keys for GenerationOutputSpec: {missing_keys}" - ) - - return combined - - def generate_text( - self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False - ) -> BatchedDataDict[GenerationOutputSpec]: - """Generate text responses using vLLM.""" - assert isinstance(data, BatchedDataDict), ( - f"data must be a BatchedDataDict, got type: {type(data)}" - ) - - # Check if async engine is enabled - if self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - "generate_text cannot be used with async_engine=True. Use generate_text_async instead." - ) - - # Shard the data across the tied worker groups - dp_size = self.sharding_annotations.get_axis_size("data_parallel") - sharded_data: list[SlicedDataDict] = data.shard_by_batch_size( - dp_size, allow_uneven_shards=True - ) - future_bundle = self.worker_group.run_all_workers_sharded_data( - "generate_text", - data=sharded_data, - in_sharded_axes=["data_parallel"], - replicate_on_axes=None, # just run on tp rank 0 - output_is_replicated=None, - common_kwargs={"greedy": greedy}, - ) - - # Get results from the workers, respecting tied worker groups (only one result per tied worker group) - results = self.worker_group.get_all_worker_results(future_bundle) - - # Combine results from all tied worker groups - combined: BatchedDataDict[GenerationOutputSpec] = BatchedDataDict.from_batches( - results, pad_value_dict={"output_ids": self.cfg["pad_token_id"]} - ) - - # Verify the output has all required fields - required_keys = ["texts"] - missing_keys = [key for key in required_keys if key not in combined] - if missing_keys: - raise ValueError( - f"Missing required keys for GenerationOutputSpec: {missing_keys}" - ) - - return combined - - async def _async_generate_base( - self, - data: BatchedDataDict[GenerationDatumSpec], - method_name: str, - data_validation_fn, - greedy: bool = False, - 
) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: - """Base async generation method that handles common worker management logic. - - Args: - data: Input data for generation - method_name: Name of the worker method to call ('generate_async' or 'generate_text_async') - data_validation_fn: Function to validate input data - greedy: Whether to use greedy decoding - - Yields: - Tuple of (original_index, BatchedDataDict containing generation result) - """ - if not self.cfg["vllm_cfg"]["async_engine"]: - raise RuntimeError( - f"{method_name} can only be used when async_engine is enabled in vLLM config." - ) - - assert isinstance(data, BatchedDataDict), ( - f"data must be a BatchedDataDict, got type: {type(data)}" - ) - - # Validate input data and handle empty case - if not data_validation_fn(data): - return - - # Determine the leader worker for the current data parallel shard - leader_worker_idx = self.worker_group.get_dp_leader_worker_idx( - self.current_generate_dp_shard_idx - ) - - # Run the async method on the selected leader worker - worker_gen_proxy = self.worker_group.run_single_worker_single_data( - method_name=method_name, - worker_idx=leader_worker_idx, - data=data, - greedy=greedy, - ) - - # Increment the round-robin worker group index - self.current_generate_dp_shard_idx += 1 - self.current_generate_dp_shard_idx %= self.worker_group.dp_size - - # Create a queue to collect sample results from the worker as they complete - result_queue = asyncio.Queue() - finished = False - - async def consume_worker_generator(worker_idx, worker_gen): - """Consume a single worker generator and put sample results in the queue.""" - nonlocal finished - worker_name = f"Worker-{worker_idx}" - try: - async for sample_result_ref in worker_gen: - sample_result = await sample_result_ref - await result_queue.put(("sample", sample_result)) - except Exception as e: - # Log the error before putting it in the queue for better debugging - import traceback - - 
print(f"Exception in worker {worker_name}") - traceback.print_exc() - await result_queue.put(("error", e)) - finally: - finished = True - await result_queue.put(("worker_done", None)) - - # Start the task to consume the worker generator - worker_task = asyncio.create_task( - consume_worker_generator(leader_worker_idx, worker_gen_proxy) - ) - - # Yield sample results as they become available from the worker - timeout_seconds = float( - os.environ.get("NRL_VLLM_ASYNC_TIMEOUT_SECONDS", "600") - ) # Default 10 minutes - - while not finished: - try: - msg_type, item = await asyncio.wait_for( - result_queue.get(), timeout=timeout_seconds - ) - except asyncio.TimeoutError: - print( - f"Timeout waiting for results after {timeout_seconds}s. Worker has not finished." - ) - print( - f"For longer sequences, increase the timeout by setting: export NRL_VLLM_ASYNC_TIMEOUT_SECONDS={int(timeout_seconds * 2)}" - ) - # Cancel the task - if not worker_task.done(): - worker_task.cancel() - await asyncio.gather(worker_task, return_exceptions=True) - raise RuntimeError( - f"Timeout waiting for worker results after {timeout_seconds}s. 
" - f"For longer sequences, increase timeout by setting: export NRL_VLLM_ASYNC_TIMEOUT_SECONDS={int(timeout_seconds * 2)}" - ) - - if msg_type == "sample": - # Yield individual sample result immediately - yield item - elif msg_type == "error": - # Cancel the task and propagate error - if not worker_task.done(): - worker_task.cancel() - await asyncio.gather(worker_task, return_exceptions=True) - raise item - elif msg_type == "worker_done": - # Worker finished, just continue the loop - pass - else: - raise RuntimeError(f"Unexpected message type: {msg_type}") - - # Verify the task is actually done - assert worker_task.done(), ( - f"Worker task {leader_worker_idx} should be done but isn't" - ) - - async def generate_text_async( - self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False - ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: - """Generate text responses asynchronously, yielding results as they are ready. - - Args: - data: BatchedDataDict containing prompts with text strings - greedy: Whether to use greedy decoding instead of sampling - - Yields: - Tuple of (original_index, BatchedDataDict containing single text response) - """ - - def validate_text_data(data): - if len(data["prompts"]) == 0: - return False # Return False for empty case to trigger early return - return True - - async for result in self._async_generate_base( - data, "generate_text_async", validate_text_data, greedy - ): - yield result - - async def generate_async( - self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False - ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: - """Generate responses asynchronously, yielding individual samples as they complete. - - This method provides per-sample streaming across all workers, yielding each - sample result as soon as it's ready, regardless of which worker processed it. 
- """ - - def validate_generate_data(data): - if "input_ids" not in data or "input_lengths" not in data: - raise AssertionError( - "input_ids and input_lengths are required in data for vLLM generation" - ) - if len(data["input_ids"]) == 0: - return False # Return False for empty case to trigger early return - return True - - async for result in self._async_generate_base( - data, "generate_async", validate_generate_data, greedy - ): - yield result - - def prepare_for_generation(self, *args: Any, **kwargs: Any) -> bool: - """Wake workers up for colocated inference.""" - # non-colocated no need to wake up - if not self.cfg["colocated"]["enabled"]: - return True - - try: - # Choose the appropriate method based on async_engine setting - method_name = ( - "wake_up_async" if self.cfg["vllm_cfg"]["async_engine"] else "wake_up" - ) - # Use run_all_workers_single_data for methods that don't need data - futures = self.worker_group.run_all_workers_single_data( - method_name, - run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], - **kwargs, - ) - # Wait for all futures to complete - results = ray.get(futures) - return all(result for result in results if result is not None) - except Exception as e: - print(f"Error during policy preparation: {e}") - return False - - def finish_generation(self, *args: Any, **kwargs: Any) -> bool: - """Sleep workers and reset prefix cache.""" - try: - # Choose the appropriate method based on setting - # non-colocated only needs reset prefix cache, no need to sleep. 
- if self.cfg["colocated"]["enabled"]: - method_name = ( - "sleep_async" if self.cfg["vllm_cfg"]["async_engine"] else "sleep" - ) - else: - method_name = ( - "reset_prefix_cache_async" - if self.cfg["vllm_cfg"]["async_engine"] - else "reset_prefix_cache" - ) - # Use run_all_workers_single_data for methods that don't need data - futures = self.worker_group.run_all_workers_single_data( - method_name, - run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], - ) - # Wait for all futures to complete - results = ray.get(futures) - return all(result for result in results if result is not None) - except Exception as e: - print(f"Error during policy preparation: {e}") - return False - - def shutdown(self) -> bool: - """Shut down all vLLM workers and clean up resources.""" - try: - # Use the worker group's shutdown method with the worker's cleanup method - return self.worker_group.shutdown(cleanup_method="shutdown") - except Exception as e: - print(f"Error during policy shutdown: {e}") - return False - - def prepare_refit_info(self, state_dict_info: dict[str, Any]) -> None: - """Prepare the info for refit.""" - # Choose the appropriate method based on async_engine setting - method_name = ( - "prepare_refit_info_async" - if self.cfg["vllm_cfg"]["async_engine"] - else "prepare_refit_info" - ) - - # Use run_all_workers_single_data to send data to all workers - futures = self.worker_group.run_all_workers_single_data( - method_name, - state_dict_info=state_dict_info, - run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], - ) - - # Wait for all futures to complete - ray.get(futures) - - def update_weights_from_ipc_handles(self, ipc_handles: dict[str, Any]) -> bool: - """Update weights of the policy using IPC handles, considering tensor parallelism. - - For tp > 1, only the leader in each tensor parallel tied worker group will update weights. - - Args: - ipc_handles (dict): Dictionary mapping device UUIDs (str) to parameter IPC handles. 
- - Returns: - bool: True if weights were successfully updated, False otherwise. - """ - if not self.worker_group or not self.worker_group.workers: - return False - - # Choose the appropriate method based on async_engine setting - method_name = ( - "update_weights_from_ipc_handles_async" - if self.cfg["vllm_cfg"]["async_engine"] - else "update_weights_from_ipc_handles" - ) - - # Only send the ipc handles required by the current worker - ipc_handles_list = [] - for worker_device_uuids in self.device_uuids: - worker_ipc_handles = { - device_uuid: ipc_handles[device_uuid] - for device_uuid in worker_device_uuids - } - ipc_handles_list.append(worker_ipc_handles) - - try: - # Directly pass ipc_handles to the method - futures = self.worker_group.run_all_workers_multiple_data( - method_name, - ipc_handles=ipc_handles_list, - run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], - ) - # Wait for all futures to complete - results = ray.get(futures) - return all(result for result in results if result is not None) - except Exception as e: - print(f"Error during update weights: {e}") - return False - - def update_weights_from_collective(self) -> list[ray.ObjectRef]: - """Update weights of the policy using collective communication.""" - if not self.worker_group or not self.worker_group.workers: - raise RuntimeError("Worker group is not initialized") - - # Choose the appropriate method based on async_engine setting - method_name = ( - "update_weights_from_collective_async" - if self.cfg["vllm_cfg"]["async_engine"] - else "update_weights_from_collective" - ) - - # Use run_all_workers_single_data for methods that don't need data - futures = self.worker_group.run_all_workers_single_data( - method_name, - run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], - ) - - # this function should co-work with lm_policy, so we should wait for all futures to complete outside - return futures - - def start_gpu_profiling(self) -> None: - """Start GPU profiling.""" - futures = 
self.worker_group.run_all_workers_single_data("start_gpu_profiling") - ray.get(futures) - - def stop_gpu_profiling(self) -> None: - """Stop GPU profiling.""" - futures = self.worker_group.run_all_workers_single_data("stop_gpu_profiling") - ray.get(futures) - - def __del__(self) -> None: - """Shuts down the worker groups when the object is deleted or is garbage collected. - - This is an extra safety net in case the user forgets to call shutdown() and the pointer to - the object is lost due to leaving a function scope. It's always recommended that the - user calls shutdown(). - """ - self.shutdown() diff --git a/nemo_rl/models/generation/vllm/__init__.py b/nemo_rl/models/generation/vllm/__init__.py new file mode 100644 index 0000000000..8d3409eb00 --- /dev/null +++ b/nemo_rl/models/generation/vllm/__init__.py @@ -0,0 +1,24 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from nemo_rl.models.generation.vllm.config import VllmConfig +from nemo_rl.models.generation.vllm.vllm_generation import VllmGeneration +from nemo_rl.models.generation.vllm.vllm_worker import VllmGenerationWorker +from nemo_rl.models.generation.vllm.vllm_worker_async import VllmAsyncGenerationWorker + +__all__ = [ + "VllmConfig", + "VllmGeneration", + "VllmGenerationWorker", + "VllmAsyncGenerationWorker", +] diff --git a/nemo_rl/models/generation/vllm/config.py b/nemo_rl/models/generation/vllm/config.py new file mode 100644 index 0000000000..72cb35ca1c --- /dev/null +++ b/nemo_rl/models/generation/vllm/config.py @@ -0,0 +1,35 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, NotRequired, TypedDict + +from nemo_rl.models.generation.interfaces import GenerationConfig + + +class VllmSpecificArgs(TypedDict): + tensor_parallel_size: int + pipeline_parallel_size: int + gpu_memory_utilization: float + max_model_len: int + # Additional arguments for vLLM inserted by nemo rl based on the context of when vllm is used + skip_tokenizer_init: bool + async_engine: bool + load_format: NotRequired[str] + precision: NotRequired[str] + enforce_eager: NotRequired[bool] + + +class VllmConfig(GenerationConfig): + vllm_cfg: VllmSpecificArgs + vllm_kwargs: NotRequired[dict[str, Any]] diff --git a/nemo_rl/models/generation/vllm/utils.py b/nemo_rl/models/generation/vllm/utils.py new file mode 100644 index 0000000000..0243464e56 --- /dev/null +++ b/nemo_rl/models/generation/vllm/utils.py @@ -0,0 +1,81 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Optional + +from nemo_rl.distributed.batched_data_dict import BatchedDataDict +from nemo_rl.models.generation.interfaces import GenerationDatumSpec + + +def format_prompt_for_vllm_generation( + data: BatchedDataDict[GenerationDatumSpec], sample_idx: Optional[int] = None +) -> list[dict[str, Any]]: + """Format a list of prompts for vllm generation (which requires a specific format for its own `generate` method). 
+ + See https://docs.vllm.ai/en/v0.9.1/features/multimodal_inputs.html for prompt format for multimodal inputs. + """ + # Prepare prompts for vLLM (removing padding) + prompts = [] + + input_ids = data["input_ids"] + batch_size = input_ids.shape[0] + input_lengths = data["input_lengths"] + + # if sample_idx is None, return list of all prompts for the entire batch + # else, return the prompt for the single sample specified by sample_idx + return_all = sample_idx is None + if sample_idx is None: + start_idx = 0 + end_idx = batch_size + else: + start_idx = sample_idx + end_idx = sample_idx + 1 + + def _get_regular_prompt(index: int): + valid_length = input_lengths[index].item() + valid_ids = ( + input_ids[index, :valid_length] + if valid_length > 0 + else input_ids[index, :0] + ) + token_ids = valid_ids.tolist() + return {"prompt_token_ids": token_ids} + + # Check if this is VLM generation by looking for message_log with images + # Support for videos/audio/etc. can be added here + # if 'message_log' in data and any('images' in msg for msg in data['message_log']): + if "vllm_content" in data: + # VLM generation using content and multi_modal_data + for i in range(start_idx, end_idx): + msg = data["vllm_content"][i] + # if msg is None, this conversation had no multimodal content, fallback to regular prompt + if msg is None: + prompts.append(_get_regular_prompt(i)) + continue + # init prompt dict + prompt_dict = {"prompt": msg} + # add additional data if present + images = data.get("vllm_images", None) + if images is not None: + prompt_dict["multi_modal_data"] = { + "image": images[i][0] if len(images[i]) == 1 else images[i] + } + prompts.append(prompt_dict) + else: + # Regular LLM generation using token_ids + for i in range(start_idx, end_idx): + # Use input_lengths to get only valid tokens (not padding) + prompts.append(_get_regular_prompt(i)) + + return prompts if return_all else prompts[0] diff --git a/nemo_rl/models/generation/vllm_backend.py 
b/nemo_rl/models/generation/vllm/vllm_backend.py similarity index 80% rename from nemo_rl/models/generation/vllm_backend.py rename to nemo_rl/models/generation/vllm/vllm_backend.py index 9cac3ba6b5..5c3b125514 100644 --- a/nemo_rl/models/generation/vllm_backend.py +++ b/nemo_rl/models/generation/vllm/vllm_backend.py @@ -17,13 +17,16 @@ import torch from torch.multiprocessing.reductions import rebuild_cuda_tensor +from nemo_rl.utils.nsys import wrap_with_nvtx_name + try: import vllm # noqa: F401 except ImportError: raise ImportError( "vLLM is not installed. Please check that the py_executable in the runtime_env of VllmGenerationWorker " "covers the vllm dependency. You may have to update nemo_rl/distributed/ray_actor_environment_registry.py. " - "If you are working interactively, you can install by running `uv sync --extra vllm` anywhere in the repo." + "This error can also happen if the venv creation was aborted or errored out in the middle. In that case, " + "please run at least once with the environment variable NRL_FORCE_REBUILD_VENVS=true set to force the rebuild of the environment." ) @@ -61,10 +64,13 @@ def prepare_refit_info( MegatronPolicyWorker: colocated inference: state_dict_info is a dict of {tensor_name: (shape, dtype, numel)} - non-colocated inference: not implemented yet + non-colocated inference: state_dict_info is a dict of {tensor_name: (shape, dtype)} """ self.state_dict_info = state_dict_info # pyrefly: ignore[implicitly-defined-attribute] This class does not define __init__ so assignments like this should be ignored + @wrap_with_nvtx_name( + "vllm_internal_worker_extension/update_weights_from_global_ipc_handles" + ) def update_weights_from_global_ipc_handles(self, global_device_ipc_handles): """Update weights from global IPC handles. 
@@ -78,6 +84,9 @@ def update_weights_from_global_ipc_handles(self, global_device_ipc_handles): local_device_ipc_handles = global_device_ipc_handles[device_uuid] return self.update_weights_from_local_ipc_handles(local_device_ipc_handles) + @wrap_with_nvtx_name( + "vllm_internal_worker_extension/update_weights_from_local_ipc_handles" + ) def update_weights_from_local_ipc_handles(self, local_device_ipc_handles): """Update weights from local IPC handles. @@ -146,7 +155,14 @@ def update_weights_from_local_ipc_handles(self, local_device_ipc_handles): weights.append((name, tensor)) # Load weights into the model - self.model_runner.model.load_weights(weights=weights) + from nemo_rl.models.generation import fp8 + + if fp8.is_fp8_model(self.model_runner.vllm_config): + # the fp8 load_weights additionally casts bf16 weights into fp8 + fp8.load_weights(weights, self.model_runner) + else: + self.model_runner.model.load_weights(weights=weights) + return True except Exception as e: print( @@ -154,6 +170,9 @@ def update_weights_from_local_ipc_handles(self, local_device_ipc_handles): ) return False + @wrap_with_nvtx_name( + "vllm_internal_worker_extension/update_weights_from_collective" + ) def update_weights_from_collective(self) -> bool: """Update the model weights from collective communication.""" assert self.state_dict_info is not None, ( @@ -165,7 +184,14 @@ def update_weights_from_collective(self) -> bool: for name, (shape, dtype) in self.state_dict_info.items(): weight = torch.empty(shape, dtype=dtype, device="cuda") self.model_update_group.broadcast(weight, src=0) - self.model_runner.model.load_weights(weights=[(name, weight)]) + + from nemo_rl.models.generation import fp8 + + if fp8.is_fp8_model(self.model_runner.vllm_config): + # the fp8 load_weights additionally casts bf16 weights into fp8 + fp8.load_weights([(name, weight)], self.model_runner) + else: + self.model_runner.model.load_weights(weights=[(name, weight)]) except Exception as e: print( f"Error in 
VllmInternalWorkerExtension.update_weights_from_collective: {e}" @@ -173,3 +199,11 @@ def update_weights_from_collective(self) -> bool: return False return True + + def start_gpu_profiling(self) -> None: + """Start GPU profiling.""" + torch.cuda.profiler.start() + + def stop_gpu_profiling(self) -> None: + """Stop GPU profiling.""" + torch.cuda.profiler.stop() diff --git a/nemo_rl/models/generation/vllm/vllm_generation.py b/nemo_rl/models/generation/vllm/vllm_generation.py new file mode 100644 index 0000000000..27b0a2d406 --- /dev/null +++ b/nemo_rl/models/generation/vllm/vllm_generation.py @@ -0,0 +1,784 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import os +from collections import defaultdict +from typing import ( + Any, + AsyncGenerator, + Optional, + Union, +) + +import numpy as np +import ray +from ray.util.placement_group import PlacementGroup + +from nemo_rl.distributed.batched_data_dict import BatchedDataDict, SlicedDataDict +from nemo_rl.distributed.named_sharding import NamedSharding +from nemo_rl.distributed.virtual_cluster import RayVirtualCluster +from nemo_rl.distributed.worker_groups import RayWorkerBuilder, RayWorkerGroup +from nemo_rl.models.generation.interfaces import ( + GenerationDatumSpec, + GenerationInterface, + GenerationOutputSpec, +) +from nemo_rl.models.generation.vllm.config import VllmConfig + +# Global thresholds for top_k and top_p validation. 
+# While top-k/p are not supported, these values allow for token filtering while the logprobs should be compatible.
+# See https://github.com/NVIDIA-NeMo/RL/issues/69 and https://github.com/NVIDIA-NeMo/RL/issues/237 for more details.
+TOP_K_THRESHOLD = 8000  # Allow top_k >= 8000 (effectively no filtering)
+TOP_P_THRESHOLD = 0.99  # Allow top_p >= 0.99 (close to 1.0)
+
+
+class VllmGeneration(GenerationInterface):
+    def __init__(
+        self,
+        cluster: RayVirtualCluster,
+        config: VllmConfig,
+        name_prefix: str = "vllm_policy",
+        workers_per_node: Optional[Union[int, list[int]]] = None,
+    ):
+        """Initialize a vLLM policy with distributed workers."""
+        # Store config
+        self.cfg = config
+        if self.cfg["vllm_cfg"]["pipeline_parallel_size"] > 1:
+            assert self.cfg["vllm_cfg"]["async_engine"], (
+                "When pipeline_parallel_size > 1, async_engine must be set to True in the vLLM configuration. "
+                "You can enable it by adding `policy.generation.vllm_cfg.async_engine=true` to your command."
+            )
+
+        # Validate sampling parameters early to avoid resource allocation with unsupported configs.
+        # The vLLM sampler patch only supports temperature scaling and does not handle top_p/top_k correctly.
+        # However, we allow values above certain thresholds for token filtering purposes.
+        top_k: int | None = self.cfg.get("top_k")
+        if top_k is not None and top_k != -1 and top_k < TOP_K_THRESHOLD:
+            raise ValueError(
+                (
+                    f"top_k sampling with values < {TOP_K_THRESHOLD} is not supported because the vLLM V1 engine "
+                    f"does not return logprobs after top_k filtering. Values >= {TOP_K_THRESHOLD} are allowed "
+                    "for token filtering purposes. If you understand the implications and still want to use "
+                    f"a lower top_k value, please manually comment out this check. Got top_k={top_k}. "
+                    "See https://github.com/NVIDIA-NeMo/RL/issues/69 for more details."
+                )
+            )
+
+        top_p: float = self.cfg.get("top_p", 1.0)
+        if top_p < TOP_P_THRESHOLD:
+            raise ValueError(
+                (
+                    f"top_p sampling with values < {TOP_P_THRESHOLD} is not supported because the vLLM V1 engine "
+                    f"does not return logprobs after top_p filtering. Values >= {TOP_P_THRESHOLD} are allowed "
+                    "for token filtering purposes. If you understand the implications and still want to use "
+                    f"a lower top_p value, please manually comment out this check. Got top_p={top_p}. "
+                    "See https://github.com/NVIDIA-NeMo/RL/issues/69 for more details."
+                )
+            )
+
+        # Ensure all required VllmConfig fields are present
+        missing_keys = [
+            key for key in VllmConfig.__required_keys__ if key not in self.cfg
+        ]
+        assert not missing_keys, (
+            f"VLLM Configuration Error: Missing required keys in VllmConfig.\n"
+            f"Missing keys: {', '.join(missing_keys)}\n"
+            f"Provided keys: {', '.join(self.cfg.keys())}\n"
+            f"Please update your configuration to include all required VLLM parameters."
+        )
+
+        self.sharding_annotations = NamedSharding(
+            layout=np.arange(cluster.world_size()).reshape(
+                -1,  # DP
+                config["vllm_cfg"]["pipeline_parallel_size"],  # PP
+                config["vllm_cfg"]["tensor_parallel_size"],  # TP
+            ),
+            names=["data_parallel", "pipeline_parallel", "tensor_parallel"],
+        )
+        self.model_parallel_size = self.sharding_annotations.get_axis_size(
+            "tensor_parallel"
+        ) * self.sharding_annotations.get_axis_size("pipeline_parallel")
+
+        # non-colocated needs to use PACK strategy to avoid uneven node_bundles
+        # e.g. assuming we use 3 nodes with 8GPUs, 2 nodes for train and 1 node for inference.
+        # if we use SPREAD, then the node bundles will be something like 0: [0,3,6] 1: [1,4,7] 2: [2,5], which is not correct.
+ strategy = None if self.cfg["colocated"]["enabled"] else "PACK" + + # Determine if we need cross-node model parallelism + needs_cross_node_parallelism = ( + self.model_parallel_size > cluster.num_gpus_per_node + ) + + # Initialize placement groups with the appropriate mode + cluster._init_placement_groups( + strategy=strategy, + use_unified_pg=needs_cross_node_parallelism, + ) + + # Create worker builder for VllmGenerationWorker + if self.cfg["vllm_cfg"]["async_engine"]: + worker_cls = "nemo_rl.models.generation.vllm.vllm_worker_async.VllmAsyncGenerationWorker" + else: + worker_cls = ( + "nemo_rl.models.generation.vllm.vllm_worker.VllmGenerationWorker" + ) + worker_builder = RayWorkerBuilder(worker_cls, config) + + # It's necessary to set env_vars here to ensure that vllm non-leader workers also have these env_vars + # Explicitly set NCCL_CUMEM_ENABLE to 1 to avoid the P2P initialization error for PyNCCLCommunicator. + # See https://github.com/NVIDIA-NeMo/RL/issues/564 for more details. 
+ env_vars = {} + if not self.cfg["colocated"]["enabled"]: + env_vars["NCCL_CUMEM_ENABLE"] = "1" + + # Check if we need parallelism-aware worker group creation + if self.model_parallel_size > 1: + # For parallelism, create node-aware worker groups + node_bundle_indices = self._get_tied_worker_bundle_indices(cluster) + + self.worker_group = RayWorkerGroup( + cluster, + worker_builder, + name_prefix=name_prefix, + bundle_indices_list=node_bundle_indices, + sharding_annotations=self.sharding_annotations, + env_vars=env_vars, + ) + else: + # Use standard worker group creation for non-parallel case + self.worker_group = RayWorkerGroup( + cluster, + worker_builder, + name_prefix=name_prefix, + workers_per_node=workers_per_node, + sharding_annotations=self.sharding_annotations, + env_vars=env_vars, + ) + + # Call some collective rpc functions in VllmGenerationWorker when initializing the vLLM engine + # This is necessary for async engine to work + self._post_init() + + # Number of data parallel groups is the number of tied worker groups + self.dp_size = self.worker_group.dp_size + + # Used to track the round-robin selection of worker groups for generate_async + self.current_generate_dp_shard_idx = 0 + + # Save the device uuids for the workers + self.device_uuids = self._report_device_id() + + def _get_tied_worker_bundle_indices( + self, cluster: RayVirtualCluster + ) -> list[tuple[int, list[int]]]: + """Calculate bundle indices for tensor and pipeline parallel workers. + + Handles both unified placement groups (for cross-node model parallelism) and + per-node placement groups (for node-local model parallelism). 
+ """ + # Get the placement groups from the cluster + placement_groups = cluster.get_placement_groups() + + if not placement_groups: + raise ValueError("No placement groups available in the cluster") + + # Total parallel sizes + tp_size = self.sharding_annotations.get_axis_size("tensor_parallel") + pp_size = self.sharding_annotations.get_axis_size("pipeline_parallel") + model_parallel_size = tp_size * pp_size + + if len(placement_groups) == 1: + # Single unified placement group used when we need multiple nodes for model parallelism + unified_pg = placement_groups[0] + + def get_node_bundles( + pg: PlacementGroup, + ) -> dict[str, list[int]]: + # Retrieve mapping from node ID to bundle indices from a placement group. + try: + pg_table = ray.util.placement_group_table(pg) + bundle_to_node = pg_table["bundles_to_node_id"] + except Exception as e: + raise RuntimeError( + "Failed to retrieve bundle/node mapping from placement group" + ) from e + + node_bundles: dict[str, list[int]] = defaultdict(list) + for bundle_idx, node_id in bundle_to_node.items(): + node_bundles[node_id].append(bundle_idx) + for bundles in node_bundles.values(): + bundles.sort() + return dict(node_bundles) + + def allocate_worker_groups( + pg: PlacementGroup, tp_size: int, pp_size: int + ) -> list[tuple[int, list[int]]]: + # Allocate worker groups for TP and PP training, assuming all nodes have identical bundle counts. 
+ + # Retrieve both bundle mapping and per-node bundles + pg_table = ray.util.placement_group_table(pg) + bundle_to_node = pg_table["bundles_to_node_id"] + node_bundles = get_node_bundles(pg) + + if not node_bundles: + raise ValueError("Placement group contains no bundles") + + # Ensure all nodes have the same number of bundles + counts = [len(b) for b in node_bundles.values()] + assert len(set(counts)) == 1, ( + "All nodes must have identical bundle counts" + ) + + total = sum(counts) + model_parallel_size = tp_size * pp_size + num_groups = total // model_parallel_size + if num_groups == 0: + raise ValueError( + "Unable to allocate any worker groups with the available resources." + ) + + # Create reproducible node indices + sorted_nodes = sorted(node_bundles) + node_idx = {nid: idx for idx, nid in enumerate(sorted_nodes)} + + # Flatten bundles in node order + flat: list[int] = [] + for nid in sorted_nodes: + flat.extend(node_bundles[nid]) + + # Slice into groups and assign logical index + groups: list[tuple[int, list[int]]] = [] + for i in range(num_groups): + slice_ = flat[ + i * model_parallel_size : (i + 1) * model_parallel_size + ] + first_node = bundle_to_node[slice_[0]] + groups.append((node_idx[first_node], slice_)) + + return groups + + tied_groups = allocate_worker_groups(unified_pg, tp_size, pp_size) + else: + tied_groups = [] + # For per-node PGs, each PG represents a node + for pg_idx, pg in enumerate(placement_groups): + if pg.bundle_count == 0: + continue + + # Check if this PG has enough bundles for at least one group + num_groups_in_pg = pg.bundle_count // model_parallel_size + + # Create groups within this PG + for group_idx in range(num_groups_in_pg): + start_idx = group_idx * model_parallel_size + end_idx = start_idx + model_parallel_size + bundle_indices = list(range(start_idx, end_idx)) + # Use pg_idx as the node identifier + tied_groups.append((pg_idx, bundle_indices)) + + if not tied_groups: + raise ValueError( + "Unable to allocate any 
worker groups with the available resources." + ) + + return tied_groups + + def _report_device_id(self) -> list[list[str]]: + """Report the device ID of vllm workers.""" + # Choose the appropriate method based on async_engine setting + method_name = ( + "report_device_id_async" + if self.cfg["vllm_cfg"]["async_engine"] + else "report_device_id" + ) + # Use run_all_workers_single_data for methods that don't need data + futures = self.worker_group.run_all_workers_single_data( + method_name, run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"] + ) + # Wait for all futures to complete + results = ray.get(futures) + return results + + def _post_init(self): + # Choose the appropriate method based on async_engine setting + method_name = ( + "post_init_async" if self.cfg["vllm_cfg"]["async_engine"] else "post_init" + ) + # Use run_all_workers_single_data for methods that don't need data + futures = self.worker_group.run_all_workers_single_data( + method_name, run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"] + ) + # Wait for all futures to complete + results = ray.get(futures) + return results + + def init_collective( + self, ip: str, port: int, world_size: int + ) -> list[ray.ObjectRef]: + """Initialize the collective communication.""" + if not self.worker_group or not self.worker_group.workers: + raise RuntimeError("Worker group is not initialized") + + # Choose the appropriate method based on async_engine setting + method_name = ( + "init_collective_async" + if self.cfg["vllm_cfg"]["async_engine"] + else "init_collective" + ) + + # Prepare rank + total_workers = len(self.worker_group.workers) + if self.dp_size == 0: + raise RuntimeError( + "Data parallel size is zero, cannot initialize collective." 
+ ) + workers_per_group = total_workers // self.dp_size + rank_prefix_list = list(range(0, total_workers, workers_per_group)) + + # Send world_size and rank for init collective to all workers + futures = self.worker_group.run_all_workers_multiple_data( + method_name, + rank_prefix=rank_prefix_list, + run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], + common_kwargs={"ip": ip, "port": port, "world_size": world_size}, + ) + + # this function should co-work with lm_policy, so we should wait for all futures to complete outside + return futures + + def generate( + self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False + ) -> BatchedDataDict[GenerationOutputSpec]: + """Generate a batch of data using vLLM.""" + assert isinstance(data, BatchedDataDict), ( + f"data must be a BatchedDataDict, got type: {type(data)}" + ) + assert "input_ids" in data and "input_lengths" in data, ( + "input_ids and input_lengths are required in data for vLLM generation" + ) + + # Shard the data across the tied worker groups + dp_size = self.sharding_annotations.get_axis_size("data_parallel") + sharded_data: list[SlicedDataDict] = data.shard_by_batch_size( + dp_size, allow_uneven_shards=True + ) + future_bundle = self.worker_group.run_all_workers_sharded_data( + "generate", + data=sharded_data, + in_sharded_axes=["data_parallel"], + replicate_on_axes=None, # just run on tp rank 0 + output_is_replicated=None, + common_kwargs={"greedy": greedy}, + ) + + # Get results from the workers, respecting tied worker groups (only one result per tied worker group) + results = self.worker_group.get_all_worker_results(future_bundle) + + # Combine results from all tied worker groups + combined: BatchedDataDict[GenerationOutputSpec] = BatchedDataDict.from_batches( + results, pad_value_dict={"output_ids": self.cfg["pad_token_id"]} + ) + + # Verify the output has all required fields + required_keys = [ + "output_ids", + "generation_lengths", + "unpadded_sequence_lengths", + "logprobs", 
+ ] + missing_keys = [key for key in required_keys if key not in combined] + if missing_keys: + raise ValueError( + f"Missing required keys for GenerationOutputSpec: {missing_keys}" + ) + + return combined + + def generate_text( + self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False + ) -> BatchedDataDict[GenerationOutputSpec]: + """Generate text responses using vLLM.""" + assert isinstance(data, BatchedDataDict), ( + f"data must be a BatchedDataDict, got type: {type(data)}" + ) + + # Check if async engine is enabled + if self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "generate_text cannot be used with async_engine=True. Use generate_text_async instead." + ) + + # Shard the data across the tied worker groups + dp_size = self.sharding_annotations.get_axis_size("data_parallel") + sharded_data: list[SlicedDataDict] = data.shard_by_batch_size( + dp_size, allow_uneven_shards=True + ) + future_bundle = self.worker_group.run_all_workers_sharded_data( + "generate_text", + data=sharded_data, + in_sharded_axes=["data_parallel"], + replicate_on_axes=None, # just run on tp rank 0 + output_is_replicated=None, + common_kwargs={"greedy": greedy}, + ) + + # Get results from the workers, respecting tied worker groups (only one result per tied worker group) + results = self.worker_group.get_all_worker_results(future_bundle) + + # Combine results from all tied worker groups + combined: BatchedDataDict[GenerationOutputSpec] = BatchedDataDict.from_batches( + results, pad_value_dict={"output_ids": self.cfg["pad_token_id"]} + ) + + # Verify the output has all required fields + required_keys = ["texts"] + missing_keys = [key for key in required_keys if key not in combined] + if missing_keys: + raise ValueError( + f"Missing required keys for GenerationOutputSpec: {missing_keys}" + ) + + return combined + + async def _async_generate_base( + self, + data: BatchedDataDict[GenerationDatumSpec], + method_name: str, + data_validation_fn, + greedy: bool = False, + 
) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: + """Base async generation method that handles common worker management logic. + + Args: + data: Input data for generation + method_name: Name of the worker method to call ('generate_async' or 'generate_text_async') + data_validation_fn: Function to validate input data + greedy: Whether to use greedy decoding + + Yields: + Tuple of (original_index, BatchedDataDict containing generation result) + """ + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + f"{method_name} can only be used when async_engine is enabled in vLLM config." + ) + + assert isinstance(data, BatchedDataDict), ( + f"data must be a BatchedDataDict, got type: {type(data)}" + ) + + # Validate input data and handle empty case + if not data_validation_fn(data): + return + + # Determine the leader worker for the current data parallel shard + leader_worker_idx = self.worker_group.get_dp_leader_worker_idx( + self.current_generate_dp_shard_idx + ) + + # Run the async method on the selected leader worker + worker_gen_proxy = self.worker_group.run_single_worker_single_data( + method_name=method_name, + worker_idx=leader_worker_idx, + data=data, + greedy=greedy, + ) + + # Increment the round-robin worker group index + self.current_generate_dp_shard_idx += 1 + self.current_generate_dp_shard_idx %= self.worker_group.dp_size + + # Create a queue to collect sample results from the worker as they complete + result_queue = asyncio.Queue() + finished = False + + async def consume_worker_generator(worker_idx, worker_gen): + """Consume a single worker generator and put sample results in the queue.""" + nonlocal finished + worker_name = f"Worker-{worker_idx}" + try: + async for sample_result_ref in worker_gen: + sample_result = await sample_result_ref + await result_queue.put(("sample", sample_result)) + except Exception as e: + # Log the error before putting it in the queue for better debugging + import traceback + + 
print(f"Exception in worker {worker_name}") + traceback.print_exc() + await result_queue.put(("error", e)) + finally: + finished = True + await result_queue.put(("worker_done", None)) + + # Start the task to consume the worker generator + worker_task = asyncio.create_task( + consume_worker_generator(leader_worker_idx, worker_gen_proxy) + ) + + # Yield sample results as they become available from the worker + timeout_seconds = float( + os.environ.get("NRL_VLLM_ASYNC_TIMEOUT_SECONDS", "600") + ) # Default 10 minutes + + while not finished: + try: + msg_type, item = await asyncio.wait_for( + result_queue.get(), timeout=timeout_seconds + ) + except asyncio.TimeoutError: + print( + f"Timeout waiting for results after {timeout_seconds}s. Worker has not finished." + ) + print( + f"For longer sequences, increase the timeout by setting: export NRL_VLLM_ASYNC_TIMEOUT_SECONDS={int(timeout_seconds * 2)}" + ) + # Cancel the task + if not worker_task.done(): + worker_task.cancel() + await asyncio.gather(worker_task, return_exceptions=True) + raise RuntimeError( + f"Timeout waiting for worker results after {timeout_seconds}s. 
" + f"For longer sequences, increase timeout by setting: export NRL_VLLM_ASYNC_TIMEOUT_SECONDS={int(timeout_seconds * 2)}" + ) + + if msg_type == "sample": + # Yield individual sample result immediately + yield item + elif msg_type == "error": + # Cancel the task and propagate error + if not worker_task.done(): + worker_task.cancel() + await asyncio.gather(worker_task, return_exceptions=True) + raise item + elif msg_type == "worker_done": + # Worker finished, just continue the loop + pass + else: + raise RuntimeError(f"Unexpected message type: {msg_type}") + + # Verify the task is actually done + assert worker_task.done(), ( + f"Worker task {leader_worker_idx} should be done but isn't" + ) + + async def generate_text_async( + self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False + ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: + """Generate text responses asynchronously, yielding results as they are ready. + + Args: + data: BatchedDataDict containing prompts with text strings + greedy: Whether to use greedy decoding instead of sampling + + Yields: + Tuple of (original_index, BatchedDataDict containing single text response) + """ + + def validate_text_data(data): + if len(data["prompts"]) == 0: + return False # Return False for empty case to trigger early return + return True + + async for result in self._async_generate_base( + data, "generate_text_async", validate_text_data, greedy + ): + yield result + + async def generate_async( + self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False + ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: + """Generate responses asynchronously, yielding individual samples as they complete. + + This method provides per-sample streaming across all workers, yielding each + sample result as soon as it's ready, regardless of which worker processed it. 
+ """ + + def validate_generate_data(data): + if "input_ids" not in data or "input_lengths" not in data: + raise AssertionError( + "input_ids and input_lengths are required in data for vLLM generation" + ) + if len(data["input_ids"]) == 0: + return False # Return False for empty case to trigger early return + return True + + async for result in self._async_generate_base( + data, "generate_async", validate_generate_data, greedy + ): + yield result + + def prepare_for_generation(self, *args: Any, **kwargs: Any) -> bool: + """Wake workers up for colocated inference.""" + # non-colocated no need to wake up + if not self.cfg["colocated"]["enabled"]: + return True + + try: + # Choose the appropriate method based on async_engine setting + method_name = ( + "wake_up_async" if self.cfg["vllm_cfg"]["async_engine"] else "wake_up" + ) + # Use run_all_workers_single_data for methods that don't need data + futures = self.worker_group.run_all_workers_single_data( + method_name, + run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], + **kwargs, + ) + # Wait for all futures to complete + results = ray.get(futures) + return all(result for result in results if result is not None) + except Exception as e: + print(f"Error during policy preparation: {e}") + return False + + def finish_generation(self, *args: Any, **kwargs: Any) -> bool: + """Sleep workers and reset prefix cache.""" + try: + # Choose the appropriate method based on setting + # non-colocated only needs reset prefix cache, no need to sleep. 
+ if self.cfg["colocated"]["enabled"]: + method_name = ( + "sleep_async" if self.cfg["vllm_cfg"]["async_engine"] else "sleep" + ) + else: + method_name = ( + "reset_prefix_cache_async" + if self.cfg["vllm_cfg"]["async_engine"] + else "reset_prefix_cache" + ) + # Use run_all_workers_single_data for methods that don't need data + futures = self.worker_group.run_all_workers_single_data( + method_name, + run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], + ) + # Wait for all futures to complete + results = ray.get(futures) + return all(result for result in results if result is not None) + except Exception as e: + print(f"Error during policy preparation: {e}") + return False + + def shutdown(self) -> bool: + """Shut down all vLLM workers and clean up resources.""" + try: + # Use the worker group's shutdown method with the worker's cleanup method + return self.worker_group.shutdown(cleanup_method="shutdown") + except Exception as e: + print(f"Error during policy shutdown: {e}") + return False + + def prepare_refit_info(self, state_dict_info: dict[str, Any]) -> None: + """Prepare the info for refit.""" + # Choose the appropriate method based on async_engine setting + method_name = ( + "prepare_refit_info_async" + if self.cfg["vllm_cfg"]["async_engine"] + else "prepare_refit_info" + ) + + # Use run_all_workers_single_data to send data to all workers + futures = self.worker_group.run_all_workers_single_data( + method_name, + state_dict_info=state_dict_info, + run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], + ) + + # Wait for all futures to complete + ray.get(futures) + + def update_weights_from_ipc_handles(self, ipc_handles: dict[str, Any]) -> bool: + """Update weights of the policy using IPC handles, considering tensor parallelism. + + For tp > 1, only the leader in each tensor parallel tied worker group will update weights. + + Args: + ipc_handles (dict): Dictionary mapping device UUIDs (str) to parameter IPC handles. 
+ + Returns: + bool: True if weights were successfully updated, False otherwise. + """ + if not self.worker_group or not self.worker_group.workers: + return False + + # Choose the appropriate method based on async_engine setting + method_name = ( + "update_weights_from_ipc_handles_async" + if self.cfg["vllm_cfg"]["async_engine"] + else "update_weights_from_ipc_handles" + ) + + # Only send the ipc handles required by the current worker + ipc_handles_list = [] + for worker_device_uuids in self.device_uuids: + worker_ipc_handles = { + device_uuid: ipc_handles[device_uuid] + for device_uuid in worker_device_uuids + } + ipc_handles_list.append(worker_ipc_handles) + + try: + # Directly pass ipc_handles to the method + futures = self.worker_group.run_all_workers_multiple_data( + method_name, + ipc_handles=ipc_handles_list, + run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], + ) + # Wait for all futures to complete + results = ray.get(futures) + return all(result for result in results if result is not None) + except Exception as e: + print(f"Error during update weights: {e}") + return False + + def update_weights_from_collective(self) -> list[ray.ObjectRef]: + """Update weights of the policy using collective communication.""" + if not self.worker_group or not self.worker_group.workers: + raise RuntimeError("Worker group is not initialized") + + # Choose the appropriate method based on async_engine setting + method_name = ( + "update_weights_from_collective_async" + if self.cfg["vllm_cfg"]["async_engine"] + else "update_weights_from_collective" + ) + + # Use run_all_workers_single_data for methods that don't need data + futures = self.worker_group.run_all_workers_single_data( + method_name, + run_rank_0_only_axes=["tensor_parallel", "pipeline_parallel"], + ) + + # this function should co-work with lm_policy, so we should wait for all futures to complete outside + return futures + + def start_gpu_profiling(self) -> None: + """Start GPU profiling.""" + futures = 
self.worker_group.run_all_workers_single_data("start_gpu_profiling") + ray.get(futures) + + def stop_gpu_profiling(self) -> None: + """Stop GPU profiling.""" + futures = self.worker_group.run_all_workers_single_data("stop_gpu_profiling") + ray.get(futures) + + def __del__(self) -> None: + """Shuts down the worker groups when the object is deleted or is garbage collected. + + This is an extra safety net in case the user forgets to call shutdown() and the pointer to + the object is lost due to leaving a function scope. It's always recommended that the + user calls shutdown(). + """ + self.shutdown() diff --git a/nemo_rl/models/generation/vllm/vllm_worker.py b/nemo_rl/models/generation/vllm/vllm_worker.py new file mode 100644 index 0000000000..d023609160 --- /dev/null +++ b/nemo_rl/models/generation/vllm/vllm_worker.py @@ -0,0 +1,849 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import gc +import os +import sys +from typing import Any, Optional, cast + +import ray +import torch + +from nemo_rl.distributed.batched_data_dict import BatchedDataDict +from nemo_rl.distributed.worker_group_utils import get_nsight_config_if_pattern_matches +from nemo_rl.models.generation.interfaces import ( + GenerationDatumSpec, + GenerationOutputSpec, + verify_right_padding, +) +from nemo_rl.models.generation.vllm.config import VllmConfig +from nemo_rl.models.generation.vllm.utils import format_prompt_for_vllm_generation +from nemo_rl.models.huggingface.common import ModelFlag +from nemo_rl.models.policy.utils import is_vllm_v1_engine_enabled +from nemo_rl.utils.nsys import wrap_with_nvtx_name + + +# Use a base class to share some functions to avoid code duplication. +class BaseVllmGenerationWorker: + def __repr__(self) -> str: + """Customizes the actor's prefix in the Ray logs. + + This makes it easier to identify which worker is producing specific log messages. + """ + return f"{self.__class__.__name__}" + + @staticmethod + def configure_worker( + num_gpus: int | float, bundle_indices: Optional[tuple[int, list[int]]] = None + ) -> tuple[dict[str, Any], dict[str, str], dict[str, Any]]: + """Provides complete worker configuration for vLLM tensor and pipeline parallelism. + + This method configures the worker based on its role in tensor and pipeline parallelism, + which is determined directly from the bundle_indices parameter. 
+ + Args: + num_gpus: Original GPU allocation for this worker based on the placement group + bundle_indices: Tuple of (node_idx, local_bundle_indices) for parallelism (if applicable) + + Returns: + tuple with complete worker configuration: + - 'resources': Resource allocation (e.g., num_gpus) + - 'env_vars': Environment variables for this worker + - 'init_kwargs': Parameters to pass to __init__ of the worker + """ + # Initialize configuration + resources: dict[str, Any] = {"num_gpus": num_gpus} + init_kwargs: dict[str, Any] = {} + env_vars: dict[str, str] = {} + + local_bundle_indices = None + if bundle_indices is not None: + node_idx = bundle_indices[0] + local_bundle_indices = bundle_indices[1] + init_kwargs["bundle_indices"] = local_bundle_indices + + """ + compute a unique seed from the node_idx and bundle_indices: + node_idx = 0, bundle_indices = [0, 1, 2, 3] -> seed = 0*1024 + 0 + node_idx = 0, bundle_indices = [4, 5, 6, 7] -> seed = 0*1024 + 1 + node_idx = 1, bundle_indices = [0, 1, 2, 3] -> seed = 1*1024 + 0 + node_idx = 1, bundle_indices = [4, 5, 6, 7] -> seed = 1*1024 + 1 + """ + # For single worker groups, use a simpler seed calculation + if len(local_bundle_indices) == 1: + seed = node_idx * 1024 + local_bundle_indices[0] + else: + # For parallel groups, use the original calculation + bundle_id = local_bundle_indices[0] // len(local_bundle_indices) + seed = node_idx * 1024 + bundle_id + + init_kwargs["seed"] = seed + # Need to give each DP group its own vllm cache to address: + # https://github.com/vllm-project/vllm/issues/18851 + env_vars["VLLM_CACHE_ROOT"] = os.path.expanduser(f"~/.cache/vllm_{seed}") + + # Check if this worker is part of a parallel group (TP or TP+PP). + # A worker is part of a parallel group if it's a secondary member (local_bundle_indices is None) + # or if it's a primary member of a group with multiple workers. 
+ is_part_of_parallel_workers = ( + local_bundle_indices is not None and len(local_bundle_indices) > 1 + ) or local_bundle_indices is None + + if is_part_of_parallel_workers: + # Ray + vllm likes to manage GPU assignment internally for parallel groups + resources["num_gpus"] = 0 + env_vars["RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES"] = "1" + init_kwargs["fraction_of_gpus"] = num_gpus + + env_vars["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" + # Skip vllm P2P check and rely on driver to report peer to peer capability. + env_vars["VLLM_SKIP_P2P_CHECK"] = "1" + + return resources, env_vars, init_kwargs + + def __init__( + self, + config: VllmConfig, + bundle_indices: Optional[list[int]] = None, + fraction_of_gpus: float = 1.0, + seed: Optional[int] = None, + ): + """Initialize a vLLM worker for distributed inference. + + Args: + config: Configuration dictionary for the policy + bundle_indices: List of local bundle indices within a node for parallelism. + Only needed for the first worker in each tied worker group. 
+ fraction_of_gpus: Fraction of GPUs to use for this worker + seed: Random seed for initialization + """ + self.cfg = config + + self.model_name = self.cfg["model_name"] + self.tensor_parallel_size = self.cfg["vllm_cfg"]["tensor_parallel_size"] + self.pipeline_parallel_size = self.cfg["vllm_cfg"]["pipeline_parallel_size"] + self.gpu_memory_utilization = self.cfg["vllm_cfg"]["gpu_memory_utilization"] + self.precision = self.cfg["vllm_cfg"]["precision"] + self.fraction_of_gpus = fraction_of_gpus + self.is_model_owner = bundle_indices is not None + + # Store the Python executable being used by this worker + self.py_executable = sys.executable + + # Skip model loading if we're not the model owner + if not self.is_model_owner: + self.llm = None + self.tokenizer = None + self.rank = 0 + self.world_size = 1 + return + + # In Ray+vLLM setup, each worker process considers itself rank 0 + # vLLM handles the parallelism internally through Ray + self.rank = 0 + self.world_size = 1 + + # Monkey patch for vLLM to ensure RAY_ADDRESS is set in Ray actors. + try: + import vllm.utils + from vllm.logger import init_logger + from vllm.utils import cuda_is_initialized, is_in_ray_actor + + logger = init_logger("vllm_patch") + + def _patched_maybe_force_spawn(): + """Patched version of vllm.utils._maybe_force_spawn. + + This patch changes an `elif is_in_ray_actor()` to an `if` statement. + This ensures that `os.environ["RAY_ADDRESS"]` is set when running + within a Ray actor, even if CUDA has already been initialized. + This is crucial for vLLM workers to connect back to the Ray cluster. + """ + if os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") == "spawn": + return + + reason = None + if cuda_is_initialized(): + reason = "CUDA is initialized" + + if is_in_ray_actor(): + # even if we choose to spawn, we need to pass the ray address + # to the subprocess so that it knows how to connect to the ray cluster. + # env vars are inherited by subprocesses, even if we use spawn. 
+ import ray + + os.environ["RAY_ADDRESS"] = ray.get_runtime_context().gcs_address + if reason is None: + reason = "In a Ray actor and can only be spawned" + + if reason is not None: + logger.warning( + "We must use the `spawn` multiprocessing start method. " + "Overriding VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. " + "See https://docs.vllm.ai/en/latest/getting_started/" + "troubleshooting.html#python-multiprocessing " + "for more information. Reason: %s", + reason, + ) + os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" + + vllm.utils._maybe_force_spawn = _patched_maybe_force_spawn + logger.info("Successfully patched vllm.utils._maybe_force_spawn.") + + def _patch_vllm_init_workers_ray(): + """Patch the vLLM ray_distributed_executor.py file. + + 1. Pass custom runtime_env in _init_workers_ray call. + - This allows passing custom py_executable to worker initialization. + 2. Add NCCL_CUMEM_ENABLE and NCCL_NVLS_ENABLE to vLLM ADDITIONAL_ENV_VARS. + - This is a workaround to fix async vllm in some scenarios. + - See https://github.com/NVIDIA-NeMo/RL/pull/898 for more details. 
+ """ + try: + import vllm.executor.ray_distributed_executor as ray_executor_module + + file_to_patch = ray_executor_module.__file__ + + with open(file_to_patch, "r") as f: + content = f.read() + + old_lines = [ + "self._init_workers_ray(placement_group)", + 'ADDITIONAL_ENV_VARS = {"HF_TOKEN", "HUGGING_FACE_HUB_TOKEN"}', + ] + + new_lines = [ + f'self._init_workers_ray(placement_group, runtime_env={{"py_executable": "{self.py_executable}"}})', + 'ADDITIONAL_ENV_VARS = {"HF_TOKEN", "HUGGING_FACE_HUB_TOKEN", "NCCL_CUMEM_ENABLE", "NCCL_NVLS_ENABLE"}', + ] + + need_replace = False + for old_line, new_line in zip(old_lines, new_lines): + if new_line in content or old_line not in content: + continue + content = content.replace(old_line, new_line) + need_replace = True + + if not need_replace: + return + + # Write back the patched content + with open(file_to_patch, "w") as f: + f.write(content) + + except (ImportError, FileNotFoundError, PermissionError): + # Allow failures gracefully + pass + + _patch_vllm_init_workers_ray() + logger.info("Successfully patched vllm _init_workers_ray.") + + # Patch the vLLM sampler.py file to modify logprobs computation wrt temperature. + # This replaces raw_logprobs = self.compute_logprobs(logits) with custom temperature-applied logprobs. + # TODO(zhanda): This is only a temporary fix to address the issue of incorrect logprobs returned by vllm + # and should be removed or improved after vllm's new logprobs option is released. And currently, other + # sampling parameters like top_p, top_k, etc. are not supported. + # See https://github.com/NVIDIA-NeMo/RL/issues/69 for more details. 
+ def _patch_vllm_sampler(): + try: + import vllm.v1.sample.sampler as sampler_module + + file_to_patch = sampler_module.__file__ + + with open(file_to_patch, "r") as f: + content = f.read() + + old_line = "raw_logprobs = self.compute_logprobs(logits)" + new_lines = "raw_logprobs = self.compute_logprobs(self.apply_temperature(logits.to(torch.float32), sampling_metadata.temperature) if sampling_metadata.temperature is not None else logits)" + + if new_lines in content: + return + + if old_line not in content: + return + + # Replace all instances of the old line with the new lines + patched_content = content.replace(old_line, new_lines) + + # Write back the patched content + with open(file_to_patch, "w") as f: + f.write(patched_content) + + except (ImportError, FileNotFoundError, PermissionError): + # Allow failures gracefully + pass + + _patch_vllm_sampler() + + except (ImportError, AttributeError): + # vllm not installed or has a different structure, skipping patch. + pass + + try: + import vllm + + self.SamplingParams = vllm.SamplingParams + except ImportError: + raise ImportError( + "vLLM is not installed. Please check that the py_executable in the runtime_env of VllmGenerationWorker " + "covers the vllm dependency. You may have to update nemo_rl/distributed/ray_actor_environment_registry.py. " + "This error can also happen if the venv creation was aborted or errored out in the middle. In that case, " + "please run at least once with the environment variable NRL_FORCE_REBUILD_VENVS=true set to force the rebuild of the environment." 
+ ) + vllm_kwargs: dict[str, Any] = copy.deepcopy(self.cfg.get("vllm_kwargs", {})) + + # Calculate total parallel size (TP * PP) + model_parallel_size = self.tensor_parallel_size * self.pipeline_parallel_size + + # Special handling for parallel case (either TP or PP or both) + if model_parallel_size > 1: + # Configure vLLM for tensor/pipeline parallelism within Ray + # Reset CUDA_VISIBLE_DEVICES to allow vLLM to manage GPU assignment + os.environ.pop("CUDA_VISIBLE_DEVICES", None) + os.environ["VLLM_RAY_PER_WORKER_GPUS"] = str( + self.fraction_of_gpus / model_parallel_size + ) + + # Set bundle indices for parallel workers + bundle_indices_str = ",".join(map(str, bundle_indices)) + os.environ["VLLM_RAY_BUNDLE_INDICES"] = bundle_indices_str + print( + f"VLLM_RAY_BUNDLE_INDICES environment variable set to: {os.environ.get('VLLM_RAY_BUNDLE_INDICES')}" + ) + + # Use Ray for distributed execution in parallel mode + vllm_kwargs["distributed_executor_backend"] = "ray" + else: + # For non-parallel mode, explicitly set executor to None to avoid Ray issues + vllm_kwargs["distributed_executor_backend"] = None + + os.environ["VLLM_USE_V1"] = "1" if is_vllm_v1_engine_enabled() else "0" + os.environ["VLLM_ALLOW_INSECURE_SERIALIZATION"] = "1" + + load_format = self.cfg["vllm_cfg"]["load_format"] + if ModelFlag.VLLM_LOAD_FORMAT_AUTO.matches(self.model_name): + load_format = "auto" + + if ( + len(get_nsight_config_if_pattern_matches("vllm_generation_worker")) > 0 + and vllm_kwargs["distributed_executor_backend"] == "ray" + ): + logger.warning( + "Nsight profiling is enabled for vllm generation worker through the vllm ray distributed executor. " + "The nsight command-line args and output file names are automatically picked by the ray distributed " + "executor. Refer to https://github.com/vllm-project/vllm/blob/7e3a8dc90670fd312ce1e0d4eba9bf11c571e3ad/vllm/executor/ray_distributed_executor.py#L136 " + "for more information." 
+ ) + vllm_kwargs["ray_workers_use_nsight"] = True + + if self.cfg["vllm_cfg"]["precision"] == "fp8": + from nemo_rl.models.generation.fp8 import init_fp8 + + fp8_kwargs = init_fp8( + self.cfg["vllm_cfg"], self.model_name, model_parallel_size + ) + vllm_kwargs.update(fp8_kwargs) + # overriden by quant config, however vllm complains if this not passed + self.precision = "bfloat16" + + llm_kwargs = dict( + model=self.model_name, + load_format=load_format, + # vllm==0.10.0 breaks skip_tokenizer_init=True. + # This will be reverted to `self.cfg["vllm_cfg"]["skip_tokenizer_init"]` once https://github.com/NVIDIA-NeMo/RL/issues/818 is resolved. + skip_tokenizer_init=False, + tensor_parallel_size=self.tensor_parallel_size, + pipeline_parallel_size=self.pipeline_parallel_size, + gpu_memory_utilization=self.gpu_memory_utilization, + enable_prefix_caching=torch.cuda.get_device_capability()[0] >= 8, + dtype=self.precision, + seed=seed, + enforce_eager=self.cfg["vllm_cfg"]["enforce_eager"], + max_model_len=self.cfg["vllm_cfg"]["max_model_len"], + trust_remote_code=True, + worker_extension_cls="nemo_rl.models.generation.vllm.vllm_backend.VllmInternalWorkerExtension", + enable_sleep_mode=True, + disable_log_stats=True, + logprobs_mode="raw_logprobs", + **vllm_kwargs, + ) + + self._create_engine(llm_kwargs) + + # will be initialized in post_init + # used in update_weights_from_ipc_handles + self.vllm_device_ids = None + + def llm(self): + return self.llm + + def is_alive(self): + """Check if the worker is alive.""" + return True + + def _merge_stop_strings(self, batch_stop_strings): + stop_set: set[str] = set() + + if self.cfg.get("stop_strings"): + stop_set.update(self.cfg["stop_strings"]) + + if batch_stop_strings is not None: + for sample_ss in batch_stop_strings: + if sample_ss: + stop_set.update(sample_ss) + + return list(stop_set) if stop_set else None + + def _build_sampling_params( + self, + *, + greedy: bool, + stop_strings, + max_new_tokens: Optional[int] = None, + ): + 
top_k_cfg = self.cfg["top_k"] + top_k_val = 1 if greedy else (top_k_cfg if top_k_cfg is not None else -1) + + temperature = 0.0 if greedy else self.cfg["temperature"] + + max_tokens = ( + max_new_tokens if max_new_tokens is not None else self.cfg["max_new_tokens"] + ) + + return self.SamplingParams( + temperature=temperature, + top_p=self.cfg["top_p"], + top_k=top_k_val, + max_tokens=max_tokens, + logprobs=0, + stop_token_ids=self.cfg["stop_token_ids"], + stop=stop_strings, + include_stop_str_in_output=True, + ) + + def start_gpu_profiling(self) -> None: + """Start GPU profiling.""" + torch.cuda.profiler.start() + if self.llm is not None: + self.llm.collective_rpc("start_gpu_profiling", args=tuple()) + + def stop_gpu_profiling(self) -> None: + """Stop GPU profiling.""" + torch.cuda.profiler.stop() + if self.llm is not None: + self.llm.collective_rpc("stop_gpu_profiling", args=tuple()) + + +@ray.remote( + runtime_env={**get_nsight_config_if_pattern_matches("vllm_generation_worker")} +) # pragma: no cover +class VllmGenerationWorker(BaseVllmGenerationWorker): + def _create_engine(self, llm_kwargs: dict[str, Any]) -> None: + import vllm + + self.llm = vllm.LLM(**llm_kwargs) + + def post_init(self): + self.vllm_device_ids = self.report_device_id() + + def init_collective( + self, rank_prefix: int, ip: str, port: int, world_size: int + ) -> None: + self.llm.collective_rpc( + "init_collective", + args=( + rank_prefix, + ip, + port, + world_size, + ), + ) + + @wrap_with_nvtx_name("vllm_genertion_worker/generate") + def generate( + self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False + ) -> BatchedDataDict[GenerationOutputSpec]: + """Generate a batch of data using vLLM generation. 
+ + Args: + data: BatchedDataDict containing input_ids and input_lengths tensors + greedy: Whether to use greedy decoding instead of sampling + + Returns: + BatchedDataDict conforming to GenerationOutputSpec: + - output_ids: input + generated token IDs with proper padding + - logprobs: Log probabilities for tokens + - generation_lengths: Lengths of each response + - unpadded_sequence_lengths: Lengths of each input + generated sequence + """ + # Handle empty input case + if len(data["input_ids"]) == 0: + # Return empty BatchedDataDict with all required fields + return BatchedDataDict[GenerationOutputSpec]( + { + "output_ids": torch.zeros((0, 0), dtype=torch.long), + "logprobs": torch.zeros((0, 0), dtype=torch.float), + "generation_lengths": torch.zeros(0, dtype=torch.long), + "unpadded_sequence_lengths": torch.zeros(0, dtype=torch.long), + } + ) + + input_ids = data["input_ids"] + input_lengths = data["input_lengths"] + batch_stop_strings: list[list[str]] = data.get("stop_strings", []) + stop_strings = self._merge_stop_strings(batch_stop_strings) + sampling_params = self._build_sampling_params( + greedy=greedy, + stop_strings=stop_strings, + ) + + # verify inputs have correct padding + verify_right_padding(data, pad_value=self.cfg["pad_token_id"]) + + # Original input length with padding + padded_input_length = input_ids.size(1) + + # Convert inputs to vLLM format + prompts = format_prompt_for_vllm_generation(data) + + # Generate outputs + assert self.llm is not None, ( + "Attempting to generate with either an uninitialized vLLM or non-model-owner" + ) + outputs = self.llm.generate(prompts, sampling_params) + + # Process the outputs - but preserve the original input padding structure + output_ids_list = [] + logprobs_list = [] + generation_lengths = [] + unpadded_sequence_lengths = [] + max_length = 0 + for output in outputs: + max_length = max(max_length, len(output.outputs[0].token_ids)) + + for i, output in enumerate(outputs): + # Extract generated tokens + 
sequence_length = input_lengths[i] + generation = output.outputs[0] + generated_tokens = list(generation.token_ids) + + # Calculate total sequence length (original input length + generated tokens) + total_length = padded_input_length + max_length + + # Create a new tensor with the right size and fill with padding token + full_output = torch.full( + (total_length,), self.cfg["pad_token_id"], dtype=input_ids.dtype + ) + + # Copy original input (with padding) into the beginning + full_output[:sequence_length] = input_ids[i][:sequence_length] + + # Add generated tokens after the original input + full_output[sequence_length : sequence_length + len(generated_tokens)] = ( + torch.tensor(generated_tokens) + ) + + output_ids_list.append(full_output) + full_logprobs = torch.zeros(total_length, dtype=torch.float32) + if hasattr(generation, "logprobs") and generation.logprobs: + try: + for idx, logprob_dict in enumerate(generation.logprobs): + if logprob_dict: + position = sequence_length + idx + full_logprobs[position] = next(iter(logprob_dict.items()))[ + 1 + ].logprob + except Exception: + import traceback + + traceback.print_exc() + + logprobs_list.append(full_logprobs) + + response_length = sequence_length + len(generated_tokens) + generation_lengths.append(len(generated_tokens)) + unpadded_sequence_lengths.append(response_length) + assert response_length <= self.llm.llm_engine.model_config.max_model_len, ( + f"response_length={response_length} > max_model_len={self.llm.llm_engine.model_config.max_model_len}, which should not happen. Please check this behavior in isolation by running `uv run --extra vllm tools/model_diagnostics/1.max_model_len_respected.py {self.llm.llm_engine.model_config.model}` and raise this issue with the vllm team." 
+            )
+
+        # Create return data conforming to GenerationOutputSpec
+        output_ids = torch.stack(output_ids_list)
+        logprobs = torch.stack(logprobs_list)
+
+        return_data = BatchedDataDict[GenerationOutputSpec](
+            {
+                "output_ids": output_ids,
+                "logprobs": logprobs,
+                "generation_lengths": torch.tensor(
+                    generation_lengths, dtype=torch.long
+                ),
+                "unpadded_sequence_lengths": torch.tensor(
+                    unpadded_sequence_lengths, dtype=torch.long
+                ),
+            }
+        )
+
+        return return_data
+
+    @wrap_with_nvtx_name("vllm_generation_worker/generate_text")
+    def generate_text(
+        self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False
+    ) -> BatchedDataDict[GenerationOutputSpec]:
+        """Generate text responses using vLLM generation.
+
+        Args:
+            data: BatchedDataDict containing prompts with text strings
+            greedy: Whether to use greedy decoding instead of sampling
+
+        Returns:
+            BatchedDataDict containing:
+                - texts: List of generated text responses
+        """
+        # Check if async engine is enabled
+        if self.cfg["vllm_cfg"]["async_engine"]:
+            raise RuntimeError(
+                "generate_text cannot be used with async_engine=True. Use generate_text_async instead."
+ ) + + # Extract stop_strings if provided, else use default from config + batch_stop_strings: list[list[str] | None] = data.get( + "stop_strings", [self.cfg.get("stop_strings")] * len(data["prompts"]) + ) + + # This function requires all generations have the same stop strings, so we collect all here + stop_strings: set[str] = set() + for sample_stop_strings in batch_stop_strings: + if sample_stop_strings: + stop_strings.update(sample_stop_strings) + + # Add default stop strings from config + if self.cfg.get("stop_strings", None): + stop_strings.update(self.cfg["stop_strings"]) + + stop_strings = list(stop_strings) if len(stop_strings) > 0 else None + + # Read generation parameters from config + top_k = self.cfg["top_k"] if self.cfg["top_k"] is not None else -1 + sampling_params = self.SamplingParams( + temperature=self.cfg["temperature"] if not greedy else 0, + top_p=self.cfg["top_p"], + top_k=top_k if not greedy else 1, + max_tokens=self.cfg["max_new_tokens"], + stop_token_ids=self.cfg["stop_token_ids"], + stop=stop_strings, + include_stop_str_in_output=True, # returning stop strings like hf + ) + + # Generate outputs + assert self.llm is not None, ( + "Attempting to generate with either an uninitialized vLLM or non-model-owner" + ) + outputs = self.llm.generate(data["prompts"], sampling_params) + texts = [output.outputs[0].text for output in outputs] + + # Convert to BatchedDataDict + return_data: BatchedDataDict[GenerationOutputSpec] = BatchedDataDict( + {"texts": texts} + ) + return return_data + + def report_device_id(self) -> list[str]: + """Report device ID from the vLLM worker.""" + assert self.llm is not None, ( + "Attempting to report device id with either an uninitialized vLLM or non-model-owner" + ) + + if self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "report_device_id cannot be used with async_engine=True. Use report_device_id_async instead." 
+            )
+
+        list_of_worker_results = self.llm.collective_rpc(
+            "report_device_id", args=tuple()
+        )
+        return cast(list[str], list_of_worker_results)
+
+    def prepare_refit_info(self, state_dict_info: dict[str, Any]) -> None:
+        """Prepare the info for refit."""
+        self.llm.collective_rpc("prepare_refit_info", args=(state_dict_info,))
+
+    @wrap_with_nvtx_name("vllm_generation_worker/update_weights_from_ipc_handles")
+    def update_weights_from_ipc_handles(self, ipc_handles: dict[str, Any]) -> bool:
+        """Update weights from IPC handles by delegating to the vLLM Worker implementation.
+
+        Args:
+            ipc_handles (dict): Dictionary mapping device UUIDs (str) to parameter IPC handles.
+
+        Returns:
+            bool: True if weights were successfully updated, False otherwise.
+        """
+        try:
+            assert self.llm is not None, (
+                "Attempting to update weights with either an uninitialized vLLM or non-model-owner"
+            )
+
+            if self.cfg["vllm_cfg"]["async_engine"]:
+                raise RuntimeError(
+                    "update_weights_from_ipc_handles cannot be used with async_engine=True. Use update_weights_from_ipc_handles_async instead."
+                )
+
+            if self.tensor_parallel_size == 1:
+                # UniProcExecutor
+                assert len(self.vllm_device_ids) == 1
+                result_or_coro = self.llm.collective_rpc(
+                    "update_weights_from_local_ipc_handles",
+                    args=(ipc_handles[self.vllm_device_ids[0]],),
+                )
+            else:
+                """
+                DO NOT USE VLLM's collective_rpc: This code causes duplicate IPC data transfer across Ray workers,
+                leading to unnecessary network serialization overhead and potential performance degradation.
+ + result_or_coro = self.llm.collective_rpc( + "update_weights_from_global_ipc_handles", args=(ipc_handles,) + ) + """ + ray_worker_outputs = [] + # MultiProcExecutor + for worker, device_id in zip( + self.llm.llm_engine.model_executor.workers, self.vllm_device_ids + ): + ray_worker_outputs.append( + worker.execute_method.remote( + "update_weights_from_local_ipc_handles", + ipc_handles[device_id], + ) + ) + + # Gather the results + result_or_coro = ray.get(ray_worker_outputs) + + worker_result = result_or_coro[0] + + if not worker_result: + print( + f"Error: Worker failed to update weights. Result: {worker_result}" + ) + return False + return True + except Exception as e: + print(f"Exception during collective_rpc for weight update: {e}") + import traceback + + traceback.print_exc() + return False + + @wrap_with_nvtx_name("vllm_genertion_worker/update_weights_from_collective") + def update_weights_from_collective(self) -> bool: + """Update the model weights from collective communication.""" + try: + assert self.llm is not None, ( + "Attempting to update weights with either an uninitialized vLLM or non-model-owner" + ) + + if self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "update_weights_from_collective can only be used with async_engine=False. Use update_weights_from_collective_async instead." + ) + + result_or_coro = self.llm.collective_rpc( + "update_weights_from_collective", args=tuple() + ) + worker_result = result_or_coro[0] + + if not worker_result: + print( + f"Error: Worker failed to update weights. 
Result: {worker_result}" + ) + return False + return True + except Exception as e: + print(f"Exception during collective_rpc for weight update: {e}") + import traceback + + traceback.print_exc() + return False + + def reset_prefix_cache(self): + """Reset the prefix cache of vLLM engine.""" + assert self.llm is not None, ( + "Attempting to reset prefix cache with either an uninitialized vLLM or non-model-owner" + ) + + if self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "reset_prefix_cache can only be used with async_engine=False. Use reset_prefix_cache_async instead." + ) + + self.llm.llm_engine.reset_prefix_cache() + gc.collect() + torch.cuda.empty_cache() + + def sleep(self): + """Put the vLLM engine to sleep.""" + assert self.llm is not None, ( + "Attempting to sleep with either an uninitialized vLLM or non-model-owner" + ) + + if self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "sleep cannot be used with async_engine=True. Use sleep_async instead." + ) + + # Reset the prefix cache to ensure that prefix cache is not reused after weights are updated + self.llm.llm_engine.reset_prefix_cache() + self.llm.sleep(level=1) + + gc.collect() + torch.cuda.empty_cache() + + def wake_up(self, **kwargs): + """Wake up the vLLM engine.""" + assert self.llm is not None, ( + "Attempting to wake up with either an uninitialized vLLM or non-model-owner" + ) + + if self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "wake_up cannot be used with async_engine=True. Use wake_up_async instead." + ) + + tags = kwargs.get("tags") + + wake_up_args = {} + if tags is not None: + wake_up_args["tags"] = tags + + self.llm.wake_up(**wake_up_args) + + def shutdown(self) -> bool: + """Clean up vLLM resources.""" + try: + if self.llm is not None: + # Explicitly delete the engine. This may trigger its __del__ method. 
+ del self.llm + + self.llm = None + self.tokenizer = None + + # Force garbage collection + gc.collect() + torch.cuda.empty_cache() + + return True + except Exception as e: + print(f"Error during vLLM shutdown: {e}") + return False diff --git a/nemo_rl/models/generation/vllm/vllm_worker_async.py b/nemo_rl/models/generation/vllm/vllm_worker_async.py new file mode 100644 index 0000000000..5260a6e96a --- /dev/null +++ b/nemo_rl/models/generation/vllm/vllm_worker_async.py @@ -0,0 +1,561 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import gc +import uuid +from typing import Any, AsyncGenerator, cast + +import ray +import torch + +from nemo_rl.distributed.batched_data_dict import BatchedDataDict +from nemo_rl.distributed.worker_group_utils import get_nsight_config_if_pattern_matches +from nemo_rl.models.generation.interfaces import ( + GenerationDatumSpec, + GenerationOutputSpec, + verify_right_padding, +) +from nemo_rl.models.generation.vllm.utils import format_prompt_for_vllm_generation +from nemo_rl.models.generation.vllm.vllm_worker import BaseVllmGenerationWorker + + +@ray.remote( + runtime_env={**get_nsight_config_if_pattern_matches("vllm_async_generation_worker")} +) # pragma: no cover +class VllmAsyncGenerationWorker(BaseVllmGenerationWorker): + def _create_engine(self, llm_kwargs: dict[str, Any]) -> None: + from vllm.engine.arg_utils import AsyncEngineArgs + from vllm.v1.engine.async_llm import AsyncLLM + + self.llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**llm_kwargs)) + + async def post_init_async(self): + self.vllm_device_ids = await self.report_device_id_async() + + async def init_collective_async( + self, rank_prefix: int, ip: str, port: int, world_size: int + ) -> None: + await self.llm.collective_rpc( + "init_collective", + args=( + rank_prefix, + ip, + port, + world_size, + ), + ) + + async def generate_async( + self, + data: BatchedDataDict[GenerationDatumSpec], + greedy: bool = False, + ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: + """Generate a batch of data using vLLM's AsyncLLMEngine, yielding results as they are ready. + + Args: + data: BatchedDataDict with input_ids and input_lengths + greedy: Whether to use greedy decoding instead of sampling + + Yields: + Tuple of (original_index, BatchedDataDict conforming to GenerationOutputSpec for the single sequence) + """ + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "generate_async can only be used when async_engine is enabled in vLLM config." 
+ ) + + # Handle empty input case + if len(data["input_ids"]) == 0: + return + + verify_right_padding(data, pad_value=self.cfg["pad_token_id"]) + + input_ids_batch = data["input_ids"] + input_lengths_batch = data["input_lengths"] + batch_size = input_ids_batch.shape[0] + + # Ensure generate_async only receives single samples (batch_size = 1) + assert batch_size == 1, ( + f"generate_async is restricted to handle only single samples, " + f"but received batch_size={batch_size}. Please handle batching outside this method." + ) + + batch_specific_stop_strings_list = data.get( + "stop_strings", [[] for _ in range(batch_size)] + ) + + # Create tasks for each sample in the batch + async def process_single_sample(sample_idx): + """Process a single sample and return the result.""" + current_input_actual_length = input_lengths_batch[sample_idx].item() + prompt = format_prompt_for_vllm_generation(data, sample_idx) + + per_sample_stop_strings = None + if batch_specific_stop_strings_list and sample_idx < len( + batch_specific_stop_strings_list + ): + per_sample_stop_strings = batch_specific_stop_strings_list[sample_idx] + + final_stop_strings_for_sample = self._merge_stop_strings( + [per_sample_stop_strings] if per_sample_stop_strings else None + ) + + remaining_ctx = ( + self.cfg["vllm_cfg"]["max_model_len"] - current_input_actual_length + ) + allowed_new_tokens = max(0, min(self.cfg["max_new_tokens"], remaining_ctx)) + + # Handle case where no tokens can be generated due to length constraints + if allowed_new_tokens == 0: + # Access the input data directly from the function parameters + input_ids_single_row = input_ids_batch[sample_idx] + + # Create output tensors with just the input (no generated tokens) + output_ids_single_item_batched = input_ids_single_row[ + :current_input_actual_length + ].unsqueeze(0) + + logprobs_single_item = torch.zeros( + (1, current_input_actual_length), + dtype=torch.float32, + device=input_ids_single_row.device, + ) + + generation_lengths_tensor 
= torch.tensor( + [0], dtype=torch.long, device=input_ids_single_row.device + ) + + unpadded_sequence_lengths_tensor = torch.tensor( + [current_input_actual_length], + dtype=torch.long, + device=input_ids_single_row.device, + ) + + result_batch = BatchedDataDict[GenerationOutputSpec]( + { + "output_ids": output_ids_single_item_batched, + "logprobs": logprobs_single_item, + "generation_lengths": generation_lengths_tensor, + "unpadded_sequence_lengths": unpadded_sequence_lengths_tensor, + } + ) + + return (sample_idx, result_batch) + + sampling_params_for_request = self._build_sampling_params( + greedy=greedy, + stop_strings=final_stop_strings_for_sample, + max_new_tokens=allowed_new_tokens, + ) + + request_id = str(uuid.uuid4()) + + # Generate using vLLM async engine + vllm_request_generator = self.llm.generate( + prompt=prompt, + sampling_params=sampling_params_for_request, + request_id=request_id, + ) + + # Get the final result from the generator + final_request_output = None + async for req_output in vllm_request_generator: + final_request_output = req_output + + if final_request_output is None: + raise RuntimeError(f"No output received for request {request_id}") + + # Process the output + generation_details = final_request_output.outputs[0] + generated_token_ids = list(generation_details.token_ids) + num_generated_tokens = len(generated_token_ids) + + original_input_ids_single_row = input_ids_batch[sample_idx] + final_output_tensor_len = current_input_actual_length + num_generated_tokens + + # Create output_ids tensor for this single item + output_ids_single_item = torch.full( + (final_output_tensor_len,), + self.cfg["pad_token_id"], + dtype=original_input_ids_single_row.dtype, + device=original_input_ids_single_row.device, + ) + # Copy original input (up to its actual length) + output_ids_single_item[:current_input_actual_length] = ( + original_input_ids_single_row[:current_input_actual_length] + ) + # Add generated tokens after the actual input + 
output_ids_single_item[ + current_input_actual_length : current_input_actual_length + + num_generated_tokens + ] = torch.tensor( + generated_token_ids, + dtype=original_input_ids_single_row.dtype, + device=original_input_ids_single_row.device, + ) + + # Reshape to (1, seq_len) for BatchedDataDict + output_ids_single_item_batched = output_ids_single_item.unsqueeze(0) + + # Create logprobs tensor for this single item + logprobs_single_item = torch.zeros( + (1, final_output_tensor_len), + dtype=torch.float32, + device=original_input_ids_single_row.device, + ) + if hasattr(generation_details, "logprobs") and generation_details.logprobs: + for idx, logprob_dict_per_token in enumerate( + generation_details.logprobs + ): + if logprob_dict_per_token and idx < len(generated_token_ids): + token_id_at_idx = generated_token_ids[idx] + if token_id_at_idx in logprob_dict_per_token: + logprob_value = logprob_dict_per_token[ + token_id_at_idx + ].logprob + position_in_output_tensor = ( + current_input_actual_length + idx + ) + if position_in_output_tensor < final_output_tensor_len: + logprobs_single_item[0, position_in_output_tensor] = ( + logprob_value + ) + + # Generation lengths + generation_lengths_tensor = torch.tensor( + [num_generated_tokens], + dtype=torch.long, + device=original_input_ids_single_row.device, + ) + + # Unpadded sequence lengths (actual_input + actual_generated) + unpadded_total_length = current_input_actual_length + num_generated_tokens + unpadded_sequence_lengths_tensor = torch.tensor( + [unpadded_total_length], + dtype=torch.long, + device=original_input_ids_single_row.device, + ) + + result_batch = BatchedDataDict[GenerationOutputSpec]( + { + "output_ids": output_ids_single_item_batched, + "logprobs": logprobs_single_item, + "generation_lengths": generation_lengths_tensor, + "unpadded_sequence_lengths": unpadded_sequence_lengths_tensor, + } + ) + + return (sample_idx, result_batch) + + # Create tasks for all samples and yield results as they complete + 
sample_tasks = [ + asyncio.create_task(process_single_sample(i)) for i in range(batch_size) + ] + + # Yield results as they become available + for completed_task in asyncio.as_completed(sample_tasks): + try: + result = await completed_task + yield result + except Exception as e: + # Cancel remaining tasks + for task in sample_tasks: + if not task.done(): + task.cancel() + await asyncio.gather(*sample_tasks, return_exceptions=True) + raise e + + async def generate_text_async( + self, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False + ) -> AsyncGenerator[tuple[int, BatchedDataDict[GenerationOutputSpec]], None]: + """Generate text responses asynchronously, yielding results as they are ready. + + Args: + data: BatchedDataDict containing prompts with text strings + greedy: Whether to use greedy decoding instead of sampling + + Yields: + Tuple of (original_index, BatchedDataDict containing single text response) + """ + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "generate_text_async can only be used when async_engine is enabled in vLLM config." 
+ ) + + # Handle empty input case + if len(data["prompts"]) == 0: + return + + prompts = data["prompts"] + batch_size = len(prompts) + + # Extract stop_strings if provided, else use default from config + batch_stop_strings: list[list[str] | None] = data.get( + "stop_strings", [self.cfg.get("stop_strings")] * batch_size + ) + + # Create tasks for each prompt + async def process_single_prompt(prompt_idx): + """Process a single prompt and return the result.""" + prompt = prompts[prompt_idx] + + # Get stop strings for this specific prompt + per_prompt_stop_strings = None + if batch_stop_strings and prompt_idx < len(batch_stop_strings): + per_prompt_stop_strings = batch_stop_strings[prompt_idx] + + # Merge stop strings + final_stop_strings = self._merge_stop_strings( + [per_prompt_stop_strings] if per_prompt_stop_strings else None + ) + + # Create sampling parameters + top_k = self.cfg["top_k"] if self.cfg["top_k"] is not None else -1 + sampling_params = self.SamplingParams( + temperature=self.cfg["temperature"] if not greedy else 0, + top_p=self.cfg["top_p"], + top_k=top_k if not greedy else 1, + max_tokens=self.cfg["max_new_tokens"], + stop_token_ids=self.cfg["stop_token_ids"], + stop=final_stop_strings, + include_stop_str_in_output=True, # returning stop strings like hf + ) + + request_id = str(uuid.uuid4()) + + # Generate using vLLM async engine + vllm_request_generator = self.llm.generate( + prompt=prompt, + sampling_params=sampling_params, + request_id=request_id, + ) + + # Get the final result from the generator + final_request_output = None + async for req_output in vllm_request_generator: + final_request_output = req_output + + if final_request_output is None: + raise RuntimeError(f"No output received for request {request_id}") + + # Extract the generated text + generated_text = final_request_output.outputs[0].text + + # Create result in BatchedDataDict format + result_batch = BatchedDataDict[GenerationOutputSpec]( + {"texts": [generated_text]} + ) + + return 
(prompt_idx, result_batch) + + # Create tasks for all prompts and yield results as they complete + prompt_tasks = [ + asyncio.create_task(process_single_prompt(i)) for i in range(batch_size) + ] + + # Yield results as they become available + for completed_task in asyncio.as_completed(prompt_tasks): + try: + result = await completed_task + yield result + except Exception as e: + # Cancel remaining tasks + for task in prompt_tasks: + if not task.done(): + task.cancel() + await asyncio.gather(*prompt_tasks, return_exceptions=True) + raise e + + async def report_device_id_async(self) -> list[str]: + """Async version of report_device_id.""" + assert self.llm is not None, ( + "Attempting to report device id with either an uninitialized vLLM or non-model-owner" + ) + + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "report_device_id_async can only be used with async_engine=True. Use report_device_id instead." + ) + + result_or_coro = await self.llm.collective_rpc("report_device_id", args=tuple()) + + if asyncio.iscoroutine(result_or_coro): + list_of_worker_results = await result_or_coro + else: + list_of_worker_results = result_or_coro + + return cast(list[str], list_of_worker_results) + + async def prepare_refit_info_async(self, state_dict_info: dict[str, Any]) -> None: + """Async version of prepare_refit_info.""" + await self.llm.collective_rpc("prepare_refit_info", args=(state_dict_info,)) + + async def update_weights_from_ipc_handles_async( + self, ipc_handles: dict[str, Any] + ) -> bool: + """Async version of update_weights_from_ipc_handles. + + Args: + ipc_handles (dict): Dictionary mapping device UUIDs (str) to parameter IPC handles. + + Returns: + bool: True if weights were successfully updated, False otherwise. 
+ """ + try: + assert self.llm is not None, ( + "Attempting to update weights with either an uninitialized vLLM or non-model-owner" + ) + + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "update_weights_from_ipc_handles_async can only be used with async_engine=True. Use update_weights_from_ipc_handles instead." + ) + + # TODO: switch to update_weights_from_local_ipc_handles for better performance once collectively report_device_id is supported in asyncLLM initialization + result_or_coro = await self.llm.collective_rpc( + "update_weights_from_global_ipc_handles", args=(ipc_handles,) + ) + + if asyncio.iscoroutine(result_or_coro): + worker_results = await result_or_coro + else: + worker_results = result_or_coro + + worker_result = worker_results[0] + + if not worker_result: + print( + f"Error: Worker failed to update weights. Result: {worker_result}" + ) + return False + return True + except Exception as e: + print(f"Exception during collective_rpc for weight update: {e}") + import traceback + + traceback.print_exc() + return False + + async def update_weights_from_collective_async(self) -> bool: + """Async version of update_weights_from_collective.""" + try: + assert self.llm is not None, ( + "Attempting to update weights with either an uninitialized vLLM or non-model-owner" + ) + + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "update_weights_from_collective_async can only be used with async_engine=True. Use update_weights_from_collective instead." + ) + + result_or_coro = await self.llm.collective_rpc( + "update_weights_from_collective", args=tuple() + ) + + if asyncio.iscoroutine(result_or_coro): + worker_results = await result_or_coro + else: + worker_results = result_or_coro + + worker_result = worker_results[0] + + if not worker_result: + print( + f"Error: Worker failed to update weights. 
Result: {worker_result}" + ) + return False + return True + except Exception as e: + print(f"Exception during collective_rpc for weight update: {e}") + import traceback + + traceback.print_exc() + return False + + async def reset_prefix_cache_async(self): + """Async version of reset_prefix_cache.""" + assert self.llm is not None, ( + "Attempting to reset prefix cache with either an uninitialized vLLM or non-model-owner" + ) + + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "reset_prefix_cache_async can only be used with async_engine=True. Use reset_prefix_cache instead." + ) + + await self.llm.reset_prefix_cache() + gc.collect() + torch.cuda.empty_cache() + + async def sleep_async(self): + """Async version of sleep.""" + assert self.llm is not None, ( + "Attempting to sleep with either an uninitialized vLLM or non-model-owner" + ) + + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "sleep_async can only be used with async_engine=True. Use sleep instead." + ) + + # Reset the prefix cache to ensure that prefix cache is not reused after weights are updated + await self.llm.reset_prefix_cache() + await self.llm.sleep(level=1) + + gc.collect() + torch.cuda.empty_cache() + + async def wake_up_async(self, **kwargs): + """Async version of wake_up.""" + assert self.llm is not None, ( + "Attempting to wake up with either an uninitialized vLLM or non-model-owner" + ) + + if not self.cfg["vllm_cfg"]["async_engine"]: + raise RuntimeError( + "wake_up_async can only be used with async_engine=True. Use wake_up instead." + ) + + tags = kwargs.get("tags") + + wake_up_args = {} + if tags is not None: + wake_up_args["tags"] = tags + + await self.llm.wake_up(**wake_up_args) + + def shutdown(self) -> bool: + """Clean up vLLM resources.""" + try: + if self.llm is not None: + try: + self.llm.shutdown() + except Exception as e_stop: + print(f"Error calling shutdown_background_loop: {e_stop}") + + # Explicitly delete the engine. 
This may trigger its __del__ method. + del self.llm + + self.llm = None + self.tokenizer = None + + # Force garbage collection + gc.collect() + torch.cuda.empty_cache() + + return True + except Exception as e: + print(f"Error during vLLM shutdown: {e}") + return False diff --git a/nemo_rl/models/huggingface/common.py b/nemo_rl/models/huggingface/common.py index c057f6d89a..ad26e36327 100644 --- a/nemo_rl/models/huggingface/common.py +++ b/nemo_rl/models/huggingface/common.py @@ -39,22 +39,16 @@ class ModelFlag(Enum): configuration in different parts of the NeMo RL codebase. Flags: - SKIP_DTENSOR_TIED_WEIGHTS_CHECK: Models that should skip the tied weights check - for the DTensor Policy even without setting the - NRL_SKIP_TIED_WEIGHT_CHECK flag. VLLM_LOAD_FORMAT_AUTO: Models that should use the "auto" load format when initializing VLLM. Each flag has a `matches` method that determines if the flag applies to a given model_name. """ - SKIP_DTENSOR_TIED_WEIGHTS_CHECK = auto() VLLM_LOAD_FORMAT_AUTO = auto() def matches(self, model_name: str) -> bool: match self: - case ModelFlag.SKIP_DTENSOR_TIED_WEIGHTS_CHECK: - return is_gemma_model(model_name) case ModelFlag.VLLM_LOAD_FORMAT_AUTO: return is_gemma_model(model_name) case _: diff --git a/nemo_rl/models/megatron/common.py b/nemo_rl/models/megatron/common.py index bc0d499f08..6ae03a64a0 100644 --- a/nemo_rl/models/megatron/common.py +++ b/nemo_rl/models/megatron/common.py @@ -260,6 +260,7 @@ def forward_step_arbitrary_loss( pad_individual_seqs_to_multiple_of: int = 1, pad_full_seq_to: Optional[int] = None, cp_normalize: bool = True, + policy_cfg: Optional[dict] = None, ): """Forward training step with support for packed sequences and context parallelism. 
@@ -273,6 +274,7 @@ def forward_step_arbitrary_loss( pack_sequences (bool): Whether to pack sequences for efficiency seq_length_key (Optional[str]): Key in data_dict containing actual sequence lengths cp_normalize (bool): Whether to normalize the loss by the cp_size + policy_cfg (Optional[dict]): Policy configuration containing generation parameters Notes on packed sequences with context parallelism (CP): - When CP > 1, each sequence is padded to a multiple of (cp_size * 2) @@ -342,6 +344,15 @@ def forward_step_arbitrary_loss( packed_seq_params=packed_seq_params, ) + # Apply temperature scaling to logits for training + # This matches the dtensor worker's _apply_temperature_scaling in the train method + if ( + policy_cfg is not None + and "generation" in policy_cfg + and policy_cfg["generation"] is not None + ): + output_tensor.div_(policy_cfg["generation"]["temperature"]) + # Unpack the output tensor if we did packed sequences if pack_sequences and packed_seq_params is not None: # remove padding diff --git a/nemo_rl/models/megatron/community_import.py b/nemo_rl/models/megatron/community_import.py index fdaf2f3690..6f8f0b08e2 100644 --- a/nemo_rl/models/megatron/community_import.py +++ b/nemo_rl/models/megatron/community_import.py @@ -85,9 +85,14 @@ def export_model_from_megatron( from nemo.tron.converter.qwen import HFQwen2Exporter exporter_cls = HFQwen2Exporter + + elif hf_config.model_type in ("qwen3", "qwen3_moe"): + from nemo.tron.converter.qwen import HFQwen3Exporter + + exporter_cls = HFQwen3Exporter else: raise ValueError( - f"Unknown model: {hf_model_name}. Currently, only Qwen2 and Llama are supported. " + f"Unknown model: {hf_model_name}. Currently, only Qwen2, Qwen3 and Llama are supported. " "If you'd like to run with a different model, please raise an issue or consider adding your own converter." 
) print(f"Exporting model {hf_model_name} to {output_path}...") diff --git a/nemo_rl/models/policy/__init__.py b/nemo_rl/models/policy/__init__.py index c5637d7096..3f2fcfe877 100644 --- a/nemo_rl/models/policy/__init__.py +++ b/nemo_rl/models/policy/__init__.py @@ -19,6 +19,8 @@ class DTensorConfig(TypedDict): enabled: bool + env_vars: NotRequired[dict[str, str]] + _v2: NotRequired[bool] cpu_offload: NotRequired[bool] sequence_parallel: NotRequired[bool] activation_checkpointing: NotRequired[bool] @@ -80,6 +82,7 @@ class MegatronDDPConfig(TypedDict): class MegatronConfig(TypedDict): enabled: bool + env_vars: NotRequired[dict[str, str]] empty_unused_memory_level: int activation_checkpointing: bool converter_type: str @@ -93,6 +96,7 @@ class MegatronConfig(TypedDict): freeze_moe_router: bool expert_tensor_parallel_size: int expert_model_parallel_size: int + defer_fp32_logits: NotRequired[bool] optimizer: NotRequired[MegatronOptimizerConfig] scheduler: NotRequired[MegatronSchedulerConfig] @@ -138,6 +142,7 @@ class PolicyConfig(TypedDict): train_global_batch_size: int train_micro_batch_size: int logprob_batch_size: NotRequired[int] + logprob_chunk_size: NotRequired[int] generation: NotRequired[GenerationConfig] generation_batch_size: NotRequired[ int diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 8c0198784b..7b2f0de271 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -38,25 +38,23 @@ ) from transformers import ( AutoConfig, - AutoModelForCausalLM, AutoModelForSequenceClassification, + AutoProcessor, AutoTokenizer, ) -from transformers.integrations.accelerate import find_tied_parameters from transformers.models.gemma3.modeling_gemma3 import Gemma3ForCausalLM from nemo_rl.algorithms.interfaces import LossFunction, LossType from nemo_rl.algorithms.loss_functions import SequencePackingLossWrapper from nemo_rl.distributed.batched_data_dict 
import BatchedDataDict +from nemo_rl.distributed.model_utils import get_logprobs_from_vocab_parallel_logits from nemo_rl.models.dtensor.parallelize import ( _parallelize_model, clip_grad_by_total_norm_, get_grad_norm, - get_logprobs_from_vocab_parallel_logits, to_local_if_dtensor, ) from nemo_rl.models.huggingface.common import ( - ModelFlag, get_flash_attention_kwargs, pack_sequences, ) @@ -67,18 +65,18 @@ ) from nemo_rl.models.policy.utils import ( configure_dynamo_cache, - configure_expandable_segments, get_gpu_info, get_handle_from_tensor, get_runtime_env_for_policy_worker, import_class_from_path, - is_vllm_v1_engine_enabled, + resolve_model_class, sliding_window_overwrite, ) from nemo_rl.utils.native_checkpoint import ( load_checkpoint, save_checkpoint, ) +from nemo_rl.utils.nsys import wrap_with_nvtx_name @contextmanager @@ -147,12 +145,19 @@ def __init__( self, config: PolicyConfig, tokenizer: AutoTokenizer, + processor: Optional[AutoProcessor] = None, weights_path: Optional[str] = None, optimizer_path: Optional[str] = None, init_optimizer: bool = True, init_reference_model: bool = True, **kwargs: Any, ): + self.tokenizer = tokenizer + self.processor = processor + self.is_vlm = processor is not None + + print(f"Initializing DTensorPolicyWorker with is_vlm={self.is_vlm}") + self.is_generation_colocated = None if "generation" in config and config["generation"] is not None: self.is_generation_colocated = config["generation"]["colocated"]["enabled"] @@ -166,8 +171,14 @@ def __init__( # with different order of node_bundles configure_dynamo_cache() - # Only enable expandable_segments on Hopper and newer architectures (compute capability 9.x+) - configure_expandable_segments() + # vars used for refit + ## will be initialized in prepare_refit_info + self.refit_param_info = None + ## used for streaming update inference engine weights + self._held_sharded_state_dict_reference: Optional[dict[str, torch.Tensor]] = ( + None + ) + self._held_streamed_param_reference: 
Optional[dict[str, torch.Tensor]] = None self.cfg = config # torch distributed init. Envars for rank, world_size, and master_addr and master_port are set from the ray remote call @@ -191,6 +202,9 @@ def __init__( print(f"[Rank {self.rank}] Loading model {model_name} on CPU...") self.enable_seq_packing = self.cfg["sequence_packing"]["enabled"] if self.enable_seq_packing: + assert not self.is_vlm, ( + "Sequence packing is not supported for VLM models. Please set policy.sequence_packing.enabled = False to train VLM models." + ) print( f"[Rank {self.rank}] Sequence packing is enabled for model {model_name}" ) @@ -210,8 +224,9 @@ def __init__( else None, ) - self._is_reward_model = self.cfg.get("reward_model_cfg", {}).get( - "enabled", False + # reward model + self._is_reward_model = ( + "reward_model_cfg" in self.cfg and self.cfg["reward_model_cfg"]["enabled"] ) if self._is_reward_model: # Ensure sequence packing is disabled. @@ -240,7 +255,8 @@ def __init__( else: raise ValueError(f"Unknown reward model type: {rm_type}") else: - model_class = AutoModelForCausalLM + # DO NOT assume AutoModelForCausalLM, multimodal models can inherit from AutoModelForImageTextToText, AutoModelForTextToWaveform, etc. + model_class = resolve_model_class(model_config.model_type) full_state_dict = None if self.rank == 0: @@ -257,27 +273,15 @@ def __init__( print(f"[Rank {self.rank}] Initializing empty model for FSDP...") # All ranks initialize model on meta device, so FSDP can shard it. # The actual weights will be broadcast from rank 0. 
- with init_empty_weights(): self.model = model_class.from_config( model_config, + trust_remote_code=True, ) if self.model.config.pad_token_id is None: self.model.config.pad_token_id = tokenizer.pad_token_id - # caching since this property is not always preserved after FSDP - self.num_tied_weights = len(find_tied_parameters(self.model)) - self.skip_tie_check = os.environ.get( - "NRL_SKIP_TIED_WEIGHT_CHECK" - ) or ModelFlag.SKIP_DTENSOR_TIED_WEIGHTS_CHECK.matches(model_name) - - self.tokenizer = tokenizer - # ------------------------------------------------ - # 3) Move to GPU + Composable FSDP - # (Initialize device mesh, shard submodules, then shard entire model) - # ------------------------------------------------ - tp_size = self.cfg["dtensor_cfg"]["tensor_parallel_size"] cp_size = self.cfg["dtensor_cfg"]["context_parallel_size"] if cp_size > 1 and self.enable_seq_packing: @@ -307,6 +311,10 @@ def __init__( "See https://github.com/NVIDIA-NeMo/RL/issues/659 for more details." ) + assert not self.is_vlm, ( + "Context parallel is yet not supported for VLM models. Please set cp_size = 1 to train VLM models." + ) + device_mesh = torch.distributed.device_mesh.init_device_mesh( "cuda", (dp_size, cp_size, tp_size), mesh_dim_names=("dp", "cp", "tp") ) @@ -323,6 +331,10 @@ def __init__( self.cp_size = cp_size self.device_mesh = device_mesh + # ------------------------------------------------ + # 3) Move to GPU + Composable FSDP + # (Initialize device mesh, shard submodules, then shard entire model) + # ------------------------------------------------ self.model = _parallelize_model( self.model, self.dp_cp_mesh, @@ -425,15 +437,6 @@ def __init__( "No weights path provided. 
Starting from scratch (default policy init)" ) - # vars used for refit - ## will be initialized in prepare_refit_info - self.refit_param_info = None - ## used for streaming update inference engine weights - self._held_sharded_state_dict_reference: Optional[dict[str, torch.Tensor]] = ( - None - ) - self._held_streamed_param_reference: Optional[dict[str, torch.Tensor]] = None - # Refer to nemo impl. Below is original comment. # based on https://github.com/pytorch/torchtitan/blob/main/torchtitan/distributed/utils.py#L113 @staticmethod @@ -467,13 +470,8 @@ def create_context_parallel_ctx( # based on https://github.com/pytorch/torchtitan/blob/cddd7dc809f36fe0ed51cdaaea0671c084d75442/torchtitan/distributed/utils.py#L178 def _apply_temperature_scaling(self, logits: torch.Tensor) -> torch.Tensor: - # Apply temperature scaling to logits if configured and not using V1 engine. if "generation" in self.cfg and self.cfg["generation"] is not None: - # The V1 engine returns raw logits before temperature scaling. - # The V0 engine returns scaled logits. - # Therefore, we only divide if we are not using the V1 engine. 
- if not is_vllm_v1_engine_enabled(): - logits.div_(self.cfg["generation"]["temperature"]) + logits.div_(self.cfg["generation"]["temperature"]) return logits @staticmethod @@ -519,6 +517,7 @@ def get_gpu_info(self) -> dict[str, Any]: """Return information about the GPU being used by this worker.""" return get_gpu_info(self.model) + @wrap_with_nvtx_name("dtensor_policy_worker/train") def train( self, data: BatchedDataDict[Any], @@ -528,15 +527,6 @@ def train( mbs: Optional[int] = None, ) -> dict[str, Any]: """Train the policy on a batch of data with a given loss function.""" - # Check if the model has tied weights - if ( - self.num_tied_weights != 0 - and self.cfg["dtensor_cfg"]["tensor_parallel_size"] > 1 - and not self.skip_tie_check - ): - raise ValueError( - f"Using dtensor policy with tp size {self.cfg['dtensor_cfg']['tensor_parallel_size']} for model ({self.cfg['model_name']}) that has tied weights (num_tied_weights={self.num_tied_weights}) is not supported (https://github.com/NVIDIA-NeMo/RL/issues/227). Please use dtensor policy with tensor parallel == 1 instead." 
- ) if gbs is None: gbs = self.cfg["train_global_batch_size"] if mbs is None: @@ -642,6 +632,8 @@ def train( for mb_idx, mb in enumerate( itertools.chain(mb_iterator, dummy_iterator) ): + torch.cuda.empty_cache() + with torch.autocast(device_type="cuda", dtype=self.dtype): if self.enable_seq_packing: input_ids = mb.get("input_ids").cuda() @@ -677,8 +669,18 @@ def train( ).repeat(batch_size, 1) flash_attn_kwargs = {} + # add vlm kwargs to model call + vlm_kwargs = mb.get_multimodal_dict( + as_tensors=True, device=input_ids.device + ) + if len(vlm_kwargs) > 0: + position_ids = None + context_parallel_ctx = None if self.cp_size > 1: + assert len(vlm_kwargs) == 0, ( + f"multimodal kwargs={vlm_kwargs} are not supported for context parallel" + ) seq_index = torch.arange( seq_len, device=input_ids.device ).repeat(1, 1) @@ -704,6 +706,7 @@ def train( position_ids=position_ids, use_cache=False, flash_attn_kwargs=flash_attn_kwargs, + **vlm_kwargs, ) if self._is_reward_model: @@ -712,6 +715,9 @@ def train( # is not supported for reward models. 
assert not flash_attn_kwargs del model_args["flash_attn_kwargs"] + # remove flash_attn_kwargs if there are multimodal kwargs + if len(vlm_kwargs) > 0: + del model_args["flash_attn_kwargs"] outputs = self.model(**model_args) @@ -720,6 +726,7 @@ def train( logits = self.model.lm_head(outputs.last_hidden_state) else: logits = outputs.logits + del outputs # Apply temperature scaling logits = self._apply_temperature_scaling(logits) @@ -786,6 +793,7 @@ def train( global_valid_seqs, global_valid_toks, ) + del logits # skip the update for dummy batches if mb_idx < iterator_len: @@ -861,11 +869,14 @@ def train( "global_loss": global_loss.cpu(), "grad_norm": grad_norm, "rank": torch.distributed.get_rank(), + "gpu_name": torch.cuda.get_device_name(), + "model_dtype": self.dtype, "all_mb_metrics": dict(mb_metrics), } return metrics + @wrap_with_nvtx_name("dtensor_policy_worker/get_logprobs") def get_logprobs( self, data: BatchedDataDict[Any], micro_batch_size: Optional[int] = None ) -> BatchedDataDict[LogprobOutputSpec]: @@ -886,6 +897,7 @@ def get_logprobs( if micro_batch_size is not None else self.cfg["logprob_batch_size"] ) + logprob_chunk_size = self.cfg.get("logprob_chunk_size", None) # dim 1 is always assumed to be the sequence dim, sanity check this here sequence_dim = 1 @@ -933,9 +945,15 @@ def get_logprobs( step += 1 input_ids = lp_batch.get("input_ids").cuda() input_lengths = lp_batch.get("input_lengths") + vlm_kwargs = lp_batch.get_multimodal_dict( + as_tensors=True, device=input_ids.device + ) batch_size, seq_len = input_ids.shape if self.enable_seq_packing: + assert len(vlm_kwargs) == 0, ( + "multimodal kwargs are not supported for sequence packing" + ) input_ids, position_ids, _ = pack_sequences( input_ids=input_ids, input_lengths=input_lengths, @@ -975,8 +993,15 @@ def get_logprobs( (batch_size, seq_len), dtype=torch.long, device=input_ids.device ) + # if there are multimodal kwargs, we don't need to add position_ids (computed internally) + if len(vlm_kwargs) > 
0: + position_ids = None + context_parallel_ctx = None if self.cp_size > 1: + assert len(vlm_kwargs) == 0, ( + "multimodal kwargs are not supported for context parallel" + ) seq_index = torch.arange(seq_len, device=input_ids.device).repeat( 1, 1 ) @@ -992,13 +1017,18 @@ def get_logprobs( with DTensorPolicyWorker.train_context(context_parallel_ctx): with torch.autocast(device_type="cuda", dtype=self.dtype): - outputs = self.model( + model_args = dict( input_ids=input_ids, attention_mask=attention_mask_input_all_ones, position_ids=position_ids, use_cache=False, flash_attn_kwargs=flash_attn_kwargs, + **vlm_kwargs, ) + if len(vlm_kwargs) > 0: + del model_args["flash_attn_kwargs"] + + outputs = self.model(**model_args) logits = outputs.logits @@ -1043,18 +1073,47 @@ def get_logprobs( ) token_logprobs = get_logprobs_from_vocab_parallel_logits( - logits.to(torch.float32), + logits, input_ids_dtensor, seq_index_tensor, + chunk_size=logprob_chunk_size, ) assert token_logprobs.shape[1] == seq_len - 1 else: if isinstance(logits, DTensor): token_logprobs = get_logprobs_from_vocab_parallel_logits( - logits.to(torch.float32), input_ids + logits, + input_ids, + chunk_size=logprob_chunk_size, ) else: + if logprob_chunk_size is not None: + logits_seq_len = int(logits.shape[1]) + num_chunks = ( + logits_seq_len + logprob_chunk_size - 1 + ) // logprob_chunk_size + chunked_log_probs = [] + for chunk_idx in range(num_chunks): + chunk_start = chunk_idx * logprob_chunk_size + chunk_end = min( + logits_seq_len, + (chunk_idx + 1) * logprob_chunk_size, + ) + chunk_logits = logits[ + :, chunk_start:chunk_end, : + ].to(torch.float32) + log_probs = torch.nn.functional.log_softmax( + chunk_logits, dim=-1 + ) + chunked_log_probs.append(log_probs) + log_probs = torch.cat(chunked_log_probs, dim=1) + del chunked_log_probs + else: + logits = logits.to(torch.float32) + log_probs = torch.nn.functional.log_softmax( + logits, dim=-1 + ) # Extract logprobs for each token in the sequence by gathering the 
logprob # corresponding to the next token at each position # Input shapes: @@ -1062,15 +1121,14 @@ def get_logprobs( # token_ids: [batch_size, sequence_length] - actual tokens # Output shape: [batch_size, sequence_length] - logprob of each token given previous # We get logprob of token[t+1] from logits[t], prepending 0 to maintain sequence length - - log_probs = torch.nn.functional.log_softmax( - outputs.logits.to(torch.float32), dim=-1 - ) next_tokens = input_ids[:, 1:] log_probs = log_probs[:, :-1] token_logprobs = log_probs.gather( dim=-1, index=next_tokens.unsqueeze(-1) ).squeeze(-1) + del log_probs + + del outputs, logits token_logprobs = torch.cat( [torch.zeros_like(token_logprobs[:, :1]), token_logprobs], dim=1 @@ -1146,6 +1204,7 @@ def use_reference_model(self) -> Generator[None, None, None]: val = to_local_if_dtensor(v) val.copy_(curr_state_dict[k]) + @wrap_with_nvtx_name("dtensor_policy_worker/get_reference_policy_logprobs") def get_reference_policy_logprobs( self, data: BatchedDataDict[Any], micro_batch_size: Optional[int] = None ) -> BatchedDataDict[ReferenceLogprobOutputSpec]: @@ -1175,6 +1234,14 @@ def _add_noise_to_weights(self) -> None: def return_state_dict(self): return self.model.state_dict() + def return_model_config(self) -> dict[str, Any]: + """Return the model configuration as a dictionary. + + Returns: + dict: Model configuration dictionary + """ + return self.model.config + def report_device_id(self) -> str: """Report the UUID of the current CUDA device using NVML. 
@@ -1222,8 +1289,11 @@ def prepare_weights_for_ipc(self) -> tuple[list[tuple[str, int]], float]: """ from nemo_rl.utils.nvml import get_free_memory_bytes + # Manually move model to cuda for cpu offload case + if self.cpu_offload: + self.model = self.move_to_cuda(self.model) + # Get state_dict - self.model = self.move_to_cuda(self.model) self._held_sharded_state_dict_reference: dict[str, torch.Tensor] = ( self.model.state_dict() ) @@ -1240,6 +1310,7 @@ def prepare_weights_for_ipc(self) -> tuple[list[tuple[str, int]], float]: return self.refit_param_info, total_available_bytes @torch.no_grad() + @wrap_with_nvtx_name("dtensor_policy_worker/get_weights_ipc_handles") def get_weights_ipc_handles(self, keys: Iterable[str]) -> dict[str, Any]: assert self._held_sharded_state_dict_reference is not None, ( "prepare_weights_for_ipc must be called before get_weights_ipc_handles" @@ -1281,6 +1352,15 @@ def get_weights_ipc_handles(self, keys: Iterable[str]) -> dict[str, Any]: @torch.no_grad() def broadcast_weights_for_collective(self) -> None: """Broadcast the weights for collective communication.""" + # Manually move model to cuda for cpu offload case + if self.cpu_offload: + print( + "[WARNING]: Unless you are lacking of memory, it is not recommended to enable cpu_offload when " + "using non-colocated generation since it will have an extra onload and offload at refit stage." 
+ ) + self.model = self.move_to_cuda(self.model) + + # Broadcast the weights for collective communication for _, tensor in self.model.state_dict().items(): if isinstance(tensor, DTensor): tensor = tensor.full_tensor() @@ -1288,6 +1368,12 @@ def broadcast_weights_for_collective(self) -> None: tensor = tensor.to(self.dtype, non_blocking=True) self.model_update_group.broadcast(tensor.data, src=0) + # Manually move model to cpu for cpu offload case + # cpu offload needs model on CPU before model forward + if self.cpu_offload: + self.model = self.move_to_cpu(self.model) + + @wrap_with_nvtx_name("dtensor_policy_worker/prepare_for_lp_inference") def prepare_for_lp_inference(self) -> None: if not self.cpu_offload: self.move_to_cuda(self.model) @@ -1297,6 +1383,7 @@ def prepare_for_lp_inference(self) -> None: self.model.eval() self.offload_before_refit() + @wrap_with_nvtx_name("dtensor_policy_worker/prepare_for_training") def prepare_for_training(self, *args, **kwargs) -> None: # onload models and optimizer state to cuda if not self.cpu_offload: @@ -1306,9 +1393,6 @@ def prepare_for_training(self, *args, **kwargs) -> None: # to cuda automatically, so we need to do that manually self.model = self.move_buffer_to_device(self.model, "cuda") - # have to move buffers to cuda manually for cpu offload case - self.move_buffer_to_device(self.model, "cuda") - self.model.train() # Move optimizer state to CUDA if it exists if ( @@ -1324,6 +1408,7 @@ def prepare_for_training(self, *args, **kwargs) -> None: torch.cuda.empty_cache() @torch.no_grad() + @wrap_with_nvtx_name("dtensor_policy_worker/offload_before_refit") def offload_before_refit(self) -> None: """Offload the optimizer to the CPU.""" torch.randn(1).cuda() # wake up torch allocator @@ -1337,6 +1422,7 @@ def offload_before_refit(self) -> None: torch.cuda.empty_cache() @torch.no_grad() + @wrap_with_nvtx_name("dtensor_policy_worker/offload_after_refit") def offload_after_refit(self) -> None: # Offload as much as possible on the CPU 
self.model = self.move_to_cpu(self.model) diff --git a/nemo_rl/models/policy/dtensor_policy_worker_v2.py b/nemo_rl/models/policy/dtensor_policy_worker_v2.py new file mode 100644 index 0000000000..8d56c3e6eb --- /dev/null +++ b/nemo_rl/models/policy/dtensor_policy_worker_v2.py @@ -0,0 +1,1413 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import itertools +import os +from collections import defaultdict +from contextlib import AbstractContextManager, contextmanager, nullcontext +from typing import Any, Generator, Iterable, Optional, cast + +import ray +import torch +from accelerate import init_empty_weights +from nemo_automodel import ( + NeMoAutoModelForCausalLM, + NeMoAutoModelForSequenceClassification, +) +from nemo_automodel.components._transformers.utils import sliding_window_overwrite +from nemo_automodel.components.distributed.cp_utils import ( + create_context_parallel_ctx, + get_train_context, +) +from nemo_automodel.components.distributed.grad_utils import ( + clip_grad_by_total_norm_, + get_grad_norm, +) +from nemo_automodel.components.distributed.parallelizer import ( + fsdp2_strategy_parallelize, + unshard_fsdp2_model, +) +from nemo_automodel.components.distributed.tensor_utils import ( + get_cpu_state_dict, + to_local_if_dtensor, +) +from torch import nn +from torch.distributed.checkpoint.state_dict import ( + StateDictOptions, + set_model_state_dict, +) +from 
torch.distributed.fsdp import ( + CPUOffloadPolicy, + MixedPrecisionPolicy, + OffloadPolicy, +) +from torch.distributed.tensor import DTensor, Shard +from transformers import ( + AutoConfig, + AutoTokenizer, +) +from transformers.models.gemma3.modeling_gemma3 import Gemma3ForCausalLM + +from nemo_rl.algorithms.interfaces import LossFunction, LossType +from nemo_rl.algorithms.loss_functions import SequencePackingLossWrapper +from nemo_rl.distributed.batched_data_dict import BatchedDataDict +from nemo_rl.distributed.model_utils import get_logprobs_from_vocab_parallel_logits +from nemo_rl.models.huggingface.common import ( + get_flash_attention_kwargs, + pack_sequences, +) +from nemo_rl.models.policy import PolicyConfig +from nemo_rl.models.policy.interfaces import ( + LogprobOutputSpec, + ReferenceLogprobOutputSpec, +) +from nemo_rl.models.policy.utils import ( + configure_dynamo_cache, + get_gpu_info, + get_handle_from_tensor, + get_runtime_env_for_policy_worker, + import_class_from_path, +) +from nemo_rl.utils.native_checkpoint import ( + load_checkpoint, + save_checkpoint, +) +from nemo_rl.utils.nsys import wrap_with_nvtx_name + + +@ray.remote( + runtime_env=get_runtime_env_for_policy_worker("dtensor_policy_worker_v2") +) # pragma: no cover +class DTensorPolicyWorkerV2: + def __repr__(self) -> str: + """Customizes the actor's prefix in the Ray logs. + + This makes it easier to identify which worker is producing specific log messages. 
+ """ + if torch.distributed.is_initialized(): + return f"{self.__class__.__qualname__}[rank={torch.distributed.get_rank()}]" + else: + return f"{self.__class__.__qualname__}" + + def __init__( + self, + config: PolicyConfig, + tokenizer: AutoTokenizer, + weights_path: Optional[str] = None, + optimizer_path: Optional[str] = None, + init_optimizer: bool = True, + init_reference_model: bool = True, + **kwargs: Any, + ): + self.is_generation_colocated = None + if "generation" in config and config["generation"] is not None: + self.is_generation_colocated = config["generation"]["colocated"]["enabled"] + + # Explicitly set NCCL_CUMEM_ENABLE to 1 to avoid the P2P initialization error for PyNCCLCommunicator. + # See https://github.com/NVIDIA-NeMo/RL/issues/564 for more details. + if not self.is_generation_colocated: + os.environ["NCCL_CUMEM_ENABLE"] = "1" + + # Disable dynamo autotune_local_cache to avoid crash when there's already a cache + # with different order of node_bundles + configure_dynamo_cache() + + self.cfg = config + # torch distributed init. 
Envars for rank, world_size, and master_addr and master_port are set from the ray remote call + torch.distributed.init_process_group(backend="nccl") + self.rank = torch.distributed.get_rank() + world_size = torch.distributed.get_world_size() + model_name = self.cfg["model_name"] + + self.cpu_offload = self.cfg["dtensor_cfg"]["cpu_offload"] + self.max_grad_norm = self.cfg["max_grad_norm"] + + if self.cfg["precision"] == "float32": + self.dtype = torch.float32 + elif self.cfg["precision"] == "bfloat16": + self.dtype = torch.bfloat16 + elif self.cfg["precision"] == "float16": + self.dtype = torch.float16 + else: + raise ValueError(f"Unknown precision: {self.cfg['precision']}") + + print(f"[Rank {self.rank}] Loading model {model_name} on CPU...") + self.enable_seq_packing = self.cfg["sequence_packing"]["enabled"] + if self.enable_seq_packing: + print( + f"[Rank {self.rank}] Sequence packing is enabled for model {model_name}" + ) + print(f"[Rank {self.rank}] Using FlashAttention2 for sequence packing") + + model_config = AutoConfig.from_pretrained( + model_name, + # Always load the model in float32 to keep master weights in float32. + # Keeping the master weights in lower precision has shown to cause issues with convergence. + torch_dtype=torch.float32, + trust_remote_code=True, + **sliding_window_overwrite( + model_name + ), # due to https://github.com/huggingface/transformers/issues/38002 + attn_implementation="flash_attention_2" + if self.enable_seq_packing + else None, + ) + + self._is_reward_model = ( + "reward_model_cfg" in self.cfg and self.cfg["reward_model_cfg"]["enabled"] + ) + if self._is_reward_model: + # Ensure sequence packing is disabled. + if self.enable_seq_packing: + raise NotImplementedError( + "Sequence packing is not supported for reward models" + ) + # Load model as a Reward Model. 
+ rm_type = self.cfg["reward_model_cfg"]["reward_model_type"] + if rm_type == "bradley_terry": + model_class = NeMoAutoModelForSequenceClassification + if model_config.num_labels != 1: + # For Bradley-Terry reward models, the linear head has a single output. + # In the transformers library, the default setting for model_config.num_labels is 2 + # (https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/configuration_utils.py#L259). + # Since num_labels is used as the out_features for the linear head + # (https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/llama/modeling_llama.py#L738) + # if num_labels is not 1, we set it to 1. This change may trigger a warning that some weights are not initialized + # from the model checkpoint and are instead initialized using model_config.initializer_range + # (https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/llama/configuration_llama.py#L62). + print( + "model_config.num_labels is not 1. Setting it to 1 since this value is used as the out_features " + "for the linear head of Bradley-Terry reward models." + ) + model_config.num_labels = 1 + else: + raise ValueError(f"Unknown reward model type: {rm_type}") + else: + model_class = NeMoAutoModelForCausalLM + + full_state_dict = None + if self.rank == 0: + print(f"[Rank {self.rank}] Loading model {model_name} on CPU...") + model = model_class.from_pretrained( + model_name, + device_map="cpu", # load weights onto CPU initially + trust_remote_code=True, + config=model_config, + ) + + full_state_dict = model.state_dict() + del model + + print(f"[Rank {self.rank}] Initializing empty model for FSDP...") + # All ranks initialize model on meta device, so FSDP can shard it. + # The actual weights will be broadcast from rank 0. 
+ + with init_empty_weights(): + # NeMoAutoModelForCausalLM uses flash_attention_2 by default + # so we need to set it to None if sequence packing is disabled + # https://github.com/NVIDIA-NeMo/Automodel/blob/7e748be260651349307862426c0c168cebdeeec3/nemo_automodel/components/_transformers/auto_model.py#L180 + self.model = model_class.from_config( + model_config, + attn_implementation="flash_attention_2" + if self.enable_seq_packing + else None, + trust_remote_code=True, + ) + + if self.model.config.pad_token_id is None: + self.model.config.pad_token_id = tokenizer.pad_token_id + + # caching since this property is not always preserved after FSDP + self.tokenizer = tokenizer + + # ------------------------------------------------ + # 3) Move to GPU + Composable FSDP + # (Initialize device mesh, shard submodules, then shard entire model) + # ------------------------------------------------ + + tp_size = self.cfg["dtensor_cfg"]["tensor_parallel_size"] + cp_size = self.cfg["dtensor_cfg"]["context_parallel_size"] + if cp_size > 1 and self.enable_seq_packing: + raise ValueError( + "Context parallel is not supported for sequence packing. Refer to https://github.com/NVIDIA/NeMo-RL/blob/main/docs/model-quirks.md#context-parallel-with-fsdp2 for more details." + ) + dp_size = world_size // tp_size // cp_size + sequence_parallel_enabled = self.cfg["dtensor_cfg"]["sequence_parallel"] + assert world_size == dp_size * tp_size * cp_size, ( + f"World size({world_size}) must equal to dp_size({dp_size}) * tp_size({tp_size}) * cp_size({cp_size}) to use DTensor" + ) + + if sequence_parallel_enabled and tp_size == 1: + print( + "[WARNING]: sequence_parallel=True, but tp_size=1 which has no effect. Enable tp_size > 1 to use sequence parallelism." + ) + + if cp_size > 1: + assert not isinstance(self.model, Gemma3ForCausalLM), ( + "Context parallel is not supported for Gemma3ForCausalLM. Torch context parallel has many limitations. 
" + "Please refer to https://github.com/NVIDIA/NeMo-RL/blob/main/docs/model-quirks.md#context-parallel-with-fsdp2 for more details." + ) + + assert not (tp_size > 1 and sequence_parallel_enabled), ( + "It's a known issue that context parallel can't be used together with sequence parallel in DTensor worker. " + "Please either set cp_size = 1 or disable sequence parallel. " + "See https://github.com/NVIDIA-NeMo/RL/issues/659 for more details." + ) + + # For FSDP2 compatibility, we need to support HSDP structure + # For now, we use dp_replicate_size = 1 (no hybrid sharding) + dp_replicate_size = 1 + dp_shard_size = dp_size + + # Create device mesh with HSDP structure for FSDP2 compatibility + device_mesh = torch.distributed.device_mesh.init_device_mesh( + "cuda", + (dp_replicate_size, dp_shard_size, cp_size, tp_size), + mesh_dim_names=("dp_replicate", "dp_shard", "cp", "tp"), + ) + + # Create flattened submeshes for different use cases + # Flatten dp_replicate + dp_shard for the "dp" dimension (backward compatibility) + device_mesh[("dp_replicate", "dp_shard")]._flatten(mesh_dim_name="dp") + + # Flatten dp_shard + cp for FSDP2 sharding + device_mesh[("dp_shard", "cp")]._flatten(mesh_dim_name="dp_shard_cp") + + # Flatten dp_replicate + dp_shard + cp for gradient operations + device_mesh[("dp_replicate", "dp_shard", "cp")]._flatten(mesh_dim_name="dp_cp") + + # Store mesh references for backward compatibility + self.dp_cp_mesh = device_mesh["dp_cp"] + self.dp_mesh = device_mesh["dp"] + self.tp_mesh = device_mesh["tp"] + self.cp_mesh = device_mesh["cp"] + + self.dp_size = dp_size + self.tp_size = tp_size + self.cp_size = cp_size + self.device_mesh = device_mesh + + self.model = fsdp2_strategy_parallelize( + self.model, + device_mesh=self.device_mesh, + mp_policy=MixedPrecisionPolicy( + param_dtype=self.dtype, + reduce_dtype=torch.float32, + output_dtype=torch.float32, + ), + offload_policy=CPUOffloadPolicy(pin_memory=False) + if self.cpu_offload + else OffloadPolicy(), + 
sequence_parallel=sequence_parallel_enabled, + activation_checkpointing=self.cfg["dtensor_cfg"][ + "activation_checkpointing" + ], + tp_shard_plan=self.cfg["dtensor_cfg"]["custom_parallel_plan"], + dp_replicate_mesh_name="dp_replicate", + dp_shard_cp_mesh_name="dp_shard_cp", + tp_mesh_name="tp", + ) + + print(f"[Rank {self.rank}] Loading state dict from rank 0...") + # This will broadcast the state dict from rank 0 to all other ranks + # and load it into the FSDP model. + set_model_state_dict( + self.model, + model_state_dict=full_state_dict, + options=StateDictOptions( + full_state_dict=True, + broadcast_from_rank0=True, + ), + ) + + # Handle tied word embeddings after loading the state dict + # We need to actually tie the parameters at the model level + is_tied_lm_head = hasattr(self.model, "lm_head") and getattr( + getattr(self.model, "config", {}), "tie_word_embeddings", False + ) + if is_tied_lm_head: + embed_tokens_weight = None + for name, param in self.model.named_parameters(): + if "embed_tokens" in name and name.endswith(".weight"): + embed_tokens_weight = param + break + + if embed_tokens_weight is not None: + self.model.lm_head.weight = embed_tokens_weight + + # Manually broadcast buffers + for _, buf in self.model.named_buffers(): + torch.distributed.broadcast(to_local_if_dtensor(buf), src=0) + + if self.cpu_offload: + self.model = self.move_to_device(self.model, "cpu") + + if init_reference_model: + self.reference_model_state_dict = get_cpu_state_dict( + self.model.state_dict().items(), pin_memory=True + ) + + if init_optimizer: + optimizer_cls = import_class_from_path(self.cfg["optimizer"]["name"]) + self.optimizer = optimizer_cls( + self.model.parameters(), **self.cfg["optimizer"]["kwargs"] + ) + else: + self.optimizer = None + + if "scheduler" in self.cfg and self.optimizer is not None: + if isinstance(self.cfg["scheduler"], dict): + scheduler_cls = import_class_from_path( + cast(str, self.cfg["scheduler"]["name"]) + ) + self.scheduler = 
scheduler_cls( + self.optimizer, **self.cfg["scheduler"]["kwargs"] + ) + else: + schedulers = [] + for scheduler_cfg in self.cfg["scheduler"]: + if "name" in scheduler_cfg: + schedulers.append( + import_class_from_path(scheduler_cfg["name"])( + self.optimizer, **scheduler_cfg["kwargs"] + ) + ) + else: + assert "milestones" in scheduler_cfg, ( + "unknown scheduler config: ", + scheduler_cfg, + ) + milestones: list[int] = scheduler_cfg["milestones"] + + self.scheduler = torch.optim.lr_scheduler.SequentialLR( + self.optimizer, schedulers, milestones + ) + + elif self.optimizer is not None: + ## default to a passthrough LR schedule + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, lr_lambda=lambda epoch: 1 + ) + + # restore + if weights_path: + self.load_checkpoint(weights_path, optimizer_path) + else: + print( + "No weights path provided. Starting from scratch (default policy init)" + ) + + # vars used for refit + ## will be initialized in prepare_refit_info + self.refit_param_info = None + ## used for streaming update inference engine weights + self._held_sharded_state_dict_reference: Optional[dict[str, torch.Tensor]] = ( + None + ) + self._held_streamed_param_reference: Optional[dict[str, torch.Tensor]] = None + + def _apply_temperature_scaling(self, logits: torch.Tensor) -> torch.Tensor: + if "generation" in self.cfg and self.cfg["generation"] is not None: + logits.div_(self.cfg["generation"]["temperature"]) + return logits + + def init_collective(self, ip: str, port: int, world_size: int) -> None: + """Initialize the collective communication.""" + from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator + from vllm.distributed.utils import StatelessProcessGroup + + if self.rank == 0: + pg = StatelessProcessGroup.create( + host=ip, port=port, rank=0, world_size=world_size + ) + device = torch.cuda.current_device() + self.model_update_group = PyNcclCommunicator(pg, device=device) + + def is_alive(self) -> bool: + return True 
+ + def reset_peak_memory_stats(self) -> None: + torch.cuda.reset_peak_memory_stats() + + def get_gpu_info(self) -> dict[str, Any]: + """Return information about the GPU being used by this worker.""" + return get_gpu_info(self.model) + + @wrap_with_nvtx_name("dtensor_policy_worker_v2/train") + def train( + self, + data: BatchedDataDict[Any], + loss_fn: LossFunction, + eval_mode: bool = False, + gbs: Optional[int] = None, + mbs: Optional[int] = None, + ) -> dict[str, Any]: + """Train the policy on a batch of data with a given loss function.""" + if gbs is None: + gbs = self.cfg["train_global_batch_size"] + if mbs is None: + mbs = self.cfg["train_micro_batch_size"] + local_gbs = gbs // self.dp_size + total_dataset_size = torch.tensor(data.size, device="cuda") + torch.distributed.all_reduce( + total_dataset_size, + op=torch.distributed.ReduceOp.SUM, + group=self.dp_mesh.get_group(), + ) + num_global_batches = int(total_dataset_size.item()) // gbs + + # dim 1 is always assumed to be the sequence dim, sanity check this here + sequence_dim = 1 + seq_dim_size = data.get("input_ids").shape[sequence_dim] + for k, v in data.items(): + if torch.is_tensor(v) and len(v.shape) > 1: + assert v.shape[sequence_dim] == seq_dim_size, ( + f"Dim 1 must be the sequence dim, expected dim 1={seq_dim_size} but got shape {v.shape}" + ) + + if eval_mode: + ctx: AbstractContextManager[Any] = torch.no_grad() + self.model.eval() + else: + ctx = nullcontext() + # Ensure model is in training mode + self.model.train() + + with ctx: + # Get data from batch and move to device + data.to("cuda") + + losses = [] + all_mb_metrics = [] + for gb_idx in range(num_global_batches): + global_batch = data.get_batch(batch_idx=gb_idx, batch_size=local_gbs) + + assert "sample_mask" in global_batch, ( + "sample_mask must be present in the data!" 
+ ) + ## get the normalization factor for the loss + local_valid_seqs = torch.sum(global_batch["sample_mask"]) + + if not "token_mask" in global_batch: + local_valid_toks = ( + local_valid_seqs * global_batch["input_ids"].shape[1] + ) + else: + local_valid_toks = torch.sum( + global_batch["token_mask"][:, 1:] + * global_batch["sample_mask"].unsqueeze(-1) + ) + + to_reduce = torch.tensor([local_valid_seqs, local_valid_toks]).cuda() + torch.distributed.all_reduce(to_reduce, group=self.dp_mesh.get_group()) + global_valid_seqs, global_valid_toks = to_reduce[0], to_reduce[1] + + if ( + hasattr(loss_fn, "loss_type") + and loss_fn.loss_type == LossType.TOKEN_LEVEL + ): + assert "token_mask" in global_batch, ( + "token_mask must be present in the data when using token-level loss" + ) + + self.optimizer.zero_grad() + mb_losses = [] + batch = data.get_batch(batch_idx=gb_idx, batch_size=local_gbs) + # Calculate number of microbatches to process + # make_microbatch_iterator assumes that the batch size is a multiple of the microbatch size + # so its safe to not check for the case where the last data slice is smaller than mbs + dummy_iterator = iter([]) + if self.cfg["dynamic_batching"]["enabled"]: + mb_iterator = batch.make_microbatch_iterator_with_dynamic_shapes() + iterator_len = batch.get_microbatch_iterator_dynamic_shapes_len() + elif self.enable_seq_packing: + mb_iterator = ( + batch.make_microbatch_iterator_for_packable_sequences() + ) + iterator_len, max_seqlen = ( + batch.get_microbatch_iterator_for_packable_sequences_len() + ) + max_batch_ct = torch.tensor([iterator_len], device="cuda") + torch.distributed.all_reduce( + max_batch_ct, op=torch.distributed.ReduceOp.MAX + ) + + # Sequence packing can end up with unevenly distributed batch counts across DP ranks. + # We add dummy batches to the end of the iterator to make the batch counts equal. 
+ dummy_batch_ct = int(max_batch_ct.item() - iterator_len) + dummy_iterator = ( + batch.make_microbatch_iterator_for_packable_sequences() + ) + dummy_iterator = itertools.islice( + itertools.cycle(dummy_iterator), dummy_batch_ct + ) + else: + mb_iterator = batch.make_microbatch_iterator(mbs) + iterator_len = batch.size // mbs + + for mb_idx, mb in enumerate( + itertools.chain(mb_iterator, dummy_iterator) + ): + torch.cuda.empty_cache() + + with torch.autocast(device_type="cuda", dtype=self.dtype): + if self.enable_seq_packing: + input_ids = mb.get("input_ids").cuda() + input_ids, position_ids, _ = pack_sequences( + input_ids=input_ids, + input_lengths=mb["input_lengths"], + packed_sequence_size=[ + len(mb["input_lengths"]) + ], # flash attention 2 expects flattened input + padding_value=self.tokenizer.eos_token_id, + return_attention_mask=False, + min_seq_len=self.cfg["sequence_packing"][ + "train_mb_tokens" + ], # TODO: this is a WAR for sequence packing, we should fix this. Without this, backward will fail when TP is enabled. 
+ ) + seq_len = input_ids.shape[1] + attention_mask = None + flash_attn_kwargs = get_flash_attention_kwargs( + input_lengths=mb["input_lengths"], + ) + + else: + input_ids = mb.get("input_ids").cuda() + batch_size, seq_len = input_ids.shape + + attention_mask = torch.ones( + (batch_size, seq_len), + dtype=torch.long, + device=input_ids.device, + ) + position_ids = torch.arange( + seq_len, device=input_ids.device + ).repeat(batch_size, 1) + flash_attn_kwargs = {} + + context_parallel_ctx = None + if self.cp_size > 1: + seq_index = torch.arange( + seq_len, device=input_ids.device + ).repeat(1, 1) + cp_buffers = ( + [input_ids, position_ids, seq_index] + if self.cp_size > 1 + else [] + ) + + # Create context parallel context + context_parallel_ctx = create_context_parallel_ctx( + cp_mesh=self.cp_mesh, + cp_buffers=cp_buffers, + cp_seq_dims=[sequence_dim] * len(cp_buffers), + cp_no_restore_buffers=set(cp_buffers), + ) + + with get_train_context(False, False, context_parallel_ctx)(): + with torch.autocast(device_type="cuda", dtype=self.dtype): + model_args = dict( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + use_cache=False, + flash_attn_kwargs=flash_attn_kwargs, + ) + + if self._is_reward_model: + # `flash_attn_kwarg` is not supported for `LlamaForSequenceClassification`. + # Note that it should be empty anyway since sequence packing + # is not supported for reward models. 
+ assert not flash_attn_kwargs + del model_args["flash_attn_kwargs"] + + outputs = self.model(**model_args) + + # Get logprobs + if not hasattr(outputs, "logits"): + logits = self.model.lm_head(outputs.last_hidden_state) + else: + logits = outputs.logits + del outputs + + # Apply temperature scaling + logits = self._apply_temperature_scaling(logits) + + if self.cp_size > 1: + seq_index_dtensor = ( + DTensor.from_local( + seq_index, + device_mesh=self.cp_mesh, + placements=[Shard(1)], + ) + .full_tensor() + .squeeze(0) + ) + + mb["seq_index"] = seq_index_dtensor + + for tensor_name in mb: + current_tensor = mb[tensor_name] + for buffer in cp_buffers: + if current_tensor is buffer: + assert type(current_tensor) == torch.Tensor, ( + f"tensor {tensor_name} is not a tensor" + ) + mb[tensor_name] = DTensor.from_local( + current_tensor, + device_mesh=self.cp_mesh, + placements=[Shard(sequence_dim)], + ) + break + + if isinstance(logits, DTensor): + # Must be tp sharded + assert ( + logits.device_mesh.ndim == 1 + and logits.device_mesh.mesh_dim_names[0] == "tp" + ), "logits must be tp sharded" + + # CP is implicitly sharded on the seq dim, so we need to redistribute to the tp dim + logits = DTensor.from_local( + logits.to_local(), + device_mesh=self.device_mesh[("cp", "tp")], + placements=[Shard(sequence_dim), Shard(-1)], + ) + else: + logits = DTensor.from_local( + logits, + device_mesh=self.device_mesh[("cp", "tp")], + placements=[Shard(sequence_dim), Shard(-1)], + ) + + if self.enable_seq_packing: + loss_fn_ = SequencePackingLossWrapper( + loss_fn=loss_fn, + cu_seqlens_q=flash_attn_kwargs.cu_seqlens_q, + cu_seqlens_q_padded=flash_attn_kwargs.cu_seqlens_q, + ) + else: + loss_fn_ = loss_fn + + loss, loss_metrics = loss_fn_( + logits, + mb, + global_valid_seqs, + global_valid_toks, + ) + del logits + + # skip the update for dummy batches + if mb_idx < iterator_len: + ## scale by the number of global batches so we get the correct + ## value when summing metrics across all 
microbatches + for k in loss_metrics.keys(): + loss_metrics[k] /= num_global_batches + num_valid_samples = loss_metrics["num_valid_samples"] + loss_metrics["lr"] = self.optimizer.param_groups[0]["lr"] + loss_metrics["global_valid_seqs"] = global_valid_seqs.item() + loss_metrics["global_valid_toks"] = global_valid_toks.item() + else: + loss *= 0 + + # Backward pass + if not eval_mode: + ## NOTE: invalid samples should be multiplied + ## by zero in the loss function to prevent them + ## from affecting the gradient calculation + + # when FSDP reduces the gradients over the DP dim, they're automatically averaged + # but we want to sum them so we cancel out the average here + loss *= self.dp_size * self.cp_size + loss.backward() + + if num_valid_samples > 0: + mb_losses.append(loss.item()) + all_mb_metrics.append(loss_metrics) + + grad_norm: Optional[float | torch.Tensor] = None + if not eval_mode: + with torch.no_grad(): + grad_norm = get_grad_norm( + self.model.parameters(), + dp_cp_group=self.dp_cp_mesh.get_group(), + tp_group=self.tp_mesh.get_group(), + dtype=torch.float32, + ) + if self.max_grad_norm is not None: + clip_grad_by_total_norm_( + self.model.parameters(), + max_grad_norm=self.max_grad_norm, + total_norm=grad_norm, + dtype=torch.float32, + ) + grad_norm = torch.tensor([grad_norm]) + + # Update parameters + self.optimizer.step() + + losses.append(torch.tensor(mb_losses).sum().item()) + + # increment scheduler after all batches in rollout are processed + if not eval_mode: + self.scheduler.step() + # dynamic batch and sequence dims causes alot of fragmentation, so clear + # the memory allocator before moving on + torch.cuda.empty_cache() + + # Compute global loss across all ranks + with torch.no_grad(): + global_loss = torch.tensor(losses, device="cuda") + torch.distributed.all_reduce( + global_loss, group=self.dp_mesh.get_group() + ) + # Aggregate metrics across all microbatches + mb_metrics = defaultdict(list) + for m in all_mb_metrics: + for k, v in 
m.items(): + mb_metrics[k].append(v) + + metrics = { + "global_loss": global_loss.cpu(), + "grad_norm": grad_norm, + "rank": torch.distributed.get_rank(), + "gpu_name": torch.cuda.get_device_name(), + "model_dtype": self.dtype, + "all_mb_metrics": dict(mb_metrics), + } + + return metrics + + @wrap_with_nvtx_name("dtensor_policy_worker_v2/get_logprobs") + def get_logprobs( + self, data: BatchedDataDict[Any], micro_batch_size: Optional[int] = None + ) -> BatchedDataDict[LogprobOutputSpec]: + """Get the logprobs of the model for a batch of data. + + Uses the configured logprob_batch_size to do microbatching. + + Input data is assumed to be right-padded. The method internally converts to + left-padded format for computation, and returns outputs in right-padded format. + + Returns: + a BatchedDataDict with key "logprobs" and shape [batch_size, sequence_length]. + We use the convention that the logprob of the first token is 0 so that the sequence length is maintained. + The logprob of input token i is specified at position i in the output logprobs tensor. 
+ """ + logprob_batch_size = ( + micro_batch_size + if micro_batch_size is not None + else self.cfg["logprob_batch_size"] + ) + logprob_chunk_size = self.cfg.get("logprob_chunk_size", None) + + # dim 1 is always assumed to be the sequence dim, sanity check this here + sequence_dim = 1 + seq_dim_size = data.get("input_ids").shape[sequence_dim] + for k, v in data.items(): + if torch.is_tensor(v) and len(v.shape) > 1: + assert v.shape[sequence_dim] == seq_dim_size, ( + f"Dim 1 must be the sequence dim, expected dim 1={seq_dim_size} but got shape {v.shape}" + ) + + all_log_probs = [] + self.model.eval() + + with unshard_fsdp2_model(self.model), torch.no_grad(): + data.to("cuda") + dummy_iterator = iter([]) + if self.cfg["dynamic_batching"]["enabled"]: + mb_iterator = data.make_microbatch_iterator_with_dynamic_shapes() + iterator_len = data.get_microbatch_iterator_dynamic_shapes_len() + elif self.enable_seq_packing: + mb_iterator = data.make_microbatch_iterator_for_packable_sequences() + iterator_len, max_seqlen = ( + data.get_microbatch_iterator_for_packable_sequences_len() + ) + max_batch_ct = torch.tensor([iterator_len], device="cuda") + torch.distributed.all_reduce( + max_batch_ct, op=torch.distributed.ReduceOp.MAX + ) + + # Sequence packing can end up with unevenly distributed batch counts across DP ranks. + # We add dummy batches to the end of the iterator to make the batch counts equal. 
+ dummy_batch_ct = int(max_batch_ct.item() - iterator_len) + dummy_iterator = data.make_microbatch_iterator_for_packable_sequences() + dummy_iterator = itertools.islice( + itertools.cycle(dummy_iterator), dummy_batch_ct + ) + else: + mb_iterator = data.make_microbatch_iterator(logprob_batch_size) + iterator_len = data.size // logprob_batch_size + + step = 0 + for batch_idx, lp_batch in enumerate( + itertools.chain(mb_iterator, dummy_iterator) + ): + step += 1 + input_ids = lp_batch.get("input_ids").cuda() + input_lengths = lp_batch.get("input_lengths") + + batch_size, seq_len = input_ids.shape + if self.enable_seq_packing: + input_ids, position_ids, _ = pack_sequences( + input_ids=input_ids, + input_lengths=input_lengths, + packed_sequence_size=[ + batch_size + ], # flash attention 2 expects flattened input + padding_value=self.tokenizer.eos_token_id, + return_attention_mask=False, + ) + seq_len = input_ids.shape[1] + attention_mask = None + flash_attn_kwargs = get_flash_attention_kwargs( + input_lengths=input_lengths, + ) + else: + # Create attention mask for right-padded data + attention_mask = torch.zeros( + (batch_size, seq_len), dtype=torch.long, device=input_ids.device + ) + for i, length in enumerate(input_lengths): + # For right-padded sequence, set 1s at the beginning of the sequence + attention_mask[i, :length] = 1 + + # explicitly create position ids for the input, otherwise the sharding + # for DTensor will be incorrect + position_ids = torch.arange( + seq_len, device=input_ids.device + ).repeat(batch_size, 1) + flash_attn_kwargs = {} + + with torch.autocast(device_type="cuda", dtype=self.dtype): + # DTensor requires the casual attention kernel to hit, + # yet our attention mask above is not always all 1s + # this is fine because we mask with the actual attention mask + # later, but for input it has to be all 1s + attention_mask_input_all_ones = torch.ones( + (batch_size, seq_len), dtype=torch.long, device=input_ids.device + ) + + context_parallel_ctx = 
None + if self.cp_size > 1: + seq_index = torch.arange(seq_len, device=input_ids.device).repeat( + 1, 1 + ) + cp_buffers = [input_ids, position_ids, seq_index] + + # Create context parallel context + context_parallel_ctx = create_context_parallel_ctx( + cp_mesh=self.cp_mesh, + cp_buffers=cp_buffers, + cp_seq_dims=[sequence_dim] * len(cp_buffers), + cp_no_restore_buffers=set(cp_buffers), + ) + + with get_train_context(False, False, context_parallel_ctx)(): + with torch.autocast(device_type="cuda", dtype=self.dtype): + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask_input_all_ones, + position_ids=position_ids, + use_cache=False, + flash_attn_kwargs=flash_attn_kwargs, + ) + + logits = outputs.logits + + # Apply temperature scaling + logits = self._apply_temperature_scaling(logits) + + if self.cp_size > 1: + seq_index_tensor = ( + DTensor.from_local( + seq_index, + device_mesh=self.cp_mesh, + placements=[Shard(1)], + ) + .full_tensor() + .squeeze(0) + ) + + input_ids_dtensor = DTensor.from_local( + input_ids, + device_mesh=self.cp_mesh, + placements=[Shard(sequence_dim)], + ) + + if isinstance(logits, DTensor): + # Must be tp sharded + assert ( + logits.device_mesh.ndim == 1 + and logits.device_mesh.mesh_dim_names[0] == "tp" + ), "logits must be tp sharded" + + # CP is implicitly sharded on the seq dim, so we need to redistribute to the tp dim + logits = DTensor.from_local( + logits.to_local(), + device_mesh=self.device_mesh[("cp", "tp")], + placements=[Shard(sequence_dim), Shard(-1)], + ) + else: + logits = DTensor.from_local( + logits, + device_mesh=self.device_mesh[("cp", "tp")], + placements=[Shard(sequence_dim), Shard(-1)], + ) + + token_logprobs = get_logprobs_from_vocab_parallel_logits( + logits, + input_ids_dtensor, + seq_index_tensor, + chunk_size=logprob_chunk_size, + ) + + assert token_logprobs.shape[1] == seq_len - 1 + else: + if isinstance(logits, DTensor): + token_logprobs = get_logprobs_from_vocab_parallel_logits( + logits, + 
input_ids, + chunk_size=logprob_chunk_size, + ) + else: + if logprob_chunk_size is not None: + logits_seq_len = int(logits.shape[1]) + num_chunks = ( + logits_seq_len + logprob_chunk_size - 1 + ) // logprob_chunk_size + chunked_log_probs = [] + for chunk_idx in range(num_chunks): + chunk_start = chunk_idx * logprob_chunk_size + chunk_end = min( + logits_seq_len, + (chunk_idx + 1) * logprob_chunk_size, + ) + chunk_logits = logits[ + :, chunk_start:chunk_end, : + ].to(torch.float32) + log_probs = torch.nn.functional.log_softmax( + chunk_logits, dim=-1 + ) + chunked_log_probs.append(log_probs) + log_probs = torch.cat(chunked_log_probs, dim=1) + del chunked_log_probs + else: + logits = logits.to(torch.float32) + log_probs = torch.nn.functional.log_softmax( + logits, dim=-1 + ) + # Extract logprobs for each token in the sequence by gathering the logprob + # corresponding to the next token at each position + # Input shapes: + # log_probs: [batch_size, sequence_length, vocab_size] - logits for each position + # token_ids: [batch_size, sequence_length] - actual tokens + # Output shape: [batch_size, sequence_length] - logprob of each token given previous + # We get logprob of token[t+1] from logits[t], prepending 0 to maintain sequence length + next_tokens = input_ids[:, 1:] + log_probs = log_probs[:, :-1] + token_logprobs = log_probs.gather( + dim=-1, index=next_tokens.unsqueeze(-1) + ).squeeze(-1) + del log_probs + + del outputs, logits + + token_logprobs = torch.cat( + [torch.zeros_like(token_logprobs[:, :1]), token_logprobs], dim=1 + ) + + # skip keeping the logprobs for the dummy batches + if batch_idx >= iterator_len: + continue + + if not self.enable_seq_packing: + # Apply mask to zero out padding tokens logprobs + token_logprobs = token_logprobs * attention_mask + else: + # For packed sequences, unpack logprobs + unpacked_logprobs = torch.zeros( + (batch_size, seq_dim_size), + dtype=token_logprobs.dtype, + device=token_logprobs.device, + ) + cu_seqlens = 
flash_attn_kwargs.cu_seqlens_q + for i in range(batch_size): + start = cu_seqlens[i].item() + 1 + end = cu_seqlens[i + 1].item() + seq_len_actual = input_lengths[i].item() + unpacked_logprobs[i, 1:seq_len_actual] = token_logprobs[ + 0, start:end + ] + token_logprobs = unpacked_logprobs + + all_log_probs.append(token_logprobs) + + # Concatenate all batches + return_data = BatchedDataDict[LogprobOutputSpec]() + + all_log_probs_padded = [] + for lp in all_log_probs: + padding_needed = seq_dim_size - lp.shape[1] + if padding_needed > 0: + lp = torch.nn.functional.pad( + lp, (0, padding_needed), mode="constant", value=0.0 + ) + all_log_probs_padded.append(lp) + return_data["logprobs"] = torch.cat(all_log_probs_padded, dim=0).cpu() + + return return_data + + @contextmanager + def use_reference_model(self) -> Generator[None, None, None]: + """Context manager that temporarily swaps the reference model and active model. + + On entry: Moves model to CPU, moves reference_model to CUDA. Swaps the references + On exit: Restores original references and re-flips cuda/cpu + """ + with torch.no_grad(): + try: + # Save train model state_dict + curr_state_dict = get_cpu_state_dict( + self.model.state_dict().items(), pin_memory=True + ) + + # Swap reference model state_dict to self.model + for k, v in self.model.state_dict().items(): + val = to_local_if_dtensor(v) + val.copy_(self.reference_model_state_dict[k]) + + # - self.model is the original reference_model, now on CUDA + # - curr_state_dict is the train model, now on CPU + yield + + finally: + # Restore train model state_dict + for k, v in self.model.state_dict().items(): + val = to_local_if_dtensor(v) + val.copy_(curr_state_dict[k]) + + @wrap_with_nvtx_name("dtensor_policy_worker_v2/get_reference_policy_logprobs") + def get_reference_policy_logprobs( + self, data: BatchedDataDict[Any], micro_batch_size: Optional[int] = None + ) -> BatchedDataDict[ReferenceLogprobOutputSpec]: + """Get the logprobs from the reference policy for a 
batch of data. + + Returns: + a BatchedDataDict with key "reference_logprobs" and shape [batch_size, sequence_length]. + We use the convention that the logprob of the first token is 0 so that the sequence length is maintained. + The logprob of input token i is specified at position i in the output logprobs tensor. + """ + with self.use_reference_model(): + reference_logprobs = self.get_logprobs(data, micro_batch_size) + + return_data = BatchedDataDict[ReferenceLogprobOutputSpec]() + return_data["reference_logprobs"] = reference_logprobs["logprobs"].cpu() + return return_data + + def _add_noise_to_weights(self) -> None: + """Add small Gaussian noise to the weights of the model. Note that this is used for testing purposes only.""" + noise_std = 0.01 # Standard deviation for the noise + for p in self.model.parameters(): + if p.requires_grad: + noise = torch.randn_like(p.data) * noise_std + p.data.add_(noise) # Add noise in-place + torch.cuda.synchronize() + + def return_state_dict(self): + return self.model.state_dict() + + def return_model_config(self) -> dict[str, Any]: + """Return the model configuration as a dictionary. + + Returns: + dict: Model configuration dictionary + """ + return self.model.config + + def report_device_id(self) -> str: + """Report the UUID of the current CUDA device using NVML. 
+ + Returns: + str: UUID of the device in the format "GPU-xxxxx" + """ + from nemo_rl.utils.nvml import get_device_uuid + + # Get current device index from torch + device_idx = torch.cuda.current_device() + # Get device UUID using NVML + return get_device_uuid(device_idx) + + @torch.no_grad() + def prepare_refit_info(self) -> Optional[dict[str, Any]]: + state_dict = self.model.state_dict() + + if self.is_generation_colocated: + # Collect info for streaming multiple tensors + self.refit_param_info = [] + for name, tensor in state_dict.items(): + # dtensor's numel will return complete tensor instead of only local tensor + size_in_bytes = tensor.element_size() * tensor.numel() + self.refit_param_info.append((name, size_in_bytes)) + + else: + # Collect info for collective communication + state_dict_info = {} + for name, tensor in state_dict.items(): + state_dict_info[name] = (tensor.shape, self.dtype) + + return state_dict_info + + @torch.no_grad() + def prepare_weights_for_ipc(self) -> tuple[list[tuple[str, int]], float]: + """Prepare the weights for IPC. + + This function: + - Prepares the state_dict of the model. + - Collects the info for streaming multiple tensors. + + Returns: + list: The list of parameters sizes. + float: The total available memory in bytes. 
+ """ + from nemo_rl.utils.nvml import get_free_memory_bytes + + # Manually move model to cuda for cpu offload case + if self.cpu_offload: + self.model = self.move_to_cuda(self.model) + + # Get state_dict + self._held_sharded_state_dict_reference: dict[str, torch.Tensor] = ( + self.model.state_dict() + ) + + # Collect current available memory for refit + ## Get current device index from torch + device_idx = torch.cuda.current_device() + ## Get device free memory using NVML + total_available_bytes = get_free_memory_bytes(device_idx) + ## Use 80% of the free memory for safety + memory_ratio = os.getenv("NRL_REFIT_BUFFER_MEMORY_RATIO", "0.8") + total_available_bytes *= float(memory_ratio) + + return self.refit_param_info, total_available_bytes + + @torch.no_grad() + @wrap_with_nvtx_name("dtensor_policy_worker_v2/get_weights_ipc_handles") + def get_weights_ipc_handles(self, keys: Iterable[str]) -> dict[str, Any]: + assert self._held_sharded_state_dict_reference is not None, ( + "prepare_weights_for_ipc must be called before get_weights_ipc_handles" + ) + + # Clean up the held tensors to reduce peak memory + if self._held_streamed_param_reference is not None: + del self._held_streamed_param_reference + self._held_streamed_param_reference = None + + converted_params = {} + for key in keys: + # Get full_tensor for dtensor (GPU > 1) + tensor = self._held_sharded_state_dict_reference[key] + if isinstance(tensor, DTensor): + full_tensor = tensor.full_tensor() + else: + full_tensor = tensor + # Convert parameters to the configured dtype + converted_params[key] = full_tensor.to(self.dtype, non_blocking=True) + + # Temporary record the full tensor for cleanup + # It is needed for cleanup the last full_tensor in the refit process + self._held_streamed_param_reference = converted_params + + # Get device UUID for IPC + device_uuid = self.report_device_id() + # Create handles for the tensors + all_handles = [] + for key, p in converted_params.items(): + handle = 
get_handle_from_tensor(p) + all_handles.append((key, handle)) + + # (pack_tensor_for_ipc: bool, handles: list) + serialized = (False, all_handles) + + return {device_uuid: serialized} + + @torch.no_grad() + def broadcast_weights_for_collective(self) -> None: + """Broadcast the weights for collective communication.""" + # Manually move model to cuda for cpu offload case + if self.cpu_offload: + print( + "[WARNING]: Unless you are lacking of memory, it is not recommended to enable cpu_offload when " + "using non-colocated generation since it will have an extra onload and offload at refit stage." + ) + self.model = self.move_to_cuda(self.model) + + # Broadcast the weights for collective communication + for _, tensor in self.model.state_dict().items(): + if isinstance(tensor, DTensor): + tensor = tensor.full_tensor() + if self.rank == 0: + tensor = tensor.to(self.dtype, non_blocking=True) + self.model_update_group.broadcast(tensor.data, src=0) + + # Manually move model to cpu for cpu offload case + # cpu offload needs model on CPU before model forward + if self.cpu_offload: + self.model = self.move_to_cpu(self.model) + + @wrap_with_nvtx_name("dtensor_policy_worker_v2/prepare_for_lp_inference") + def prepare_for_lp_inference(self) -> None: + if not self.cpu_offload: + self.move_to_cuda(self.model) + else: + self.model = self.move_buffer_to_device(self.model, "cuda") + + self.model.eval() + self.offload_before_refit() + + @wrap_with_nvtx_name("dtensor_policy_worker_v2/prepare_for_training") + def prepare_for_training(self, *args, **kwargs) -> None: + # onload models and optimizer state to cuda + if not self.cpu_offload: + self.move_to_cuda(self.model) + else: + # when cpu offload is enabled, the buffers do not get moved + # to cuda automatically, so we need to do that manually + self.model = self.move_buffer_to_device(self.model, "cuda") + + self.model.train() + # Move optimizer state to CUDA if it exists + if ( + hasattr(self, "optimizer") + and self.optimizer is not 
None + and not self.cpu_offload + ): + for state in self.optimizer.state.values(): + for k, v in state.items(): + if isinstance(v, (DTensor, torch.Tensor)): + state[k] = v.to("cuda") + + torch.cuda.empty_cache() + + @torch.no_grad() + @wrap_with_nvtx_name("dtensor_policy_worker_v2/offload_before_refit") + def offload_before_refit(self) -> None: + """Offload the optimizer to the CPU.""" + torch.randn(1).cuda() # wake up torch allocator + if hasattr(self, "optimizer") and self.optimizer is not None: + for state in self.optimizer.state.values(): + for k, v in state.items(): + if isinstance(v, (DTensor, torch.Tensor)): + state[k] = v.to("cpu") + + gc.collect() + torch.cuda.empty_cache() + + @torch.no_grad() + @wrap_with_nvtx_name("dtensor_policy_worker_v2/offload_after_refit") + def offload_after_refit(self) -> None: + # Offload as much as possible on the CPU + self.model = self.move_to_cpu(self.model) + self.model.eval() + torch.randn(1).cuda() # wake up torch allocator + self.offload_before_refit() # rerun the old offload function + + # Clean up the held tensors + if self._held_sharded_state_dict_reference is not None: + del self._held_sharded_state_dict_reference + self._held_sharded_state_dict_reference = None + if self._held_streamed_param_reference is not None: + del self._held_streamed_param_reference + self._held_streamed_param_reference = None + + gc.collect() + torch.cuda.empty_cache() + + # Print memory stats after offloading + allocated = torch.cuda.memory_allocated() / (1024**3) # Convert to GB + reserved = torch.cuda.memory_reserved() / (1024**3) # Convert to GB + print( + f"GPU Memory after optimizer offload: {allocated:.2f}GB allocated, {reserved:.2f}GB reserved" + ) + + def move_to_device(self, model: nn.Module, device: str | torch.device) -> nn.Module: + model = self.move_buffer_to_device(model, device) + return model.to(device) + + def move_buffer_to_device( + self, model: nn.Module, device: str | torch.device + ) -> nn.Module: + # FSDP modules do 
not move buffers to the device automatically + for v in model.buffers(): + v.data = v.data.to(device) + + return model + + def move_to_cuda(self, model: torch.nn.Module) -> torch.nn.Module: + model = self.move_to_device(model, "cuda") + gc.collect() + torch.cuda.empty_cache() + return model + + def move_to_cpu(self, model: torch.nn.Module) -> torch.nn.Module: + model = self.move_to_device(model, "cpu") + gc.collect() + torch.cuda.empty_cache() + return model + + def save_checkpoint( + self, + weights_path: str, + optimizer_path: Optional[str] = None, + tokenizer_path: Optional[str] = None, + ) -> None: + """Save a checkpoint of the model. + + the optimizer states are saved only if `optimizer` and `optimizer_path` are provided. + """ + save_checkpoint( + model=self.model, + weights_path=weights_path, + optimizer=self.optimizer if optimizer_path else None, + scheduler=self.scheduler if optimizer_path else None, + optimizer_path=optimizer_path, + tokenizer=self.tokenizer if tokenizer_path else None, + tokenizer_path=tokenizer_path, + ) + + def load_checkpoint( + self, weights_path: str, optimizer_path: Optional[str] = None + ) -> None: + """Load a checkpoint into the model.""" + load_checkpoint( + model=self.model, + weights_path=weights_path, + optimizer=self.optimizer if optimizer_path else None, + scheduler=self.scheduler if optimizer_path else None, + optimizer_path=optimizer_path, + ) + + def shutdown(self) -> None: + """Shutdown the policy.""" + + def start_gpu_profiling(self) -> None: + """Start GPU profiling.""" + torch.cuda.profiler.start() + + def stop_gpu_profiling(self) -> None: + """Stop GPU profiling.""" + torch.cuda.profiler.stop() diff --git a/nemo_rl/models/policy/lm_policy.py b/nemo_rl/models/policy/lm_policy.py index ebc608e35d..3d2a6e2d9e 100644 --- a/nemo_rl/models/policy/lm_policy.py +++ b/nemo_rl/models/policy/lm_policy.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import os +import warnings from collections import defaultdict from typing import Any, Optional, Union import numpy as np import ray from ray.util.queue import Queue as RayQueue -from transformers import PreTrainedTokenizerBase +from transformers import AutoProcessor, PreTrainedTokenizerBase from nemo_rl.algorithms.interfaces import LossFunction from nemo_rl.distributed.batched_data_dict import ( @@ -41,6 +42,11 @@ LogprobOutputSpec, ReferenceLogprobOutputSpec, ) +from nemo_rl.utils.flops_tracker import ( + FLOPTracker, + get_default_hf_config, + get_theoretical_tflops, +) PathLike = Union[str, "os.PathLike[Any]"] @@ -57,6 +63,7 @@ def __init__( weights_path: Optional[PathLike] = None, optimizer_path: Optional[PathLike] = None, init_reference_model: bool = True, + processor: Optional[AutoProcessor] = None, ): if weights_path: weights_path = os.path.abspath(weights_path) @@ -68,7 +75,7 @@ def __init__( pp_size = 1 cp_size = 1 - megatron_enable = config.get("megatron_cfg", {}).get("enabled", False) + megatron_enable = "megatron_cfg" in config and config["megatron_cfg"]["enabled"] if megatron_enable: worker_builder_cls = ( "nemo_rl.models.policy.megatron_policy_worker.MegatronPolicyWorker" @@ -83,9 +90,16 @@ def __init__( "Please either set policy.megatron_cfg.enabled=true to use Megatron training backend " "or set policy.dtensor_cfg.enabled=true to use DTensor training backend." 
) - worker_builder_cls = ( - "nemo_rl.models.policy.dtensor_policy_worker.DTensorPolicyWorker" - ) + + # Check if _v2 is enabled in dtensor_cfg (defaults to False for backward compatibility) + use_v2 = config["dtensor_cfg"].get("_v2", False) + if use_v2: + worker_builder_cls = "nemo_rl.models.policy.dtensor_policy_worker_v2.DTensorPolicyWorkerV2" + else: + worker_builder_cls = ( + "nemo_rl.models.policy.dtensor_policy_worker.DTensorPolicyWorker" + ) + tp_size = config["dtensor_cfg"]["tensor_parallel_size"] cp_size = config["dtensor_cfg"]["context_parallel_size"] @@ -111,6 +125,7 @@ def __init__( worker_builder_cls, config, tokenizer=tokenizer, + processor=processor, init_optimizer=init_optimizer, weights_path=weights_path, optimizer_path=optimizer_path, @@ -125,7 +140,7 @@ def __init__( name_prefix=name_prefix, workers_per_node=workers_per_node, sharding_annotations=self.sharding_annotations, - env_vars=env_vars, + env_vars=env_vars or {}, ) if config["dynamic_batching"]["enabled"]: @@ -147,13 +162,20 @@ def __init__( else: self.use_dynamic_batches = False + # initialize FLOPs tracker + try: + self.flops_tracker = FLOPTracker.from_config( + config["model_name"], get_default_hf_config(config["model_name"]) + ) + except ValueError as e: + self.flops_tracker = None + print(f"FLOPS tracker not supported for model {config['model_name']}: {e}") + if config["sequence_packing"]["enabled"]: self.use_sequence_packing = True self.sequence_packing_args: SequencePackingArgs = { "train_mb_tokens": config["sequence_packing"]["train_mb_tokens"], - "logprob_mb_tokens": config["sequence_packing"].get( - "logprob_mb_tokens", None - ), + "logprob_mb_tokens": config["sequence_packing"]["logprob_mb_tokens"], "algorithm": config["sequence_packing"]["algorithm"], "input_key": "input_ids", "input_lengths_key": "input_lengths", @@ -346,6 +368,12 @@ def train( batch_size=batch_size, ) + if self.flops_tracker is not None: + self.flops_tracker.reset() + for shard in sharded_data: + 
input_lengths = shard["input_lengths"] + self.flops_tracker.track_batch(input_lengths.tolist()) + # Train each shard in parallel futures = self.worker_group.run_all_workers_sharded_data( "train", @@ -376,6 +404,18 @@ def train( "grad_norm": results[0]["grad_norm"], } + if self.flops_tracker is not None: + aggregated_results["total_flops"] = self.flops_tracker.total_flops + aggregated_results["num_ranks"] = len(results) + + try: + aggregated_results["theoretical_tflops"] = sum( + get_theoretical_tflops(r["gpu_name"], r["model_dtype"]) + for r in results + ) + except Exception as e: + warnings.warn(f"Error getting theoretical flops: {e}") + # Aggregate metrics across all workers all_mb_metrics = defaultdict(list) for r in results: diff --git a/nemo_rl/models/policy/megatron_policy_worker.py b/nemo_rl/models/policy/megatron_policy_worker.py index aa1f349b2b..9a16625db2 100644 --- a/nemo_rl/models/policy/megatron_policy_worker.py +++ b/nemo_rl/models/policy/megatron_policy_worker.py @@ -118,12 +118,12 @@ ) from nemo_rl.models.policy.utils import ( configure_dynamo_cache, - configure_expandable_segments, get_gpu_info, get_handle_from_tensor, get_megatron_checkpoint_dir, get_runtime_env_for_policy_worker, ) +from nemo_rl.utils.nsys import wrap_with_nvtx_name TokenizerType = TypeVar("TokenizerType", bound=PreTrainedTokenizerBase) @@ -216,6 +216,9 @@ def re_enable_float32_expert_bias(model_module): overlap_param_gather_with_optimizer_step=cfg.optimizer_config.overlap_param_gather_with_optimizer_step, data_parallel_random_init=cfg.rng_config.data_parallel_random_init, model_post_init_fns=model_post_init_fns, + wrap_cast_model_output_to_fp32=( + not policy_cfg["megatron_cfg"].get("defer_fp32_logits", None) + ), ) if load_optimizer: optimizer, scheduler = setup_optimizer( @@ -378,6 +381,15 @@ def __init__( pre_init_communication_queue: Queue, **kwargs: Any, ): + self.is_generation_colocated = None + if "generation" in config and config["generation"] is not None: + 
self.is_generation_colocated = config["generation"]["colocated"]["enabled"] + + # Explicitly set NCCL_CUMEM_ENABLE to 1 to avoid the P2P initialization error for PyNCCLCommunicator. + # See https://github.com/NVIDIA-NeMo/RL/issues/564 for more details. + if not self.is_generation_colocated: + os.environ["NCCL_CUMEM_ENABLE"] = "1" + self.cfg = config dtype_map = { "float32": torch.float32, @@ -387,7 +399,7 @@ def __init__( self.dtype = dtype_map[self.cfg["precision"]] # Reward models are not yet supported with Megatron. - if self.cfg.get("reward_model_cfg", {}).get("enabled", False): + if "reward_model_cfg" in self.cfg and self.cfg["reward_model_cfg"]["enabled"]: raise NotImplementedError( "Reward models are not yet supported with the Megatron backend, this issue is " "tracked in https://github.com/NVIDIA-NeMo/RL/issues/720" @@ -397,9 +409,6 @@ def __init__( # with different order of node_bundles configure_dynamo_cache() - # Only enable expandable_segments on Hopper and newer architectures (compute capability 9.x+) - configure_expandable_segments() - # cfg["model_name"] is allowed to be either an HF model name or a path to an HF checkpoint # check if hf_model_name is a path hf_model_name = self.cfg["model_name"] @@ -416,7 +425,8 @@ def __init__( # Ensure clean slate before import destroy_parallel_state() - if get_rank_safe() == 0: + self.rank = get_rank_safe() + if self.rank == 0: if pt_checkpoint_exists: print( f"Checkpoint already exists at {pretrained_path}. Skipping import." 
@@ -651,6 +661,9 @@ def __init__( use_torch_fsdp2=self.megatron_cfg.dist_config.use_torch_fsdp2, overlap_param_gather_with_optimizer_step=self.megatron_cfg.optimizer_config.overlap_param_gather_with_optimizer_step, data_parallel_random_init=self.megatron_cfg.rng_config.data_parallel_random_init, + wrap_cast_model_output_to_fp32=( + not self.cfg["megatron_cfg"].get("defer_fp32_logits", None) + ), ) print("Loading the Reference Model") if ( @@ -725,6 +738,18 @@ def __init__( ## used for streaming update inference engine weights self._held_gather_buffer = None + def init_collective(self, ip: str, port: int, world_size: int) -> None: + """Initialize the collective communication.""" + from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator + from vllm.distributed.utils import StatelessProcessGroup + + if self.rank == 0: + pg = StatelessProcessGroup.create( + host=ip, port=port, rank=0, world_size=world_size + ) + device = torch.cuda.current_device() + self.model_update_group = PyNcclCommunicator(pg, device=device) + def is_alive(self): return True @@ -743,6 +768,7 @@ def disable_forward_pre_hook(self, param_sync=True): assert isinstance(self.model, DistributedDataParallel) self.model.disable_forward_pre_hook(param_sync=param_sync) + @wrap_with_nvtx_name("megatron_policy_worker/train") def train( self, data: BatchedDataDict, @@ -794,7 +820,9 @@ def train( f"Dim 1 must be the sequence dim, expected dim 1={seq_dim_size} but got shape {v.shape}" ) - forward_step = partial(forward_step_arbitrary_loss, loss_fn=loss_fn) + forward_step = partial( + forward_step_arbitrary_loss, loss_fn=loss_fn, policy_cfg=self.cfg + ) all_mb_metrics = [] losses = [] for gb_idx in range(num_global_batches): @@ -979,6 +1007,8 @@ def train( metrics = { "global_loss": global_loss.cpu(), "rank": torch.distributed.get_rank(), + "gpu_name": torch.cuda.get_device_name(), + "model_dtype": self.dtype, "all_mb_metrics": dict(mb_metrics), "grad_norm": torch.tensor( 
mb_metrics["grad_norm"][-1] @@ -986,6 +1016,7 @@ def train( } return metrics + @wrap_with_nvtx_name("megatron_policy_worker/get_logprobs") def get_logprobs( self, *, data: BatchedDataDict[Any], micro_batch_size: Optional[int] = None ) -> BatchedDataDict[LogprobOutputSpec]: @@ -1082,10 +1113,16 @@ def forward_step_fn( packed_seq_params=packed_seq_params, ) + # Apply temperature scaling to logits for training + # This matches the dtensor worker's _apply_temperature_scaling in the train method + if "generation" in self.cfg and self.cfg["generation"] is not None: + output_tensor.div_(self.cfg["generation"]["temperature"]) + def collection_fn(output_tensor): stc = time.time() tp_grp = get_tensor_model_parallel_group() tp_rank = get_tensor_model_parallel_rank() + logprob_chunk_size = self.cfg.get("logprob_chunk_size", None) if self.cfg["sequence_packing"]["enabled"]: token_logprobs = from_parallel_logits_to_logprobs_packed_sequences( output_tensor, @@ -1097,15 +1134,17 @@ def collection_fn(output_tensor): group=tp_grp, inference_only=True, cp_group=get_context_parallel_group(), + chunk_size=logprob_chunk_size, ) else: token_logprobs = from_parallel_logits_to_logprobs( - output_tensor.to(torch.float32), + output_tensor, target=unpacked_input_ids, vocab_start_index=tp_rank * output_tensor.shape[-1], vocab_end_index=(tp_rank + 1) * output_tensor.shape[-1], tp_group=tp_grp, inference_only=True, + chunk_size=logprob_chunk_size, ) # Prepend 0 logprob for first token to maintain same sequence length as input @@ -1216,6 +1255,7 @@ def use_reference_model(self): self.enable_forward_pre_hook() # Temporary fix, 'data' is a kwarg due to some sort of ray bug + @wrap_with_nvtx_name("megatron_policy_worker/get_reference_policy_logprobs") def get_reference_policy_logprobs( self, *, data: BatchedDataDict[Any], micro_batch_size: Optional[int] = None ) -> BatchedDataDict[ReferenceLogprobOutputSpec]: @@ -1238,6 +1278,7 @@ def get_reference_policy_logprobs( return_data["reference_logprobs"] 
= reference_logprobs["logprobs"].cpu() return return_data + @wrap_with_nvtx_name("megatron_policy_worker/generate") def generate( self, *, data: BatchedDataDict[GenerationDatumSpec], greedy: bool = False ) -> BatchedDataDict[GenerationOutputSpec]: @@ -1381,6 +1422,7 @@ def report_device_id(self) -> str: return get_device_uuid(device_idx) @torch.no_grad() + @wrap_with_nvtx_name("megatron_policy_worker/prepare_refit_info") def prepare_refit_info(self) -> None: # Get parameter info for refit # param_info: list of ((name, shape, dtype), size_in_bytes) tuples @@ -1407,14 +1449,15 @@ def prepare_refit_info(self) -> None: ) # collect tensor metadata for name, tensor in gathered_hf_params.items(): - refit_param_info_hf[name] = ( - tensor.shape, - tensor.dtype, - tensor.numel(), - ) + if self.is_generation_colocated: + metadata = (tensor.shape, tensor.dtype, tensor.numel()) + else: + metadata = (tensor.shape, tensor.dtype) + refit_param_info_hf[name] = metadata return refit_param_info_hf + @wrap_with_nvtx_name("megatron_policy_worker/prepare_weights_for_ipc") def prepare_weights_for_ipc(self) -> tuple[list[tuple[str, int]], float]: """Prepare Megatron model weights for IPC transfer to vLLM. @@ -1436,6 +1479,7 @@ def prepare_weights_for_ipc(self) -> tuple[list[tuple[str, int]], float]: # Temporary fix, 'keys' is a kwarg due to some sort of ray bug @torch.no_grad() + @wrap_with_nvtx_name("megatron_policy_worker/get_weights_ipc_handles") def get_weights_ipc_handles(self, *, keys: list[str]) -> dict[str, Any]: """Get IPC handles for the requested Megatron model weights. 
@@ -1524,6 +1568,25 @@ def get_weights_ipc_handles(self, *, keys: list[str]) -> dict[str, Any]: return {device_uuid: serialized} + @torch.no_grad() + def broadcast_weights_for_collective(self) -> None: + """Broadcast the weights for collective communication.""" + for key, _ in self.refit_param_info_mcore: + # gather megatron params + gathered_megatron_params = gather_params( + self.model, + [key], + key_to_global_keys=self.local_key_to_global_keys, + ) + # convert to hf params + gathered_hf_params = self.megatron_to_hf_converter.convert( + gathered_megatron_params, self.model.config + ) + # broadcast from train rank0 worker to inference workers + if self.rank == 0: + for _, tensor in gathered_hf_params.items(): + self.model_update_group.broadcast(tensor, src=0) + def prepare_for_lp_inference(self): self.model = self.move_model(self.model, "cuda", move_grads=False) self.model.eval() @@ -1549,6 +1612,7 @@ def prepare_for_training(self, *args, **kwargs): torch.cuda.empty_cache() + @wrap_with_nvtx_name("megatron_policy_worker/offload_before_refit") def offload_before_refit(self): """Offload the optimizer and buffers to the CPU.""" no_grad = torch.no_grad() @@ -1587,6 +1651,7 @@ def offload_before_refit(self): ) no_grad.__exit__(None, None, None) + @wrap_with_nvtx_name("megatron_policy_worker/offload_after_refit") def offload_after_refit(self): no_grad = torch.no_grad() no_grad.__enter__() @@ -1708,6 +1773,8 @@ def save_checkpoint( if not is_training: self.model.eval() + if self.should_disable_forward_pre_hook: + self.disable_forward_pre_hook() save_checkpoint( state=self.mcore_state, model=[self.model], @@ -1722,6 +1789,8 @@ def save_checkpoint( blocking=True, terminate=True, ) + if self.should_disable_forward_pre_hook: + self.enable_forward_pre_hook() if not is_training: # Restore training state if it was changed self.model.train() diff --git a/nemo_rl/models/policy/utils.py b/nemo_rl/models/policy/utils.py index c09a201268..42662f7a44 100644 --- 
a/nemo_rl/models/policy/utils.py +++ b/nemo_rl/models/policy/utils.py @@ -14,13 +14,38 @@ import importlib import os +from collections import defaultdict from typing import Any import torch -from transformers import AutoConfig +from torch import nn +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoModelForImageTextToText, + AutoModelForTextToWaveform, +) from nemo_rl.distributed.worker_group_utils import get_nsight_config_if_pattern_matches +# an automodel factory for loading the huggingface models from correct class +AUTOMODEL_FACTORY = defaultdict(lambda: AutoModelForCausalLM) +AUTOMODEL_FACTORY["qwen2_5_vl"] = AutoModelForImageTextToText +AUTOMODEL_FACTORY["qwen2_vl"] = AutoModelForImageTextToText +AUTOMODEL_FACTORY["qwen2_5_omni"] = AutoModelForTextToWaveform +AUTOMODEL_FACTORY["llava"] = AutoModelForImageTextToText +AUTOMODEL_FACTORY["internvl"] = AutoModelForImageTextToText +AUTOMODEL_FACTORY["gemma3"] = AutoModelForImageTextToText +AUTOMODEL_FACTORY["smolvlm"] = AutoModelForImageTextToText +AUTOMODEL_FACTORY["mistral3"] = AutoModelForImageTextToText +AUTOMODEL_FACTORY["llama4"] = AutoModelForImageTextToText + + +def resolve_model_class(model_name: str) -> nn.Module: + if model_name.lower() in AUTOMODEL_FACTORY.keys(): + return AUTOMODEL_FACTORY[model_name.lower()] + return AutoModelForCausalLM + def is_vllm_v1_engine_enabled() -> bool: """Check if vLLM V1 engine is enabled. @@ -140,49 +165,6 @@ def sliding_window_overwrite(model_name: str) -> dict[str, Any]: return overwrite_dict -def configure_expandable_segments() -> None: - """Configure expandable_segments on Hopper and newer architectures (compute capability 9.x+). - - This helps with memory allocation but causes crashes on Ampere GPUs, so we only enable it - on newer architectures. If PYTORCH_CUDA_ALLOC_CONF is already set, preserves existing values. 
- """ - compute_capability = torch.cuda.get_device_properties(0).major - - if compute_capability >= 9: # Hopper+ - existing_conf = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "") - - # Check if expandable_segments is already configured - if "expandable_segments" in existing_conf: - print(f"expandable_segments already configured: {existing_conf}") - # Already configured, don't override - return - - # Add expandable_segments to existing configuration - if existing_conf: - # Append to existing configuration - new_conf = f"{existing_conf},expandable_segments:True" - else: - # Set new configuration - new_conf = "expandable_segments:True" - - print(f"Setting PYTORCH_CUDA_ALLOC_CONF to {new_conf}") - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = new_conf - - else: - ## make sure that expandable_segments is not set to True - if "expandable_segments" in os.environ.get("PYTORCH_CUDA_ALLOC_CONF", ""): - conf_items = os.environ["PYTORCH_CUDA_ALLOC_CONF"].split(",") - for item in conf_items: - if item.strip().startswith("expandable_segments"): - key_value = item.split(":") - if len(key_value) == 2 and key_value[1].strip().lower() == "true": - raise RuntimeError( - "expandable_segments is enabled in PYTORCH_CUDA_ALLOC_CONF, " - "but this is not supported on architectures older than Hopper (compute capability < 9). " - "Please set expandable_segments to False." - ) - - def configure_dynamo_cache() -> None: """Disable dynamo autotune_local_cache. 
diff --git a/nemo_rl/package_info.py b/nemo_rl/package_info.py index 29883366db..2f28888469 100644 --- a/nemo_rl/package_info.py +++ b/nemo_rl/package_info.py @@ -14,7 +14,7 @@ MAJOR = 0 -MINOR = 3 +MINOR = 4 PATCH = 0 PRE_RELEASE = "rc0" diff --git a/nemo_rl/utils/checkpoint.py b/nemo_rl/utils/checkpoint.py index 48231d76a8..6f84d7782f 100644 --- a/nemo_rl/utils/checkpoint.py +++ b/nemo_rl/utils/checkpoint.py @@ -49,6 +49,7 @@ class CheckpointingConfig(TypedDict): higher_is_better: bool save_period: int keep_top_k: NotRequired[int] + checkpoint_must_save_by: NotRequired[str | None] class CheckpointManager: diff --git a/nemo_rl/utils/flops_formulas.py b/nemo_rl/utils/flops_formulas.py new file mode 100644 index 0000000000..4bbddc39e8 --- /dev/null +++ b/nemo_rl/utils/flops_formulas.py @@ -0,0 +1,553 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dataclasses import dataclass +from typing import List, Optional, Union + + +# lifted from NeMo/nemo/utils/flops_formulas.py +@dataclass +class FLOPSConfig: + """Contains the model hparams needed for FLOPS computations.""" + + gbs: int + enc_seq_len: Optional[int] = None + hs: Optional[int] = None + layers: Optional[int] = None + ffn_hs: Optional[int] = None + attention_heads: Optional[int] = None + moe_router_topk: Optional[int] = None + query_groups: Optional[int] = None + img_seq_len: Optional[int] = None + img_h: Optional[int] = None + img_w: Optional[int] = None + in_channels: Optional[int] = None + patch_dim: Optional[int] = None + class_token_len: Optional[int] = None + projector_type: Optional[str] = None + inp_s: Optional[int] = None + model_pattern: Optional[str] = None + vocab_size: Optional[int] = None + model_channels: Optional[int] = None + vec_in_dim: Optional[int] = None + q_lora_rank: Optional[int] = None + kv_lora_rank: Optional[int] = None + qk_head_dim: Optional[int] = None + qk_pos_emb_head_dim: Optional[int] = None + v_head_dim: Optional[int] = None + moe_layer_freq: Optional[Union[int, List[int]]] = None + moe_shared_expert_intermediate_size: Optional[int] = None + moe_ffn_hidden_size: Optional[int] = None + mtp_num_layers: Optional[int] = None + causal_self_attn: Optional[bool] = None + is_hybrid_model: bool = False + hybrid_override_pattern: Optional[str] = None + mamba_state_dim: Optional[int] = None + mamba_head_dim: Optional[int] = None + mamba_num_groups: Optional[int] = None + mamba_num_heads: Optional[int] = None + + +def gpt3(config: FLOPSConfig): + """Model FLOPs for GPT3 family.""" + return ( + 24 * config.gbs * config.enc_seq_len * config.hs * config.hs + + 4 * config.gbs * config.enc_seq_len * config.enc_seq_len * config.hs + ) * (3 * config.layers) + ( + 6 * config.gbs * config.enc_seq_len * config.hs * config.vocab_size + ) + + +def llama2(config: FLOPSConfig): + """Model FLOPs for llama2 family.""" + return ( + 
config.gbs + * config.enc_seq_len + * config.layers + * config.hs + * config.hs + * ( + 12 + + (12 * config.query_groups / config.attention_heads) + + (18 * config.ffn_hs / config.hs) + + (12 * config.enc_seq_len / config.hs) + + (6 * config.vocab_size / (config.layers * config.hs)) + ) + ) + + +def llama3(config: FLOPSConfig): + """Model FLOPs for llama3 family.""" + return ( + config.gbs + * config.enc_seq_len + * config.layers + * config.hs + * config.hs + * ( + 12 + + (12 * config.query_groups / config.attention_heads) + + (18 * config.ffn_hs / config.hs) + + (12 * config.enc_seq_len / config.hs) + + (6 * config.vocab_size / (config.layers * config.hs)) + ) + ) + + +def nemotron(config: FLOPSConfig): + """Model FLOPs for nemotron family.""" + return ( + config.gbs + * config.enc_seq_len + * config.layers + * config.hs + * config.hs + * ( + 12 + + (12 * config.query_groups / config.attention_heads) + + (12 * config.ffn_hs / config.hs) + + (12 * config.enc_seq_len / config.hs) + + (6 * config.vocab_size / (config.layers * config.hs)) + ) + ) + + +def mixtral(config: FLOPSConfig): + """Model FLOPs for mixtral family.""" + return ( + config.gbs + * config.enc_seq_len + * config.layers + * config.hs + * config.hs + * ( + 12 + + (12 * config.query_groups / config.attention_heads) + + (18 * config.moe_router_topk * config.ffn_hs / config.hs) + + (12 * config.enc_seq_len / config.hs) + + (6 * config.vocab_size / (config.layers * config.hs)) + ) + ) + + +def qwen2(config: FLOPSConfig): + """Model FLOPs for Qwen2 family.""" + causal_self_attn = True + seq_len = config.enc_seq_len + hidden_size = config.hs + gated_linear_multiplier = 2 + + # attention flops for GQA + attention_flops = ( + 3 + * 2 + * config.gbs + * config.layers + * seq_len + * hidden_size + * hidden_size + * ( + (2 + 1) # QKV gemm + + ( + seq_len / hidden_size * 2 * (0.5 if causal_self_attn else 1) + ) # attention + + 1 # attention proj gemm + ) + ) + + # mlp flops + mlp_flops = ( + 3 + * 2 + * 
config.gbs + * config.layers + * seq_len + * hidden_size + * (1 + gated_linear_multiplier) + * config.ffn_hs + ) + + # vocab flops + vocab_flops = 3 * 2 * config.gbs * seq_len * hidden_size * config.vocab_size + + return attention_flops + mlp_flops + vocab_flops + + +def qwen3(config: FLOPSConfig): + """Model FLOPs for Qwen3 family.""" + causal_self_attn = True + seq_len = config.enc_seq_len + hidden_size = config.hs + gated_linear_multiplier = 2 + + # attention flops for GQA + attention_flops = ( + 3 + * 2 + * config.gbs + * config.layers + * seq_len + * hidden_size + * hidden_size + * ( + (config.query_groups / config.attention_heads * 2 + 1) # QKV gemm + + ( + seq_len / hidden_size * 2 * (0.5 if causal_self_attn else 1) + ) # attention + + 1 # attention proj gemm + ) + ) + + # mlp flops + mlp_flops = ( + 3 + * 2 + * config.gbs + * config.layers + * seq_len + * hidden_size + * (1 + gated_linear_multiplier) + * (config.moe_ffn_hidden_size * config.moe_router_topk) # MoE layers + ) + + # vocab flops + vocab_flops = 3 * 2 * config.gbs * seq_len * hidden_size * config.vocab_size + + return attention_flops + mlp_flops + vocab_flops + + +def bert(config: FLOPSConfig): + """Model FLOPs for BERT family.""" + return ( + 72 + * config.gbs + * config.layers + * config.enc_seq_len + * config.hs + * config.hs + * ( + 1 + + (config.enc_seq_len / (6 * config.hs)) + + (config.vocab_size / (12 * config.hs * config.layers)) + ) + ) + + +def transformer(config: FLOPSConfig): + """Calculate FLOPs for a standard Transformer model. + + Note: This does not cover encoder-decoder models. 
+ """ + # Extract parameters from config + batch_size = config.gbs + hidden_size = config.hs + seq_length = config.enc_seq_len + num_layers = config.layers + num_attention_heads = config.attention_heads + ffn_hidden_size = config.ffn_hs + vocab_size = config.vocab_size + + if vocab_size is None: + raise ValueError("vocab_size is required for transformer FLOPs calculation") + + # Handle optional parameters with reasonable defaults + query_groups = ( + config.query_groups if config.query_groups is not None else num_attention_heads + ) + causal_self_attn = ( + config.causal_self_attn if config.causal_self_attn is not None else False + ) + moe_router_topk = ( + config.moe_router_topk if config.moe_router_topk is not None else 0 + ) + kv_channels = hidden_size // num_attention_heads # Standard dimension per head + + # Calculate query projection size and ratio + query_projection_size = kv_channels * num_attention_heads + query_projection_to_hidden_size_ratio = query_projection_size / hidden_size + + # MoE parameters - simplified for NeMo config + # In this implementation, we assume all layers are dense if num_experts is None + if moe_router_topk == 0: + num_dense_layers = num_layers + num_moe_layers = 0 + num_experts_routed_to = 0 + else: + # Simplified MoE handling - assuming uniform distribution of MoE layers + # This can be expanded based on NeMo's actual MoE implementation + num_moe_layers = num_layers // 2 # Simplified assumption + num_dense_layers = num_layers - num_moe_layers + num_experts_routed_to = moe_router_topk + + # Handle SwiGLU vs standard GELU/ReLU + # Default to standard activation (no SwiGLU) + gated_linear_multiplier = 1 + + # Define the expansion factor as described in the paper + # 3x: Each GEMM needs forward pass, backward wgrad, and backward dgrad + # 2x: GEMMs are stacked twice in standard Transformer architectures + # 2x: A GEMM of m*n with n*k requires 2mnk floating-point operations + expansion_factor = 3 * 2 * 2 + # Attention + if not 
causal_self_attn: + attention_component = ( + 1 + + (query_groups / num_attention_heads) + # Only half of the attention matrix is non-zero and needs to be multiplied with V + + (seq_length / hidden_size) # If causal self attn -> divide by 2. + ) * query_projection_to_hidden_size_ratio + else: + attention_component = ( + 1 + + (query_groups / num_attention_heads) + # Only half of the attention matrix is non-zero and needs to be multiplied with V + + (seq_length / hidden_size / 2) # If causal self attn -> divide by 2. + ) * query_projection_to_hidden_size_ratio + + # Calculate total FLOPs + total_flops = ( + expansion_factor + * batch_size + * seq_length + * num_layers + * hidden_size + * hidden_size + * ( + attention_component + # MLP component + + ( + ( + # Dense layers + (ffn_hidden_size * num_dense_layers) + + + # MoE layers + ( + ( + # Routed experts + ffn_hidden_size * num_experts_routed_to + # Note: Shared experts are not implemented in this version + ) + * num_moe_layers + ) + ) + * gated_linear_multiplier + / (num_layers * hidden_size) + ) + # Logit component + + (vocab_size / (2 * num_layers * hidden_size)) + ) + ) + + return total_flops + + +def flux(config: FLOPSConfig): + """Model FLOPs for FLUX.""" + hs = config.hs + seq_len = config.model_channels + config.inp_s + base_factor = 6 * config.gbs # common multiplier for most terms + + # Joint layer computations + joint_layer_flops = ( + base_factor + * config.layers[0] + * ( + 10 * hs * hs # hidden size operations + + 2 + * hs + * (config.model_channels + config.inp_s) + * (1 + hs * 7) # channel and context joint attention + + 2 * (config.model_channels + config.inp_s) * hs # final projection + ) + ) + + # Single layer computations + single_layer_flops = ( + base_factor + * config.layers[1] + * seq_len + * hs + * ( + 3 # linear Y + + 1 # Modulation + + 4 * hs # Linear computations + + (3 * hs + 2 * seq_len) # attention operations + + 5 * hs # feed-forward + + 1 # Modulation + ) + ) + + # Embedding and 
projection layers + other_flops = base_factor * ( + config.inp_s * config.in_channels * hs # image embedding + + config.inp_s * hs * config.model_channels # text embedding + + config.vec_in_dim * hs + + hs * hs # vector embedding + + 2 * (config.model_channels * hs + hs * hs) # guidance + timestep embedding + + (config.inp_s * config.in_channels * hs) / config.gbs # final projection + ) + + return joint_layer_flops + single_layer_flops + other_flops + + +def deepseekv3(config: FLOPSConfig): + """Model FLOPs for DeepSeek V3.""" + # self-attention flops + bmm1_flops = ( + 0.5 + * (config.qk_head_dim + config.qk_pos_emb_head_dim) + * config.attention_heads + * (config.enc_seq_len**2) + ) + bmm2_flops = ( + 0.5 * config.v_head_dim * config.attention_heads * (config.enc_seq_len**2) + ) + per_input_attention_flops = 6 * (bmm1_flops + bmm2_flops) * config.layers + if config.mtp_num_layers is not None: + per_input_attention_flops += ( + 6 * (bmm1_flops + bmm2_flops) * config.mtp_num_layers + ) + + # linear layer flops + per_layer_mla_params = config.hs * config.q_lora_rank + config.q_lora_rank * ( + (config.qk_head_dim + config.qk_pos_emb_head_dim) * config.attention_heads + ) # Q + per_layer_mla_params += config.hs * config.qk_pos_emb_head_dim # K^R + per_layer_mla_params += config.hs * config.kv_lora_rank + config.kv_lora_rank * ( + (config.qk_head_dim + config.v_head_dim) * config.attention_heads + ) # K^C and V^C + per_layer_mla_params += ( + config.v_head_dim * config.attention_heads * config.hs + ) # Proj + mla_params = per_layer_mla_params * config.layers + if config.mtp_num_layers is not None: + mla_params += per_layer_mla_params * config.mtp_num_layers + + dense_layer_ffn_params = config.hs * config.ffn_hs * 3 # gated linear unit + per_shared_expert_params = ( + config.hs * config.moe_shared_expert_intermediate_size * 3 + ) + per_selected_expert_params = config.hs * config.moe_ffn_hidden_size * 3 + ffn_params = 0 + + if isinstance(config.moe_layer_freq, int): + 
moe_layer_pattern = [ + 1 if (i % config.moe_layer_freq == 0) else 0 for i in range(config.layers) + ] + else: + moe_layer_pattern = config.moe_layer_freq + for i in moe_layer_pattern: + if i == 0: + ffn_params += dense_layer_ffn_params + else: + ffn_params += per_shared_expert_params + ( + per_selected_expert_params * config.moe_router_topk + ) + if config.mtp_num_layers is not None: + for i in range(config.mtp_num_layers): + ffn_params += per_shared_expert_params + ( + per_selected_expert_params * config.moe_router_topk + ) + per_input_params = mla_params + ffn_params + per_input_linear_flops = 6 * per_input_params * config.enc_seq_len + + # vocab flops + per_input_vocab_flops = 6 * config.vocab_size * config.hs * config.enc_seq_len + if config.mtp_num_layers is not None: + for i in range(config.mtp_num_layers): + per_input_vocab_flops += ( + 6 * config.vocab_size * config.hs * config.enc_seq_len + ) + per_input_vocab_flops += 6 * config.hs * 2 * config.hs * config.enc_seq_len + + return ( + per_input_attention_flops + per_input_linear_flops + per_input_vocab_flops + ) * config.gbs + + +def _mlp_layer_flops(config: FLOPSConfig): + """Model FLOPs for MLP layer.""" + return ( + 6 + * config.gbs + * config.enc_seq_len + * config.hs + * config.ffn_hs + * (2 if config.gated_linear_unit else 1) + ) + + +def _non_mla_attn_layer_flops(config: FLOPSConfig): + """Model FLOPs for attention layer.""" + return ( + 6 + * config.gbs + * config.enc_seq_len + * config.hs + * ( + config.hs # Q + + config.query_groups / config.attention_heads * config.hs * 2 # KV + + config.enc_seq_len / 2 * 2 + + config.hs + ) + ) + + +def _mamba_layer_flops(config: FLOPSConfig): + """Model FLOPs for Mamba layer. 
We ignore part of the flops of scan because the chunk size is not known from model config.""" + assert config.mamba_state_dim is not None + assert config.mamba_head_dim is not None + + if config.mamba_num_heads: + nheads = config.mamba_num_heads + else: + nheads = 2 * config.hs // config.mamba_head_dim # default expand is 2 + d_in = nheads * config.mamba_head_dim + return ( + ( + 6 + * config.gbs + * config.enc_seq_len + * config.hs + * (2 * d_in + 2 * config.mamba_num_groups * config.mamba_state_dim + nheads) + ) + + (3 * 2 * config.gbs * config.enc_seq_len * d_in * config.mamba_state_dim) + + (6 * config.gbs * config.enc_seq_len * d_in * config.hs) + ) + + +def _hybrid_model_flops(config: FLOPSConfig): + """Model FLOPs for hybrid model.""" + assert config.is_hybrid_model == True + assert config.hybrid_override_pattern is not None + + num_attn_layers, num_mamba_layers, num_mlp_layers = 0, 0, 0 + for c in config.hybrid_override_pattern: + if c == "M": + num_mamba_layers += 1 + elif c == "-": + num_mlp_layers += 1 + elif c == "*": + num_attn_layers += 1 + return ( + num_attn_layers * _non_mla_attn_layer_flops(config) + + num_mamba_layers * _mamba_layer_flops(config) + + num_mlp_layers * _mlp_layer_flops(config) + + 6 * config.gbs * config.enc_seq_len * config.hs * config.vocab_size + ) + + +def nemotronh(config: FLOPSConfig): + """Model FLOPs for NemotronH.""" + return _hybrid_model_flops(config) diff --git a/nemo_rl/utils/flops_tracker.py b/nemo_rl/utils/flops_tracker.py new file mode 100644 index 0000000000..aab769fdaf --- /dev/null +++ b/nemo_rl/utils/flops_tracker.py @@ -0,0 +1,142 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import asdict +from typing import Callable, Optional + +import torch +from transformers import AutoConfig +from transformers.configuration_utils import PretrainedConfig +from transformers.models.llama.configuration_llama import LlamaConfig +from transformers.models.qwen2.configuration_qwen2 import Qwen2Config +from transformers.models.qwen3.configuration_qwen3 import Qwen3Config + +from nemo_rl.models.policy.utils import sliding_window_overwrite +from nemo_rl.utils.flops_formulas import FLOPSConfig, llama2, llama3, qwen2, qwen3 + + +def get_default_hf_config(model_name: str) -> PretrainedConfig: + """Get the default Hugging Face config for a model. + + Both the DTensor and MCore paths use the same default config, we initialize the model config + here to allow computation of theoretical flops which is agnostic to the backend. 
+ """ + return AutoConfig.from_pretrained( + model_name, + torch_dtype=torch.float32, + trust_remote_code=True, + **sliding_window_overwrite(model_name), + ) + + +def convert_config_to_flops_config( + model_name: str, config: PretrainedConfig +) -> tuple[FLOPSConfig, Callable]: + """Convert a pretrained config to a tuple containing a FLOPSConfig and a flops formula.""" + if isinstance(config, Qwen2Config): + return FLOPSConfig( + gbs=0, + hs=config.hidden_size, + layers=config.num_hidden_layers, + ffn_hs=config.intermediate_size, + vocab_size=config.vocab_size, + ), qwen2 + elif isinstance(config, Qwen3Config): + return FLOPSConfig( + gbs=0, + hs=config.hidden_size, + layers=config.num_hidden_layers, + ffn_hs=config.intermediate_size, + vocab_size=config.vocab_size, + query_groups=config.num_attention_heads / config.num_key_value_heads, + attention_heads=config.num_attention_heads, + # for non-MoE models, we use the intermediate size as the ffn hidden size + moe_ffn_hidden_size=config.intermediate_size, + moe_router_topk=1, + ), qwen3 + elif isinstance(config, LlamaConfig): + return FLOPSConfig( + gbs=0, + hs=config.hidden_size, + layers=config.num_hidden_layers, + ffn_hs=config.intermediate_size, + query_groups=config.num_attention_heads / config.num_key_value_heads, + attention_heads=config.num_attention_heads, + vocab_size=config.vocab_size, + ), llama3 if "llama3" in model_name.lower() else llama2 + else: + raise ValueError(f"Unsupported config type: {type(config)}") + + +THEORETICAL_TFLOPS = { + ("NVIDIA H100 80GB HBM3", torch.bfloat16): 1979 / 2, + ("NVIDIA H100 80GB HBM3", torch.float32): 67.0, +} + + +def get_theoretical_tflops(device_name: str, model_dtype: torch.dtype) -> float: + """Get the theoretical total flops for a device name.""" + if (device_name, model_dtype) in THEORETICAL_TFLOPS: + return THEORETICAL_TFLOPS[(device_name, model_dtype)] + else: + raise ValueError( + f"Unknown device name: {device_name} and dtype name: {model_dtype}" + ) + + 
+class FLOPTracker: + def __init__( + self, + model_name: str, + base_config: FLOPSConfig | None = None, + flops_formula: Callable[[FLOPSConfig], float] | None = None, + ): + self.model_name = model_name + self.base_config = base_config + self.total_flops = 0 + self.flops_formula: Optional[Callable[[FLOPSConfig], float]] = flops_formula + + @classmethod + def from_config(cls, model_name: str, config: PretrainedConfig) -> "FLOPTracker": + flops_config, flops_formula = convert_config_to_flops_config(model_name, config) + return cls( + model_name=model_name, base_config=flops_config, flops_formula=flops_formula + ) + + def track(self, n_samples: int, padded_seq_len: int): + if self.flops_formula is None: + raise ValueError("Flops formula is not set") + + base_config_dict = ( + asdict(self.base_config) if self.base_config is not None else {} + ) + + # Override gbs and enc_seq_len with current values + config_dict = { + **base_config_dict, + "gbs": n_samples, + "enc_seq_len": padded_seq_len, + } + + # Compute and accumulate flops + flops = self.flops_formula(FLOPSConfig(**config_dict)) + self.total_flops += flops + + def track_batch(self, sequence_lengths: list[int]): + """Track the flops for a batch of sequences.""" + for seq_len in sequence_lengths: + self.track(n_samples=1, padded_seq_len=seq_len) + + def reset(self): + self.total_flops = 0 diff --git a/nemo_rl/utils/logger.py b/nemo_rl/utils/logger.py index 4cf2621cd4..711b8fd596 100644 --- a/nemo_rl/utils/logger.py +++ b/nemo_rl/utils/logger.py @@ -72,11 +72,11 @@ class LoggerConfig(TypedDict): tensorboard_enabled: bool mlflow_enabled: bool wandb: WandbConfig - tensorboard: TensorboardConfig + tensorboard: NotRequired[TensorboardConfig] mlflow: NotRequired[MLflowConfig] monitor_gpus: bool gpu_monitoring: GPUMonitoringConfig - num_val_samples_to_print: int + num_val_samples_to_print: NotRequired[int] class LoggerInterface(ABC): diff --git a/nemo_rl/utils/native_checkpoint.py b/nemo_rl/utils/native_checkpoint.py 
index 8d21aae7d3..0eb607542e 100644 --- a/nemo_rl/utils/native_checkpoint.py +++ b/nemo_rl/utils/native_checkpoint.py @@ -152,6 +152,8 @@ def save_checkpoint( optimizer: Optional optimizer to save scheduler: Optional scheduler to save optimizer_path: Path to save optimizer state (required if optimizer provided) + tokenizer: Optional tokenizer to save + tokenizer_path: Path to save tokenizer state (required if tokenizer provided) """ model_state = {"model": ModelState(model)} dcp.save(model_state, checkpoint_id=weights_path) @@ -169,6 +171,7 @@ def save_checkpoint( raise ValueError( "tokenizer_path must be provided when saving tokenizer state" ) + print(f"Saving tokenizer (or processor) to {tokenizer_path}") tokenizer.save_pretrained(tokenizer_path) diff --git a/nemo_rl/utils/nsys.py b/nemo_rl/utils/nsys.py index b5609f8c41..d9282970ab 100644 --- a/nemo_rl/utils/nsys.py +++ b/nemo_rl/utils/nsys.py @@ -16,6 +16,7 @@ from typing import Protocol import rich +import torch NRL_NSYS_WORKER_PATTERNS = os.environ.get("NRL_NSYS_WORKER_PATTERNS", "") NRL_NSYS_PROFILE_STEP_RANGE = os.environ.get("NRL_NSYS_PROFILE_STEP_RANGE", "") @@ -76,3 +77,18 @@ def stop_profiler_on_exit(): ) policy.stop_gpu_profiling() policy.__NRL_PROFILE_STARTED = False + + +def wrap_with_nvtx_name(name: str): + """A decorator to wrap a function with an NVTX range with the given name.""" + + def decorator(func): + def wrapper(*args, **kwargs): + torch.cuda.nvtx.range_push(name) + ret = func(*args, **kwargs) + torch.cuda.nvtx.range_pop() + return ret + + return wrapper + + return decorator diff --git a/nemo_rl/utils/timer.py b/nemo_rl/utils/timer.py index 4fdaffee98..5366d3f339 100644 --- a/nemo_rl/utils/timer.py +++ b/nemo_rl/utils/timer.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import sys import time from contextlib import contextmanager from typing import Callable, Generator, Optional, Sequence, Union @@ -245,3 +246,76 @@ def reset(self, label: Optional[str] = None) -> None: else: self._timers = {} self._start_times = {} + + +def convert_to_seconds(time_string: str) -> int: + """Converts a time string in the format 'DD:HH:MM:SS' to total seconds. + + Args: + time_string (str): Time duration string, e.g., '00:03:45:00'. + + Returns: + int: Total time in seconds. + """ + days, hours, minutes, seconds = map(int, time_string.split(":")) + return days * 86400 + hours * 3600 + minutes * 60 + seconds + + +class TimeoutChecker: + def __init__( + self, timeout: Optional[str] = "00:03:45:00", fit_last_save_time: bool = False + ): + """Initializes the TimeoutChecker. + + Args: + timeout (str or None): Timeout in format 'DD:HH:MM:SS'. If None, timeout is considered infinite. + fit_last_save_time (bool): If True, considers average iteration time when checking timeout. + """ + super().__init__() + self.last_save_time = ( + float("inf") if timeout is None else convert_to_seconds(timeout) + ) + self.start_time = time.time() + self.last_saved = False + self.iteration_times = [] + self.previous_iteration_time: Optional[float] = None + self.fit_last_save_time = fit_last_save_time + + def check_save(self): + # Flush + sys.stdout.flush() + sys.stderr.flush() + + # Already saved after timeout + if self.last_saved: + return False + + current_time = time.time() + elapsed_time = current_time - self.start_time + + if self.fit_last_save_time and self.iteration_times: + average_iteration_time = sum(self.iteration_times) / len( + self.iteration_times + ) + if elapsed_time + average_iteration_time >= self.last_save_time: + self.last_saved = True + return True + + if elapsed_time >= self.last_save_time: + self.last_saved = True + return True + + return False + + def start_iterations(self): + self.previous_iteration_time = time.time() + + def mark_iteration(self): + 
sys.stdout.flush() + sys.stderr.flush() + + current_time = time.time() + if self.previous_iteration_time is not None: + elapsed_time = current_time - self.previous_iteration_time + self.previous_iteration_time = current_time + self.iteration_times.append(elapsed_time) diff --git a/pyproject.toml b/pyproject.toml index f0ce1b1f2a..0a40539cc6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,21 +45,36 @@ dependencies = [ "nvtx", "matplotlib", "plotly", + "sympy>=1.14.0", + "pillow>=11.3.0", + "torchvision>=0.22.0", + "num2words>=0.5.14", # for SmolVLM "mlflow", ] [project.optional-dependencies] # Currently unused, but after https://github.com/NVIDIA-NeMo/RL/issues/501 is resolved, we should use this for the "BASE" PYEXECUTABLE automodel = [ + "nemo-automodel", # Flash-attn version should be selected to satisfy both TE + vLLM requirements (xformers in particular) # https://github.com/NVIDIA/TransformerEngine/blob/v2.3/transformer_engine/pytorch/attention/dot_product_attention/utils.py#L108 # https://github.com/facebookresearch/xformers/blob/8354497deb2c04c67fbb2e2ad911e86530da0e90/xformers/ops/fmha/flash.py#L76 + "vllm==0.10.0", # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/811 resolved "flash-attn==2.7.4.post1", + "mamba-ssm", + "causal-conv1d", ] vllm = [ + "cuda-python", + "deep_gemm @ git+https://github.com/deepseek-ai/DeepGEMM.git@7b6b5563b9d4c1ae07ffbce7f78ad3ac9204827c", "vllm==0.10.0", + "num2words>=0.5.14", # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/501 resolved "flash-attn==2.7.4.post1", + # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/501 resolved + "mamba-ssm", + # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/501 resolved + "causal-conv1d", ] mcore = [ # also need cudnn (https://developer.nvidia.com/cudnn-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=20.04&target_type=deb_network) @@ -70,6 +85,8 @@ mcore = [ "transformer-engine[pytorch]==2.3.0", 
"megatron-core", "nemo-tron", + # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/501 resolved + "vllm==0.10.0", # Flash-attn version should be selected to satisfy both TE + vLLM requirements (xformers in particular) # https://github.com/NVIDIA/TransformerEngine/blob/v2.3/transformer_engine/pytorch/attention/dot_product_attention/utils.py#L108 # https://github.com/facebookresearch/xformers/blob/8354497deb2c04c67fbb2e2ad911e86530da0e90/xformers/ops/fmha/flash.py#L76 @@ -102,7 +119,7 @@ docs = [ "nvidia-sphinx-theme", # Our NVIDIA theme ] dev = [ - "pre-commit==3.6.0", + "pre-commit>=4.2.0", "ruff==0.9.9", "types-PyYAML", "types-requests", @@ -118,6 +135,7 @@ test = [ [tool.uv.sources] megatron-core = { workspace = true } nemo-tron = { workspace = true } +nemo-automodel = { workspace = true } # The NeMo Run source to be used by nemo-tron nemo_run = { git = "https://github.com/NVIDIA-NeMo/Run", rev = "414f0077c648fde2c71bb1186e97ccbf96d6844c" } # torch/torchvision/triton all come from the torch index in order to pick up aarch64 wheels @@ -130,11 +148,14 @@ torchvision = [ triton = [ { index = "pytorch-cu128" }, ] +causal-conv1d = { git = "https://github.com/Dao-AILab/causal-conv1d", tag = "v1.5.0.post8" } +mamba-ssm = { git = "https://github.com/state-spaces/mamba.git", rev = "2e16fc3062cdcd4ebef27a9aa4442676e1c7edf4" } [tool.uv.workspace] members = [ "3rdparty/Megatron-LM-workspace", "3rdparty/NeMo-workspace", + "3rdparty/Automodel-workspace/Automodel", ] [[tool.uv.index]] @@ -143,7 +164,7 @@ url = "https://download.pytorch.org/whl/cu128" explicit = true [tool.uv] -no-build-isolation-package = ["transformer-engine-torch", "transformer-engine", "flash-attn"] +no-build-isolation-package = ["transformer-engine-torch", "transformer-engine", "flash-attn", "mamba-ssm", "causal-conv1d", "deep_gemm"] # Always apply the build group since dependencies like TE/mcore/nemo-run require build dependencies # and this lets us assume they are implicitly installed with a 
simply `uv sync`. Ideally, we'd # avoid including these in the default dependency set, but for now it's required. @@ -175,6 +196,7 @@ addopts = "--durations=15 -s -rA -x" testpaths = ["tests"] python_files = "test_*.py" markers = [ + "run_first: marks tests that should run before others", "mcore: marks tests that require the mcore extra", "hf_gated: marks tests that require HuggingFace token access for gated models", ] @@ -212,4 +234,3 @@ convention = "google" "*_test.py" = ["D"] # Ignore F401 (import but unused) in __init__.py "__init__.py" = ["F401"] - diff --git a/pyrefly.toml b/pyrefly.toml index 6442672edf..e9717a1ed0 100644 --- a/pyrefly.toml +++ b/pyrefly.toml @@ -50,6 +50,9 @@ project-includes = [ "nemo_rl/data/eval_datasets/mmlu.py", "nemo_rl/data/eval_datasets/mmlu_pro.py", "nemo_rl/data/hf_datasets/__init__.py", + "nemo_rl/data/hf_datasets/clevr.py", + "nemo_rl/data/hf_datasets/geometry3k.py", + "nemo_rl/data/hf_datasets/refcoco.py", "nemo_rl/data/hf_datasets/chat_templates.py", "nemo_rl/data/hf_datasets/deepscaler.py", "nemo_rl/data/hf_datasets/dpo.py", @@ -59,6 +62,8 @@ project-includes = [ "nemo_rl/data/hf_datasets/openmathinstruct2.py", "nemo_rl/data/hf_datasets/prompt_response_dataset.py", "nemo_rl/data/hf_datasets/squad.py", + "nemo_rl/data/hf_datasets/tulu3.py", + "nemo_rl/data/multimodal_utils.py", "nemo_rl/data/interfaces.py", "nemo_rl/data/packing/__init__.py", "nemo_rl/data/processors.py", @@ -69,6 +74,8 @@ project-includes = [ "nemo_rl/distributed/virtual_cluster.py", "nemo_rl/distributed/worker_group_utils.py", "nemo_rl/environments/__init__.py", + "nemo_rl/environments/rewards.py", + "nemo_rl/environments/vlm_environment.py", "nemo_rl/environments/games/sliding_puzzle.py", "nemo_rl/environments/interfaces.py", "nemo_rl/environments/math_environment.py", @@ -85,7 +92,10 @@ project-includes = [ "nemo_rl/models/dtensor/parallelize.py", "nemo_rl/models/generation/__init__.py", "nemo_rl/models/generation/interfaces.py", - 
"nemo_rl/models/generation/vllm_backend.py", + "nemo_rl/models/generation/vllm/__init__.py", + "nemo_rl/models/generation/vllm/config.py", + "nemo_rl/models/generation/vllm/utils.py", + "nemo_rl/models/generation/vllm/vllm_backend.py", "nemo_rl/models/huggingface/__init__.py", "nemo_rl/models/megatron/__init__.py", "nemo_rl/models/megatron/community_import.py", diff --git a/ray.sub b/ray.sub index 4635f66be6..32076d5fb3 100644 --- a/ray.sub +++ b/ray.sub @@ -31,8 +31,8 @@ maybe_gres_arg() { # Check if any nodes in the partition have GRES configured # Assumes a homogeneous allocation (not a heterogeneous job) if sinfo -p $SLURM_JOB_PARTITION -h -o "%G" | grep -q "gpu:"; then - # Do a quick assert here that gpus:8 == gpus:$GPUS_PER_NODE. It is probably a user error if someone isn't using GPUS_PER_NODE=8 on our clusters if it supports --gres=gpu:8. - if [[ $GPUS_PER_NODE -ne $(sinfo -p $SLURM_JOB_PARTITION -h -o "%G" | grep "gpu:" | cut -d: -f2) ]]; then + # Do a quick assert here that gpus:8 == gpus:$GPUS_PER_NODE. It is probably a user error if someone isn't using GPUS_PER_NODE=8 on our clusters if it supports --gres=gpu:8 or gpu:a100:8 + if [[ $GPUS_PER_NODE -ne $(sinfo -p $SLURM_JOB_PARTITION -h -o "%G" | grep "gpu:" | awk -F: '{print $NF}') ]]; then echo "Error: GPUS_PER_NODE=$GPUS_PER_NODE but GRES detected is $(sinfo -p $SLURM_JOB_PARTITION -h -o "%G" | grep "gpu:") meaning GPUS_PER_NODE is not set to fully claim the GPUs on the nodes." >&2 exit 1 fi @@ -59,18 +59,23 @@ DASHBOARD_AGENT_GRPC_PORT=${DASHBOARD_AGENT_GRPC_PORT:-53007} METRICS_EXPORT_PORT=${METRICS_EXPORT_PORT:-53009} # Ports for the head node -PORT=${PORT:-54258} +PORT=${PORT:-54514} RAY_CLIENT_SERVER_PORT=${RAY_CLIENT_SERVER_PORT:-10001} #REDIT_SHARD_PORTS=${REDIT_SHARD_PORTS:-"random"} ?? 
DASHBOARD_GRPC_PORT=${DASHBOARD_GRPC_PORT:-52367} DASHBOARD_PORT=${DASHBOARD_PORT:-8265} # Also used by debugger DASHBOARD_AGENT_LISTEN_PORT=${DASHBOARD_AGENT_LISTEN_PORT:-52365} +# Setting ulimit is recommended by ray best practices page +# @ https://docs.ray.io/en/latest/cluster/vms/user-guides/large-cluster-best-practices.html +# It's session based and won't affect the system outside the script +ulimit -Sn 65535 + # On our clusters, the largest port range on an idle worker appeared between 52369-64607 # (not including the other ports set by this script). So this range is chosen to be # somewhere in the middle MIN_WORKER_PORT=${MIN_WORKER_PORT:-54001} -MAX_WORKER_PORT=${MAX_WORKER_PORT:-54257} +MAX_WORKER_PORT=${MAX_WORKER_PORT:-54513} ######################################################## # Number seconds to sync logs from /tmp/ray/session_*/logs to $LOG_DIR/ray/ RAY_LOG_SYNC_FREQUENCY=${RAY_LOG_SYNC_FREQUENCY:-} @@ -124,7 +129,43 @@ nodes_array=($nodes) ip_addresses_array=() for node in $nodes; do - ip_address=$(host $node | awk '/has address/ { print $4 }') + # Try multiple methods to get IP address - ENHANCED VERSION v2.0 + echo "[DEBUG] Resolving hostname: $node using enhanced resolution methods" + ip_address="" + + # Method 1: Try host command + echo "[DEBUG] Method 1: host command" + ip_address=$(host $node 2>/dev/null | awk '/has address/ { print $4 }' | head -1 || true) + echo "[DEBUG] host result: '$ip_address'" + + # Method 2: If host fails, try getent + if [[ -z "$ip_address" ]]; then + echo "[DEBUG] Method 2: getent hosts" + ip_address=$(getent hosts $node 2>/dev/null | awk '{ print $1 }' | head -1 || true) + echo "[DEBUG] getent result: '$ip_address'" + fi + + # Method 3: If getent fails, try nslookup + if [[ -z "$ip_address" ]]; then + echo "[DEBUG] Method 3: nslookup" + ip_address=$(nslookup $node 2>/dev/null | awk '/^Address: / { print $2 }' | head -1 || true) + echo "[DEBUG] nslookup result: '$ip_address'" + fi + + # Method 4: If all DNS 
methods fail, try ping to extract IP + if [[ -z "$ip_address" ]]; then + echo "[DEBUG] Method 4: ping" + ip_address=$(ping -c 1 $node 2>/dev/null | grep "PING" | sed 's/.*(\([^)]*\)).*/\1/' || true) + echo "[DEBUG] ping result: '$ip_address'" + fi + + # If still no IP, use the hostname itself (might work if it's already an IP or resolvable) + if [[ -z "$ip_address" ]]; then + echo "[WARNING] Could not resolve IP for $node, using hostname as fallback" + ip_address=$node + fi + + echo "[INFO] Node: $node -> IP: $ip_address" # Add the IP address to the array ip_addresses_array+=("$ip_address") done @@ -338,7 +379,7 @@ echo "All workers connected!" # We can now launch a job on this cluster # We do so by launching a driver process on the physical node that the head node is on # This driver process is responsible for launching a job on the Ray cluster -CONTAINER_CWD=$(scontrol show job $SLURM_JOB_ID --json | jq -r '.jobs[].current_working_directory') +CONTAINER_CWD=$(scontrol show job $SLURM_JOB_ID | grep -oP 'WorkDir=\K[^ ]+' | head -1) if [[ -n "$COMMAND" ]]; then srun --no-container-mount-home --overlap --container-name=ray-head --container-workdir=$CONTAINER_CWD --nodes=1 --ntasks=1 -w "$head_node" -o $LOG_DIR/ray-driver.log bash -c "$COMMAND" else diff --git a/tests/check_metrics.py b/tests/check_metrics.py index a48c2f4875..bc9f6ced04 100644 --- a/tests/check_metrics.py +++ b/tests/check_metrics.py @@ -31,9 +31,31 @@ def max(value): return __builtins__.max(float(v) for v in value.values()) -def mean(value): - """Return the mean of values in a dictionary.""" - return statistics.mean(float(v) for v in value.values()) +def mean(value, range_start=1, range_end=0): + """Return the mean of values (or a range of values) in a dictionary. + + Note: + step, and ranges, are 1 indexed. Range_end is exclusive. 
+ range_end=0 means to include until the last step in the run + """ + + ## find potential offset that might arise from resuming from a checkpoint + max_step_reached = __builtins__.max([int(s) for s in value.keys()]) + ## this is the number of steps that occurred prior to resuming + offset = max_step_reached - len(value) + + num_elem = len(value) + if range_start < 0: + range_start += num_elem + 1 + offset + if range_end <= 0: + range_end += num_elem + 1 + offset + + vals = [] + for step, v in value.items(): + if range_start <= int(step) and int(step) < range_end: + vals.append(float(v)) + + return statistics.mean(vals) def evaluate_check(data: dict, check: str) -> tuple[bool, str, object]: diff --git a/tests/functional/L1_Functional_Tests_GPU.sh b/tests/functional/L1_Functional_Tests_GPU.sh index e78d46a990..9c1e1a86af 100644 --- a/tests/functional/L1_Functional_Tests_GPU.sh +++ b/tests/functional/L1_Functional_Tests_GPU.sh @@ -26,6 +26,7 @@ time uv run --no-sync bash ./tests/functional/rm.sh time uv run --no-sync bash ./tests/functional/eval.sh time uv run --no-sync bash ./tests/functional/eval_async.sh time uv run --no-sync bash ./tests/functional/test_mcore_extra_installed_correctly.sh +time uv run --no-sync bash ./tests/functional/vlm_grpo.sh cd /opt/nemo-rl/tests coverage combine .coverage* diff --git a/tests/functional/dpo_megatron.sh b/tests/functional/dpo_megatron.sh new file mode 100755 index 0000000000..8c1524c2c5 --- /dev/null +++ b/tests/functional/dpo_megatron.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# clean up checkpoint directory on exit +trap "rm -rf /tmp/sft_checkpoints" EXIT + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +PROJECT_ROOT=$(realpath $SCRIPT_DIR/../..) 
+# Mark the current repo as safe, since wandb fetches metadata about the repo +git config --global --add safe.directory $PROJECT_ROOT + +set -eou pipefail + +EXP_NAME=$(basename $0 .sh) +EXP_DIR=$SCRIPT_DIR/$EXP_NAME +LOG_DIR=$EXP_DIR/logs +JSON_METRICS=$EXP_DIR/metrics.json +RUN_LOG=$EXP_DIR/run.log +export PYTHONPATH=${PROJECT_ROOT}:${PYTHONPATH:-} + +rm -rf $EXP_DIR $LOG_DIR +mkdir -p $EXP_DIR $LOG_DIR + +cd $PROJECT_ROOT +uv run $PROJECT_ROOT/examples/run_dpo.py \ + --config $PROJECT_ROOT/examples/configs/recipes/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.yaml \ + policy.model_name=Qwen/Qwen3-0.6B \ + cluster.gpus_per_node=2 \ + dpo.max_num_steps=3 \ + dpo.val_batches=1 \ + dpo.val_period=3 \ + logger.tensorboard_enabled=true \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=false \ + logger.monitor_gpus=true \ + checkpointing.enabled=false \ + policy.megatron_cfg.tensor_model_parallel_size=1 \ + policy.train_global_batch_size=8 \ + $@ \ + 2>&1 | tee $RUN_LOG + +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["3"] < 5' \ + diff --git a/tests/functional/eval.sh b/tests/functional/eval.sh index fdf2e95c6a..2a153ef153 100644 --- a/tests/functional/eval.sh +++ b/tests/functional/eval.sh @@ -27,4 +27,4 @@ uv run coverage run -a --data-file=$PROJECT_ROOT/tests/.coverage --source=$PROJE cat $RUN_LOG | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > $JSON_METRICS uv run tests/check_metrics.py $JSON_METRICS \ - 'data["score"] == 0.1' \ + 'data["score"] == 0.1' diff --git a/tests/functional/eval_async.sh b/tests/functional/eval_async.sh index e92bed9249..c8c2a40433 100644 --- a/tests/functional/eval_async.sh +++ b/tests/functional/eval_async.sh @@ -29,4 +29,4 @@ uv run coverage run -a --data-file=$PROJECT_ROOT/tests/.coverage --source=$PROJE cat $RUN_LOG | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > $JSON_METRICS uv run tests/check_metrics.py 
$JSON_METRICS \ - 'data["score"] == 0.1' \ + 'data["score"] == 0.1' diff --git a/tests/functional/grpo.sh b/tests/functional/grpo.sh index 60761c36f1..d581bb9dac 100755 --- a/tests/functional/grpo.sh +++ b/tests/functional/grpo.sh @@ -38,5 +38,5 @@ uv run coverage run -a --data-file=$PROJECT_ROOT/tests/.coverage --source=$PROJE uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS uv run tests/check_metrics.py $JSON_METRICS \ - 'max(data["train/token_mult_prob_error"]) < 1.05' \ + 'max(data["train/token_mult_prob_error"]) < 1.05' diff --git a/tests/functional/grpo_megatron.sh b/tests/functional/grpo_megatron.sh index a0fcd9898b..f4b4ede8c4 100755 --- a/tests/functional/grpo_megatron.sh +++ b/tests/functional/grpo_megatron.sh @@ -41,5 +41,5 @@ uv run coverage run -a --data-file=$PROJECT_ROOT/tests/.coverage --source=$PROJE uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS uv run tests/check_metrics.py $JSON_METRICS \ - 'max(data["train/token_mult_prob_error"]) < 1.05' \ + 'max(data["train/token_mult_prob_error"]) < 1.05' diff --git a/tests/functional/grpo_multiturn.sh b/tests/functional/grpo_multiturn.sh index 53e359b9d8..9af8a8dc97 100755 --- a/tests/functional/grpo_multiturn.sh +++ b/tests/functional/grpo_multiturn.sh @@ -41,5 +41,5 @@ uv run coverage run -a --data-file=$PROJECT_ROOT/tests/.coverage --source=$PROJE uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS uv run tests/check_metrics.py $JSON_METRICS \ - 'max(data["train/token_mult_prob_error"]) < 1.1' \ + 'max(data["train/token_mult_prob_error"]) < 1.1' diff --git a/tests/functional/grpo_non_colocated.sh b/tests/functional/grpo_non_colocated.sh index 987390c43d..5f63fb5e9b 100755 --- a/tests/functional/grpo_non_colocated.sh +++ b/tests/functional/grpo_non_colocated.sh @@ -39,5 +39,5 @@ uv run coverage run -a --data-file=$PROJECT_ROOT/tests/.coverage --source=$PROJE uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS uv run 
tests/check_metrics.py $JSON_METRICS \ - 'max(data["train/token_mult_prob_error"]) < 1.05' \ + 'max(data["train/token_mult_prob_error"]) < 1.05' diff --git a/tests/functional/sft.sh b/tests/functional/sft.sh index d1d3b9c882..20c5e29479 100755 --- a/tests/functional/sft.sh +++ b/tests/functional/sft.sh @@ -41,5 +41,5 @@ uv run coverage run -a --data-file=$PROJECT_ROOT/tests/.coverage --source=$PROJE uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS uv run tests/check_metrics.py $JSON_METRICS \ - 'data["train/loss"]["3"] < 5.9' \ + 'data["train/loss"]["3"] < 5.9' diff --git a/tests/functional/sft_megatron.sh b/tests/functional/sft_megatron.sh new file mode 100755 index 0000000000..dfb7fcfdba --- /dev/null +++ b/tests/functional/sft_megatron.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# clean up checkpoint directory on exit +trap "rm -rf /tmp/sft_checkpoints" EXIT + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +PROJECT_ROOT=$(realpath $SCRIPT_DIR/../..) 
+# Mark the current repo as safe, since wandb fetches metadata about the repo +git config --global --add safe.directory $PROJECT_ROOT + +set -eou pipefail + +EXP_NAME=$(basename $0 .sh) +EXP_DIR=$SCRIPT_DIR/$EXP_NAME +LOG_DIR=$EXP_DIR/logs +JSON_METRICS=$EXP_DIR/metrics.json +RUN_LOG=$EXP_DIR/run.log +export PYTHONPATH=${PROJECT_ROOT}:${PYTHONPATH:-} + +rm -rf $EXP_DIR $LOG_DIR +mkdir -p $EXP_DIR $LOG_DIR + +cd $PROJECT_ROOT +uv run $PROJECT_ROOT/examples/run_sft.py \ + --config $PROJECT_ROOT/examples/configs/recipes/llm/sft-llama3.1-8b-1n8g-megatron.yaml \ + policy.model_name=Qwen/Qwen3-0.6B \ + policy.tokenizer.name=Qwen/Qwen3-0.6B \ + cluster.gpus_per_node=2 \ + sft.max_num_steps=3 \ + sft.val_batches=1 \ + sft.val_period=3 \ + logger.tensorboard_enabled=true \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=false \ + logger.monitor_gpus=true \ + checkpointing.enabled=false \ + policy.megatron_cfg.pipeline_model_parallel_size=1 \ + $@ \ + 2>&1 | tee $RUN_LOG + +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["3"] < 0.8' \ + diff --git a/tests/functional/vlm_grpo.sh b/tests/functional/vlm_grpo.sh new file mode 100755 index 0000000000..79ad6831b5 --- /dev/null +++ b/tests/functional/vlm_grpo.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +PROJECT_ROOT=$(realpath $SCRIPT_DIR/../..) 
+# Mark the current repo as safe, since wandb fetches metadata about the repo +git config --global --add safe.directory $PROJECT_ROOT + +set -eou pipefail + +EXP_NAME=$(basename $0 .sh) +EXP_DIR=$SCRIPT_DIR/$EXP_NAME +LOG_DIR=$EXP_DIR/logs +JSON_METRICS=$EXP_DIR/metrics.json +RUN_LOG=$EXP_DIR/run.log +export PYTHONPATH=${PROJECT_ROOT}:${PYTHONPATH:-} + +rm -rf $EXP_DIR $LOG_DIR +mkdir -p $EXP_DIR $LOG_DIR + +cd $PROJECT_ROOT +uv run $PROJECT_ROOT/examples/run_vlm_grpo.py \ + policy.model_name=Qwen/Qwen2.5-VL-3B-Instruct \ + grpo.num_prompts_per_step=2 \ + grpo.num_generations_per_prompt=4 \ + policy.train_global_batch_size=4 \ + policy.train_micro_batch_size=1 \ + cluster.gpus_per_node=2 \ + grpo.max_num_steps=5 \ + logger.tensorboard_enabled=true \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=false \ + logger.monitor_gpus=true \ + checkpointing.enabled=false \ + cluster.gpus_per_node=2 \ + $@ \ + 2>&1 | tee $RUN_LOG + +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +uv run tests/check_metrics.py $JSON_METRICS \ + 'max(data["train/token_mult_prob_error"]) < 1.05' \ + 'mean(data["train/token_mult_prob_error"]) < 1.05' diff --git a/tests/json_dump_tb_logs.py b/tests/json_dump_tb_logs.py index 7d2e5607fa..973e37659b 100644 --- a/tests/json_dump_tb_logs.py +++ b/tests/json_dump_tb_logs.py @@ -38,16 +38,16 @@ error_console = Console(stderr=True) -def merge_tb_logs_to_json(log_dir, output_path, allow_conflicts=False): +def merge_tb_logs_to_json(log_dir, output_path, error_on_conflicts=False): """Merge multiple TensorBoard event files into a single JSON file. 
Arguments: log_dir: Path to directory containing TensorBoard event files (searched recursively) output_path: Path to save the output JSON file - allow_conflicts: If True, allow multiple values for the same step (last one wins) + error_on_conflicts: If True, raise an error if conflicting values are found for the same step Raises: - ValueError: If conflicting values are found for the same step and allow_conflicts is False + ValueError: If conflicting values are found for the same step and error_on_conflicts is True """ # Find all event files recursively files = glob.glob(f"{log_dir}/**/events*tfevents*", recursive=True) @@ -89,19 +89,19 @@ def merge_tb_logs_to_json(log_dir, output_path, allow_conflicts=False): for scalar in ea.Scalars(metric_name): step, value = scalar.step, scalar.value - # Check for conflicts - immediately raise error if not allowing conflicts + # Check for conflicts - raise error only if error_on_conflicts is True if step in merged_data[metric_name]: existing_value, existing_file = merged_data[metric_name][step] # Only consider it a conflict if the values are different if existing_value != value: - if not allow_conflicts: - # Immediate error if not allowing conflicts + if error_on_conflicts: + # Immediate error if we choose to error on conflicts raise ValueError( f"Conflict detected for metric '{metric_name}' at step {step}:\n" f" File #{file_index_map[existing_file]}: {existing_file} has value {existing_value}\n" f" File #{file_index_map[event_file]}: {event_file} has value {value}\n" - f"Use --allow-conflicts to force merging with latest value." + f"Re-run without --error-on-conflicts to merge with the latest value." 
) # Add or override the value @@ -218,15 +218,15 @@ def merge_tb_logs_to_json(log_dir, output_path, allow_conflicts=False): help="Path to save the output JSON file", ) parser.add_argument( - "--allow-conflicts", + "--error-on-conflicts", action="store_true", - help="Allow conflicting values for the same step (last one wins)", + help="Error out when conflicting values are found for the same step", ) args = parser.parse_args() try: - merge_tb_logs_to_json(args.log_dir, args.output_path, args.allow_conflicts) + merge_tb_logs_to_json(args.log_dir, args.output_path, args.error_on_conflicts) except Exception as e: error_console.print(f"[bold red]Error: {e}[/bold red]") sys.exit(1) diff --git a/tests/test_suites/README.md b/tests/test_suites/README.md index 0759f06f25..b262bc4075 100644 --- a/tests/test_suites/README.md +++ b/tests/test_suites/README.md @@ -50,6 +50,10 @@ DRYRUN=1 CONTAINER=... ACCOUNT=... PARTITION=... ../tools/launch ./llm/sft-llama # Prints Estimated GPUhrs, creates code snapshot, then exits DRYRUN=2 CONTAINER=... ACCOUNT=... PARTITION=... ../tools/launch ./llm/sft-llama3.2-1b-1n8g-fsdp2tp1.sh + +# Launch but set extra env vars +EXTRA_ENV="NRL_FORCE_REBUILD_VENVS=true NRL_DEEPSCALER_8K_CKPT=/8k-ckpt NRL_DEEPSCALER_16K_CKPT=/16k-ckpt" \ +CONTAINER=... ACCOUNT=... PARTITION=... 
../tools/launch ./llm/sft-llama3.2-1b-1n8g-fsdp2tp1.sh ``` After this completes, you can find the result under diff --git a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.sh b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.sh index f5b29b7db7..0162bd8bb9 100755 --- a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.sh +++ b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.sh @@ -38,5 +38,6 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma 'data["train/loss"]["20"] < 3.4' \ 'data["train/preference_loss"]["1"] > 0.69314' \ 'data["train/preference_loss"]["1"] < 0.69316' \ - 'data["train/preference_loss"]["20"] < 0.6' + 'data["train/preference_loss"]["20"] < 0.6' \ + 'mean(data["timing/train/total_step_time"], -10, -1) < 7.8' fi diff --git a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.sh b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.sh similarity index 91% rename from tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.sh rename to tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.sh index e9ccb1e147..df74127ba2 100755 --- a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.sh +++ b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.sh @@ -38,5 +38,6 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma 'data["train/loss"]["150"] < 3.0' \ 'data["train/preference_loss"]["1"] > 0.69314' \ 'data["train/preference_loss"]["1"] < 0.69316' \ - 'data["train/preference_loss"]["150"] < 0.4' + 'data["train/preference_loss"]["150"] < 0.4' \ + 'mean(data["timing/train/total_step_time"], -11, -1) < 24' fi diff --git a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.sh b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.v2.sh similarity index 91% rename from tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.sh rename 
to tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.v2.sh index e9ccb1e147..8701d63d1f 100755 --- a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.sh +++ b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.v2.sh @@ -38,5 +38,6 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma 'data["train/loss"]["150"] < 3.0' \ 'data["train/preference_loss"]["1"] > 0.69314' \ 'data["train/preference_loss"]["1"] < 0.69316' \ - 'data["train/preference_loss"]["150"] < 0.4' + 'data["train/preference_loss"]["150"] < 0.4' \ + 'mean(data["timing/train/total_step_time"], -11, -1) < 11.5' fi diff --git a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.sh b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.sh index f5b29b7db7..0bc8e13e28 100755 --- a/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.sh +++ b/tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.sh @@ -38,5 +38,6 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma 'data["train/loss"]["20"] < 3.4' \ 'data["train/preference_loss"]["1"] > 0.69314' \ 'data["train/preference_loss"]["1"] < 0.69316' \ - 'data["train/preference_loss"]["20"] < 0.6' + 'data["train/preference_loss"]["20"] < 0.6' \ + 'mean(data["timing/train/total_step_time"], -10) < 6.7' fi diff --git a/tests/test_suites/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.sh b/tests/test_suites/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.sh new file mode 100755 index 0000000000..35da749ac7 --- /dev/null +++ b/tests/test_suites/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.sh @@ -0,0 +1,43 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=150 +MAX_STEPS=150 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=45 +# ===== END CONFIG 
===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_dpo.py \ + --config $CONFIG_PATH \ + dpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/sft_loss"]["1"] < 0.00001' \ + 'data["train/sft_loss"]["150"] < 0.00001' \ + 'data["train/preference_loss"]["1"] > 0.6930' \ + 'data["train/preference_loss"]["1"] < 0.6932' \ + 'data["train/preference_loss"]["150"] < 0.68' +fi + diff --git a/tests/test_suites/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.sh b/tests/test_suites/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.sh index 6606099df7..48691c0df4 100755 --- a/tests/test_suites/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.sh +++ b/tests/test_suites/llm/dpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v2.sh @@ -36,5 +36,6 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma uv run tests/check_metrics.py $JSON_METRICS \ 'data["train/loss"]["1"] > 0.69314' \ 'data["train/loss"]["1"] < 0.69316' \ - 'data["train/loss"]["150"] < 0.55' + 'data["train/loss"]["150"] < 0.55' \ + 'mean(data["timing/train/total_step_time"], -11, -1) < 1.3' fi diff --git a/tests/test_suites/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.sh b/tests/test_suites/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.sh new file mode 100755 index 0000000000..8f9e22f337 --- /dev/null +++ 
b/tests/test_suites/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.sh @@ -0,0 +1,40 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=100 +MAX_STEPS=100 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=45 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_dpo.py \ + --config $CONFIG_PATH \ + dpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["1"] > 0.6990' \ + 'data["train/loss"]["1"] < 0.6992' \ + 'data["train/loss"]["100"] < 0.60' +fi diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh new file mode 100755 index 0000000000..68c4f0c6b2 --- /dev/null +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh @@ -0,0 +1,69 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=20 +MAX_STEPS=20 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Use checkpoint created from the 8K checkpoint in 
grpo-deepscaler-1.5b-8K.sh +if [[ -z "$NRL_DEEPSCALER_8K_CKPT" ]]; then + echo "Need to set NRL_DEEPSCALER_8K_CKPT to the path to the trained 8K checkpoint" + exit 1 +fi + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + policy.model_name=$NRL_DEEPSCALER_8K_CKPT \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.05' \ + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.05" +fi + +# Convert 16k checkpoint +uv run examples/converters/convert_dcp_to_hf.py \ + --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ + --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ + --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf + +# Run eval +uv run examples/run_eval.py \ + generation.model_name=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf \ + data.prompt_file=examples/prompts/cot.txt \ + generation.vllm_cfg.max_model_len=32768 \ + generation.vllm_cfg.enforce_eager=True \ + generation.temperature=1.0 \ + eval.num_tests_per_prompt=16 \ + 2>&1 | tee ${RUN_LOG}.aime-16k + +cat ${RUN_LOG}.aime-16k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-16k-metric.json + +# 240 step checkpoint 0.3 +uv run tests/check_metrics.py ${RUN_LOG}-16k-metric.json \ + 'data["score"] >= 0.2396' + diff --git 
a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh new file mode 100755 index 0000000000..b045607570 --- /dev/null +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh @@ -0,0 +1,68 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=15 +MAX_STEPS=15 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Use checkpoint created from the 16K checkpoint in grpo-deepscaler-1.5b-16K.sh +if [[ -z "$NRL_DEEPSCALER_16K_CKPT" ]]; then + echo "Need to set NRL_DEEPSCALER_16K_CKPT to the path to the trained 16K checkpoint" + exit 1 +fi + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + policy.model_name=$NRL_DEEPSCALER_16K_CKPT \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.05' \ + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.05" +fi + +# Convert 24k checkpoint +uv run examples/converters/convert_dcp_to_hf.py \ + --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ + --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ + 
--hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf + +# Run eval +uv run examples/run_eval.py \ + generation.model_name=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf \ + data.prompt_file=examples/prompts/cot.txt \ + generation.vllm_cfg.max_model_len=32768 \ + generation.vllm_cfg.enforce_eager=True \ + generation.temperature=1.0 \ + eval.num_tests_per_prompt=16 \ + 2>&1 | tee ${RUN_LOG}.aime-24k + +cat ${RUN_LOG}.aime-24k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-24k-metric.json + +uv run tests/check_metrics.py ${RUN_LOG}-24k-metric.json \ + 'data["score"] >= 0.2396' + diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh new file mode 100755 index 0000000000..ba2f5993d4 --- /dev/null +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh @@ -0,0 +1,123 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=40 +MAX_STEPS=40 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 
'mean(data["train/token_mult_prob_error"]) < 1.05' \ + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.05" +fi + +# Convert 8k checkpoint +uv run examples/converters/convert_dcp_to_hf.py \ + --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ + --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ + --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf + +# Run eval +uv run examples/run_eval.py \ + generation.model_name=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf \ + data.prompt_file=examples/prompts/cot.txt \ + generation.vllm_cfg.max_model_len=32768 \ + generation.vllm_cfg.enforce_eager=True \ + generation.temperature=1.0 \ + eval.num_tests_per_prompt=16 \ + 2>&1 | tee ${RUN_LOG}.aime-8k + +cat ${RUN_LOG}.aime-8k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-8k-metric.json + +# 0.2 is the baseline score for AIME on the base checkpoint +uv run tests/check_metrics.py ${RUN_LOG}-8k-metric.json \ + 'data["score"] >= 0.2396' + +# This comment is for reference on how the aime24 eval baseline was chosen: +# The variance in aime24 is pretty high when only taking one sample per prompt. +# I have observed huge variance even between A100 and H100 with one sample per prompt, +# and even 2-3% difference with 16 prompts. Anecdotally, when there is something wrong +# with logprob error, the accuracy can fall below even the starting checkpoint. For that +# reason, all the deepscaler recipes compare against 0.2396 and use 16 generations per +# prompt to mitigate the variance. +# +# Additionally, 16 generations is about 12 minutes, so that should be factored into +# the overall time to run the test. 
+######################################################## +# deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B +######################################################## +# num_tests_per_prompt=1 +# score=0.2333 +# real 3m9.173s +# num_tests_per_prompt=5 +# score=0.2267 +# real 4m50.247s +# num_tests_per_prompt=10 +# score=0.2367 +# real 8m1.174s +# num_tests_per_prompt=16 +# score=0.2396 +# real 11m46.489s + +######################################################## +# grpo-deepscaler-8k-240-hf +######################################################## +# num_tests_per_prompt=1 +# score=0.2667 +# num_tests_per_prompt=5 +# score=0.3267 +# num_tests_per_prompt=10 +# score=0.3367 +# num_tests_per_prompt=16 +# score=0.2833 + +######################################################## +# grpo-deepscaler-16k-290-hf +######################################################## +# num_tests_per_prompt=1 +# score=0.2000 +# num_tests_per_prompt=5 +# score=0.3267 +# num_tests_per_prompt=10 +# score=0.3167 +# num_tests_per_prompt=16 +# score=0.3271 + +######################################################## +# grpo-deepscaler-24k-100-hf +######################################################## +# num_tests_per_prompt=1 +# score=0.3000 +# num_tests_per_prompt=5 +# score=0.3333 +# num_tests_per_prompt=10 +# score=0.3700 +# num_tests_per_prompt=16 +# score=0.3396 diff --git a/tests/test_suites/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.sh b/tests/test_suites/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.sh index aea8c91747..4624b7282d 100755 --- a/tests/test_suites/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.sh +++ b/tests/test_suites/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.sh @@ -4,8 +4,8 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 -STEPS_PER_RUN=500 -MAX_STEPS=500 +STEPS_PER_RUN=400 +MAX_STEPS=400 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=120 # ===== END CONFIG ===== @@ -35,5 +35,6 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path 
$JSON_METRICS if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ 'mean(data["train/token_mult_prob_error"]) < 1.1' \ - 'data["train/token_mult_prob_error"]["500"] < 1.1' + "data[\"train/token_mult_prob_error\"][\"${MAX_STEPS}\"] < 1.1" \ + 'mean(data["timing/train/total_step_time"], -6, -1) < 14' fi diff --git a/tests/test_suites/llm/grpo-gspo-deepscaler-1.5b-8K.sh b/tests/test_suites/llm/grpo-gspo-deepscaler-1.5b-8K.sh new file mode 100755 index 0000000000..ce2adb1c51 --- /dev/null +++ b/tests/test_suites/llm/grpo-gspo-deepscaler-1.5b-8K.sh @@ -0,0 +1,67 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=40 +MAX_STEPS=40 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" +fi + +# TODO: enable in subsequent PR to do a quick accuracy 
check +## Convert 8k checkpoint +#uv run examples/converters/convert_dcp_to_hf.py \ +# --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ +# --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ +# --hf-ckpt-path=$CKPT_DIR/gspo-deepscaler-8k-${MAX_STEPS}-hf +# +## Run eval +#uv run examples/run_eval.py \ +# generation.model_name=$CKPT_DIR/gspo-deepscaler-8k-${MAX_STEPS}-hf \ +# data.prompt_file=examples/prompts/cot.txt \ +# generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-8k +# +#cat ${RUN_LOG}.aime-8k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-8k-metric.json +# +#uv run tests/check_metrics.py ${RUN_LOG}-8k-metric.json \ +# 'data["score"] >= 0.25' \ +# +##uv run examples/run_eval.py \ +## generation.model_name=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ +## data.prompt_file=examples/prompts/cot.txt \ +## generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-baseline +# +##cat ${RUN_LOG}.aime-baseline | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-baseline-metric.json +# +##uv run tests/check_metrics.py ${RUN_LOG}-baseline-metric.json \ +## 'data["score"] == 0.2' \ diff --git a/tests/test_suites/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.sh b/tests/test_suites/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.sh new file mode 100755 index 0000000000..e0b2f928da --- /dev/null +++ b/tests/test_suites/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.sh @@ -0,0 +1,40 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=100 +MAX_STEPS=100 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=120 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR 
\ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + 'data["train/token_mult_prob_error"]["100"] < 1.1' +fi + diff --git a/tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.sh b/tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.sh index 45cfad6e83..3661370fa6 100755 --- a/tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.sh +++ b/tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-fsdp2tp1.v3.sh @@ -35,6 +35,7 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ 'mean(data["train/token_mult_prob_error"]) < 1.1' \ - 'data["train/token_mult_prob_error"]["500"] < 1.1' + 'data["train/token_mult_prob_error"]["500"] < 1.1' \ + 'mean(data["timing/train/total_step_time"], -6, -1) < 10' fi diff --git a/tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.sh b/tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.sh new file mode 100755 index 0000000000..83071c70e3 --- /dev/null +++ b/tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.sh @@ -0,0 +1,42 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN 
CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=500 +MAX_STEPS=500 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=180 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + 'data["train/token_mult_prob_error"]["500"] < 1.1' \ + 'data["train/reward"]["500"] > 0.1' \ + 'mean(data["timing/train/total_step_time"], -6, -1) < 10.5' + +fi diff --git a/tests/test_suites/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.sh b/tests/test_suites/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.sh new file mode 100755 index 0000000000..993d541871 --- /dev/null +++ b/tests/test_suites/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.sh @@ -0,0 +1,39 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=4 +STEPS_PER_RUN=3 +MAX_STEPS=3 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + 
logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" +fi diff --git a/tests/test_suites/llm/grpo-moonlight-16ba3b-4n8g-megatron.sh b/tests/test_suites/llm/grpo-moonlight-16ba3b-4n8g-megatron.sh new file mode 100755 index 0000000000..7288252eec --- /dev/null +++ b/tests/test_suites/llm/grpo-moonlight-16ba3b-4n8g-megatron.sh @@ -0,0 +1,41 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=4 +STEPS_PER_RUN=30 +MAX_STEPS=30 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=150 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +PYTHONPATH=$HF_HOME/modules:$PYTHONPATH uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the
target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + 'data["train/token_mult_prob_error"]["30"] < 1.1' \ + 'mean(data["train/reward"]) > 0.45' \ + 'mean(data["timing/train/total_step_time"], -11, -1) < 70' +fi diff --git a/tests/test_suites/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.sh b/tests/test_suites/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.sh new file mode 100755 index 0000000000..45f354043a --- /dev/null +++ b/tests/test_suites/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.sh @@ -0,0 +1,41 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=4 +STEPS_PER_RUN=30 +MAX_STEPS=30 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=180 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + 'data["train/token_mult_prob_error"]["30"] < 1.1' \ + 'mean(data["train/reward"]) > 0.56' \ + 
'mean(data["timing/train/total_step_time"], 2) < 50' +fi diff --git a/tests/test_suites/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.sh b/tests/test_suites/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.sh index 98df00c25c..0a31e74590 100755 --- a/tests/test_suites/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.sh +++ b/tests/test_suites/llm/grpo-qwen2.5-math-1.5b-instruct-1n8g-fsdp2tp1.v3.sh @@ -35,6 +35,7 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ 'mean(data["train/token_mult_prob_error"]) < 1.1' \ - 'data["train/token_mult_prob_error"]["450"] < 1.1' + 'data["train/token_mult_prob_error"]["450"] < 1.1' \ + 'mean(data["timing/train/total_step_time"], 2) < 25' fi diff --git a/tests/test_suites/llm/grpo-qwen3-30ba3b-8n8g-megatron.sh b/tests/test_suites/llm/grpo-qwen3-30ba3b-8n8g-megatron.sh new file mode 100755 index 0000000000..f89041cd40 --- /dev/null +++ b/tests/test_suites/llm/grpo-qwen3-30ba3b-8n8g-megatron.sh @@ -0,0 +1,40 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=8 +STEPS_PER_RUN=30 +MAX_STEPS=30 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv 
run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/token_mult_prob_error"]["30"] < 1.1' \ + 'data["train/reward"]["30"] > 0.43' \ + 'mean(data["timing/train/total_step_time"], -6, -1) < 220' +fi diff --git a/tests/test_suites/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.sh b/tests/test_suites/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.sh new file mode 100755 index 0000000000..718322e33a --- /dev/null +++ b/tests/test_suites/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.sh @@ -0,0 +1,42 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=8 +STEPS_PER_RUN=300 +MAX_STEPS=300 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_sft.py \ + --config $CONFIG_PATH \ + sft.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# TODO: the memory check is known to OOM. 
see https://github.com/NVIDIA-NeMo/RL/issues/263 +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["1"] < 0.55' \ + 'data["train/loss"]["300"] < 0.285' \ + 'max(data["ray/node.0.gpu.0.mem_gb"]) < 70' \ + 'mean(data["timing/train/total_step_time"], 2) < 20' +fi diff --git a/tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.sh b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.sh similarity index 81% rename from tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.sh rename to tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.sh index b22c00dec0..76c600c648 100755 --- a/tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.sh +++ b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.sh @@ -2,11 +2,10 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) source $SCRIPT_DIR/common.env -# TODO: @ashors real convergence run (dataset only has 2737) # ===== BEGIN CONFIG ===== NUM_NODES=1 -STEPS_PER_RUN=2730 -MAX_STEPS=2730 +STEPS_PER_RUN=250 +MAX_STEPS=250 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=120 # ===== END CONFIG ===== @@ -35,9 +34,9 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS # TODO: the memory check is known to OOM. 
see https://github.com/NVIDIA-NeMo/RL/issues/263 # Only run metrics if the target step is reached if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then - # TODO: FIGURE OUT CORRECT METRICS uv run tests/check_metrics.py $JSON_METRICS \ - 'data["train/loss"]["1"] < 5' \ - 'data["train/loss"]["2730"] < 0.3' \ - 'max(data["ray/node.0.gpu.0.mem_gb"]) < 50' + 'data["train/loss"]["1"] < 0.6' \ + 'data["train/loss"]["250"] < 0.36' \ + 'max(data["ray/node.0.gpu.0.mem_gb"]) < 70' \ + 'mean(data["timing/train/total_step_time"], 2) < 10' fi diff --git a/tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.sh b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.sh new file mode 100755 index 0000000000..90fd03467c --- /dev/null +++ b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.sh @@ -0,0 +1,42 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=250 +MAX_STEPS=250 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=120 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_sft.py \ + --config $CONFIG_PATH \ + sft.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# TODO: the memory check is known to OOM. 
see https://github.com/NVIDIA-NeMo/RL/issues/263 +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["1"] < 0.6' \ + 'data["train/loss"]["250"] < 0.36' \ + 'max(data["ray/node.0.gpu.0.mem_gb"]) < 80' \ + 'mean(data["timing/train/total_step_time"], 2) < 22' +fi diff --git a/tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.sh b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp2sp.sh similarity index 83% rename from tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.sh rename to tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp2sp.sh index abed80e5ed..8f69d0f0b8 100755 --- a/tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.sh +++ b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp2sp.sh @@ -4,8 +4,8 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 -STEPS_PER_RUN=350 -MAX_STEPS=350 +STEPS_PER_RUN=50 +MAX_STEPS=50 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=45 # ===== END CONFIG ===== @@ -35,9 +35,9 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS # Only run metrics if the target step is reached if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then - # TODO: FIGURE OUT CORRECT METRICS uv run tests/check_metrics.py $JSON_METRICS \ - 'data["train/loss"]["1"] < 5' \ - 'data["train/loss"]["350"] < 0.5' \ - 'max(data["ray/node.0.gpu.0.mem_gb"]) < 45' + 'data["train/loss"]["1"] < 0.6' \ + 'data["train/loss"]["50"] < 0.38' \ + 'max(data["ray/node.0.gpu.0.mem_gb"]) < 70' \ + 'mean(data["timing/train/total_step_time"], 2) < 32' fi diff --git a/tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-megatron.sh 
b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.sh similarity index 87% rename from tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-megatron.sh rename to tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.sh index cf72bd9377..fe54af1fbd 100755 --- a/tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-megatron.sh +++ b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.sh @@ -31,9 +31,9 @@ uv run examples/run_sft.py \ # Convert tensorboard logs to json uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS -# TODO: @ashors tighter bounds if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ - 'data["train/loss"]["1"] < 2' \ - 'data["train/loss"]["250"] < 0.3' -fi \ No newline at end of file + 'data["train/loss"]["1"] < 0.6' \ + 'data["train/loss"]["250"] < 0.36' \ + 'mean(data["timing/train/total_step_time"], 2) < 6' +fi diff --git a/tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron.sh b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron.sh new file mode 100755 index 0000000000..bc5eae73a2 --- /dev/null +++ b/tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron.sh @@ -0,0 +1,39 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=250 +MAX_STEPS=250 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=120 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_sft.py \ + --config $CONFIG_PATH \ + sft.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + 
checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["1"] < 0.6' \ + 'data["train/loss"]["250"] < 0.36' \ + 'mean(data["timing/train/total_step_time"], 2) < 20' +fi diff --git a/tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v2.sh b/tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v3.sh similarity index 82% rename from tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v2.sh rename to tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v3.sh index 32c66dae04..a4b44bd1f1 100755 --- a/tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v2.sh +++ b/tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v3.sh @@ -4,8 +4,8 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 -STEPS_PER_RUN=500 -MAX_STEPS=500 +STEPS_PER_RUN=250 +MAX_STEPS=250 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=15 # ===== END CONFIG ===== @@ -34,8 +34,9 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS # Only run metrics if the target step is reached if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ - 'data["train/loss"]["1"] < 2.4' \ - 'data["train/loss"]["500"] < 0.5' \ - 'max(data["ray/node.0.gpu.0.mem_gb"]) < 25' + 'data["train/loss"]["1"] < 0.82' \ + 'data["train/loss"]["250"] < 0.5' \ + 'max(data["ray/node.0.gpu.0.mem_gb"]) < 25' \ + 'mean(data["timing/train/total_step_time"], -6, -1) < 0.6' fi diff --git a/tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.sh 
b/tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.sh similarity index 96% rename from tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.sh rename to tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.sh index 257add6fc5..d16a3d8d98 100755 --- a/tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.sh +++ b/tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.sh @@ -37,7 +37,7 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS # Only run metrics if the target step is reached if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ - 'data["train/loss"]["1"] < 1.5' \ + 'data["train/loss"]["1"] < 0.37' \ 'data["train/loss"]["20"] < 0.3' \ 'max(data["ray/node.0.gpu.0.mem_gb"]) < 35' fi diff --git a/tests/test_suites/nightly.txt b/tests/test_suites/nightly.txt index d28e61a8e6..e6611fa8b5 100644 --- a/tests/test_suites/nightly.txt +++ b/tests/test_suites/nightly.txt @@ -10,24 +10,52 @@ tests/test_suites/llm/grpo-gemma3-1b-it-1n8g-fsdp2tp1.sh # Dtensor (Qwen/Qwen2.5-7B-Instruct) tests/test_suites/llm/grpo-qwen2.5-7b-instruct-4n8g-fsdp2tp4sp.v3.sh +# Megatron +tests/test_suites/llm/grpo-llama3.2-1b-instruct-1n8g-megatron.sh + # Functional 32b run tests/test_suites/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt.v3.sh +# Functional moonlight run +tests/test_suites/llm/grpo-moonlight-16ba3b-4n8g-megatron.sh + +# Functional VLM run +tests/test_suites/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.sh +tests/test_suites/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.sh + +# Deepscaler (short tests) +tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh +tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh +tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh + +# Deepscaler (GSPO) +tests/test_suites/llm/grpo-gspo-deepscaler-1.5b-8K.sh + +# GRPO 
math test run (32K context mcore) +tests/test_suites/llm/grpo-math-qwen3-30ba3b-megatron-tp4-32k.sh + +# FP8 +tests/test_suites/llm/grpo-llama3.1-8b-instruct-1n8g-megatron-fp8.sh + ####### # SFT # ####### # 1N 1B/8B runs -tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v2.sh +tests/test_suites/llm/sft-llama3.2-1b-1n8g-fsdp2tp1.v3.sh # Dtensor (8B) -tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp2sp.v2.sh +tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp2sp.sh +# dynamic batching +tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-dynamicbatch.sh # Functional 32b test -tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v2.sh +tests/test_suites/llm/sft-qwen2.5-32b-4n8g-fsdp2tp8sp-actckpt.v3.sh # Megatron -tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-megatron.sh +tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron.sh +# sequence packing +tests/test_suites/llm/sft-llama3.1-8b-1n8g-megatron-seqpack.sh ####### # DPO # @@ -41,3 +69,6 @@ tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp2-quick.v2.sh # Short megatron tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatrontp2pp2-quick.sh + +# Long dtensor +tests/test_suites/llm/dpo-mistral-nemo-instruct-2407-1n8g-fsdp2tp8-actckpt-long.sh diff --git a/tests/test_suites/release.txt b/tests/test_suites/release.txt index e339ef0bc1..b9db763409 100644 --- a/tests/test_suites/release.txt +++ b/tests/test_suites/release.txt @@ -2,6 +2,9 @@ # GRPO # ######## +# Megatron (Qwen/Qwen2.5-7B-Instruct) +tests/test_suites/llm/grpo-qwen2.5-7b-instruct-4n8g-megatron.sh + # Long 8b run tests/test_suites/llm/grpo-llama3.1-8b-instruct-4n8g-fsdp2tp1-long.v3.sh @@ -11,17 +14,24 @@ tests/test_suites/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt-long.v3.sh # Long Gemma3 27b run tests/test_suites/llm/grpo-gemma3-27b-it-16n8g-fsdp2tp8sp-actckpt-long.sh +# Long Megatron Qwen3 30B-A3B run +tests/test_suites/llm/grpo-qwen3-30ba3b-8n8g-megatron.sh + ####### # SFT # ####### # Long 8b convergence 
-tests/test_suites/llm/sft-llama3.1-8b-instruct-1n8g-fsdp2tp1-long.v2.sh +tests/test_suites/llm/sft-llama3.1-8b-1n8g-fsdp2tp1-long.sh + +# 300 step 70b convergence +tests/test_suites/llm/sft-llama3.1-70b-8n8g-tp4pp2-long-megatron.sh ####### # DPO # ####### # Long 8b convergence -tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp1.v2.sh -tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.sh +tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-fsdp2tp4.sh +tests/test_suites/llm/dpo-llama3.1-8b-instruct-4n8g-megatron.v2.sh +tests/test_suites/llm/dpo-llama3.1-8b-tulu3-1n8g-fsdp2tp1.sh diff --git a/tests/test_suites/vlm/common.env b/tests/test_suites/vlm/common.env new file mode 120000 index 0000000000..ec5d3dc65c --- /dev/null +++ b/tests/test_suites/vlm/common.env @@ -0,0 +1 @@ +../llm/common.env \ No newline at end of file diff --git a/tests/test_suites/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.sh b/tests/test_suites/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.sh new file mode 100755 index 0000000000..b3c6764f65 --- /dev/null +++ b/tests/test_suites/vlm/vlm_grpo-qwen2.5-vl-3b-instruct-clevr-1n2g-dtensor2tp1.v1.sh @@ -0,0 +1,40 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=200 +MAX_STEPS=200 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=180 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_vlm_grpo.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert 
tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["200"] < 0.1' \ + 'data["train/reward"]["200"] > 0.9' +fi + diff --git a/tests/test_suites/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.sh b/tests/test_suites/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.sh new file mode 100755 index 0000000000..680018b5a4 --- /dev/null +++ b/tests/test_suites/vlm/vlm_grpo-smolvlm2-2.2b-instruct-clevr-1n2g-dtensor2tp1.v1.sh @@ -0,0 +1,40 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=200 +MAX_STEPS=200 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=180 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_vlm_grpo.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'data["train/loss"]["200"] < 0.1' \ + 'data["train/reward"]["200"] > 0.7' # less performant than qwen 
+fi + diff --git a/tests/unit/L0_Unit_Tests_Generation.sh b/tests/unit/L0_Unit_Tests_Generation.sh index 1e33bea35e..3f607cc080 100644 --- a/tests/unit/L0_Unit_Tests_Generation.sh +++ b/tests/unit/L0_Unit_Tests_Generation.sh @@ -15,6 +15,8 @@ #!/bin/bash set -xeuo pipefail # Exit immediately if a command exits with a non-zero status +uv run tests/unit/prepare_unit_test_assets.py + cd /opt/nemo-rl uv run --no-sync bash -x ./tests/run_unit.sh unit/models/generation/ --cov=nemo_rl --cov-report=term-missing --cov-report=json --hf-gated diff --git a/tests/unit/L0_Unit_Tests_Other.sh b/tests/unit/L0_Unit_Tests_Other.sh index e86d6f887a..a639730044 100644 --- a/tests/unit/L0_Unit_Tests_Other.sh +++ b/tests/unit/L0_Unit_Tests_Other.sh @@ -15,6 +15,8 @@ #!/bin/bash set -xeuo pipefail # Exit immediately if a command exits with a non-zero status +uv run tests/unit/prepare_unit_test_assets.py + cd /opt/nemo-rl uv run --no-sync bash -x ./tests/run_unit.sh unit/ --ignore=unit/models/generation/ --ignore=unit/models/policy/ --cov=nemo_rl --cov-report=term-missing --cov-report=json --hf-gated diff --git a/tests/unit/L0_Unit_Tests_Policy.sh b/tests/unit/L0_Unit_Tests_Policy.sh index 4df69728ff..6fe9309fe6 100644 --- a/tests/unit/L0_Unit_Tests_Policy.sh +++ b/tests/unit/L0_Unit_Tests_Policy.sh @@ -15,6 +15,8 @@ #!/bin/bash set -xeuo pipefail # Exit immediately if a command exits with a non-zero status +uv run tests/unit/prepare_unit_test_assets.py + cd /opt/nemo-rl uv run --no-sync bash -x ./tests/run_unit.sh unit/models/policy/ --cov=nemo_rl --cov-report=term-missing --cov-report=json --hf-gated diff --git a/tests/unit/algorithms/test_loss_functions.py b/tests/unit/algorithms/test_loss_functions.py index 65ed834625..764a849ed3 100644 --- a/tests/unit/algorithms/test_loss_functions.py +++ b/tests/unit/algorithms/test_loss_functions.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +import itertools + import pytest import torch @@ -371,41 +373,45 @@ def _setup_clipped_pg_test_data(batch_size=1, seq_len=4, vocab_size=8, device="c } ) # Return seq_len and vocab_size needed by tests - return data, seq_len, vocab_size + return data, batch_size, seq_len, vocab_size # Helper to create logits that yield specific target log probs after log_softmax -def _create_exact_logits(target_curr_lp_masked, input_ids, seq_len, vocab_size, device): +def _create_exact_logits( + target_curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device +): """Constructs logits such that log_softmax results in target_curr_lp_masked.""" dummy_logits = torch.full( - (1, seq_len, vocab_size), -100.0, device=device + (batch_size, seq_len, vocab_size), -100.0, device=device ) # Start very low # Loss fn uses logits[:, :-1] and gathers based on next_tokens = input_ids[:, 1:] # We need to set logits for indices i=0..S-2 of the sliced logits tensor. # These correspond to target logprobs at indices 0..S-2 of target_curr_lp_masked. 
num_effective_pos = target_curr_lp_masked.shape[1] - for i in range(num_effective_pos): + for batch_idx, i in itertools.product(range(batch_size), range(num_effective_pos)): logit_idx = i # Index in the sliced logits tensor (dummy_logits[:, 0:S-1, :]) data_idx = i + 1 # Index in the original input_ids to find the target token - target_token_id = input_ids[0, data_idx].item() + target_token_id = input_ids[batch_idx, data_idx].item() # Keep target_lp as a 0-dim tensor for torch ops - target_lp = target_curr_lp_masked[0, i] + target_lp = target_curr_lp_masked[batch_idx, i] # Handle target_lp = 0 case separately if torch.isclose(target_lp, torch.tensor(0.0, device=device)): - dummy_logits[0, logit_idx, target_token_id] = 100.0 # Large positive logit + dummy_logits[batch_idx, logit_idx, target_token_id] = ( + 100.0 # Large positive logit + ) elif target_lp < 0: # Set target token logit to 0 - dummy_logits[0, logit_idx, target_token_id] = 0.0 + dummy_logits[batch_idx, logit_idx, target_token_id] = 0.0 # Set one distractor token logit using the formula distractor_token_id = (target_token_id + 1) % vocab_size # Ensure distractor isn't same as target if vocab_size=1 (edge case) if distractor_token_id == target_token_id: distractor_token_id = (target_token_id + 2) % vocab_size distractor_logit = torch.log(torch.exp(-target_lp) - 1.0) - dummy_logits[0, logit_idx, distractor_token_id] = distractor_logit + dummy_logits[batch_idx, logit_idx, distractor_token_id] = distractor_logit else: # target_lp > 0 is not supported by this method raise ValueError( "Target log probability must be negative or zero for this construction" @@ -420,7 +426,7 @@ def test_clipped_pg_loss_ppo_clipping(): pytest.skip("No GPU available") device = "cuda" - data, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) ratio_clip = 0.2 cfg = { @@ -486,7 +492,7 @@ def test_clipped_pg_loss_ppo_clipping(): input_ids 
= data["input_ids"] dummy_logits = _create_exact_logits( - curr_lp_masked, input_ids, seq_len, vocab_size, device + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device ) actual_loss, _ = loss_fn( @@ -505,7 +511,7 @@ def test_clipped_pg_loss_reinforce_mode(): pytest.skip("No GPU available") device = "cuda" - data, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) cfg = { "disable_ppo_ratio": True, @@ -539,7 +545,7 @@ def test_clipped_pg_loss_reinforce_mode(): input_ids = data["input_ids"] dummy_logits = _create_exact_logits( - curr_lp_masked, input_ids, seq_len, vocab_size, device + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device ) actual_loss, _ = loss_fn( @@ -560,7 +566,7 @@ def test_clipped_pg_loss_kl_penalty(): pytest.skip("No GPU available") device = "cuda" - data, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) # --- Test Setup --- kl_beta = 0.1 @@ -607,7 +613,7 @@ def test_clipped_pg_loss_kl_penalty(): input_ids = data["input_ids"] dummy_logits = _create_exact_logits( - curr_lp_masked, input_ids, seq_len, vocab_size, device + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device ) actual_loss, _ = loss_fn( @@ -632,7 +638,7 @@ def test_clipped_pg_loss_masking(): device = "cuda" # Use original loss function for masking tests, as it involves interactions # that the Testable class might obscure slightly. 
- data, seq_len, vocab_size = _setup_clipped_pg_test_data( + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data( batch_size=batch_size, seq_len=seq_len, device=device ) # Need some realistic-ish logits and logprobs for masking test @@ -734,7 +740,7 @@ def test_clipped_pg_loss_zero_mask(): pytest.skip("No GPU available") device = "cuda" - data, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) # Need dummy logits dummy_logits = torch.randn(1, seq_len, vocab_size, device=device) @@ -772,7 +778,7 @@ def test_clipped_pg_loss_on_policy_kl_importance_sampling(): pytest.skip("No GPU available") device = "cuda" - data, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) ratio_clip = 0.2 kl_beta = 0.1 @@ -905,7 +911,7 @@ def test_clipped_pg_loss_on_policy_kl_importance_sampling(): input_ids = data["input_ids"] dummy_logits = _create_exact_logits( - curr_lp_masked, input_ids, seq_len, vocab_size, device + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device ) actual_loss, _ = loss_fn( @@ -954,7 +960,7 @@ def test_clipped_pg_loss_dual_clip(): pytest.skip("No GPU available") device = "cuda" - data, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) ratio_clip = 0.2 ratio_clip_c = 3.0 @@ -1034,7 +1040,7 @@ def test_clipped_pg_loss_dual_clip(): input_ids = data["input_ids"] dummy_logits = _create_exact_logits( - curr_lp_masked, input_ids, seq_len, vocab_size, device + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device ) actual_loss, _ = loss_fn( @@ -1054,7 +1060,7 @@ def test_clipped_pg_loss_entropy(): pytest.skip("No GPU available") device = "cuda" - data, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + data, batch_size, 
seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) cfg = { "ratio_clip_min": 0.2, @@ -1092,7 +1098,7 @@ def test_clipped_pg_loss_entropy(): ) # torch.mean because default mask applies to these 3 terms dummy_logits = _create_exact_logits( - curr_lp_masked, data["input_ids"], seq_len, vocab_size, device + curr_lp_masked, data["input_ids"], batch_size, seq_len, vocab_size, device ) _, metrics = loss_fn( dummy_logits, @@ -1107,3 +1113,300 @@ def test_clipped_pg_loss_entropy(): rtol=1e-3, atol=1e-5, ) + + +def test_clipped_pg_loss_gspo(): + """Tests GSPO path in ClippedPGLossFn.""" + if not torch.cuda.is_available(): + pytest.skip("No GPU available") + + device = "cuda" + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + + ratio_clip = 0.2 + cfg = { + "ratio_clip_min": ratio_clip, + "ratio_clip_max": ratio_clip, + "ratio_clip_c": None, + "reference_policy_kl_penalty": 0.0, # Disable KL + "disable_ppo_ratio": False, + "use_on_policy_kl_approximation": False, + "use_importance_sampling_correction": False, + "sequence_level_importance_ratios": True, + "token_level_loss": False, + } + loss_fn = ClippedPGLossFn(cfg) + + adv_masked = torch.tensor([[1.0, -1.0, 2.0]], device=device) + # Use non-zero prev_lp to allow ratios > 1 with valid curr_lp <= 0 + prev_lp_masked = torch.tensor([[-1.0, -1.0, -1.0]], device=device) + # Target Curr logprobs (masked pos 1, 2, 3) - design for clipping + # Target ratios: 0.5 (<0.8), 1.0 (in [0.8, 1.2]), 1.5 (>1.2) + # Curr = log(Ratio) + Prev + curr_lp_masked = torch.tensor( + [[-1.69315, -1.0, -0.59453]], device=device + ) # approx log(0.5)-1, log(1)-1, log(1.5)-1 + + # Fill full tensors (only need first dim for B=1) + data["advantages"][0, 1:] = adv_masked + data["prev_logprobs"][0, 1:] = prev_lp_masked + + # --- Hand Calculation --- + log_ratios = curr_lp_masked - prev_lp_masked + seq_log_ratios_mean = torch.mean(log_ratios, dim=-1).unsqueeze(-1) + ratios = seq_log_ratios_mean.exp().repeat(1, 
3) + assert torch.allclose( + ratios, torch.tensor([[0.9086, 0.9086, 0.9086]], device=device), rtol=1e-3 + ) + + ratios_clamped = torch.clamp(ratios, 1.0 - ratio_clip, 1.0 + ratio_clip) + assert torch.allclose( + ratios_clamped, + torch.tensor([[0.9086, 0.9086, 0.9086]], device=device), + rtol=1e-3, + ) + + loss1 = -adv_masked * ratios + assert torch.allclose( + loss1, torch.tensor([[-0.9086, 0.9086, -1.8171]], device=device), rtol=1e-3 + ) + + loss2 = -adv_masked * ratios_clamped + assert torch.allclose( + loss2, torch.tensor([[-0.9086, 0.9086, -1.8171]], device=device), rtol=1e-3 + ) + + max_loss = torch.maximum(loss1, loss2) + assert torch.allclose( + max_loss, torch.tensor([[-0.9086, 0.9086, -1.8171]], device=device), rtol=1e-3 + ) + + expected_loss = torch.mean(max_loss) + assert torch.allclose( + expected_loss, torch.tensor(-0.6057, device=device), rtol=1e-3 + ) + + input_ids = data["input_ids"] + dummy_logits = _create_exact_logits( + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device + ) + + actual_loss, _ = loss_fn( + dummy_logits, + data, + global_valid_seqs=torch.sum(data["sample_mask"]), + global_valid_toks=torch.sum(data["sample_mask"] * data["token_mask"]), + ) + torch.testing.assert_close(actual_loss, expected_loss) + + +def test_clipped_pg_loss_gspo_batch_size_2(): + """Tests non-unit batch size GSPO path in ClippedPGLossFn.""" + if not torch.cuda.is_available(): + pytest.skip("No GPU available") + + device = "cuda" + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data( + batch_size=2, device=device + ) + + ratio_clip = 0.2 + cfg = { + "ratio_clip_min": ratio_clip, + "ratio_clip_max": ratio_clip, + "ratio_clip_c": None, + "reference_policy_kl_penalty": 0.0, # Disable KL + "disable_ppo_ratio": False, + "use_on_policy_kl_approximation": False, + "use_importance_sampling_correction": False, + "sequence_level_importance_ratios": True, + "token_level_loss": False, + } + loss_fn = ClippedPGLossFn(cfg) + + adv_masked = 
torch.tensor([[1.0, -1.0, 2.0], [1.0, -1.0, 2.0]], device=device) + # Use non-zero prev_lp to allow ratios > 1 with valid curr_lp <= 0 + prev_lp_masked = torch.tensor( + [[-1.0, -1.0, -1.0], [-2.0, -2.0, -2.0]], device=device + ) + # Target Curr logprobs (masked pos 1, 2, 3) - design for clipping + # Target ratios: 0.5 (<0.8), 1.0 (in [0.8, 1.2]), 1.5 (>1.2) + # Curr = log(Ratio) + Prev + curr_lp_masked = torch.tensor( + [[-1.69315, -1.0, -0.59453], [-1.69315, -1.0, -0.59453]], device=device + ) # approx log(0.5)-1, log(1)-1, log(1.5)-1 + + # Fill full tensors (only need first dim for B=1) + data["advantages"][:, 1:] = adv_masked + data["prev_logprobs"][:, 1:] = prev_lp_masked + + # --- Hand Calculation --- + log_ratios = curr_lp_masked - prev_lp_masked + seq_log_ratios_mean = torch.mean(log_ratios, dim=-1).unsqueeze(-1) + ratios = seq_log_ratios_mean.exp().repeat(1, 3) + assert torch.allclose( + ratios, + torch.tensor( + [[0.9086, 0.9086, 0.9086], [2.4697, 2.4697, 2.4697]], device=device + ), + rtol=1e-3, + ) + + ratios_clamped = torch.clamp(ratios, 1.0 - ratio_clip, 1.0 + ratio_clip) + assert torch.allclose( + ratios_clamped, + torch.tensor([[0.9086, 0.9086, 0.9086], [1.2, 1.2, 1.2]], device=device), + rtol=1e-3, + ) + + loss1 = -adv_masked * ratios + assert torch.allclose( + loss1, + torch.tensor( + [[-0.9086, 0.9086, -1.8171], [-2.4697, 2.4697, -4.9394]], device=device + ), + rtol=1e-3, + ) + + loss2 = -adv_masked * ratios_clamped + assert torch.allclose( + loss2, + torch.tensor( + [[-0.9086, 0.9086, -1.8171], [-1.2000, 1.2000, -2.4000]], device=device + ), + rtol=1e-3, + ) + + max_loss = torch.maximum(loss1, loss2) + assert torch.allclose( + max_loss, + torch.tensor( + [[-0.9086, 0.9086, -1.8171], [-1.2000, 2.4697, -2.4000]], device=device + ), + rtol=1e-3, + ) + + expected_loss = torch.mean(max_loss) + assert torch.allclose( + expected_loss, torch.tensor(-0.4912, device=device), rtol=1e-3 + ) + + input_ids = data["input_ids"] + dummy_logits = 
_create_exact_logits( + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device + ) + + actual_loss, _ = loss_fn( + dummy_logits, + data, + global_valid_seqs=torch.sum(data["sample_mask"]), + global_valid_toks=torch.sum( + data["sample_mask"].unsqueeze(1) * data["token_mask"] + ), + ) + torch.testing.assert_close(actual_loss, expected_loss) + + +def test_clipped_pg_loss_gspo_importance_sampling_correction(): + """Tests GSPO w/ importance sampling correction in ClippedPGLossFn.""" + if not torch.cuda.is_available(): + pytest.skip("No GPU available") + + device = "cuda" + data, batch_size, seq_len, vocab_size = _setup_clipped_pg_test_data(device=device) + + ratio_clip = 0.2 + cfg = { + "ratio_clip_min": ratio_clip, + "ratio_clip_max": ratio_clip, + "ratio_clip_c": None, + "reference_policy_kl_penalty": 0.0, # Disable KL + "disable_ppo_ratio": False, + "use_on_policy_kl_approximation": False, + "use_importance_sampling_correction": True, + "sequence_level_importance_ratios": True, + "token_level_loss": False, + } + loss_fn = ClippedPGLossFn(cfg) + + adv_masked = torch.tensor([[1.0, -1.0, 2.0]], device=device) + prev_lp_masked = torch.tensor([[-1.0, -1.0, -1.0]], device=device) + curr_lp_masked = torch.tensor( + [[-1.69315, -1.0, -0.59453]], device=device + ) # approx log(0.5)-1, log(1)-1, log(1.5)-1 + + ref_lp_masked = torch.tensor([[-1.0, -1.0, -1.0]], device=device) + + # For Importance Sampling + gen_lp_masked = torch.tensor([[-0.5, -1.5, -0.8]], device=device) + + # Fill full tensors + data["advantages"][0, 1:] = adv_masked + data["prev_logprobs"][0, 1:] = prev_lp_masked + data["generation_logprobs"][0, 1:] = gen_lp_masked + data["reference_policy_logprobs"][0, 1:] = ref_lp_masked + + # --- Hand Calculation --- + # Actor Loss Calculation + actor_importance_weights = torch.exp( + (prev_lp_masked - gen_lp_masked).sum(dim=-1).unsqueeze(-1) + ) # exp([-1 - (-0.5), -1 - (-1.5), -1 - (-0.8)]) = [0.6065, 1.6487, 0.8187] + assert torch.allclose( + 
actor_importance_weights, + torch.tensor([[0.8187]], device=device), + rtol=1e-3, + ) + + log_ratios = curr_lp_masked - prev_lp_masked + seq_log_ratios_mean = torch.mean(log_ratios, dim=-1).unsqueeze(-1) + ratios = seq_log_ratios_mean.exp().repeat(1, 3) + assert torch.allclose( + ratios, torch.tensor([[0.9086, 0.9086, 0.9086]], device=device), rtol=1e-3 + ) + + ratios_clamped = torch.clamp(ratios, 1.0 - ratio_clip, 1.0 + ratio_clip) + assert torch.allclose( + ratios_clamped, + torch.tensor([[0.9086, 0.9086, 0.9086]], device=device), + rtol=1e-3, + ) + + loss1 = -adv_masked * ratios + assert torch.allclose( + loss1, torch.tensor([[-0.9086, 0.9086, -1.8171]], device=device), rtol=1e-3 + ) + + loss2 = -adv_masked * ratios_clamped + assert torch.allclose( + loss2, torch.tensor([[-0.9086, 0.9086, -1.8171]], device=device), rtol=1e-3 + ) + + max_loss = torch.maximum(loss1, loss2) + assert torch.allclose( + max_loss, torch.tensor([[-0.9086, 0.9086, -1.8171]], device=device), rtol=1e-3 + ) + + importance_weighted_max_loss = actor_importance_weights * max_loss + assert torch.allclose( + importance_weighted_max_loss, + torch.tensor([[-0.7439, 0.7439, -1.4877]], device=device), + rtol=1e-3, + ) + + expected_actor_loss = torch.mean(importance_weighted_max_loss) + assert torch.allclose( + expected_actor_loss, torch.tensor(-0.4959, device=device), rtol=1e-3 + ) + + input_ids = data["input_ids"] + dummy_logits = _create_exact_logits( + curr_lp_masked, input_ids, batch_size, seq_len, vocab_size, device + ) + + actual_loss, _ = loss_fn( + dummy_logits, + data, + global_valid_seqs=torch.sum(data["sample_mask"]), + global_valid_toks=torch.sum(data["sample_mask"] * data["token_mask"]), + ) + torch.testing.assert_close(actual_loss, expected_actor_loss, atol=1e-4, rtol=1e-3) diff --git a/tests/unit/algorithms/test_sft.py b/tests/unit/algorithms/test_sft.py index 4b6d9ee2ce..538c29ff14 100644 --- a/tests/unit/algorithms/test_sft.py +++ b/tests/unit/algorithms/test_sft.py @@ -78,7 +78,11 
@@ def val_iter(self): "train_global_batch_size": 1, "make_sequence_length_divisible_by": 8, }, - "checkpointing": {"enabled": False}, + "checkpointing": { + "enabled": False, + "checkpoint_must_save_by": None, + "save_period": 10, + }, } return { diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 1346a1173d..1f7dad4a2b 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -53,8 +53,9 @@ def pytest_collection_modifyitems(config, items): run_mcore_only = config.getoption("--mcore-only") marker_expr = config.getoption("-m", default="") - # If user specified -m marker expressions, let pytest handle everything normally + # If user specified -m marker expressions, still prioritize run_first tests if marker_expr: + items.sort(key=lambda item: 0 if item.get_closest_marker("run_first") else 1) return # Filter tests based on the desired configurations @@ -83,6 +84,9 @@ def pytest_collection_modifyitems(config, items): and not item.get_closest_marker("mcore") ] + # Ensure run_first tests are prioritized + new_items.sort(key=lambda item: 0 if item.get_closest_marker("run_first") else 1) + # Update the items list in-place items[:] = new_items @@ -576,3 +580,49 @@ def tiny_gemma3_model_path(): tokenizer.save_pretrained(model_path) del model, tokenizer yield model_path + + +def _build_tiny_nemotron5_h_checkpoint(model_path: str) -> None: + import shutil + + from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer + + config = AutoConfig.from_pretrained( + "nvidia/Nemotron-H-8B-Base-8K", trust_remote_code=True + ) + config.hybrid_override_pattern = "M*-" + config.num_hidden_layers = 3 + config.intermediate_size = 32 + config.hidden_size = 256 + config.num_attention_heads = 8 + config.mamba_num_heads = 8 + config.num_key_value_heads = 8 + config.n_groups = 1 + + model = AutoModelForCausalLM.from_config(config, trust_remote_code=True) + tokenizer = AutoTokenizer.from_pretrained( + "nvidia/Nemotron-H-8B-Base-8K", trust_remote_code=True + 
) + + shutil.rmtree(model_path, ignore_errors=True) + model.save_pretrained(model_path) + tokenizer.save_pretrained(model_path) + + +@pytest.fixture(scope="session") +def tiny_nemotron5_h_model_path(): + """Fixture that returns a path to a tiny nemotron model with a dummy tokenizer. + + If the asset hasn't been prepared by the prepare script, skip the tests that require it. + """ + model_path = os.path.join( + TEST_ASSETS_DIR, "tiny_nemotron5_h_with_nemotron_tokenizer" + ) + + config_file = os.path.join(model_path, "config.json") + if not os.path.exists(config_file): + pytest.skip( + "Tiny Nemotron-H test asset not prepared. Run `uv run tests/unit/prepare_unit_test_assets.py` first." + ) + + yield model_path diff --git a/tests/unit/data/test_data_shuffle_reproducity.py b/tests/unit/data/test_data_shuffle_reproducity.py new file mode 100644 index 0000000000..3821423d16 --- /dev/null +++ b/tests/unit/data/test_data_shuffle_reproducity.py @@ -0,0 +1,151 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import tempfile +from collections import defaultdict + +import pytest +import torch +from torchdata.stateful_dataloader import StatefulDataLoader + +from examples.run_grpo_math import hf_data_processor +from nemo_rl.algorithms.utils import get_tokenizer, set_seed +from nemo_rl.data.datasets import AllTaskProcessedDataset, rl_collate_fn +from nemo_rl.data.hf_datasets.openmathinstruct2 import OpenMathInstruct2Dataset +from nemo_rl.data.interfaces import TaskDataProcessFnCallable, TaskDataSpec +from nemo_rl.models.policy import TokenizerConfig + +# Test configuration +TOKENIZER_CONFIG: TokenizerConfig = { + "name": "Qwen/Qwen2.5-Math-1.5B-Instruct", + "chat_template": "default", +} + +MAX_BATCHES_TO_TEST = 10 + + +def create_dataloader( + seed: int = 42, max_seq_length: int = 128, batch_size: int = 4 +) -> StatefulDataLoader: + """Create a dataloader with consistent configuration for testing.""" + # Initialize dataset + data = OpenMathInstruct2Dataset(seed=seed) + + # Setup tokenizer + tokenizer = get_tokenizer(TOKENIZER_CONFIG) + + # Configure task specification + math_task_spec = TaskDataSpec( + task_name="math", + prompt_file=f"{os.path.dirname(os.path.abspath(__file__))}/../../../examples/prompts/cot.txt", + system_prompt_file=None, + ) + + task_data_processors: dict[str, tuple[TaskDataSpec, TaskDataProcessFnCallable]] = ( + defaultdict(lambda: (math_task_spec, hf_data_processor)) + ) + task_data_processors["math"] = (math_task_spec, hf_data_processor) + + dataset = AllTaskProcessedDataset( + dataset=data.formatted_ds["train"].select(range(1000)), + tokenizer=tokenizer, + default_task_data_spec=math_task_spec, + task_data_processors=task_data_processors, + max_seq_length=max_seq_length, + ) + + return StatefulDataLoader( + dataset, + batch_size=batch_size, + shuffle=True, + collate_fn=rl_collate_fn, + drop_last=True, + ) + + +@pytest.mark.parametrize("seed", [42, 24]) +def test_data_shuffle_reproducity_from_start(seed): + """Test that dataloader 
shuffling is reproducible with the same seed.""" + # Step 1: Set seed and create initial dataloader + set_seed(seed) + original_dataloader = create_dataloader(seed=seed) + + expected_batches = [] + for batch in original_dataloader: + expected_batches.append(batch) + if len(expected_batches) >= MAX_BATCHES_TO_TEST: + break + + # Step 2: to mimic a new experiment: + # set original seed and create new dataloader under the same seed environment + set_seed(seed) + new_dataloader = create_dataloader(seed=seed) + + for i, (expected_batch, actual_batch) in enumerate( + zip(expected_batches, new_dataloader) + ): + assert str(expected_batch) == str(actual_batch), f"Batch {i} is different" + + +@pytest.mark.parametrize("save_state_at_batch", [6, 10]) +def test_data_shuffle_reproducity_from_continue(save_state_at_batch, seed=42): + """Test that dataloader state can be saved and restored for continuation.""" + # Step 1: Set seed and create initial dataloader + set_seed(seed) + original_dataloader = create_dataloader(seed=seed) + + with tempfile.TemporaryDirectory() as temp_dir: + expected_batches = [] + for i, batch in enumerate(original_dataloader): + if ( + i >= save_state_at_batch - 1 + ): # Stop after consuming exactly save_state_at_batch batches + if i == save_state_at_batch - 1: + # Step 2: Save the state at this point + state_file = os.path.join(temp_dir, "dataloader_state.pt") + torch.save(original_dataloader.state_dict(), state_file) + else: + # Step 3: Get the expected continuation from original dataloader + expected_batches.append(batch) + if len(expected_batches) >= MAX_BATCHES_TO_TEST: + break + + # step 4: to mimic a continued experiment: + # set original seed and create new dataloader under the same seed environment + # load the saved state and continue from the saved point + set_seed(seed) + continued_dataloader = create_dataloader(seed=seed) + + state_dict = torch.load(state_file) + continued_dataloader.load_state_dict(state_dict) + + # Step 5: Get batches from 
the continued dataloader + actual_batches = [] + for batch in continued_dataloader: + if len(actual_batches) >= MAX_BATCHES_TO_TEST: + break + actual_batches.append(batch) + + assert len(actual_batches) == len(expected_batches) + + # Step 6: Compare the batches - they should be identical + for i, (actual_batch, expected_batch) in enumerate( + zip(actual_batches, expected_batches) + ): + assert str(actual_batch) == str(expected_batch), ( + f"Batch {i} from continued dataloader doesn't match expected batch\n" + f"actual_batch['idx']:\t{actual_batch['idx']}\n" + f"expected_batch['idx']:\t{expected_batch['idx']}" + ) diff --git a/tests/unit/data/test_llm_message_utils.py b/tests/unit/data/test_llm_message_utils.py index 91ae2e41b7..afb1f71398 100644 --- a/tests/unit/data/test_llm_message_utils.py +++ b/tests/unit/data/test_llm_message_utils.py @@ -13,9 +13,12 @@ # limitations under the License. +from typing import Any, Callable + import pytest import torch -from transformers import AutoTokenizer +from PIL import Image +from transformers import AutoProcessor, AutoTokenizer from nemo_rl.data.hf_datasets import COMMON_CHAT_TEMPLATES from nemo_rl.data.interfaces import LLMMessageLogType, TaskDataSpec @@ -328,177 +331,124 @@ def test_batch_pad_message_log_custom_pad_value( ) -@pytest.mark.hf_gated -def test_get_formatted_message_log_llama( - raw_chat_message_log: LLMMessageLogType, -) -> None: - tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") - - ## get expected result - formatted_system_message = tokenizer.apply_chat_template( - [raw_chat_message_log[0]], - tokenize=False, - add_generation_prompt=False, - add_special_tokens=False, - ) - formatted_user_message = tokenizer.apply_chat_template( - [raw_chat_message_log[1]], - tokenize=False, - add_generation_prompt=False, - add_special_tokens=False, - ) - formatted_assistant_message = tokenizer.apply_chat_template( - [raw_chat_message_log[2]], - tokenize=False, - add_generation_prompt=False, - 
add_special_tokens=False, - ) - - ## text should be equivalent to if we apply chat template - ## to each turn separately and manually remove the bot string - ## from the intermediate turns - bot_str = "<|begin_of_text|>" - expected_text = [ - formatted_system_message, - formatted_user_message[len(bot_str) :], - formatted_assistant_message[len(bot_str) :], - ] - - task_data_spec = TaskDataSpec( - task_name="test", - ) - result = get_formatted_message_log(raw_chat_message_log, tokenizer, task_data_spec) - actual_text = [m["content"] for m in result] - - assert actual_text == expected_text - - -@pytest.mark.hf_gated -def test_get_formatted_message_log_add_generation_prompt_llama( +@pytest.mark.parametrize( + "model_id, chat_log_transform", + [ + pytest.param( + "meta-llama/Meta-Llama-3-8B-Instruct", + lambda raw: raw, + marks=pytest.mark.hf_gated, + id="llama", + ), + pytest.param( + "google/gemma-3-27b-it", + # Some Gemma chat templates (or versions) raise on system turns. + # For portability across environments, test on user+assistant only. + # If your tokenizer supports system turns, you can change this to `lambda raw: raw`. 
+ lambda raw: [raw[1], raw[2]], + marks=pytest.mark.hf_gated, + id="gemma", + ), + pytest.param( + "Qwen/Qwen2.5-Coder-32B-Instruct", + lambda raw: raw, + id="qwen", + ), + ], +) +@pytest.mark.parametrize("add_generation_prompt", [False, True]) +def test_get_formatted_message_log_models( raw_chat_message_log: LLMMessageLogType, + model_id: str, + chat_log_transform: Callable[[Any], Any], + add_generation_prompt: bool, ) -> None: - tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") - - ## get expected result - formatted_system_message = tokenizer.apply_chat_template( - [raw_chat_message_log[0]], - tokenize=False, - add_generation_prompt=False, - add_special_tokens=False, - ) - formatted_user_message = tokenizer.apply_chat_template( - [raw_chat_message_log[1]], - tokenize=False, - add_generation_prompt=True, - add_special_tokens=False, - ) - formatted_assistant_message = ( - raw_chat_message_log[2]["content"] + tokenizer.eos_token - ) - - ## text should be equivalent to if we apply chat template - ## to each turn separately and manually remove the bot string - ## from the intermediate turns - bot_str = "<|begin_of_text|>" - expected_text = [ - formatted_system_message, - formatted_user_message[len(bot_str) :], - formatted_assistant_message, - ] - - task_data_spec = TaskDataSpec( - task_name="test", - ) + """Validate that get_formatted_message_log produces text consistent with the + tokenizer's chat template across models. + + This test is parametrized over model/tokenizer and whether to include a + generation prompt. For models like Gemma that error on system turns, the + input chat log is transformed to exclude the system message. + + Expectations: + - Require an EOS token for well-defined end-of-turn comparison. 
+ - When add_generation_prompt is False, the concatenated contents must match + the tokenizer's apply_chat_template output; if the tokenizer omits a final + EOS, accept the actual with EOS by appending EOS to the expected before + comparison. + - When add_generation_prompt is True and the last turn is an assistant + message, accept either: + (1) prefix built with add_generation_prompt=True followed by the raw + assistant content plus EOS; or + (2) the tokenizer's full non-generation template output plus EOS. + This avoids hard-coding model-specific headers or delimiters while still + verifying semantic equivalence. + - Only normalization performed is trimming a trailing newline after EOS. + """ + tokenizer = AutoTokenizer.from_pretrained(model_id) + chat_log = chat_log_transform(raw_chat_message_log) + # Ensure tokenizer defines an EOS token; otherwise the test logic is ill-defined + assert tokenizer.eos_token, "Tokenizer must define eos_token for this test" + eos = tokenizer.eos_token + task_data_spec = TaskDataSpec(task_name="test") result = get_formatted_message_log( - raw_chat_message_log, + chat_log, tokenizer, task_data_spec, - add_generation_prompt=True, + add_generation_prompt=add_generation_prompt, ) - actual_text = [m["content"] for m in result] - - assert actual_text == expected_text - - -def test_get_formatted_message_log_qwen( - raw_chat_message_log: LLMMessageLogType, -) -> None: - ## test using a tokenizer that does not have a bos token - tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct") - assert tokenizer.bos_token is None - - ## get expected result - ## result is equivalent to if we apply chat template to the full message log, - ## remove the trailing newline, and then partition by the delimiter - expected_text_string = tokenizer.apply_chat_template( - [raw_chat_message_log], - tokenize=False, - add_generation_prompt=False, - add_special_tokens=False, - )[0].rstrip("\n") ## remove trailing newline - - delimiter = 
"<|im_end|>\n" - split_text = expected_text_string.split(delimiter) - expected_text = [] - for i in range(len(split_text)): - if i == len(raw_chat_message_log) - 1: - expected_text.append(split_text[i]) - else: - expected_text.append(split_text[i] + delimiter) - - task_data_spec = TaskDataSpec( - task_name="test", - ) - result = get_formatted_message_log(raw_chat_message_log, tokenizer, task_data_spec) - actual_text = [m["content"] for m in result] - - assert actual_text == expected_text - - -def test_get_formatted_message_log_add_generation_prompt_qwen( - raw_chat_message_log: LLMMessageLogType, -) -> None: - ## test using a tokenizer that does not have a bos token - tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct") - assert tokenizer.bos_token is None - - ## get expected result - ## result is equivalent to if we apply chat template to the full message log, - ## remove the trailing newline, and then partition by the delimiter - ## Separately handle the last message because of the generation prompt - expected_text_string = tokenizer.apply_chat_template( - [raw_chat_message_log[:2]], - tokenize=False, - add_generation_prompt=True, - add_special_tokens=False, - )[0] - - delimiter = "<|im_end|>\n" - split_text = expected_text_string.split(delimiter, 1) - expected_text = [] - for i in range(len(split_text)): - if i == len(split_text) - 1: - expected_text.append(split_text[i]) + actual_concat = "".join(m["content"] for m in result) + + def normalize(s: str) -> str: + # Normalize EOS+newline quirk to EOS only + if s.endswith(eos + "\n"): + return s[:-1] + return s + + if not add_generation_prompt: + expected_concat = tokenizer.apply_chat_template( + [chat_log], + tokenize=False, + add_generation_prompt=False, + add_special_tokens=False, + )[0] + # Accept EOS presence even if the tokenizer's template omits it + if actual_concat.endswith(eos) and not expected_concat.endswith(eos): + expected_concat = expected_concat + eos + assert 
normalize(actual_concat) == normalize(expected_concat) + else: + if len(chat_log) > 0 and chat_log[-1].get("role") == "assistant": + prefix_log = chat_log[:-1] + # Some tokenizers include a role header when add_generation_prompt=True. + # Accept either behavior without hard-coding model-specific strings. + prefix_gen = tokenizer.apply_chat_template( + [prefix_log], + tokenize=False, + add_generation_prompt=True, + add_special_tokens=False, + )[0] + assistant_suffix = chat_log[-1]["content"] + eos + expected_concat_a = prefix_gen + assistant_suffix + # Alternative: take the full non-generation template output and just append EOS + full_no_gen = tokenizer.apply_chat_template( + [chat_log], + tokenize=False, + add_generation_prompt=False, + add_special_tokens=False, + )[0] + expected_concat_b = full_no_gen + eos + actual_norm = normalize(actual_concat) + assert actual_norm == normalize( + expected_concat_a + ) or actual_norm == normalize(expected_concat_b) else: - expected_text.append(split_text[i] + delimiter) - - formatted_assistant_message = ( - raw_chat_message_log[2]["content"] + tokenizer.eos_token - ) - expected_text.append(formatted_assistant_message) - - task_data_spec = TaskDataSpec( - task_name="test", - ) - result = get_formatted_message_log( - raw_chat_message_log, - tokenizer, - task_data_spec, - add_generation_prompt=True, - ) - actual_text = [m["content"] for m in result] - - assert actual_text == expected_text + expected_concat = tokenizer.apply_chat_template( + [chat_log], + tokenize=False, + add_generation_prompt=True, + add_special_tokens=False, + )[0] + assert normalize(actual_concat) == normalize(expected_concat) @pytest.mark.hf_gated @@ -591,3 +541,175 @@ def test_get_first_index_that_differs(): assert get_first_index_that_differs("hello world", "hello") == 5 assert get_first_index_that_differs("hi1", "hello2") == 1 assert get_first_index_that_differs("hello2", "hi1") == 1 + + +def test_message_log_to_flat_messages_with_packed_images() -> None: 
+ from nemo_rl.data.multimodal_utils import PackedTensor + + # two turns, each with an image tensor wrapped in PackedTensor + img1 = torch.randn(2, 3, 8, 8) + img2 = torch.randn(3, 3, 8, 8) + message_log: LLMMessageLogType = [ + { + "role": "user", + "content": "see image", + "token_ids": torch.tensor([1, 2]), + "images": PackedTensor(img1, dim_to_pack=0), + }, + { + "role": "assistant", + "content": "ok", + "token_ids": torch.tensor([3]), + "images": PackedTensor(img2, dim_to_pack=0), + }, + ] + flat = message_log_to_flat_messages(message_log) + assert isinstance(flat["images"], PackedTensor) + assert tuple(flat["images"].as_tensor().shape) == (5, 3, 8, 8) + assert torch.equal(flat["token_ids"], torch.tensor([1, 2, 3])) + + +def test_batched_message_log_to_flat_message_with_packed_images() -> None: + from nemo_rl.data.multimodal_utils import PackedTensor + + img_a = torch.randn(1, 3, 4, 4) + img_b = torch.randn(2, 3, 4, 4) + img_c = torch.randn(1, 3, 4, 4) + + batch_logs = [ + [ + { + "role": "user", + "content": "prompt a", + "token_ids": torch.tensor([1, 2, 3]), + "images": PackedTensor(img_a, dim_to_pack=0), + }, + {"role": "assistant", "content": "resp", "token_ids": torch.tensor([4])}, + ], + [ + { + "role": "user", + "content": "prompt b", + "token_ids": torch.tensor([5, 6]), + "images": PackedTensor(img_b, dim_to_pack=0), + }, + { + "role": "assistant", + "content": "resp2", + "token_ids": torch.tensor([7, 8]), + }, + { + "role": "user", + "content": "again", + "token_ids": torch.tensor([9]), + "images": PackedTensor(img_c, dim_to_pack=0), + }, + ], + ] + + batched, input_lengths = batched_message_log_to_flat_message( + batch_logs, pad_value_dict={"token_ids": 0} + ) + assert isinstance(batched["images"], PackedTensor) + # flattened_concat keeps two packed tensors (one per convo) + assert len(batched["images"]) == 2 + # total packed along dim 0 = 1 + (2 + 1) = 4 + assert tuple(batched["images"].as_tensor().shape) == (4, 3, 4, 4) + assert 
torch.equal(input_lengths, torch.tensor([4, 5], dtype=torch.int32)) + + +@pytest.mark.hf_gated +def test_get_formatted_message_log_multimodal_prompt_formatting() -> None: + processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct") + task_data_spec = TaskDataSpec(task_name="t") + task_data_spec.prompt = "Question: {} Answer:" + + # one user turn with text+image, then assistant + image = Image.new("RGB", (16, 16), color=(0, 0, 0)) + message_log: LLMMessageLogType = [ + { + "role": "system", + "content": "", # to prevent Qwen's default system prompt taking over + }, + { + "role": "user", + "content": [ + {"type": "text", "text": "a cat?"}, + {"type": "image", "image": image}, + ], + }, + {"role": "assistant", "content": "okay"}, + ] + + out = get_formatted_message_log( + message_log, processor, task_data_spec, add_bos_token=False, add_eos_token=False + ) + # First message text should be formatted by prompt + assert isinstance(out[1]["content"], list) + assert any( + item["type"] == "text" + and item["text"].startswith("<|im_start|>user\nQuestion: ") + for item in out[1]["content"] + ) # type: ignore[index] + # pixel_values should be added as PackedTensor for the first message + from nemo_rl.data.multimodal_utils import PackedTensor + + assert isinstance(out[1]["pixel_values"], PackedTensor) + assert isinstance(out[1]["image_grid_thw"], PackedTensor) + pv = out[1]["pixel_values"].as_tensor() + grid_thw = out[1]["image_grid_thw"].as_tensor() + assert pv.ndim == 2 and pv.shape[1] == 1176 + assert grid_thw.ndim == 2 and grid_thw.shape == torch.Size([1, 3]) + # token_ids should be non-empty tensors + assert ( + isinstance(out[1]["token_ids"], torch.Tensor) + and out[1]["token_ids"].numel() > 0 + ) + assert ( + isinstance(out[2]["token_ids"], torch.Tensor) + and out[2]["token_ids"].numel() > 0 + ) + + #### Case 2 : without system prompt + image = Image.new("RGB", (16, 16), color=(0, 0, 0)) + message_log: LLMMessageLogType = [ + { + "role": "user", + 
"content": [ + {"type": "text", "text": "a cat?"}, + {"type": "image", "image": image}, + ], + }, + {"role": "assistant", "content": "okay"}, + ] + + out = get_formatted_message_log( + message_log, processor, task_data_spec, add_bos_token=False, add_eos_token=False + ) + # First message text should be formatted by prompt + assert isinstance(out[0]["content"], list) + assert any( + item["type"] == "text" + and item["text"].startswith( + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nQuestion: " + ) + for item in out[0]["content"] + ) # type: ignore[index] + # pixel_values should be added as PackedTensor for the first message + from nemo_rl.data.multimodal_utils import PackedTensor + + assert isinstance(out[0]["pixel_values"], PackedTensor) + assert isinstance(out[0]["image_grid_thw"], PackedTensor) + pv = out[0]["pixel_values"].as_tensor() + grid_thw = out[0]["image_grid_thw"].as_tensor() + assert pv.ndim == 2 and pv.shape[1] == 1176 + assert grid_thw.ndim == 2 and grid_thw.shape == torch.Size([1, 3]) + # token_ids should be non-empty tensors + assert ( + isinstance(out[0]["token_ids"], torch.Tensor) + and out[0]["token_ids"].numel() > 0 + ) + assert ( + isinstance(out[1]["token_ids"], torch.Tensor) + and out[1]["token_ids"].numel() > 0 + ) diff --git a/tests/unit/data/test_multimodal_dict.py b/tests/unit/data/test_multimodal_dict.py new file mode 100644 index 0000000000..ff95534e83 --- /dev/null +++ b/tests/unit/data/test_multimodal_dict.py @@ -0,0 +1,318 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest +import torch + +from nemo_rl.data.llm_message_utils import batched_message_log_to_flat_message +from nemo_rl.data.multimodal_utils import ( + PackedTensor, +) +from nemo_rl.distributed.batched_data_dict import ( + BatchedDataDict, + DynamicBatchingArgs, + SequencePackingArgs, +) + + +def test_packed_data_basic(): + """Test basic functionality of PackedTensor.""" + # Create sample packed items + tensor1 = torch.randn(16, 3) + tensor2 = torch.randn(45, 3) + + item1 = PackedTensor(tensor1, dim_to_pack=0) + item2 = PackedTensor(tensor2, dim_to_pack=0) + + # Test item functionality + assert torch.equal(item1.as_tensor(), tensor1) + assert item1.dim_to_pack == 0 + + # Test batch creation and concatenation + batch = PackedTensor([item1.as_tensor(), item2.as_tensor()], dim_to_pack=0) + assert len(batch) == 2 + + # Test as_tensor + expected_tensor = torch.cat([tensor1, tensor2], dim=0) + assert torch.equal(batch.as_tensor(), expected_tensor) + + +def test_shard_by_batch_size_with_packed_data(): + """Test shard_by_batch_size with packed multimodal data.""" + # Create sample data + text_tensor = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + image_tensors = [torch.randn(3 * i + 2, 3, 128, 128) for i in range(4)] + + # Create packed image data + packed_batch = PackedTensor(image_tensors, dim_to_pack=0) + + # Create BatchedDataDict + batch = BatchedDataDict( + { + "text_ids": text_tensor, + "image_features": packed_batch, + "labels": [1, 2, 3, 4], + } + ) + + # Test sharding + shards = batch.shard_by_batch_size(shards=2) + assert 
len(shards) == 2 + + # Verify first shard + assert torch.equal(shards[0]["text_ids"], torch.tensor([[1, 2, 3], [4, 5, 6]])) + assert isinstance(shards[0]["image_features"], PackedTensor) + assert len(shards[0]["image_features"]) == 2 + assert shards[0]["image_features"].as_tensor().shape == (2 + 5, 3, 128, 128) + assert shards[0]["labels"] == [1, 2] + + # Verify second shard + assert torch.equal(shards[1]["text_ids"], torch.tensor([[7, 8, 9], [10, 11, 12]])) + assert isinstance(shards[1]["image_features"], PackedTensor) + assert len(shards[1]["image_features"]) == 2 + assert shards[1]["image_features"].as_tensor().shape == (8 + 11, 3, 128, 128) + assert shards[1]["labels"] == [3, 4] + + +def test_truncate_tensors_with_packed_data(): + """Test truncate_tensors with packed multimodal data.""" + # Create sample data + text_tensor = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]) + image_tensors = [ + torch.randn(5, 3, 128, 4, 2, 2) for i in range(2) + ] # also check a different dim_to_pack + + # Create packed image data + packed_batch = PackedTensor(image_tensors, dim_to_pack=1) + + # Create BatchedDataDict + batch = BatchedDataDict({"text_ids": text_tensor, "image_features": packed_batch}) + + # Test truncation + batch.truncate_tensors(dim=1, truncated_len=2) + + # Verify text was truncated + assert torch.equal(batch["text_ids"], torch.tensor([[1, 2], [5, 6]])) + # Verify image features were not affected (assumed safe as per comment in truncate_tensors) + assert isinstance(batch["image_features"], PackedTensor) + assert batch["image_features"].as_tensor().shape == (5, 6, 128, 4, 2, 2) + + +def test_multiturn_rollout_with_packed_data(): + """Test multiturn conversations with packed multimodal data.""" + message_log_1 = [ + { + "role": "user", + "token_ids": torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]), + "images": PackedTensor(torch.randn(3, 128, 128), dim_to_pack=0), + }, + { + "role": "assistant", + "token_ids": torch.tensor([9, 10, 11, 12, 13, 14, 15, 16]), + }, + { + "role": 
"user", + "token_ids": torch.tensor([17, 18, 19, 20, 21, 22, 23, 24]), + "images": PackedTensor(torch.randn(3, 128, 128), dim_to_pack=0), + }, + ] + message_log_2 = [ + { + "role": "user", + "token_ids": torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]), + "images": PackedTensor(torch.randn(3, 128, 128), dim_to_pack=0), + }, + { + "role": "assistant", + "token_ids": torch.tensor([9, 10, 11, 12, 13, 14, 15, 16]), + }, + { + "role": "user", + "token_ids": torch.tensor([17, 18, 19, 20, 21, 22, 23, 24]), + }, + ] + # data spec + message_logs = BatchedDataDict( + { + "message_log": [message_log_1, message_log_2], + } + ) + flat_message, input_lengths = batched_message_log_to_flat_message( + message_logs["message_log"], + pad_value_dict={ + "token_ids": -1, + }, + ) + shards = flat_message.shard_by_batch_size(shards=2) + assert len(shards) == 2 + assert tuple(shards[0]["images"].as_tensor().shape) == (6, 128, 128) + assert tuple(shards[1]["images"].as_tensor().shape) == (3, 128, 128) + + +def test_sequence_packing_with_packed_data(): + """Test sequence packing with packed multimodal data.""" + # Create sample data + text_tensor = torch.tensor( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] + ) + image_tensors = [torch.randn(2**i, 1176) for i in range(4)] + + # Create packed image data + packed_batch = PackedTensor(image_tensors, dim_to_pack=0) + + # Create BatchedDataDict + batch = BatchedDataDict( + { + "text_ids": text_tensor, + "image_features": packed_batch, + "sequence_lengths": torch.tensor([2, 3, 2, 4]), + } + ) + + sequence_packing_args = SequencePackingArgs( + max_tokens_per_microbatch=6, + input_key="text_ids", + input_lengths_key="sequence_lengths", + algorithm="modified_first_fit_decreasing", + sequence_length_pad_multiple=1, + ) + + # Test sequence packing + sharded_batches, sorted_indices = batch.shard_by_batch_size( + shards=2, sequence_packing_args=sequence_packing_args + ) + + # Verify basic structure + assert len(sharded_batches) == 2 + assert 
len(sorted_indices) == 4 + + print("sequence packing sorted indices", sorted_indices) + + # Verify each shard has the necessary attributes + for shard in sharded_batches: + assert hasattr(shard, "micro_batch_indices") + assert hasattr(shard, "micro_batch_lengths") + assert isinstance(shard["image_features"], PackedTensor) + + +def test_dynamic_batching_with_packed_data(): + """Test dynamic batching with packed multimodal data.""" + # Create sample data + text_tensor = torch.tensor( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] + ) + image_tensors = [torch.randn(2**i, 1176) for i in range(4)] + + # Create packed image data + packed_batch = PackedTensor(image_tensors, dim_to_pack=0) + + # Create BatchedDataDict + batch = BatchedDataDict( + { + "text_ids": text_tensor, + "image_features": packed_batch, + "sequence_lengths": torch.tensor([2, 3, 2, 4]), + } + ) + + dynamic_batching_args: DynamicBatchingArgs = { + "input_key": "text_ids", + "input_lengths_key": "sequence_lengths", + "sequence_length_round": 2, + "max_tokens_per_microbatch": 6, + } + + # Test dynamic batching + sharded_batches, sorted_indices = batch.shard_by_batch_size( + shards=2, dynamic_batching_args=dynamic_batching_args + ) + + print("dynamic batching sorted indices", sorted_indices) + + # Verify basic structure + assert len(sharded_batches) == 2 + assert len(sorted_indices) == 4 + + # Verify each shard has the necessary attributes + for shard in sharded_batches: + assert hasattr(shard, "micro_batch_indices") + assert hasattr(shard, "micro_batch_lengths") + assert isinstance(shard["image_features"], PackedTensor) + + +def test_multimodal_specific_functionality(): + """Test functionality specific to multimodal data handling. 
(length, device movement, as_tensor)""" + # Create sample data + text_tensor = torch.tensor([[1, 2, 3], [4, 5, 6]]) + image_tensor = torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]]]) + + # Test PackedTensorItem + mm_data = PackedTensor(image_tensor, dim_to_pack=0) + assert isinstance(mm_data, PackedTensor) + assert torch.equal(mm_data.as_tensor(), image_tensor) + assert len(mm_data) == 1 + + # Test device movement + if torch.cuda.is_available(): + mm_data = mm_data.to("cuda") + assert mm_data.tensors[0].device.type == "cuda" + + # images differ along a different dimension + image_tensors = [torch.randn(3, 128, 128 + i) for i in range(2)] + + mm_batch = PackedTensor(image_tensors, dim_to_pack=0) + with pytest.raises(RuntimeError): + batch_tensor = mm_batch.as_tensor() + + # check for packing on correct dimension + image_tensors = [torch.randn(3 + 10**i, 128, 128) for i in range(2)] + mm_batch = PackedTensor(image_tensors, dim_to_pack=0) + mm_tensor = mm_batch.as_tensor() + + expected_dim = sum([3 + 10**i for i in range(2)]) + assert mm_tensor.shape == (expected_dim, 128, 128) + + +def test_get_multimodal_dict(): + """Test the get_multimodal_dict functionality.""" + # Create sample data + text_tensor = torch.tensor([[1, 2, 3], [4, 5, 6]]) + image_tensor = torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]]]) + token_type_ids = torch.tensor([[1, 1, 1], [1, 1, 1]]) + + # Create packed image data + packed_image = PackedTensor(image_tensor, dim_to_pack=0) + + # Create BatchedDataDict + batch = BatchedDataDict( + { + "text_ids": text_tensor, + "image_features": packed_image, + "token_type_ids": token_type_ids, # Special key that should be included + } + ) + + # Test getting multimodal dict as tensors + mm_dict = batch.get_multimodal_dict(as_tensors=True) + assert "image_features" in mm_dict + assert "token_type_ids" in mm_dict + assert torch.is_tensor(mm_dict["image_features"]) + assert torch.is_tensor(mm_dict["token_type_ids"]) + assert "text_ids" not in mm_dict # Regular tensors should 
not be included + + # Test getting multimodal dict as packed items + mm_dict = batch.get_multimodal_dict(as_tensors=False) + assert "image_features" in mm_dict + assert "token_type_ids" in mm_dict + assert isinstance(mm_dict["image_features"], PackedTensor) + assert torch.is_tensor(mm_dict["token_type_ids"]) diff --git a/tests/unit/distributed/test_batched_data_dict.py b/tests/unit/distributed/test_batched_data_dict.py index 539f2fab2b..4bf42e02f7 100644 --- a/tests/unit/distributed/test_batched_data_dict.py +++ b/tests/unit/distributed/test_batched_data_dict.py @@ -14,6 +14,7 @@ import pytest import torch +from nemo_rl.data.multimodal_utils import PackedTensor from nemo_rl.distributed.batched_data_dict import ( BatchedDataDict, DynamicBatchingArgs, @@ -469,6 +470,82 @@ def test_sequence_packing_with_dynamic_batching_conflict(): ) +def test_shard_by_batch_size_with_packed_multimodal(): + """Sharding should slice PackedTensor items correctly and preserve types.""" + text = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + images = [ + torch.randn(2, 3, 8, 8), + torch.randn(3, 3, 8, 8), + torch.randn(1, 3, 8, 8), + torch.randn(5, 3, 8, 8), + ] + packed = PackedTensor(images, dim_to_pack=0) + batch = BatchedDataDict( + { + "input_ids": text, + "pixel_values": packed, + "labels": [0, 1, 2, 3], + } + ) + + shards = batch.shard_by_batch_size(shards=2) + assert len(shards) == 2 + # First shard should contain first two items + assert torch.equal(shards[0]["input_ids"], torch.tensor([[1, 2, 3], [4, 5, 6]])) + assert isinstance(shards[0]["pixel_values"], PackedTensor) + assert len(shards[0]["pixel_values"]) == 2 + assert shards[0]["labels"] == [0, 1] + # Packed lengths along dim 0: 2 + 3 + assert tuple(shards[0]["pixel_values"].as_tensor().shape) == (5, 3, 8, 8) + # Second shard should contain last two items + assert torch.equal(shards[1]["input_ids"], torch.tensor([[7, 8, 9], [10, 11, 12]])) + assert isinstance(shards[1]["pixel_values"], PackedTensor) + assert 
len(shards[1]["pixel_values"]) == 2 + assert shards[1]["labels"] == [2, 3] + # Packed lengths along dim 0: 1 + 5 + assert tuple(shards[1]["pixel_values"].as_tensor().shape) == (6, 3, 8, 8) + + +def test_get_multimodal_dict_mixed_content_and_device_move(): + """get_multimodal_dict should include PackedTensor and optional keys, and support device movement.""" + images = [torch.randn(2, 3, 8, 8), torch.randn(1, 3, 8, 8)] + packed = PackedTensor(images, dim_to_pack=0) + token_type_ids = torch.ones(2, 4, dtype=torch.long) + regular = torch.arange(2) + + batch = BatchedDataDict( + { + "pixel_values": packed, + "token_type_ids": token_type_ids, + "regular_tensor": regular, + "labels": [0, 1], + } + ) + + # as tensors + mm_dict_t = batch.get_multimodal_dict(as_tensors=True) + assert set(mm_dict_t.keys()) == {"pixel_values", "token_type_ids"} + assert ( + torch.is_tensor(mm_dict_t["pixel_values"]) + and mm_dict_t["pixel_values"].shape[0] == 3 + ) + assert torch.is_tensor(mm_dict_t["token_type_ids"]) and tuple( + mm_dict_t["token_type_ids"].shape + ) == (2, 4) + + # as packed + mm_dict_p = batch.get_multimodal_dict(as_tensors=False) + assert isinstance(mm_dict_p["pixel_values"], PackedTensor) + + # move device + device = "cuda" if torch.cuda.is_available() else "cpu" + moved = BatchedDataDict({"pixel_values": packed}).to(device) + mm_after_move = moved.get_multimodal_dict(as_tensors=True) + assert torch.is_tensor(mm_after_move["pixel_values"]) and mm_after_move[ + "pixel_values" + ].device.type == ("cuda" if torch.cuda.is_available() else "cpu") + + @pytest.mark.parametrize("pad_to_multiple_of", [1, 32, 64, 256]) def test_sequence_packing_microbatch_boundaries(pad_to_multiple_of): """Test that microbatch boundaries are correctly maintained across chunks with random sequences.""" diff --git a/tests/unit/distributed/test_model_utils.py b/tests/unit/distributed/test_model_utils.py index 2f8ef2011a..371080a384 100644 --- a/tests/unit/distributed/test_model_utils.py +++ 
b/tests/unit/distributed/test_model_utils.py @@ -18,6 +18,7 @@ import torch from nemo_rl.distributed.model_utils import ( + ChunkedDistributedLogprob, DistributedLogprob, _compute_distributed_log_softmax, _get_tokens_on_this_cp_rank, @@ -428,8 +429,9 @@ def test_allgather_cp_sharded_tensor(register_allgather_cp_test_actor, cp_size): @ray.remote(num_gpus=1) class DistributedLogprobTestActor: - def __init__(self, tp_size): + def __init__(self, tp_size, chunk_size): self.tp_size = tp_size + self.chunk_size = chunk_size self.env_vars = dict(os.environ) torch.distributed.init_process_group(backend="nccl") self.tp_group = torch.distributed.new_group(ranks=list(range(tp_size))) @@ -455,6 +457,7 @@ def test_distributed_logprob_forward_and_backward(self): seq_len = 8 full_vocab_size = 1024 vocab_part_size = full_vocab_size // self.tp_size + chunk_size = self.chunk_size # Calculate vocab partition for this rank vocab_start_index = rank * vocab_part_size @@ -490,14 +493,25 @@ def test_distributed_logprob_forward_and_backward(self): ) # Compute using DistributedLogprob (forward only first) - distributed_log_probs_inference = DistributedLogprob.apply( - vocab_parallel_logits.clone().detach(), # Clone to avoid affecting backward test - target, - vocab_start_index, - vocab_end_index, - self.tp_group, - True, # inference_only=True for forward test - ) + if chunk_size is not None: + distributed_log_probs_inference = ChunkedDistributedLogprob.apply( + vocab_parallel_logits.clone().detach(), # Clone to avoid affecting backward test + target, + vocab_start_index, + vocab_end_index, + chunk_size, + self.tp_group, + True, # inference_only=True for forward test + ) + else: + distributed_log_probs_inference = DistributedLogprob.apply( + vocab_parallel_logits.clone().detach(), # Clone to avoid affecting backward test + target, + vocab_start_index, + vocab_end_index, + self.tp_group, + True, # inference_only=True for forward test + ) # Compare forward results torch.testing.assert_close( @@ 
-700,9 +714,17 @@ def register_distributed_logprob_test_actor(): ) -@pytest.mark.parametrize("tp_size", [1, 2]) +@pytest.mark.parametrize( + "tp_size, chunk_size", + [ + (1, None), + (2, None), + (1, 4), + (2, 4), + ], +) def test_distributed_logprob_all_tests( - register_distributed_logprob_test_actor, tp_size + register_distributed_logprob_test_actor, tp_size, chunk_size ): """Test all DistributedLogprob functionality for a given TP size.""" # Skip if not enough GPUs @@ -718,7 +740,7 @@ def test_distributed_logprob_all_tests( # Create sharding for TP sharding = NamedSharding(layout=list(range(tp_size)), names=["tp"]) - builder = RayWorkerBuilder(actor_fqn, tp_size) + builder = RayWorkerBuilder(actor_fqn, tp_size, chunk_size) worker_group = RayWorkerGroup( cluster=cluster, @@ -728,7 +750,9 @@ def test_distributed_logprob_all_tests( ) # Test 1: Combined Forward and Backward pass - print(f"\n=== Testing TP={tp_size}: Forward & Backward Pass ===") + print( + f"\n=== Testing TP={tp_size} ChunkSize={chunk_size}: Forward & Backward Pass ===" + ) futures = worker_group.run_all_workers_single_data( "test_distributed_logprob_forward_and_backward" ) @@ -743,7 +767,7 @@ def test_distributed_logprob_all_tests( ) # Test 2: Log softmax function - print(f"\n=== Testing TP={tp_size}: Log Softmax ===") + print(f"\n=== Testing TP={tp_size} ChunkSize={chunk_size}: Log Softmax ===") futures = worker_group.run_all_workers_single_data( "test_distributed_log_softmax" ) @@ -756,7 +780,7 @@ def test_distributed_logprob_all_tests( # Test 3: Edge cases (only for TP=2) if tp_size == 2: - print(f"\n=== Testing TP={tp_size}: Edge Cases ===") + print(f"\n=== Testing TP={tp_size} ChunkSize={chunk_size}: Edge Cases ===") futures = worker_group.run_all_workers_single_data("test_edge_cases") results = ray.get(futures) print("Edge cases test completed successfully") diff --git a/tests/unit/models/generation/test_vllm_generation.py b/tests/unit/models/generation/test_vllm_generation.py index 
eae44d8dfe..4133f1d1b7 100644 --- a/tests/unit/models/generation/test_vllm_generation.py +++ b/tests/unit/models/generation/test_vllm_generation.py @@ -23,10 +23,7 @@ from nemo_rl.algorithms.loss_functions import NLLLoss from nemo_rl.algorithms.utils import get_tokenizer from nemo_rl.distributed.batched_data_dict import BatchedDataDict -from nemo_rl.distributed.virtual_cluster import ( - RayVirtualCluster, - _get_node_ip_and_free_port, -) +from nemo_rl.distributed.virtual_cluster import RayVirtualCluster from nemo_rl.models.generation import configure_generation_config from nemo_rl.models.generation.vllm import VllmConfig, VllmGeneration from nemo_rl.models.policy import PolicyConfig @@ -42,7 +39,13 @@ }, "dtype": "bfloat16", "max_new_tokens": 5, # Small number of tokens for testing - "temperature": 0.8, + # Set temperature=1.0 to ensure consistent probability scaling when comparing vLLM and HF policy outputs. + # Note: greedy=True is only used in tests for deterministic behavior and not used in the real training. + # In vLLM, enabling greedy=True disables temperature scaling (temperature is overridden to None). + # The HF policy worker does not currently support greedy=True for get_logprobs. + # Using temperature=1.0 allows us to meaningfully test the average probability multiplicative error between the two implementations, + # while still maintaining the deterministic behavior. 
+ "temperature": 1.0, "top_p": 1.0, "top_k": None, "stop_token_ids": None, @@ -305,17 +308,6 @@ def test_input_data(tokenizer): ) -@pytest.fixture(scope="module", autouse=True) -def skip_tied_weight_check_for_all(): - """Automatically skip tied weight check for all tests in this module.""" - os.environ["NRL_SKIP_TIED_WEIGHT_CHECK"] = "1" - - yield - - # Restore the original value - os.environ.pop("NRL_SKIP_TIED_WEIGHT_CHECK", None) - - def test_vllm_missing_required_config_key(cluster): """Test that an assertion error is raised when a required config key is missing.""" # Create a config missing a required key by removing 'model_name' @@ -340,6 +332,43 @@ def test_vllm_missing_required_config_key(cluster): print(f"Successfully caught missing config key with error: {error_message}") +def test_vllm_top_p_top_k_validation(cluster): + """Test that top_p and top_k validation works correctly with threshold-based logic.""" + # Test that values above thresholds are allowed + config_above_thresholds = deepcopy(basic_vllm_test_config) + config_above_thresholds["top_p"] = 0.99 # Above TOP_P_THRESHOLD + config_above_thresholds["top_k"] = 8000 # Above TOP_K_THRESHOLD + + # Should not raise an error + try: + VllmGeneration(cluster, config_above_thresholds) + print("Successfully initialized with top_p=0.99 and top_k=8000") + except Exception as e: + pytest.fail(f"Should not raise error with values above thresholds: {e}") + + # Test that values below thresholds are rejected + config_below_thresholds = deepcopy(basic_vllm_test_config) + config_below_thresholds["top_p"] = 0.9 # Below TOP_P_THRESHOLD + + with pytest.raises(ValueError) as excinfo: + VllmGeneration(cluster, config_below_thresholds) + + error_message = str(excinfo.value) + assert "top_p sampling with values < 0.99 is not supported" in error_message + print(f"Successfully caught low top_p value with error: {error_message}") + + # Test that low top_k values are rejected + config_low_top_k = deepcopy(basic_vllm_test_config) 
+ config_low_top_k["top_k"] = 7999 # Below TOP_K_THRESHOLD + + with pytest.raises(ValueError) as excinfo: + VllmGeneration(cluster, config_low_top_k) + + error_message = str(excinfo.value) + assert "top_k sampling with values < 8000 is not supported" in error_message + print(f"Successfully caught low top_k value with error: {error_message}") + + def test_vllm_policy_generation(policy, test_input_data, tokenizer): """Test vLLM policy generation capabilities.""" # Test generation @@ -626,29 +655,16 @@ def configure_worker_fixed_seed(num_gpus, bundle_indices=None): torch.cuda.empty_cache() -@pytest.mark.timeout(360) -@pytest.mark.asyncio -@pytest.mark.parametrize("async_engine", [True, False]) -async def test_vllm_generation_with_hf_training(cluster, tokenizer, async_engine): - """1. Use vLLM for generation - 2. Use HF policy for training and logprob computation +async def run_hf_train_process( + lm_policy, vllm_policy, tokenizer, async_engine, colocated, vllm_precision +): + """Validates that the two policies can work together. - This test validates that the two policies can work together. + 1. Use vLLM for generation + 2. 
Use HF policy for training and logprob computation """ - from nemo_rl.models.policy.lm_policy import Policy from tests.unit.test_utils import SimpleNLLLoss - # Create separate configs for each policy - vllm_config = deepcopy(basic_vllm_test_config) - vllm_config["vllm_cfg"]["async_engine"] = async_engine - vllm_config = configure_generation_config(vllm_config, tokenizer) - - dtensor_config = deepcopy(basic_dtensor_test_config) - dtensor_config["train_global_batch_size"] = 4 - - vllm_policy = None - lm_policy = None - try: prompts = [ "Write a story about a magical forest", @@ -680,22 +696,8 @@ async def test_vllm_generation_with_hf_training(cluster, tokenizer, async_engine } ) - # Create both policies - print("Creating vLLM policy...") - vllm_policy = VllmGeneration(cluster, vllm_config) - vllm_policy.finish_generation() - - print("Creating DTensor policy...") - lm_policy = Policy(cluster, dtensor_config, tokenizer) - - print("preparing refit info...") - state_dict_info = lm_policy.prepare_refit_info() - vllm_policy.prepare_refit_info(state_dict_info) - print("refitting vllm policy...") - refit_policy_generation( - lm_policy, vllm_policy, vllm_config["colocated"]["enabled"] - ) + refit_policy_generation(lm_policy, vllm_policy, colocated) # Step 1: Use vLLM for generation print("Using vLLM policy for fast generation...") @@ -760,7 +762,14 @@ async def test_vllm_generation_with_hf_training(cluster, tokenizer, async_engine ) print(f"Average probability multiplicative error: {avg_prob_mult_error}") - assert avg_prob_mult_error <= 1.043, "vLLM and HF logprobs should closely match" + if vllm_precision == "fp8": + assert avg_prob_mult_error <= 1.080, ( + "vLLM and HF logprobs should closely match" + ) + else: + assert avg_prob_mult_error <= 1.043, ( + "vLLM and HF logprobs should closely match" + ) # Step 2: Prepare simplified training data (smaller and with padding removed to prevent OOM) # Use a very small sequence for training to ensure it works @@ -797,7 +806,7 @@ 
async def test_vllm_generation_with_hf_training(cluster, tokenizer, async_engine print(f"Training loss: {results['loss']}") lm_policy.finish_training() - lm_policy.offload_after_refit() + refit_policy_generation(lm_policy, vllm_policy, colocated) # Step 4: Use vLLM for generation again to complete the workflow print("Using vLLM for generation again...") @@ -824,6 +833,118 @@ async def test_vllm_generation_with_hf_training(cluster, tokenizer, async_engine lm_policy.shutdown() +@pytest.mark.timeout(300) +@pytest.mark.asyncio +@pytest.mark.parametrize( + ("async_engine", "cpu_offload", "vllm_precision"), + [ + (True, False, "bfloat16"), + (False, True, "bfloat16"), + (True, False, "fp8"), + (False, True, "fp8"), + ], +) +async def test_vllm_generation_with_hf_training_colocated( + cluster, tokenizer, async_engine, cpu_offload, vllm_precision +): + """This test validates that DTensor policy can work together with colocated vLLM policy.""" + + # Skip the fp8 tests if the GPU is not H100 or newer (compute capability < 9.0) + if vllm_precision == "fp8": + major_capability, _ = torch.cuda.get_device_capability() + if major_capability < 9: + pytest.skip( + f"Skipping FP8 test. GPU compute capability {major_capability}.0 is < 9.0 (H100 required)." 
+ ) + + # Create VllmGeneration Policy + print("Creating vLLM policy...") + vllm_config = deepcopy(basic_vllm_test_config) + vllm_config["vllm_cfg"]["async_engine"] = async_engine + vllm_config["vllm_cfg"]["precision"] = vllm_precision + + vllm_config = configure_generation_config(vllm_config, tokenizer) + vllm_policy = VllmGeneration(cluster, vllm_config) + vllm_policy.finish_generation() + + # Create Policy + print("Creating DTensor policy...") + dtensor_config = deepcopy(basic_dtensor_test_config) + dtensor_config["dtensor_cfg"]["cpu_offload"] = cpu_offload + dtensor_config["train_global_batch_size"] = 4 + lm_policy = Policy(cluster, dtensor_config, tokenizer) + + # Prepare refit info + print("Preparing refit info...") + state_dict_info = lm_policy.prepare_refit_info() + vllm_policy.prepare_refit_info(state_dict_info) + + # Test + await run_hf_train_process( + lm_policy, vllm_policy, tokenizer, async_engine, True, vllm_precision + ) + + +@pytest.mark.timeout(300) +@pytest.mark.asyncio +@pytest.mark.parametrize( + ("async_engine", "cpu_offload", "vllm_precision"), + [ + (True, False, "bfloat16"), + (False, True, "bfloat16"), + (True, False, "fp8"), + (False, True, "fp8"), + ], +) +async def test_vllm_generation_with_hf_training_non_colocated( + policy_cluster_separate, tokenizer, async_engine, cpu_offload, vllm_precision +): + # Skip the fp8 tests if the GPU is not H100 or newer (compute capability < 9.0) + if vllm_precision == "fp8": + major_capability, _ = torch.cuda.get_device_capability() + if major_capability < 9: + pytest.skip( + f"Skipping FP8 test. GPU compute capability {major_capability}.0 is < 9.0 (H100 required)." 
+ ) + + """This test validates that DTensor policy can work together with non-colocated vLLM policy.""" + generation_cluster_separate = get_generation_cluster_separate(1) + + # Create VllmGeneration Policy + print("Creating vLLM policy...") + vllm_config = deepcopy(basic_vllm_test_config) + vllm_config["vllm_cfg"]["async_engine"] = async_engine + vllm_config["vllm_cfg"]["precision"] = vllm_precision + vllm_config["colocated"]["enabled"] = False + vllm_config = configure_generation_config(vllm_config, tokenizer) + vllm_policy = VllmGeneration(generation_cluster_separate, vllm_config) + vllm_policy.finish_generation() + + # Create Policy + print("Creating DTensor policy...") + dtensor_config = deepcopy(basic_dtensor_test_config) + dtensor_config["generation"]["colocated"]["enabled"] = False + dtensor_config["dtensor_cfg"]["cpu_offload"] = cpu_offload + dtensor_config["train_global_batch_size"] = 4 + lm_policy = Policy(policy_cluster_separate, dtensor_config, tokenizer) + + # Refit + # initialize collective communication for update weights + ip, port = policy_cluster_separate.get_master_address_and_port() + futures_train = lm_policy.init_collective(ip, port, world_size=2) + futures_inference = vllm_policy.init_collective(ip, port, world_size=2) + ray.get(futures_train + futures_inference) + + # prepare refit info + state_dict_info = lm_policy.prepare_refit_info() + vllm_policy.prepare_refit_info(state_dict_info) + + # Test + await run_hf_train_process( + lm_policy, vllm_policy, tokenizer, async_engine, False, vllm_precision + ) + + def test_vllm_policy_tensor_parallel(cluster, tokenizer): """Test vLLM policy with tensor parallelism > 1.""" # Configure with tensor_parallel_size=2 @@ -918,17 +1039,27 @@ def test_vllm_generate_text(cluster, tokenizer): @pytest.mark.timeout(180) @pytest.mark.parametrize("tensor_parallel_size", [1, 2]) -@pytest.mark.parametrize("enable_dtensor", [True, False]) +@pytest.mark.parametrize("vllm_precision", ["bfloat16", "fp8"]) def 
test_vllm_weight_update_and_prefix_cache_reset( - cluster, tokenizer, tensor_parallel_size, enable_dtensor + cluster, tokenizer, tensor_parallel_size, vllm_precision ): """Test that the vLLM prefix cache is correctly reset when weights change.""" + + if vllm_precision == "fp8": + major_capability, _ = torch.cuda.get_device_capability() + if major_capability < 9: + pytest.skip( + f"Skipping FP8 test. GPU compute capability {major_capability}.0 is < 9.0 (H100 required)." + ) + from nemo_rl.models.policy.lm_policy import Policy # Create configs vllm_config = deepcopy(basic_vllm_test_config) vllm_config = configure_generation_config(vllm_config, tokenizer, is_eval=True) vllm_config["vllm_cfg"]["tensor_parallel_size"] = tensor_parallel_size + vllm_config["vllm_cfg"]["precision"] = vllm_precision + if tensor_parallel_size > 1: vllm_config["vllm_kwargs"] = {"distributed_executor_backend": "ray"} @@ -1024,8 +1155,7 @@ def test_vllm_weight_update_and_prefix_cache_reset( torch.cuda.empty_cache() -@pytest.mark.parametrize("enable_dtensor", [True, False]) -def test_vllm_weight_update_memory(cluster, tokenizer, enable_dtensor): +def test_vllm_weight_update_memory(cluster, tokenizer): """Test that vLLM streaming weight update and can save memory.""" from nemo_rl.models.policy.lm_policy import Policy @@ -1084,12 +1214,8 @@ def test_vllm_weight_update_memory(cluster, tokenizer, enable_dtensor): assert current_reserved == 0.0, "Memory should be 0 after refit completed" # memory threshold: memory during non-streaming weight update on 0.6B model on 2 GPUs # memory during streaming weight update should less than this baseline threshold - if enable_dtensor: - assert peak_allocated < 4005, "Peak allocated memory should < 4005 MB" - assert peak_reserved < 4016, "Peak reserved memory should < 4016 MB" - else: - assert peak_allocated < 5736, "Peak allocated memory should < 5736 MB" - assert peak_reserved < 5748, "Peak reserved memory should < 5748 MB" + assert peak_allocated < 4005, "Peak 
allocated memory should < 4005 MB" + assert peak_reserved < 4016, "Peak reserved memory should < 4016 MB" # Clean up vllm_policy.shutdown() @@ -1097,10 +1223,7 @@ def test_vllm_weight_update_memory(cluster, tokenizer, enable_dtensor): @pytest.mark.parametrize("is_eval", [True, False]) -@pytest.mark.parametrize("enable_dtensor", [True, False]) -def test_vllm_generation_with_stop( - cluster, test_input_data, tokenizer, is_eval, enable_dtensor -): +def test_vllm_generation_with_stop(cluster, test_input_data, tokenizer, is_eval): """Test vLLM generation with stop.""" from nemo_rl.models.policy.lm_policy import Policy @@ -1200,12 +1323,14 @@ def test_vllm_non_divisible_batch_handling(policy): @pytest.mark.asyncio @pytest.mark.parametrize("async_engine", [True, False]) @pytest.mark.parametrize("tensor_parallel_size", [1, 2]) -async def test_vllm_refit_non_collocated_update_weights( +@pytest.mark.parametrize("policy_type", ["dtensor", "megatron"]) +async def test_vllm_refit_non_colocated_update_weights( policy_cluster_separate, tokenizer, test_input_data, async_engine, tensor_parallel_size, + policy_type, ): # Skip tensor_parallel_size == 2 until we have resources in CI if tensor_parallel_size == 2: @@ -1223,23 +1348,41 @@ async def test_vllm_refit_non_collocated_update_weights( "Test requires at least two GPUs to run policies on separate clusters." 
) - # Create Policy on its own cluster - dtensor_config = deepcopy(basic_dtensor_test_config) - dtensor_config["generation"]["colocated"]["enabled"] = False - lm_policy = Policy(policy_cluster_separate, dtensor_config, tokenizer) + # Get policy config + if policy_type == "dtensor": + lm_config = deepcopy(basic_dtensor_test_config) + else: + assert policy_type == "megatron" + lm_config = get_basic_megatron_test_config(tp=1, pp=1, precision="float32") + lm_config["generation"]["colocated"]["enabled"] = False - # Create VllmGeneration policy on its own cluster + # Get vllm config vllm_config = deepcopy(basic_vllm_test_config) vllm_config = configure_generation_config(vllm_config, tokenizer, is_eval=True) vllm_config["vllm_cfg"]["async_engine"] = async_engine vllm_config["vllm_cfg"]["tensor_parallel_size"] = tensor_parallel_size vllm_config["colocated"]["enabled"] = False + + # Megatron config with Qwen2.5-0.5B + if policy_type == "megatron": + model_name = "Qwen/Qwen2.5-0.5B" + tokenizer = get_tokenizer({"name": model_name}) + + lm_config["model_name"] = model_name + lm_config["tokenizer"]["name"] = model_name + + vllm_config["model_name"] = model_name + vllm_config["tokenizer"]["name"] = model_name + + # Create Policy and VllmGeneration + lm_policy = Policy(policy_cluster_separate, lm_config, tokenizer) vllm_generation = VllmGeneration(generation_cluster_separate, vllm_config) # initialize collective communication for update weights - ip, port = ray.get(_get_node_ip_and_free_port.remote()) - futures_train = lm_policy.init_collective(ip, port, world_size=2) - futures_inference = vllm_generation.init_collective(ip, port, world_size=2) + ip, port = policy_cluster_separate.get_master_address_and_port() + world_size = tensor_parallel_size + 1 + futures_train = lm_policy.init_collective(ip, port, world_size=world_size) + futures_inference = vllm_generation.init_collective(ip, port, world_size=world_size) ray.get(futures_train + futures_inference) # prepare refit info @@ 
-1247,9 +1390,7 @@ async def test_vllm_refit_non_collocated_update_weights( vllm_generation.prepare_refit_info(state_dict_info) print("refitting vllm policy...") - refit_policy_generation( - lm_policy, vllm_generation, vllm_config["colocated"]["enabled"] - ) + refit_policy_generation(lm_policy, vllm_generation, False) # test generate if async_engine: @@ -1258,12 +1399,23 @@ async def test_vllm_refit_non_collocated_update_weights( ) else: outputs = vllm_generation.generate(test_input_data, greedy=True) + output_ids = outputs["output_ids"] generated_texts = tokenizer.batch_decode(output_ids, skip_special_tokens=True) - assert generated_texts == [ - "Hello, my name is Lina. I'm", - "The capital of France is Paris. The capital of", - ], "Output should be the same as the expected output" + + if policy_type == "dtensor": + expected_texts = [ + "Hello, my name is Lina. I'm", + "The capital of France is Paris. The capital of", + ] + else: + expected_texts = [ + "Hello, my name is Kaitlin and I", + "The capital of France is Paris. It is the", + ] + assert generated_texts == expected_texts, ( + "Output should be the same as the expected output" + ) # Clean up vllm_generation.shutdown() @@ -1276,14 +1428,23 @@ async def test_vllm_refit_non_collocated_update_weights( @pytest.mark.timeout(360) @pytest.mark.parametrize("tensor_parallel_size", [1, 2]) +@pytest.mark.parametrize("vllm_precision", ["bfloat16", "fp8"]) def test_vllm_generation_with_megatron_training( - cluster, tokenizer, tensor_parallel_size + cluster, tokenizer, tensor_parallel_size, vllm_precision ): """Test that uses vLLM for generation and Megatron policy for training and logprob computation. This test validates that vLLM and Megatron policies can work together. """ + # Skip the fp8 tests if the GPU is not H100 or newer (compute capability < 9.0) + if vllm_precision == "fp8": + major_capability, _ = torch.cuda.get_device_capability() + if major_capability < 9: + pytest.skip( + f"Skipping FP8 test. 
GPU compute capability {major_capability}.0 is < 9.0 (H100 required)." + ) + if cluster.num_gpus_per_node < tensor_parallel_size: pytest.skip(f"Need at least {tensor_parallel_size} GPUs for this test") @@ -1298,6 +1459,7 @@ def test_vllm_generation_with_megatron_training( vllm_config["model_name"] = model_name vllm_config["tokenizer"]["name"] = model_name vllm_config["vllm_cfg"]["async_engine"] = False + vllm_config["vllm_cfg"]["precision"] = vllm_precision vllm_config = configure_generation_config(vllm_config, test_tokenizer) # Megatron config with same model diff --git a/tests/unit/models/generation/test_vllm_large_model.py b/tests/unit/models/generation/test_vllm_large_model.py index 7b93ef46d1..1b7387e832 100644 --- a/tests/unit/models/generation/test_vllm_large_model.py +++ b/tests/unit/models/generation/test_vllm_large_model.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os from copy import deepcopy import pytest @@ -63,14 +62,6 @@ } -@pytest.fixture(scope="module", autouse=True) -def skip_tied_weight_check(): - """Automatically skip tied weight check for all tests in this module.""" - os.environ["NRL_SKIP_TIED_WEIGHT_CHECK"] = "1" - yield - os.environ.pop("NRL_SKIP_TIED_WEIGHT_CHECK", None) - - @pytest.fixture(scope="function") def two_node_cluster(): """Create a virtual cluster with 2 nodes for testing large models.""" diff --git a/tests/unit/models/huggingface/test_common.py b/tests/unit/models/huggingface/test_common.py index 95da64b0b4..e1f7b948aa 100644 --- a/tests/unit/models/huggingface/test_common.py +++ b/tests/unit/models/huggingface/test_common.py @@ -39,7 +39,6 @@ ) def test_gemma_models(model_name): assert is_gemma_model(model_name) - assert ModelFlag.SKIP_DTENSOR_TIED_WEIGHTS_CHECK.matches(model_name) assert ModelFlag.VLLM_LOAD_FORMAT_AUTO.matches(model_name) @@ -54,5 +53,4 @@ def test_gemma_models(model_name): ) def test_non_gemma_models(model_name): assert 
not is_gemma_model(model_name) - assert not ModelFlag.SKIP_DTENSOR_TIED_WEIGHTS_CHECK.matches(model_name) assert not ModelFlag.VLLM_LOAD_FORMAT_AUTO.matches(model_name) diff --git a/tests/unit/models/megatron/converters/test_converters_common.py b/tests/unit/models/megatron/converters/test_converters_common.py new file mode 100755 index 0000000000..c8731eb573 --- /dev/null +++ b/tests/unit/models/megatron/converters/test_converters_common.py @@ -0,0 +1,252 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest.mock import Mock, patch + +import pytest +import torch + +try: + from nemo_rl.models.megatron.converters.common import ( + get_global_expert_num, + get_global_key_from_local_key, + get_global_layer_num, + get_local_expert_num, + get_local_layer_num, + split_fc1_etp, + split_fc1_tp, + split_qkv_bias_gpu, + split_qkv_gpu, + update_transforms_for_nemorl, + ) +except ImportError: + pass + +# Apply mcore marker to all tests in this module +pytestmark = pytest.mark.mcore + + +class TestLayerNumberFunctions: + """Test functions related to layer number extraction and conversion.""" + + def test_get_local_layer_num_valid(self): + """Test get_local_layer_num with valid layer keys.""" + assert get_local_layer_num("layers.5.attention.weight") == 5 + assert get_local_layer_num("decoder.layers.10.mlp.weight") == 10 + assert get_local_layer_num("model.layers.0.self_attn.weight") == 0 + + def test_get_local_layer_num_invalid(self): + """Test get_local_layer_num with invalid layer keys.""" + assert get_local_layer_num("attention.weight") is None + assert get_local_layer_num("layers.abc.weight") is None + assert get_local_layer_num("layers.") is None + + def test_get_global_layer_num_pp(self): + """Test get_global_layer_num with simple pipeline configuration.""" + mock_cfg = Mock() + mock_cfg.num_layers = 10 + mock_cfg.num_layers_in_first_pipeline_stage = 4 + mock_cfg.num_layers_in_last_pipeline_stage = 3 + + with patch( + "nemo_rl.models.megatron.converters.common.parallel_state" + ) as mock_ps: + mock_ps.get_pipeline_model_parallel_rank.return_value = 1 + mock_ps.get_pipeline_model_parallel_world_size.return_value = 3 + + result = get_global_layer_num("layers.2.weight", mock_cfg) + assert result == 6 + + +class TestExpertNumberFunctions: + """Test functions related to expert number extraction and conversion.""" + + def test_get_local_expert_num_valid(self): + """Test get_local_expert_num with valid expert keys.""" + assert 
get_local_expert_num("layers.0.mlp.experts.weight2") == 2 + assert get_local_expert_num("decoder.layers.1.experts.weight5") == 5 + assert get_local_expert_num("model.layers.0.experts.weight0") == 0 + + def test_get_local_expert_num_invalid(self): + """Test get_local_expert_num with invalid expert keys.""" + assert get_local_expert_num("layers.0.mlp.weight") is None + assert get_local_expert_num("layers.0.mlp.experts.2._extra_state") is None + + def test_get_global_expert_num(self): + """Test get_global_expert_num with expert parallel configuration.""" + mock_cfg = Mock() + mock_cfg.num_moe_experts = 8 + + with patch( + "nemo_rl.models.megatron.converters.common.parallel_state" + ) as mock_ps: + mock_ps.get_expert_model_parallel_rank.return_value = 1 + mock_ps.get_expert_model_parallel_world_size.return_value = 2 + + result = get_global_expert_num("layers.0.mlp.experts.weight2", mock_cfg) + assert result == 6 # 8 // 2 + 2 + + +class TestKeyConversionFunctions: + """Test functions related to key conversion between local and global.""" + + def test_get_global_key_from_local_key_layer_only(self): + """Test key conversion with only layer numbers.""" + mock_cfg = Mock() + mock_cfg.num_layers = 12 + mock_cfg.num_layers_in_first_pipeline_stage = None + mock_cfg.num_layers_in_last_pipeline_stage = None + + with patch( + "nemo_rl.models.megatron.converters.common.parallel_state" + ) as mock_ps: + mock_ps.get_pipeline_model_parallel_rank.return_value = 1 + mock_ps.get_pipeline_model_parallel_world_size.return_value = 2 + + result = get_global_key_from_local_key( + "layers.3.attention.weight", mock_cfg + ) + assert result == "layers.9.attention.weight" + + def test_get_global_key_from_local_key_expert_and_layer(self): + """Test key conversion with only expert numbers.""" + mock_cfg = Mock() + mock_cfg.num_moe_experts = 8 + mock_cfg.num_layers = 12 + mock_cfg.num_layers_in_first_pipeline_stage = None + mock_cfg.num_layers_in_last_pipeline_stage = None + + with patch( + 
"nemo_rl.models.megatron.converters.common.parallel_state" + ) as mock_ps: + mock_ps.get_expert_model_parallel_rank.return_value = 1 + mock_ps.get_expert_model_parallel_world_size.return_value = 2 + + mock_ps.get_pipeline_model_parallel_rank.return_value = 1 + mock_ps.get_pipeline_model_parallel_world_size.return_value = 3 + + result = get_global_key_from_local_key( + "layers.0.mlp.experts.weight2", mock_cfg + ) + assert result == "layers.4.mlp.experts.weight6" + + +class TestTensorSplittingFunctions: + """Test functions related to tensor splitting operations.""" + + def test_split_fc1_tp(self): + """Test split_fc1_tp function.""" + mock_ctx = Mock() + mock_ctx.source.config.tensor_model_parallel_size = 2 + + # Create a tensor with shape (4, 10) representing 2 TP ranks with 2 components each + linear_fc1 = torch.randn(4, 10) + + gate_proj, up_proj = split_fc1_tp(mock_ctx, linear_fc1) + + assert gate_proj.shape == (2, 10) + assert up_proj.shape == (2, 10) + assert torch.allclose(gate_proj, linear_fc1[::2]) + assert torch.allclose(up_proj, linear_fc1[1::2]) + + def test_split_fc1_etp(self): + """Test split_fc1_etp function.""" + mock_ctx = Mock() + mock_ctx.source.config.expert_tensor_parallel_size = 2 + + # Create a tensor with shape (4, 10) representing 2 ETP ranks with 2 components each + linear_fc1 = torch.randn(4, 10) + + gate_proj, up_proj = split_fc1_etp(mock_ctx, linear_fc1) + + assert gate_proj.shape == (2, 10) + assert up_proj.shape == (2, 10) + assert torch.allclose(gate_proj, linear_fc1[::2]) + assert torch.allclose(up_proj, linear_fc1[1::2]) + + def test_split_qkv_gpu(self): + """Test split_qkv_gpu function.""" + mock_ctx = Mock() + mock_ctx.source.config.num_attention_heads = 8 + mock_ctx.source.config.num_query_groups = 2 + mock_ctx.source.config.kv_channels = 16 + + # Create QKV tensor: (heads + 2*groups) * head_size * hidden_size + qkv_total_dim = 8 + 2 * 2 # 12 + linear_qkv = torch.randn(qkv_total_dim, 16, 64) + + q_proj, k_proj, v_proj = 
split_qkv_gpu(mock_ctx, linear_qkv) + + # Q should have 8 heads * 16 channels = 128 + assert q_proj.shape == (128, 64) + # K and V should have 2 groups * 16 channels = 32 each + assert k_proj.shape == (32, 64) + assert v_proj.shape == (32, 64) + + def test_split_qkv_bias_gpu(self): + """Test split_qkv_bias_gpu function.""" + mock_ctx = Mock() + mock_ctx.source.config.num_attention_heads = 8 + mock_ctx.source.config.num_query_groups = 2 + mock_ctx.source.config.kv_channels = 16 + + # Create QKV bias tensor: (heads + 2*groups) * head_size + qkv_total_dim = 8 + 2 * 2 # 12 + qkv_bias = torch.randn(qkv_total_dim, 16) + + q_bias, k_bias, v_bias = split_qkv_bias_gpu(mock_ctx, qkv_bias) + + # Q should have 8 heads * 16 channels = 128 + assert q_bias.shape == (128,) + # K and V should have 2 groups * 16 channels = 32 each + assert k_bias.shape == (32,) + assert v_bias.shape == (32,) + + +class TestTransformUpdateFunctions: + """Test functions related to transform updates.""" + + def test_update_transforms_for_nemorl(self): + """Test update_transforms_for_nemorl function.""" + # Create mock transforms + mock_transform1 = Mock() + mock_transform1.transform.__name__ = "split_fc1" + mock_transform1.source_key = "layers.0.mlp.experts.0.linear_fc1.weight" + + mock_transform2 = Mock() + mock_transform2.transform.__name__ = "split_fc1" + mock_transform2.source_key = "layers.0.mlp.shared_experts.linear_fc1.weight" + + mock_transform3 = Mock() + mock_transform3.transform.__name__ = "split_qkv" + + mock_transform4 = Mock() + mock_transform4.transform.__name__ = "split_qkv_bias" + + transforms = [ + mock_transform1, + mock_transform2, + mock_transform3, + mock_transform4, + ] + + updated_transforms = update_transforms_for_nemorl(transforms) + + # Check that expert transforms use split_fc1_etp + assert updated_transforms[0].transform == split_fc1_etp + # Check that non-expert transforms use split_fc1_tp + assert updated_transforms[1].transform == split_fc1_tp + # Check that qkv 
transforms are updated + assert updated_transforms[2].transform == split_qkv_gpu + assert updated_transforms[3].transform == split_qkv_bias_gpu diff --git a/tests/unit/models/policy/test_dtensor_worker.py b/tests/unit/models/policy/test_dtensor_worker.py index ba33408a8d..40243e2b5a 100644 --- a/tests/unit/models/policy/test_dtensor_worker.py +++ b/tests/unit/models/policy/test_dtensor_worker.py @@ -11,16 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import os import pprint import pytest import ray import torch - -# Define a custom marker for model configuration tests -pytestmark = pytest.mark.modelconfig - from transformers import AutoModelForCausalLM from nemo_rl.algorithms.interfaces import LossFunction @@ -38,10 +33,11 @@ def create_test_config( model_name: str, tp: int = 1, cp: int = 1, - sequence_parallel: bool = False, + sp: bool = False, cpu_offload: bool = False, activation_checkpointing: bool = False, custom_parallel_plan: str | None = None, + dtensor_v2: bool = False, ) -> PolicyConfig: return { "model_name": model_name, @@ -69,9 +65,10 @@ def create_test_config( }, }, "dtensor_cfg": { + **({"_v2": dtensor_v2} if dtensor_v2 else {}), "enabled": True, "cpu_offload": cpu_offload, - "sequence_parallel": sequence_parallel, + "sequence_parallel": sp, "activation_checkpointing": activation_checkpointing, "tensor_parallel_size": tp, "context_parallel_size": cp, @@ -107,17 +104,6 @@ def create_test_config( } -@pytest.fixture(scope="module", autouse=True) -def skip_tied_weight_check_for_all(): - """Automatically skip tied weight check for all tests in this module.""" - os.environ["NRL_SKIP_TIED_WEIGHT_CHECK"] = "1" - - yield - - # Restore the original value - os.environ.pop("NRL_SKIP_TIED_WEIGHT_CHECK", None) - - @pytest.fixture(scope="module") def two_gpu_virtual_cluster(): cluster_name = "test" @@ -144,9 +130,10 @@ def 
gc_collect(): @pytest.fixture -def policy_setup(two_gpu_virtual_cluster, tiny_llama_model_path): +def policy_setup(request, two_gpu_virtual_cluster, tiny_llama_model_path): """Setup and teardown for policy tests - creates a virtual cluster and policy.""" - config = create_test_config(tiny_llama_model_path) + use_v2 = request.param if hasattr(request, "param") else False + config = create_test_config(tiny_llama_model_path, dtensor_v2=use_v2) tokenizer = get_tokenizer(config["tokenizer"]) config["generation"] = configure_generation_config(config["generation"], tokenizer) @@ -161,6 +148,7 @@ def policy_setup(two_gpu_virtual_cluster, tiny_llama_model_path): @pytest.mark.hf_gated @pytest.mark.timeout(360) +@pytest.mark.parametrize("policy_setup", [True, False], indirect=True) def test_lm_policy_init(policy_setup): policy = policy_setup @@ -240,11 +228,30 @@ def test_lm_policy_init(policy_setup): @pytest.fixture def training_setup(request, two_gpu_virtual_cluster): """Setup and teardown specifically for training tests.""" + # Get the use_v2 parameter from the test function + use_v2 = getattr(request.function, "pytestmark", []) + use_v2_value = False + for mark in use_v2: + if ( + hasattr(mark, "args") + and len(mark.args) > 1 + and "use_v2" in str(mark.args[0]) + ): + for param_set in mark.args[1]: + if isinstance(param_set, bool): + use_v2_value = param_set + break + + # If multiple parametrize decorators, we need to check the node id + if hasattr(request, "node") and hasattr(request.node, "callspec"): + if "use_v2" in request.node.callspec.params: + use_v2_value = request.node.callspec.params["use_v2"] + ( model_fixture_name, tp, cp, - sequence_parallel, + sp, cpu_offload, activation_checkpointing, ) = request.param @@ -257,11 +264,17 @@ def training_setup(request, two_gpu_virtual_cluster): try: config = create_test_config( - model_name, tp, cp, sequence_parallel, cpu_offload, activation_checkpointing + model_name, + tp, + cp, + sp, + cpu_offload, + 
activation_checkpointing, + dtensor_v2=use_v2_value, ) tokenizer = get_tokenizer(config["tokenizer"]) print( - f"Creating training Policy with tp={tp}, cpu_offload={cpu_offload}, sequence_parallel={sequence_parallel}, activation_checkpointing={activation_checkpointing}..." + f"Creating training Policy with tp={tp}, cpu_offload={cpu_offload}, sequence_parallel={sp}, activation_checkpointing={activation_checkpointing}..." ) policy = Policy( cluster=two_gpu_virtual_cluster, @@ -309,6 +322,7 @@ def training_setup(request, two_gpu_virtual_cluster): @pytest.mark.hf_gated @pytest.mark.timeout(360) +@pytest.mark.parametrize("use_v2", [True, False]) @pytest.mark.parametrize( "training_setup", [ @@ -340,10 +354,17 @@ def training_setup(request, two_gpu_virtual_cluster): ("tiny_gemma3_model_path", 1, 1, False, True, True), ("tiny_gemma3_model_path", 1, 1, True, True, True), # CP doesn't support gemma3 due to spda input has attent_mask != None. + # Nemotron-H doesn't support SP https://github.com/NVIDIA-NeMo/RL/issues/881 + # ("tiny_nemotron5_h_model_path", 1, 1, True, True, False), + # ("tiny_nemotron5_h_model_path", 1, 1, True, False, True), + # ("tiny_nemotron5_h_model_path", 1, 1, True, True, True), + ("tiny_nemotron5_h_model_path", 1, 1, False, False, False), + ("tiny_nemotron5_h_model_path", 1, 1, False, True, True), + # nemotron5_h doesn't support cp ], indirect=True, ) -def test_dtensor_worker_training(training_setup): +def test_dtensor_worker_training(use_v2, training_setup): def verify_loss_tensor(loss_tensor): assert not torch.isnan(loss_tensor).any(), "Loss should not be NaN" assert not torch.isinf(loss_tensor).any(), "Loss should not be Inf" @@ -377,15 +398,43 @@ def verify_loss_tensor(loss_tensor): # Verify loss changed between iterations (model parameters were updated) assert losses[0] > losses[-1], "Loss should decrease over training iterations" + # Verify the train function returns the performance metrics + + if policy.flops_tracker is not None: + assert 
"total_flops" in results and isinstance( + results["total_flops"], (int, float) + ), "training backend should report total_flops" + assert results["total_flops"] > 0, "total_flops should be positive" + assert "num_ranks" in results and isinstance(results["num_ranks"], int), ( + "training backend should report num_ranks" + ) + assert results["num_ranks"] > 0, "num_ranks should be positive" + + # we don't always require theoretical_tflops since the data about the GPU + # is not always available. + if "theoretical_tflops" in results: + assert isinstance(results["theoretical_tflops"], (int, float)), ( + "training backend should report theoretical_tflops" + ) + assert results["theoretical_tflops"] > 0, ( + "theoretical_tflops should be positive" + ) + @pytest.fixture def logprob_setup(request, two_gpu_virtual_cluster): """Setup and teardown specifically for training tests.""" + # Get the use_v2 parameter from the test function + use_v2_value = False + if hasattr(request, "node") and hasattr(request.node, "callspec"): + if "use_v2" in request.node.callspec.params: + use_v2_value = request.node.callspec.params["use_v2"] + ( model_fixture_name, tp, cp, - sequence_parallel, + sp, cpu_offload, activation_checkpointing, ) = request.param @@ -397,11 +446,17 @@ def logprob_setup(request, two_gpu_virtual_cluster): try: config = create_test_config( - model_name, tp, cp, sequence_parallel, cpu_offload, activation_checkpointing + model_name, + tp, + cp, + sp, + cpu_offload, + activation_checkpointing, + dtensor_v2=use_v2_value, ) tokenizer = get_tokenizer(config["tokenizer"]) print( - f"Creating logprob Policy with tp={tp}, cpu_offload={cpu_offload}, sequence_parallel={sequence_parallel}, activation_checkpointing={activation_checkpointing}..." + f"Creating logprob Policy with tp={tp}, cpu_offload={cpu_offload}, sequence_parallel={sp}, activation_checkpointing={activation_checkpointing}..." 
) policy = Policy( cluster=two_gpu_virtual_cluster, @@ -468,6 +523,7 @@ def logprob_setup(request, two_gpu_virtual_cluster): @pytest.mark.hf_gated @pytest.mark.timeout(360) +@pytest.mark.parametrize("use_v2", [True, False]) @pytest.mark.parametrize( "logprob_setup", [ @@ -492,7 +548,7 @@ def logprob_setup(request, two_gpu_virtual_cluster): ], indirect=True, ) -def test_dtensor_worker_logprob_tp2_or_cp2_matches_unsharded(logprob_setup): +def test_dtensor_worker_logprob_tp2_or_cp2_matches_unsharded(use_v2, logprob_setup): policy, data, logprobs = logprob_setup # Verify resources were created properly assert policy is not None, "Policy was not created properly" @@ -510,8 +566,9 @@ def test_dtensor_worker_logprob_tp2_or_cp2_matches_unsharded(logprob_setup): @pytest.mark.hf_gated +@pytest.mark.parametrize("use_v2", [True, False]) def test_dtensor_tp_and_tied_model_with_custom_parallel_plan( - two_gpu_virtual_cluster, tiny_llama_tied_model_path + use_v2, two_gpu_virtual_cluster, tiny_llama_tied_model_path ): """Test that DTensor with a tp > 1 and a tied model with a custom parallel plan works.""" from torch.distributed.tensor.parallel import ColwiseParallel @@ -525,10 +582,11 @@ def test_dtensor_tp_and_tied_model_with_custom_parallel_plan( model_name=tiny_llama_tied_model_path, tp=2, cp=1, - sequence_parallel=False, + sp=False, cpu_offload=False, activation_checkpointing=False, custom_parallel_plan=custom_parallel_plan, + dtensor_v2=use_v2, ) tokenizer = get_tokenizer(config["tokenizer"]) diff --git a/tests/unit/models/policy/test_dtensor_worker_v2.py b/tests/unit/models/policy/test_dtensor_worker_v2.py new file mode 100644 index 0000000000..a16e3afda5 --- /dev/null +++ b/tests/unit/models/policy/test_dtensor_worker_v2.py @@ -0,0 +1,243 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import ray + +from nemo_rl.distributed.virtual_cluster import RayVirtualCluster +from nemo_rl.models.policy import PolicyConfig + + +def create_test_config( + model_name: str, + tp: int = 1, + cp: int = 1, + sp: bool = False, + cpu_offload: bool = False, + activation_checkpointing: bool = False, + custom_parallel_plan: str | None = None, + dtensor_v2: bool = False, +) -> PolicyConfig: + return { + "model_name": model_name, + "tokenizer": {"name": model_name}, + "generation_batch_size": 1, # Small batch size for testing + "train_global_batch_size": 4, + "train_micro_batch_size": 1, + "learning_rate": 5e-6, + "logprob_batch_size": 1, + "precision": "float32", + "generation": { + "backend": "hf", + "temperature": 1.0, + "max_new_tokens": 16, # Small number of tokens for testing + "top_p": 1.0, + "top_k": None, + "stop_token_ids": None, + "stop_strings": None, + "colocated": { + "enabled": True, + "resources": { + "gpus_per_node": None, + "num_nodes": None, + }, + }, + }, + "dtensor_cfg": { + **({"_v2": dtensor_v2} if dtensor_v2 else {}), + "enabled": True, + "cpu_offload": cpu_offload, + "sequence_parallel": sp, + "activation_checkpointing": activation_checkpointing, + "tensor_parallel_size": tp, + "context_parallel_size": cp, + "custom_parallel_plan": custom_parallel_plan, + }, + "dynamic_batching": { + "enabled": True, + "train_mb_tokens": 128, + "logprob_mb_tokens": 128, + "sequence_length_round": 4, + }, + "sequence_packing": { + "enabled": False, + }, + "optimizer": { + "name": "torch.optim.AdamW", + "kwargs": { + "lr": 5e-6, + 
"weight_decay": 0.01, + "betas": [0.9, 0.999], + "eps": 1e-8, + "foreach": False, + "fused": False, + }, + }, + "scheduler": { + "name": "torch.optim.lr_scheduler.CosineAnnealingLR", + "kwargs": { + "T_max": 100, + }, + }, + "max_grad_norm": 1.0, + } + + +@pytest.fixture(scope="module") +def two_gpu_virtual_cluster(): + cluster_name = "test" + print(f"Creating virtual cluster '{cluster_name}'...") + cluster = RayVirtualCluster( + name=cluster_name, + bundle_ct_per_node_list=[2], # Use tp bundles, one per GPU + use_gpus=True, + num_gpus_per_node=2, # Using tp GPUs + max_colocated_worker_groups=1, # Only one worker group + ) + yield cluster + print("Shutting down virtual cluster...") + cluster.shutdown() + + +from nemo_rl.algorithms.utils import get_tokenizer +from nemo_rl.models.policy.lm_policy import Policy + + +def compare_model_configs(config_v1: dict, config_v2: dict) -> list[str]: + """ + Compare two model configurations and return a list of discrepancies. + + Args: + config_v1: Model config from dtensor worker v1 + config_v2: Model config from dtensor worker v2 + + Returns: + List of discrepancy descriptions. Empty list if configs are equivalent. 
+ """ + discrepancies = [] + + def compare_dicts(d1, d2, path=""): + """Recursively compare two dictionaries.""" + all_keys = set(d1.keys()) | set(d2.keys()) + + for key in all_keys: + current_path = f"{path}.{key}" if path else key + + if key not in d1: + discrepancies.append(f"Key '{current_path}' missing in v1 config") + elif key not in d2: + discrepancies.append(f"Key '{current_path}' missing in v2 config") + else: + val1, val2 = d1[key], d2[key] + + if isinstance(val1, dict) and isinstance(val2, dict): + compare_dicts(val1, val2, current_path) + elif val1 != val2: + discrepancies.append( + f"Value mismatch at '{current_path}': v1={val1}, v2={val2}" + ) + + compare_dicts(config_v1, config_v2) + return discrepancies + + +@pytest.mark.hf_gated +@pytest.mark.parametrize( + "model_fixture_name,tp,cp,sp,cpu_offload,activation_checkpointing", + [ + # TP=2, CP=1 + ("tiny_qwen2_model_path", 2, 1, False, False, False), + ("tiny_llama_model_path", 2, 1, False, False, False), + ("tiny_qwen3_model_path", 2, 1, False, False, False), + ("tiny_gemma3_model_path", 2, 1, False, False, False), + # TP=1, CP=2 + ("tiny_qwen2_model_path", 1, 2, False, False, False), + ("tiny_llama_model_path", 1, 2, False, False, False), + ("tiny_qwen3_model_path", 1, 2, False, False, False), + ], +) +def test_dtensor_worker_v1_v2_model_config_equivalence( + request, + two_gpu_virtual_cluster, # noqa: F811 + model_fixture_name, + tp, + cp, + sp, + cpu_offload, + activation_checkpointing, +): + """Test that dtensor worker v1 and v2 produce equivalent model configurations.""" + # Get the actual model path from the fixture name + model_name = request.getfixturevalue(model_fixture_name) + # Create v1 configuration + config_v1 = create_test_config( + model_name=model_name, + tp=tp, + cp=cp, + sp=sp, + cpu_offload=cpu_offload, + activation_checkpointing=activation_checkpointing, + dtensor_v2=False, # Use v1 worker + ) + # Create and test v1 policy first + print("Creating policy with v1 worker...") + 
policy_v1 = Policy( + tokenizer=get_tokenizer(config_v1["tokenizer"]), + config=config_v1, + init_optimizer=False, + init_reference_model=False, + cluster=two_gpu_virtual_cluster, + name_prefix="lm_policy_v1", + ) + + model_config_v1 = ray.get( + policy_v1.worker_group.workers[0].return_model_config.remote() + ) + policy_v1.shutdown() + + # Create v2 configuration + config_v2 = create_test_config( + model_name=model_name, + tp=tp, + cp=cp, + sp=sp, + cpu_offload=cpu_offload, + activation_checkpointing=activation_checkpointing, + dtensor_v2=True, # Use v2 worker + ) + policy_v2 = Policy( + tokenizer=get_tokenizer(config_v2["tokenizer"]), + config=config_v2, + init_optimizer=False, + init_reference_model=False, + cluster=two_gpu_virtual_cluster, + name_prefix="lm_policy_v2", + ) + + model_config_v2 = ray.get( + policy_v2.worker_group.workers[0].return_model_config.remote() + ) + policy_v2.shutdown() + + config_v1_dict = vars(model_config_v1) + config_v2_dict = vars(model_config_v2) + config_v1_dict.pop("nemo_version", None) + config_v2_dict.pop("nemo_version", None) + config_v1_dict.pop("pad_token_id", None) + config_v2_dict.pop("pad_token_id", None) + + discrepancies = compare_model_configs(config_v1_dict, config_v2_dict) + assert not discrepancies, ( + f"Model configurations differ between v1 and v2 approaches for {model_name}" + ) diff --git a/tests/unit/models/policy/test_megatron_worker.py b/tests/unit/models/policy/test_megatron_worker.py index 38607ba59f..48d315137c 100644 --- a/tests/unit/models/policy/test_megatron_worker.py +++ b/tests/unit/models/policy/test_megatron_worker.py @@ -13,13 +13,11 @@ # limitations under the License. 
import os import tempfile +from typing import Optional import pytest import torch -# Define a custom marker for model configuration tests -pytestmark = pytest.mark.modelconfig - from nemo_rl.algorithms.interfaces import LossFunction from nemo_rl.algorithms.loss_functions import ClippedPGLossFn, DPOLossFn, NLLLoss from nemo_rl.algorithms.utils import get_tokenizer @@ -40,6 +38,8 @@ def create_megatron_test_config( generation_backend: str = "megatron", sequence_parallel: bool = False, converter_type: str = "LlamaForCausalLM", + logprob_chunk_size: Optional[int] = None, + defer_fp32_logits: Optional[bool] = None, ) -> PolicyConfig: """Create a test config for Megatron policy worker.""" return { @@ -50,6 +50,7 @@ def create_megatron_test_config( "train_micro_batch_size": 2, "learning_rate": 5e-6, "logprob_batch_size": 2, + "logprob_chunk_size": logprob_chunk_size, "precision": precision, "generation": { "backend": generation_backend, @@ -95,6 +96,7 @@ def create_megatron_test_config( "moe_router_load_balancing_type": "none", "moe_router_bias_update_rate": 0.0, "apply_rope_fusion": True, + "defer_fp32_logits": defer_fp32_logits, "optimizer": { "optimizer": "adam", "lr": 5.0e-6, @@ -133,14 +135,6 @@ def create_megatron_test_config( } -@pytest.fixture(scope="module", autouse=True) -def skip_tied_weight_check_for_all(): - """Automatically skip tied weight check for all tests in this module.""" - os.environ["NRL_SKIP_TIED_WEIGHT_CHECK"] = "1" - yield - os.environ.pop("NRL_SKIP_TIED_WEIGHT_CHECK", None) - - @pytest.fixture(scope="function") def gc_collect(): """Helper function to force garbage collection after a test""" @@ -387,6 +381,26 @@ def verify_loss_tensor(loss_tensor): # Verify loss changed between iterations (model parameters were updated) assert losses[0] > losses[-1], "Loss should decrease over training iterations" + if policy.flops_tracker is not None: + assert "total_flops" in results and isinstance( + results["total_flops"], (int, float) + ), "training backend 
should report total_flops" + assert results["total_flops"] > 0, "total_flops should be positive" + assert "num_ranks" in results and isinstance(results["num_ranks"], int), ( + "training backend should report num_ranks" + ) + assert results["num_ranks"] > 0, "num_ranks should be positive" + + # we don't always require theoretical_tflops since the data about the GPU + # is not always available. + if "theoretical_tflops" in results: + assert "theoretical_tflops" in results and isinstance( + results["theoretical_tflops"], (int, float) + ), "training backend should report theoretical_tflops" + assert results["theoretical_tflops"] > 0, ( + "theoretical_tflops should be positive" + ) + @pytest.fixture def generation_setup(request, tiny_llama_model_path): @@ -487,7 +501,7 @@ def generation_setup(request, tiny_llama_model_path): cluster.shutdown() -@pytest.mark.skip(reason="Skipping megatorn generation tests for now") +@pytest.mark.skip(reason="Skipping megatron generation tests for now") @pytest.mark.timeout(240) @pytest.mark.parametrize( "generation_setup", @@ -542,9 +556,23 @@ def logprob_setup(request): """Setup and teardown specifically for logprob tests.""" # Parse parameters: (num_gpus, tp, pp, model_fixture_name) if hasattr(request, "param") and request.param is not None: - num_gpus, tp, pp, model_fixture_name = request.param + ( + num_gpus, + tp, + pp, + logprob_chunk_size, + defer_fp32_logits, + model_fixture_name, + ) = request.param else: - num_gpus, tp, pp, model_fixture_name = 2, 1, 1, "tiny_llama_model_path" + ( + num_gpus, + tp, + pp, + logprob_chunk_size, + defer_fp32_logits, + model_fixture_name, + ) = (2, 1, 1, None, None, "tiny_llama_model_path") # Get the actual model path from the requested fixture model_name = request.getfixturevalue(model_fixture_name) @@ -579,6 +607,8 @@ def logprob_setup(request): tp=tp, pp=pp, converter_type=converter_type, + logprob_chunk_size=logprob_chunk_size, + defer_fp32_logits=defer_fp32_logits, ) tokenizer = 
get_tokenizer(config["tokenizer"]) config["generation"] = configure_generation_config( @@ -627,14 +657,35 @@ def logprob_setup(request): @pytest.mark.parametrize( "logprob_setup", [ - # (num_gpus, tp, pp, model_fixture_name) - (2, 1, 1, "tiny_llama_model_path"), - (2, 2, 1, "tiny_llama_model_path"), - (2, 1, 1, "tiny_qwen2_model_path"), - (2, 2, 1, "tiny_qwen2_model_path"), + # (num_gpus, tp, pp, chunk sz, defer fp32, model_fixture_name) + (2, 1, 1, None, None, "tiny_llama_model_path"), + (2, 2, 1, None, None, "tiny_llama_model_path"), + (2, 1, 1, None, None, "tiny_qwen2_model_path"), + (2, 2, 1, None, None, "tiny_qwen2_model_path"), + (2, 1, 1, None, True, "tiny_llama_model_path"), + (2, 2, 1, None, True, "tiny_llama_model_path"), + (2, 1, 1, None, True, "tiny_qwen2_model_path"), + (2, 2, 1, None, True, "tiny_qwen2_model_path"), + (2, 1, 1, 16, True, "tiny_llama_model_path"), + (2, 2, 1, 16, True, "tiny_llama_model_path"), + (2, 1, 1, 16, True, "tiny_qwen2_model_path"), + (2, 2, 1, 16, True, "tiny_qwen2_model_path"), ], indirect=True, - ids=["2gpu_dp2_llama", "2gpu_tp2_llama", "2gpu_dp2_qwen2", "2gpu_tp2_qwen2"], + ids=[ + "2gpu_dp2_llama", + "2gpu_tp2_llama", + "2gpu_dp2_qwen2", + "2gpu_tp2_qwen2", + "2gpu_dp2_deferfp32_llama", + "2gpu_tp2_deferfp32_llama", + "2gpu_dp2_deferfp32_qwen2", + "2gpu_tp2_deferfp32_qwen2", + "2gpu_dp2_chunked_deferfp32_llama", + "2gpu_tp2_chunked_deferfp32_llama", + "2gpu_dp2_chunked_deferfp32_qwen2", + "2gpu_tp2_chunked_deferfp32_qwen2", + ], ) def test_megatron_policy_logprobs(logprob_setup): """Test Megatron policy logprob computation.""" @@ -651,6 +702,7 @@ def test_megatron_policy_logprobs(logprob_setup): # Basic validation assert isinstance(policy_logprobs, torch.Tensor), "Logprobs should be a tensor" + assert policy_logprobs.dtype == torch.float32 assert policy_logprobs.shape == data.get("input_ids").shape, ( f"Logprobs shape {policy_logprobs.shape} should match input shape {data.get('input_ids').shape}" ) diff --git 
a/tests/unit/models/policy/test_utils.py b/tests/unit/models/policy/test_utils.py index 5712985cd3..8fb4d8f8b2 100644 --- a/tests/unit/models/policy/test_utils.py +++ b/tests/unit/models/policy/test_utils.py @@ -14,151 +14,12 @@ import os import unittest.mock -from unittest.mock import MagicMock, patch from nemo_rl.models.policy.utils import ( - configure_expandable_segments, get_megatron_checkpoint_dir, ) -class TestConfigureExpandableSegments(unittest.TestCase): - """Test cases for configure_expandable_segments function.""" - - def setUp(self): - """Set up test environment.""" - # Store original environment variable - self.original_pytorch_cuda_alloc_conf = os.environ.get( - "PYTORCH_CUDA_ALLOC_CONF" - ) - - def tearDown(self): - """Clean up after tests.""" - # Restore original environment variable - if self.original_pytorch_cuda_alloc_conf is not None: - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = ( - self.original_pytorch_cuda_alloc_conf - ) - elif "PYTORCH_CUDA_ALLOC_CONF" in os.environ: - del os.environ["PYTORCH_CUDA_ALLOC_CONF"] - - @patch("torch.cuda.get_device_properties") - def test_hopper_gpu_no_existing_config(self, mock_get_device_properties): - """Test Hopper+ GPU (compute capability >= 9) with no existing PYTORCH_CUDA_ALLOC_CONF.""" - # Mock GPU properties for Hopper+ architecture - mock_device_properties = MagicMock() - mock_device_properties.major = 9 - mock_get_device_properties.return_value = mock_device_properties - - # Ensure no existing config - if "PYTORCH_CUDA_ALLOC_CONF" in os.environ: - del os.environ["PYTORCH_CUDA_ALLOC_CONF"] - - # Call the function - configure_expandable_segments() - - # Verify the environment variable was set correctly - self.assertEqual( - os.environ["PYTORCH_CUDA_ALLOC_CONF"], "expandable_segments:True" - ) - - @patch("torch.cuda.get_device_properties") - def test_hopper_gpu_with_existing_config(self, mock_get_device_properties): - """Test Hopper+ GPU with existing PYTORCH_CUDA_ALLOC_CONF.""" - # Mock GPU properties for 
Hopper+ architecture - mock_device_properties = MagicMock() - mock_device_properties.major = 9 - mock_get_device_properties.return_value = mock_device_properties - - # Set existing config - existing_config = "max_split_size_mb:128" - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = existing_config - - # Call the function - configure_expandable_segments() - - # Verify the environment variable was updated correctly - expected_config = f"{existing_config},expandable_segments:True" - self.assertEqual(os.environ["PYTORCH_CUDA_ALLOC_CONF"], expected_config) - - @patch("torch.cuda.get_device_properties") - def test_hopper_gpu_already_configured(self, mock_get_device_properties): - """Test Hopper+ GPU with existing config that already has expandable_segments.""" - # Mock GPU properties for Hopper+ architecture - mock_device_properties = MagicMock() - mock_device_properties.major = 9 - mock_get_device_properties.return_value = mock_device_properties - - # Set existing config with expandable_segments already present - existing_config = "max_split_size_mb:128,expandable_segments:False" - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = existing_config - - # Call the function - configure_expandable_segments() - - # Verify the environment variable was not changed - self.assertEqual(os.environ["PYTORCH_CUDA_ALLOC_CONF"], existing_config) - - @patch("torch.cuda.get_device_properties") - def test_ampere_gpu_no_config_change(self, mock_get_device_properties): - """Test Ampere GPU (compute capability < 9) should not modify config.""" - # Mock GPU properties for Ampere architecture - mock_device_properties = MagicMock() - mock_device_properties.major = 8 # Ampere - mock_get_device_properties.return_value = mock_device_properties - - # Set existing config - existing_config = "max_split_size_mb:128" - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = existing_config - - # Call the function - configure_expandable_segments() - - # Verify the environment variable was not changed - 
self.assertEqual(os.environ["PYTORCH_CUDA_ALLOC_CONF"], existing_config) - - @patch("torch.cuda.get_device_properties") - def test_ampere_gpu_no_existing_config(self, mock_get_device_properties): - """Test Ampere GPU with no existing config should not set anything.""" - # Mock GPU properties for Ampere architecture - mock_device_properties = MagicMock() - mock_device_properties.major = 8 # Ampere - mock_get_device_properties.return_value = mock_device_properties - - # Ensure no existing config - if "PYTORCH_CUDA_ALLOC_CONF" in os.environ: - del os.environ["PYTORCH_CUDA_ALLOC_CONF"] - - # Call the function - configure_expandable_segments() - - # Verify the environment variable was not set - self.assertNotIn("PYTORCH_CUDA_ALLOC_CONF", os.environ) - - @patch("torch.cuda.get_device_properties") - def test_ampere_gpu_with_expandable_segments_true_raises_error( - self, mock_get_device_properties - ): - """Test Ampere GPU with expandable_segments:True in config raises RuntimeError.""" - # Mock GPU properties for Ampere architecture - mock_device_properties = MagicMock() - mock_device_properties.major = 8 # Ampere - mock_get_device_properties.return_value = mock_device_properties - - # Set config with expandable_segments:True - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" - - # Call the function and expect RuntimeError - with self.assertRaises(RuntimeError) as context: - configure_expandable_segments() - - # Verify the error message - self.assertIn("expandable_segments is enabled", str(context.exception)) - self.assertIn( - "not supported on architectures older than Hopper", str(context.exception) - ) - - class TestGetMegatronCheckpointDir: """Test cases for the get_megatron_checkpoint_dir function.""" diff --git a/tests/unit/prepare_unit_test_assets.py b/tests/unit/prepare_unit_test_assets.py new file mode 100644 index 0000000000..6cb8344c55 --- /dev/null +++ b/tests/unit/prepare_unit_test_assets.py @@ -0,0 +1,98 @@ +# Copyright (c) 2025, NVIDIA 
CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This script exists to help load any unit asset that requires special handling. + +The initial reason for this was to help with Nemotron-H which has a requirement +to have mamaba-ssm in the base environment in order to initialize a dummy model. Since +the unit tests should be runable with the base environment (without mamba-ssm), +we use ray.remotes to build the asset here. We do this outside of a fixture +like the other test assets because this one sometimes takes a while to build. This +extra setup time can sometimes cause timeouts in the unit tests if unlucky. 
+""" + +import os + +import ray + +from nemo_rl.distributed.virtual_cluster import PY_EXECUTABLES +from nemo_rl.utils.venvs import create_local_venv + +TESTS_DIR = os.path.dirname(os.path.abspath(__file__)) +TEST_ASSETS_DIR = os.path.join(TESTS_DIR, "test_assets") + + +def build_tiny_nemotron5_h_checkpoint(model_path: str) -> None: + import shutil + + from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer + + config = AutoConfig.from_pretrained( + "nvidia/Nemotron-H-8B-Base-8K", trust_remote_code=True + ) + config.hybrid_override_pattern = "M*-" + config.num_hidden_layers = 3 + config.intermediate_size = 32 + config.hidden_size = 256 + config.num_attention_heads = 8 + config.mamba_num_heads = 8 + config.num_key_value_heads = 8 + config.n_groups = 1 + + model = AutoModelForCausalLM.from_config(config, trust_remote_code=True) + tokenizer = AutoTokenizer.from_pretrained( + "nvidia/Nemotron-H-8B-Base-8K", trust_remote_code=True + ) + + shutil.rmtree(model_path, ignore_errors=True) + model.save_pretrained(model_path) + tokenizer.save_pretrained(model_path) + print(f"āœ“ Built tiny Nemotron-H asset at: {model_path}") + + +def main() -> None: + os.makedirs(TEST_ASSETS_DIR, exist_ok=True) + + target = os.path.join(TEST_ASSETS_DIR, "tiny_nemotron5_h_with_nemotron_tokenizer") + + # Create Automodel env venv + automodel_python = create_local_venv( + py_executable=PY_EXECUTABLES.AUTOMODEL, venv_name="automodel_env" + ) + + ############################################################################ + # Add other remote calls here + ############################################################################ + # Submit as list of remote calls and wait individually + remote_calls = [ + ray.remote(build_tiny_nemotron5_h_checkpoint) + .options( + num_gpus=0.01, # tiny reservation to satisfy CUDA-inspecting deps + runtime_env={"py_executable": automodel_python}, + name="build-nemotron5h", + ) + .remote(target) + ] + + for obj_ref in remote_calls: + 
ray.get(obj_ref) + + +if __name__ == "__main__": + if not ray.is_initialized(): + ray.init(ignore_reinit_error=True, include_dashboard=False) + try: + main() + finally: + ray.shutdown() diff --git a/tests/unit/rewards/test_rewards.py b/tests/unit/rewards/test_rewards.py new file mode 100644 index 0000000000..38ea52d74e --- /dev/null +++ b/tests/unit/rewards/test_rewards.py @@ -0,0 +1,259 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from numpy.testing import assert_allclose + +from nemo_rl.environments.rewards import ( + bbox_giou_reward, + combine_reward_functions, + exact_answer_alphanumeric_reward, + format_reward, + math_expression_reward, +) + + +def test_math_expression_reward(): + # Test correct math expression + ground_truth = "2x + 3" + response = "Let me solve for y ... 5x + 5 = 3x + 2 + y \implies y = 2x + 3 2x + 3" + reward, is_correct = math_expression_reward(ground_truth, response) + assert_allclose(reward, 1.0, atol=1e-6) + assert is_correct is True + + # Test incorrect math expression + response = "Let me solve this... I'm a dumb LLM so I have no reasoning trace to actuallysolve this 3x + 2" + reward, is_correct = math_expression_reward(ground_truth, response) + assert_allclose(reward, 0.0, atol=1e-6) + assert is_correct is False + + # Test for missing answer tags + response = "Let me solve this... 
The answer is 2x + 3" + reward, is_correct = math_expression_reward(ground_truth, response) + assert_allclose(reward, 0.0, atol=1e-6) + assert is_correct is False + + +def test_format_reward(): + ground_truth = "any_ground_truth" # Format reward doesn't use ground truth + + # Test complete format + response = "My thinking My answer" + reward, is_correct = format_reward(ground_truth, response) + assert reward == 1.0 + assert is_correct is None + + # Test only think tags + response = "My thinking" + reward, is_correct = format_reward(ground_truth, response) + assert reward == 0.25 + assert is_correct is None + + # Test only answer tags + response = "My answer" + reward, is_correct = format_reward(ground_truth, response) + assert reward == 0.75 + assert is_correct is None + + # Test no tags + response = "Just plain text" + reward, is_correct = format_reward(ground_truth, response) + assert reward == 0.0 + assert is_correct is None + + +def test_format_reward_custom_tags(): + ground_truth = "does_not_matter" + + # Both tags in response and reward function match + response = "Reasoning here 42" + reward, is_correct = format_reward( + ground_truth, response, think_tag="think_trace", answer_tag="solution" + ) + assert reward == 1.0 + assert is_correct is None + + # Only think tag present, tags match + response = "Reasoning here" + reward, is_correct = format_reward( + ground_truth, response, think_tag="think_trace", answer_tag="solution" + ) + assert reward == 0.25 + assert is_correct is None + + # Only answer tag present, tags match + response = "42" + reward, is_correct = format_reward( + ground_truth, response, think_tag="think_trace", answer_tag="solution" + ) + assert reward == 0.75 + assert is_correct is None + + # Neither tag present, tags match + response = "No tags here" + reward, is_correct = format_reward( + ground_truth, response, think_tag="think_trace", answer_tag="solution" + ) + assert reward == 0.0 + assert is_correct is None + + # Tags in response do not 
match those in reward function (should yield 0.0) + response = "Reasoning here 42" + reward, is_correct = format_reward( + ground_truth, response, think_tag="think_trace", answer_tag="solution" + ) + assert reward == 0.0 + assert is_correct is None + + # Mixed: one tag matches, one does not (should yield 0.25 for think_trace, 0 for solution) + response = "Reasoning here 42" + reward, is_correct = format_reward( + ground_truth, response, think_tag="think_trace", answer_tag="solution" + ) + assert reward == 0.25 + assert is_correct is None + + # Mixed: one tag matches, one does not (should yield 0.75 for solution, 0 for think_trace) + response = "Reasoning here 42" + reward, is_correct = format_reward( + ground_truth, response, think_tag="think_trace", answer_tag="solution" + ) + assert reward == 0.75 + assert is_correct is None + + +def test_exact_answer_alphanumeric_reward(): + ground_truth = "Hello123" + + # Test exact match + response = "Hello123" + reward, is_correct = exact_answer_alphanumeric_reward(ground_truth, response) + assert_allclose(reward, 1.0, atol=1e-6) + assert is_correct is True + + # Test case insensitive match + response = "HELLO123" + reward, is_correct = exact_answer_alphanumeric_reward(ground_truth, response) + assert_allclose(reward, 1.0, atol=1e-6) + assert is_correct is True + + # Test with special characters + response = "Hello-123!" 
+ reward, is_correct = exact_answer_alphanumeric_reward(ground_truth, response) + assert_allclose(reward, 1.0, atol=1e-6) + assert is_correct is True + + # Test incorrect answer + response = "Hello124" + reward, is_correct = exact_answer_alphanumeric_reward(ground_truth, response) + assert_allclose(reward, 0.0, atol=1e-6) + assert is_correct is False + + +def test_bbox_giou_reward(): + ground_truth = "[0.1, 0.1, 0.5, 0.5]" + + # Test perfect match + response = "[0.1, 0.1, 0.5, 0.5]" + reward, is_correct = bbox_giou_reward(ground_truth, response) + print(f"reward: {reward}, is_correct: {is_correct}") + assert_allclose(reward, 1.0, atol=1e-6) + assert is_correct is True + + # Test partial overlap + response = "[0.2, 0.2, 0.6, 0.6]" + reward, is_correct = bbox_giou_reward(ground_truth, response) + print(f"reward: {reward}, is_correct: {is_correct}") + assert 0 < reward < 1.0 + assert is_correct is False + + # Test no overlap + response = "[0.6, 0.6, 0.9, 0.9]" + reward, is_correct = bbox_giou_reward(ground_truth, response) + print(f"reward: {reward}, is_correct: {is_correct}") + assert reward < 0.0 # GIoU can be negative when boxes don't overlap + assert is_correct is False + + # test bad bounding box format (5 numbers) + response = "[0.6, 0.6, 0.9, 0.9, 0.1]" + reward, is_correct = bbox_giou_reward(ground_truth, response) + print(f"reward: {reward}, is_correct: {is_correct}") + assert_allclose(reward, 0.0, atol=1e-6) + assert is_correct is False + + # Test invalid format + response = "invalid bbox format" + reward, is_correct = bbox_giou_reward(ground_truth, response) + print(f"reward: {reward}, is_correct: {is_correct}") + assert_allclose(reward, 0.0, atol=1e-6) + assert is_correct is False + + +def test_exact_answer_alphanumeric_reward_combined(): + # Define test cases + ground_truth = "test123" + good_response = "thinking test123" + bad_response = "thinking wrong" + incorrect_format_response = "here is a bbox: [0.1, 0.1, 0.5, 0.5] without any tags" + + # Create 
reward function combinations with weights + reward_functions = [(format_reward, 0.3), (exact_answer_alphanumeric_reward, 0.7)] + combined_reward = combine_reward_functions(reward_functions) + + # Test good response + reward, is_correct = combined_reward(ground_truth, good_response) + assert_allclose(reward, 1.0, atol=1e-6) + assert is_correct is True + + # Test bad response + reward, is_correct = combined_reward(ground_truth, bad_response) + assert_allclose(reward, 0.3, atol=1e-6) + assert is_correct is False + + # test bad format + reward, is_correct = combined_reward(ground_truth, incorrect_format_response) + assert_allclose(reward, 0.0, atol=1e-6) + assert is_correct is False + + +def test_bbox_giou_reward_combined(): + # Test combining all reward functions + ground_truth_bbox = "[0.1, 0.1, 0.5, 0.5]" + good_response = "The bounding box coordinates are [0.1, 0.1, 0.5, 0.5] [0.1, 0.1, 0.5, 0.5]" + no_think_response = "[0.1, 0.1, 0.5, 0.5]" + no_answer_response = "thinking" + no_think_no_answer_response = ( + "here is a bbox: [0.1, 0.1, 0.5, 0.5] without any tags" + ) + + reward_functions = [(format_reward, 0.2), (bbox_giou_reward, 0.8)] + + combined_reward = combine_reward_functions(reward_functions) + + # Test perfect response + reward, is_correct = combined_reward(ground_truth_bbox, good_response) + assert_allclose(reward, 1.0, atol=1e-6) + assert is_correct is True + + # Test partially correct response (correct format, wrong bbox) + reward, is_correct = combined_reward(ground_truth_bbox, no_think_response) + assert_allclose(reward, 0.75 * 0.2 + 0.8, atol=1e-6) + assert is_correct is True + + reward, is_correct = combined_reward(ground_truth_bbox, no_answer_response) + assert_allclose(reward, 0.2 * 0.25, atol=1e-6) + assert is_correct is False + + reward, is_correct = combined_reward(ground_truth_bbox, no_think_no_answer_response) + assert_allclose(reward, 0.0, atol=1e-6) + assert is_correct is False diff --git a/tests/unit/test_config_validation.py 
b/tests/unit/test_config_validation.py index 3056ae270d..e5fa73ee7d 100644 --- a/tests/unit/test_config_validation.py +++ b/tests/unit/test_config_validation.py @@ -32,6 +32,9 @@ from nemo_rl.utils.config import load_config_with_inheritance from nemo_rl.utils.logger import LoggerConfig +# All tests in this module should run first +pytestmark = pytest.mark.run_first + def get_keys_from_typeddict(typed_dict_class: dict) -> Set[str]: """Extract required keys from a TypedDict class, excluding NotRequired fields.""" diff --git a/tests/unit/test_recipes_and_test_suites.py b/tests/unit/test_recipes_and_test_suites.py index 47d1d2f45b..4ac4414b9d 100644 --- a/tests/unit/test_recipes_and_test_suites.py +++ b/tests/unit/test_recipes_and_test_suites.py @@ -17,6 +17,9 @@ import pytest +# All tests in this module should run first +pytestmark = pytest.mark.run_first + dir_path = os.path.dirname(os.path.abspath(__file__)) project_root = os.path.abspath(os.path.join(dir_path, "..", "..")) configs_dir = os.path.join(project_root, "examples", "configs") @@ -37,6 +40,7 @@ "sft": "examples/configs/sft.yaml", "dpo": "examples/configs/dpo.yaml", "grpo": "examples/configs/grpo_math_1B.yaml", + "vlm_grpo": "examples/configs/vlm_grpo_3B.yaml", } @@ -182,7 +186,7 @@ def test_all_recipe_yamls_accounted_for_in_test_suites( ) -def test_nightly_compute_stays_below_1024_hours(nightly_test_suite, tracker): +def test_nightly_compute_stays_below_1030_hours(nightly_test_suite, tracker): command = f"DRYRUN=1 HF_HOME=... HF_DATASETS_CACHE=... CONTAINER= ACCOUNT= PARTITION= ./tools/launch {' '.join(nightly_test_suite)}" print(f"Running command: {command}") @@ -214,8 +218,8 @@ def test_nightly_compute_stays_below_1024_hours(nightly_test_suite, tracker): f"Last line of output was not as expected: '{last_line}'" ) total_gpu_hours = float(last_line.split(":")[-1].strip()) - assert total_gpu_hours <= 1024, ( - f"Total GPU hours exceeded 1024: {last_line}. 
We should revisit the test suites to reduce the total GPU hours." + assert total_gpu_hours <= 1030, ( + f"Total GPU hours exceeded 1030: {last_line}. We should revisit the test suites to reduce the total GPU hours." ) tracker.track("total_nightly_gpu_hours", total_gpu_hours) @@ -283,6 +287,8 @@ def test_all_recipes_can_merge_configs_with_base_config( ): from omegaconf import OmegaConf + from nemo_rl.utils.config import load_config + base_yaml = os.path.join(project_root, algo_base_yaml) base_config = OmegaConf.load(base_yaml) # Would result in an error if we couldn't merge our config with the recipe's config @@ -293,7 +299,7 @@ def test_all_recipes_can_merge_configs_with_base_config( # test_all_recipes_start_with_algo_hyphen() continue recipe_yaml_path = os.path.join(recipes_dir, recipe_yaml) - recipe_config = OmegaConf.load(recipe_yaml_path) + recipe_config = load_config(recipe_yaml_path) OmegaConf.set_struct(recipe_config, True) # This will raise a error if the config can't be merged print(f"Merging {recipe_yaml} with {base_yaml}") diff --git a/tests/unit/utils/test_native_checkpoint.py b/tests/unit/utils/test_native_checkpoint.py index 69493da3b3..88003941cb 100755 --- a/tests/unit/utils/test_native_checkpoint.py +++ b/tests/unit/utils/test_native_checkpoint.py @@ -130,17 +130,6 @@ def policy(cluster, tokenizer): policy.worker_group.shutdown() -@pytest.fixture(scope="module", autouse=True) -def skip_tied_weight_check_for_all(): - """Automatically skip tied weight check for all tests in this module.""" - os.environ["NRL_SKIP_TIED_WEIGHT_CHECK"] = "1" - - yield - - # Restore the original value - os.environ.pop("NRL_SKIP_TIED_WEIGHT_CHECK", None) - - def get_dummy_state_dict(state_dict, dummy_dict={}): """Recursively get the dummy state dict by replacing tensors with random ones of the same shape. 
diff --git a/tests/unit/utils/test_timer.py b/tests/unit/utils/test_timer.py index 56ba315b55..041193b777 100644 --- a/tests/unit/utils/test_timer.py +++ b/tests/unit/utils/test_timer.py @@ -18,7 +18,7 @@ import numpy as np import pytest -from nemo_rl.utils.timer import Timer +from nemo_rl.utils.timer import TimeoutChecker, Timer class TestTimer: @@ -188,3 +188,48 @@ def test_precise_timing(self, mock_perf_counter, timer): # Check the elapsed time assert elapsed == 5.0 assert timer._timers["precise_test"][0] == 5.0 + + +class TestTimeoutChecker: + def test_infinite_timeout(self): + checker = TimeoutChecker(timeout=None) + time.sleep(0.1) + assert checker.check_save() is False + + def test_short_timeout(self): + checker = TimeoutChecker(timeout="00:00:00:01") + time.sleep(1.1) + assert checker.check_save() is True + + def test_double_save_prevented(self): + checker = TimeoutChecker(timeout="00:00:00:01") + time.sleep(1.1) + assert checker.check_save() is True + assert checker.check_save() is False + + def test_fit_last_save_time_enabled(self): + # Create a TimeoutChecker with a 3-second timeout and enable fit_last_save_time logic + checker = TimeoutChecker(timeout="00:00:00:03", fit_last_save_time=True) + checker.start_iterations() + + # Simulate 10 iterations, each taking about 0.1 seconds + # This builds up a stable average iteration time + for _ in range(10): + time.sleep(0.1) + checker.mark_iteration() + + # Wait an additional ~2.0 seconds so that: + # elapsed time + avg iteration time >= timeout (3 seconds) + time.sleep(2.0) + + result = checker.check_save() + # Assert that the checker triggers a save due to timeout + assert result is True + + def test_iteration_tracking(self): + checker = TimeoutChecker() + checker.start_iterations() + time.sleep(0.05) + checker.mark_iteration() + assert len(checker.iteration_times) == 1 + assert checker.iteration_times[0] > 0 diff --git a/tools/code_snapshot.sh b/tools/code_snapshot.sh index 62136a8632..8d7b51f81c 100644 --- 
a/tools/code_snapshot.sh +++ b/tools/code_snapshot.sh @@ -16,12 +16,14 @@ if [[ ! -e "$PROJECT_ROOT/.git" ]]; then elif [[ $# -lt 1 ]]; then echo2 "[Error]: This script requires one argument: the name of the experiment to be used as the snapshot directory name" echo2 "Usage: bash tools/code_snapshot.sh " + echo2 "Usage: CODE_SNAPSHOT_DIRNAME=code_snapshots_dbg bash tools/code_snapshot.sh " exit 1 fi EXP_NAME=$1 +CODE_SNAPSHOT_DIRNAME=${CODE_SNAPSHOT_DIRNAME:-code_snapshots} -SNAPSHOT_DIR="$PROJECT_ROOT/code_snapshots/${EXP_NAME}" +SNAPSHOT_DIR="$PROJECT_ROOT/${CODE_SNAPSHOT_DIRNAME}/${EXP_NAME}" if [[ ! -d "$SNAPSHOT_DIR" ]]; then echo2 "Creating new code snapshot in $SNAPSHOT_DIR" mkdir -p $SNAPSHOT_DIR @@ -32,9 +34,15 @@ else exit fi -echo2 "Copying git-tracked files..." -rsync -a --files-from=<(git ls-files) ./ $SNAPSHOT_DIR/ +echo2 "Copying git-tracked files and submodules..." +rsync -a --files-from=<( + { + git ls-files + echo .gitmodules + git submodule foreach --recursive --quiet 'git ls-files | sed "s|^|$path/|"' + } +) ./ $SNAPSHOT_DIR/ # Echo the snapshot directory so the caller can use it to `cd` into it -echo ${SNAPSHOT_DIR} \ No newline at end of file +echo ${SNAPSHOT_DIR} diff --git a/tools/launch b/tools/launch index 4c76cee78d..980d0fef01 100755 --- a/tools/launch +++ b/tools/launch @@ -150,6 +150,7 @@ for SCRIPT in $SCRIPTS; do SCRIPT_DIR=\$( cd -- "\$( dirname -- "\${BASH_SOURCE[0]}" )" &> /dev/null && pwd) cd \$SCRIPT_DIR +${EXTRA_ENV:-} \\ HF_HOME=$HF_HOME \\ HF_DATASETS_CACHE=$HF_DATASETS_CACHE \\ COMMAND="apt install -y jq && uv run $rel_script ${RELEASE_ARGS[@]}" \\ diff --git a/tools/model_diagnostics/3.check_hf_model_embeddings_untrained.py b/tools/model_diagnostics/3.check_hf_model_embeddings_untrained.py new file mode 100755 index 0000000000..d3684df45c --- /dev/null +++ b/tools/model_diagnostics/3.check_hf_model_embeddings_untrained.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025, NVIDIA CORPORATION. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Model Diagnostic: Check HuggingFace Model Embeddings for Untrained Patterns. + +This script loads a HuggingFace model and analyzes the input and output embeddings +to detect patterns that suggest the model may be untrained or improperly initialized. + +uv run --extra mcore 3.check_hf_model_embeddings_untrained.py --model nvidia/Nemotron-H-8B-Base-8K +""" + +import argparse + +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + + +def format_index_ranges(indices): + """Format a list of indices into range strings like '0-1,3-6'.""" + if not indices: + return "" + + ranges = [] + start = end = indices[0] + + for i in range(1, len(indices)): + if indices[i] == end + 1: + end = indices[i] + else: + ranges.append(str(start) if start == end else f"{start}-{end}") + start = end = indices[i] + + # Add the last range + ranges.append(str(start) if start == end else f"{start}-{end}") + return ",".join(ranges) + + +def get_token_info(tokenizer, idx): + """Get token information for a given index.""" + if not tokenizer: + return "N/A" + try: + return repr(tokenizer.decode([idx])) + except Exception: + return "N/A" + + +def print_problematic_embeddings( + weights, indices, problem_type, metric_values, threshold, tokenizer=None +): + """Print detailed information about each problematic embedding.""" + if not indices: + return + + print(f"\n--- Detailed {problem_type} Embeddings ---") + for 
idx in indices: + embedding = weights[idx] + metric_val = metric_values[idx].item() + token_info = get_token_info(tokenizer, idx) + + # Get first 2 and last 2 values + first_two = embedding[:2].tolist() + last_two = embedding[-2:].tolist() + + print( + f"Index {idx}: {problem_type} (metric: {metric_val:.2e} > {threshold:.2e})" + ) + print(f" Token: {token_info}") + print( + f" Values: [{first_two[0]:.2e}, {first_two[1]:.2e}, ..., {last_two[0]:.2e}, {last_two[1]:.2e}]" + ) + + +def find_output_embeddings(model): + """Find the output embeddings layer in various model architectures.""" + if hasattr(model, "get_output_embeddings"): + return model.get_output_embeddings() + elif hasattr(model, "lm_head"): + return model.lm_head + elif hasattr(model, "embed_out"): + return model.embed_out + return None + + +def check_embedding_layer( + embeddings, + layer_name, + near_zero_threshold, + identical_threshold, + tokenizer=None, + model=None, +): + """Check an embedding layer for untrained patterns.""" + print(f"\n=== {layer_name} Analysis ===") + + # Check if embeddings are tied (for output embeddings) + tied_info = "" + if layer_name == "Output Embeddings" and model and hasattr(model, "config"): + tied = getattr(model.config, "tie_word_embeddings", False) + tied_info = f" (Tied: {tied})" + print(f"Tied word embeddings: {tied}") + + # Get embedding weights + weights = ( + embeddings.weight.data if hasattr(embeddings, "weight") else embeddings.data + ) + + print(f"Shape: {weights.shape}") + print(f"Dtype: {weights.dtype}") + + # Check for near-zero embeddings + near_zero_mask = torch.abs(weights) < near_zero_threshold + near_zero_rows = near_zero_mask.all(dim=1) + near_zero_indices = torch.where(near_zero_rows)[0].tolist() + + # Check for identical embeddings using standard deviation + row_stds = weights.std(dim=1) + identical_mask = row_stds < identical_threshold + identical_indices = torch.where(identical_mask)[0].tolist() + + # Print detailed problematic embeddings + 
max_abs_values = torch.abs(weights).max(dim=1)[0] + print_problematic_embeddings( + weights, + near_zero_indices, + "Near-zero", + max_abs_values, + near_zero_threshold, + tokenizer, + ) + print_problematic_embeddings( + weights, + identical_indices, + "Identical", + row_stds, + identical_threshold, + tokenizer, + ) + + # Return summary data instead of printing + num_near_zero = len(near_zero_indices) + num_identical = len(identical_indices) + total_embeddings = weights.shape[0] + + # Flag potential issues + issues = [] + if num_near_zero > 0: + issues.append(f"{num_near_zero} near-zero embeddings") + if num_identical > 0: + issues.append(f"{num_identical} identical embeddings") + + return { + "layer_name": layer_name, + "tied_info": tied_info, + "shape": weights.shape, + "dtype": weights.dtype, + "num_near_zero": num_near_zero, + "num_identical": num_identical, + "total_embeddings": total_embeddings, + "near_zero_indices": near_zero_indices, + "identical_indices": identical_indices, + "near_zero_threshold": near_zero_threshold, + "identical_threshold": identical_threshold, + "mean_abs": torch.abs(weights).mean().item(), + "max_abs": torch.abs(weights).max().item(), + "min_std": row_stds.min().item(), + "max_std": row_stds.max().item(), + "issues": issues, + } + + +def main(): + parser = argparse.ArgumentParser( + description="Check HuggingFace model embeddings for untrained patterns" + ) + parser.add_argument( + "--model", + default="nvidia/Nemotron-H-8B-Base-8K", + help="HuggingFace model name or path", + ) + parser.add_argument( + "--near-zero-threshold", + type=float, + default=1e-10, + help="Threshold for detecting near-zero embeddings (default: 1e-10)", + ) + parser.add_argument( + "--identical-threshold", + type=float, + default=1e-8, + help="Threshold for detecting identical embeddings via std dev (default: 1e-8)", + ) + + args = parser.parse_args() + + print(f"Loading model: {args.model}") + + # Load model and tokenizer + model = 
AutoModelForCausalLM.from_pretrained( + args.model, torch_dtype="auto", trust_remote_code=True + ) + tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True) + + print("Model loaded successfully") + print(f"Model type: {type(model).__name__}") + print(f"Vocabulary size: {len(tokenizer)}") + + # Collect summary data from both embeddings + summaries = [] + + # Check input embeddings + input_embeddings = model.get_input_embeddings() + if input_embeddings is not None: + input_summary = check_embedding_layer( + input_embeddings, + "Input Embeddings", + args.near_zero_threshold, + args.identical_threshold, + tokenizer, + model, + ) + summaries.append(input_summary) + else: + print("\nāš ļø Could not find input embeddings layer") + + # Check output embeddings + output_embeddings = find_output_embeddings(model) + if output_embeddings is not None: + output_summary = check_embedding_layer( + output_embeddings, + "Output Embeddings", + args.near_zero_threshold, + args.identical_threshold, + tokenizer, + model, + ) + summaries.append(output_summary) + else: + print("\nāš ļø Could not find output embeddings layer") + + # Print summaries together + print("\n" + "=" * 80) + print("EMBEDDING SUMMARIES") + print("=" * 80) + + for summary in summaries: + print(f"\n--- {summary['layer_name']} Summary{summary['tied_info']} ---") + print(f"Shape: {summary['shape']}, Dtype: {summary['dtype']}") + + print( + f"Near-zero embeddings (abs < {summary['near_zero_threshold']:.2e}): {summary['num_near_zero']}/{summary['total_embeddings']} ({100 * summary['num_near_zero'] / summary['total_embeddings']:.1f}%)" + ) + if summary["near_zero_indices"]: + print(f" Indices: {format_index_ranges(summary['near_zero_indices'])}") + + print( + f"Identical embeddings (std < {summary['identical_threshold']:.2e}): {summary['num_identical']}/{summary['total_embeddings']} ({100 * summary['num_identical'] / summary['total_embeddings']:.1f}%)" + ) + if summary["identical_indices"]: + 
print(f" Indices: {format_index_ranges(summary['identical_indices'])}") + + print( + f"Statistics: mean_abs={summary['mean_abs']:.6f}, max_abs={summary['max_abs']:.6f}, std_range=[{summary['min_std']:.6f}, {summary['max_std']:.6f}]" + ) + + if summary["issues"]: + print(f"āš ļø POTENTIAL ISSUES: {', '.join(summary['issues'])}") + else: + print("āœ… No obvious untrained patterns detected") + + print("\n=== Final Summary ===") + print(f"Model: {args.model}") + print("Analysis complete.") + + +if __name__ == "__main__": + main() diff --git a/uv.lock b/uv.lock index 9688b33fb9..336c0853c9 100644 --- a/uv.lock +++ b/uv.lock @@ -2,20 +2,18 @@ version = 1 revision = 2 requires-python = ">=3.12" resolution-markers = [ - "python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine == 'arm64' and sys_platform == 'linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", - "python_full_version >= '3.13' and sys_platform == 'darwin'", - "python_full_version < '3.13' and sys_platform == 'darwin'", - "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux'", - "python_full_version < '3.13' and sys_platform != 'darwin' and sys_platform != 'linux'", + "python_full_version < '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'linux'", + "python_full_version < '3.13' and sys_platform != 'linux'", + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", ] [manifest] members = [ "megatron-core", + 
"nemo-automodel", "nemo-rl", "nemo-tron", ] @@ -26,16 +24,16 @@ requires-dist = ["torch", "einops", "setuptools", "psutil", "ninja"] [[package]] name = "absl-py" -version = "2.3.0" +version = "2.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/03/15/18693af986560a5c3cc0b84a8046b536ffb2cdb536e03cce897f2759e284/absl_py-2.3.0.tar.gz", hash = "sha256:d96fda5c884f1b22178852f30ffa85766d50b99e00775ea626c23304f582fc4f", size = 116400, upload-time = "2025-05-27T09:15:50.143Z" } +sdist = { url = "https://files.pythonhosted.org/packages/10/2a/c93173ffa1b39c1d0395b7e842bbdc62e556ca9d8d3b5572926f3e4ca752/absl_py-2.3.1.tar.gz", hash = "sha256:a97820526f7fbfd2ec1bce83f3f25e3a14840dac0d8e02a0b71cd75db3f77fc9", size = 116588, upload-time = "2025-07-03T09:31:44.05Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/87/04/9d75e1d3bb4ab8ec67ff10919476ccdee06c098bcfcf3a352da5f985171d/absl_py-2.3.0-py3-none-any.whl", hash = "sha256:9824a48b654a306168f63e0d97714665f8490b8d89ec7bf2efc24bf67cf579b3", size = 135657, upload-time = "2025-05-27T09:15:48.742Z" }, + { url = "https://files.pythonhosted.org/packages/8f/aa/ba0014cc4659328dc818a28827be78e6d97312ab0cb98105a770924dc11e/absl_py-2.3.1-py3-none-any.whl", hash = "sha256:eeecf07f0c2a93ace0772c92e596ace6d3d3996c042b2128459aaae2a76de11d", size = 135811, upload-time = "2025-07-03T09:31:42.253Z" }, ] [[package]] name = "accelerate" -version = "1.8.1" +version = "1.10.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, @@ -46,9 +44,9 @@ dependencies = [ { name = "safetensors" }, { name = "torch" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bd/c2/b9e33ad13232606dded4c546e654fb06a15f1dbcbd95d81c9f9dd3ccc771/accelerate-1.8.1.tar.gz", hash = "sha256:f60df931671bc4e75077b852990469d4991ce8bd3a58e72375c3c95132034db9", size = 380872, upload-time = "2025-06-20T15:36:14.618Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f7/66/be171836d86dc5b8698b3a9bf4b9eb10cb53369729939f88bf650167588b/accelerate-1.10.0.tar.gz", hash = "sha256:8270568fda9036b5cccdc09703fef47872abccd56eb5f6d53b54ea5fb7581496", size = 392261, upload-time = "2025-08-07T10:54:51.664Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/91/d9/e044c9d42d8ad9afa96533b46ecc9b7aea893d362b3c52bd78fb9fe4d7b3/accelerate-1.8.1-py3-none-any.whl", hash = "sha256:c47b8994498875a2b1286e945bd4d20e476956056c7941d512334f4eb44ff991", size = 365338, upload-time = "2025-06-20T15:36:12.71Z" }, + { url = "https://files.pythonhosted.org/packages/30/dd/0107f0aa179869ee9f47ef5a2686abd5e022fdc82af901d535e52fe91ce1/accelerate-1.10.0-py3-none-any.whl", hash = "sha256:260a72b560e100e839b517a331ec85ed495b3889d12886e79d1913071993c5a3", size = 374718, upload-time = "2025-08-07T10:54:49.988Z" }, ] [[package]] @@ -74,7 +72,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.12.13" +version = "3.12.15" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -85,42 +83,42 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/6e/ab88e7cb2a4058bed2f7870276454f85a7c56cd6da79349eb314fc7bbcaa/aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce", size = 7819160, upload-time = "2025-06-14T15:15:41.354Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b4/6a/ce40e329788013cd190b1d62bbabb2b6a9673ecb6d836298635b939562ef/aiohttp-3.12.13-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0aa580cf80558557285b49452151b9c69f2fa3ad94c5c9e76e684719a8791b73", size = 700491, upload-time = "2025-06-14T15:14:00.048Z" }, - { url = "https://files.pythonhosted.org/packages/28/d9/7150d5cf9163e05081f1c5c64a0cdf3c32d2f56e2ac95db2a28fe90eca69/aiohttp-3.12.13-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:b103a7e414b57e6939cc4dece8e282cfb22043efd0c7298044f6594cf83ab347", size = 475104, upload-time = "2025-06-14T15:14:01.691Z" }, - { url = "https://files.pythonhosted.org/packages/f8/91/d42ba4aed039ce6e449b3e2db694328756c152a79804e64e3da5bc19dffc/aiohttp-3.12.13-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f64e748e9e741d2eccff9597d09fb3cd962210e5b5716047cbb646dc8fe06f", size = 467948, upload-time = "2025-06-14T15:14:03.561Z" }, - { url = "https://files.pythonhosted.org/packages/99/3b/06f0a632775946981d7c4e5a865cddb6e8dfdbaed2f56f9ade7bb4a1039b/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c955989bf4c696d2ededc6b0ccb85a73623ae6e112439398935362bacfaaf6", size = 1714742, upload-time = "2025-06-14T15:14:05.558Z" }, - { url = "https://files.pythonhosted.org/packages/92/a6/2552eebad9ec5e3581a89256276009e6a974dc0793632796af144df8b740/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d640191016763fab76072c87d8854a19e8e65d7a6fcfcbf017926bdbbb30a7e5", size = 1697393, upload-time = "2025-06-14T15:14:07.194Z" }, - { url = "https://files.pythonhosted.org/packages/d8/9f/bd08fdde114b3fec7a021381b537b21920cdd2aa29ad48c5dffd8ee314f1/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dc507481266b410dede95dd9f26c8d6f5a14315372cc48a6e43eac652237d9b", size = 1752486, upload-time = "2025-06-14T15:14:08.808Z" }, - { url = "https://files.pythonhosted.org/packages/f7/e1/affdea8723aec5bd0959171b5490dccd9a91fcc505c8c26c9f1dca73474d/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a94daa873465d518db073bd95d75f14302e0208a08e8c942b2f3f1c07288a75", size = 1798643, upload-time = "2025-06-14T15:14:10.767Z" }, - { url = 
"https://files.pythonhosted.org/packages/f3/9d/666d856cc3af3a62ae86393baa3074cc1d591a47d89dc3bf16f6eb2c8d32/aiohttp-3.12.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f52420cde4ce0bb9425a375d95577fe082cb5721ecb61da3049b55189e4e6", size = 1718082, upload-time = "2025-06-14T15:14:12.38Z" }, - { url = "https://files.pythonhosted.org/packages/f3/ce/3c185293843d17be063dada45efd2712bb6bf6370b37104b4eda908ffdbd/aiohttp-3.12.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f7df1f620ec40f1a7fbcb99ea17d7326ea6996715e78f71a1c9a021e31b96b8", size = 1633884, upload-time = "2025-06-14T15:14:14.415Z" }, - { url = "https://files.pythonhosted.org/packages/3a/5b/f3413f4b238113be35dfd6794e65029250d4b93caa0974ca572217745bdb/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3062d4ad53b36e17796dce1c0d6da0ad27a015c321e663657ba1cc7659cfc710", size = 1694943, upload-time = "2025-06-14T15:14:16.48Z" }, - { url = "https://files.pythonhosted.org/packages/82/c8/0e56e8bf12081faca85d14a6929ad5c1263c146149cd66caa7bc12255b6d/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:8605e22d2a86b8e51ffb5253d9045ea73683d92d47c0b1438e11a359bdb94462", size = 1716398, upload-time = "2025-06-14T15:14:18.589Z" }, - { url = "https://files.pythonhosted.org/packages/ea/f3/33192b4761f7f9b2f7f4281365d925d663629cfaea093a64b658b94fc8e1/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54fbbe6beafc2820de71ece2198458a711e224e116efefa01b7969f3e2b3ddae", size = 1657051, upload-time = "2025-06-14T15:14:20.223Z" }, - { url = "https://files.pythonhosted.org/packages/5e/0b/26ddd91ca8f84c48452431cb4c5dd9523b13bc0c9766bda468e072ac9e29/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:050bd277dfc3768b606fd4eae79dd58ceda67d8b0b3c565656a89ae34525d15e", size = 1736611, upload-time = "2025-06-14T15:14:21.988Z" }, - { url = 
"https://files.pythonhosted.org/packages/c3/8d/e04569aae853302648e2c138a680a6a2f02e374c5b6711732b29f1e129cc/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2637a60910b58f50f22379b6797466c3aa6ae28a6ab6404e09175ce4955b4e6a", size = 1764586, upload-time = "2025-06-14T15:14:23.979Z" }, - { url = "https://files.pythonhosted.org/packages/ac/98/c193c1d1198571d988454e4ed75adc21c55af247a9fda08236602921c8c8/aiohttp-3.12.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e986067357550d1aaa21cfe9897fa19e680110551518a5a7cf44e6c5638cb8b5", size = 1724197, upload-time = "2025-06-14T15:14:25.692Z" }, - { url = "https://files.pythonhosted.org/packages/e7/9e/07bb8aa11eec762c6b1ff61575eeeb2657df11ab3d3abfa528d95f3e9337/aiohttp-3.12.13-cp312-cp312-win32.whl", hash = "sha256:ac941a80aeea2aaae2875c9500861a3ba356f9ff17b9cb2dbfb5cbf91baaf5bf", size = 421771, upload-time = "2025-06-14T15:14:27.364Z" }, - { url = "https://files.pythonhosted.org/packages/52/66/3ce877e56ec0813069cdc9607cd979575859c597b6fb9b4182c6d5f31886/aiohttp-3.12.13-cp312-cp312-win_amd64.whl", hash = "sha256:671f41e6146a749b6c81cb7fd07f5a8356d46febdaaaf07b0e774ff04830461e", size = 447869, upload-time = "2025-06-14T15:14:29.05Z" }, - { url = "https://files.pythonhosted.org/packages/11/0f/db19abdf2d86aa1deec3c1e0e5ea46a587b97c07a16516b6438428b3a3f8/aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938", size = 694910, upload-time = "2025-06-14T15:14:30.604Z" }, - { url = "https://files.pythonhosted.org/packages/d5/81/0ab551e1b5d7f1339e2d6eb482456ccbe9025605b28eed2b1c0203aaaade/aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace", size = 472566, upload-time = "2025-06-14T15:14:32.275Z" }, - { url = 
"https://files.pythonhosted.org/packages/34/3f/6b7d336663337672d29b1f82d1f252ec1a040fe2d548f709d3f90fa2218a/aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb", size = 464856, upload-time = "2025-06-14T15:14:34.132Z" }, - { url = "https://files.pythonhosted.org/packages/26/7f/32ca0f170496aa2ab9b812630fac0c2372c531b797e1deb3deb4cea904bd/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7", size = 1703683, upload-time = "2025-06-14T15:14:36.034Z" }, - { url = "https://files.pythonhosted.org/packages/ec/53/d5513624b33a811c0abea8461e30a732294112318276ce3dbf047dbd9d8b/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b", size = 1684946, upload-time = "2025-06-14T15:14:38Z" }, - { url = "https://files.pythonhosted.org/packages/37/72/4c237dd127827b0247dc138d3ebd49c2ded6114c6991bbe969058575f25f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177", size = 1737017, upload-time = "2025-06-14T15:14:39.951Z" }, - { url = "https://files.pythonhosted.org/packages/0d/67/8a7eb3afa01e9d0acc26e1ef847c1a9111f8b42b82955fcd9faeb84edeb4/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef", size = 1786390, upload-time = "2025-06-14T15:14:42.151Z" }, - { url = "https://files.pythonhosted.org/packages/48/19/0377df97dd0176ad23cd8cad4fd4232cfeadcec6c1b7f036315305c98e3f/aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103", size = 1708719, upload-time = 
"2025-06-14T15:14:44.039Z" }, - { url = "https://files.pythonhosted.org/packages/61/97/ade1982a5c642b45f3622255173e40c3eed289c169f89d00eeac29a89906/aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da", size = 1622424, upload-time = "2025-06-14T15:14:45.945Z" }, - { url = "https://files.pythonhosted.org/packages/99/ab/00ad3eea004e1d07ccc406e44cfe2b8da5acb72f8c66aeeb11a096798868/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d", size = 1675447, upload-time = "2025-06-14T15:14:47.911Z" }, - { url = "https://files.pythonhosted.org/packages/3f/fe/74e5ce8b2ccaba445fe0087abc201bfd7259431d92ae608f684fcac5d143/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041", size = 1707110, upload-time = "2025-06-14T15:14:50.334Z" }, - { url = "https://files.pythonhosted.org/packages/ef/c4/39af17807f694f7a267bd8ab1fbacf16ad66740862192a6c8abac2bff813/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1", size = 1649706, upload-time = "2025-06-14T15:14:52.378Z" }, - { url = "https://files.pythonhosted.org/packages/38/e8/f5a0a5f44f19f171d8477059aa5f28a158d7d57fe1a46c553e231f698435/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1", size = 1725839, upload-time = "2025-06-14T15:14:54.617Z" }, - { url = "https://files.pythonhosted.org/packages/fd/ac/81acc594c7f529ef4419d3866913f628cd4fa9cab17f7bf410a5c3c04c53/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911", size = 1759311, upload-time = "2025-06-14T15:14:56.597Z" }, - { url = 
"https://files.pythonhosted.org/packages/38/0d/aabe636bd25c6ab7b18825e5a97d40024da75152bec39aa6ac8b7a677630/aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3", size = 1708202, upload-time = "2025-06-14T15:14:58.598Z" }, - { url = "https://files.pythonhosted.org/packages/1f/ab/561ef2d8a223261683fb95a6283ad0d36cb66c87503f3a7dde7afe208bb2/aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd", size = 420794, upload-time = "2025-06-14T15:15:00.939Z" }, - { url = "https://files.pythonhosted.org/packages/9d/47/b11d0089875a23bff0abd3edb5516bcd454db3fefab8604f5e4b07bd6210/aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706", size = 446735, upload-time = "2025-06-14T15:15:02.858Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/97/77cb2450d9b35f517d6cf506256bf4f5bda3f93a66b4ad64ba7fc917899c/aiohttp-3.12.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7", size = 702333, upload-time = "2025-07-29T05:50:46.507Z" }, + { url = "https://files.pythonhosted.org/packages/83/6d/0544e6b08b748682c30b9f65640d006e51f90763b41d7c546693bc22900d/aiohttp-3.12.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444", size = 476948, upload-time = "2025-07-29T05:50:48.067Z" }, + { url = 
"https://files.pythonhosted.org/packages/3a/1d/c8c40e611e5094330284b1aea8a4b02ca0858f8458614fa35754cab42b9c/aiohttp-3.12.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d", size = 469787, upload-time = "2025-07-29T05:50:49.669Z" }, + { url = "https://files.pythonhosted.org/packages/38/7d/b76438e70319796bfff717f325d97ce2e9310f752a267bfdf5192ac6082b/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c", size = 1716590, upload-time = "2025-07-29T05:50:51.368Z" }, + { url = "https://files.pythonhosted.org/packages/79/b1/60370d70cdf8b269ee1444b390cbd72ce514f0d1cd1a715821c784d272c9/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0", size = 1699241, upload-time = "2025-07-29T05:50:53.628Z" }, + { url = "https://files.pythonhosted.org/packages/a3/2b/4968a7b8792437ebc12186db31523f541943e99bda8f30335c482bea6879/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab", size = 1754335, upload-time = "2025-07-29T05:50:55.394Z" }, + { url = "https://files.pythonhosted.org/packages/fb/c1/49524ed553f9a0bec1a11fac09e790f49ff669bcd14164f9fab608831c4d/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb", size = 1800491, upload-time = "2025-07-29T05:50:57.202Z" }, + { url = "https://files.pythonhosted.org/packages/de/5e/3bf5acea47a96a28c121b167f5ef659cf71208b19e52a88cdfa5c37f1fcc/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545", size = 1719929, upload-time = 
"2025-07-29T05:50:59.192Z" }, + { url = "https://files.pythonhosted.org/packages/39/94/8ae30b806835bcd1cba799ba35347dee6961a11bd507db634516210e91d8/aiohttp-3.12.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c", size = 1635733, upload-time = "2025-07-29T05:51:01.394Z" }, + { url = "https://files.pythonhosted.org/packages/7a/46/06cdef71dd03acd9da7f51ab3a9107318aee12ad38d273f654e4f981583a/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd", size = 1696790, upload-time = "2025-07-29T05:51:03.657Z" }, + { url = "https://files.pythonhosted.org/packages/02/90/6b4cfaaf92ed98d0ec4d173e78b99b4b1a7551250be8937d9d67ecb356b4/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f", size = 1718245, upload-time = "2025-07-29T05:51:05.911Z" }, + { url = "https://files.pythonhosted.org/packages/2e/e6/2593751670fa06f080a846f37f112cbe6f873ba510d070136a6ed46117c6/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d", size = 1658899, upload-time = "2025-07-29T05:51:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/8f/28/c15bacbdb8b8eb5bf39b10680d129ea7410b859e379b03190f02fa104ffd/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519", size = 1738459, upload-time = "2025-07-29T05:51:09.56Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/c269cbc4faa01fb10f143b1670633a8ddd5b2e1ffd0548f7aa49cb5c70e2/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea", size = 1766434, upload-time = "2025-07-29T05:51:11.423Z" }, + { url = 
"https://files.pythonhosted.org/packages/52/b0/4ff3abd81aa7d929b27d2e1403722a65fc87b763e3a97b3a2a494bfc63bc/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3", size = 1726045, upload-time = "2025-07-29T05:51:13.689Z" }, + { url = "https://files.pythonhosted.org/packages/71/16/949225a6a2dd6efcbd855fbd90cf476052e648fb011aa538e3b15b89a57a/aiohttp-3.12.15-cp312-cp312-win32.whl", hash = "sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1", size = 423591, upload-time = "2025-07-29T05:51:15.452Z" }, + { url = "https://files.pythonhosted.org/packages/2b/d8/fa65d2a349fe938b76d309db1a56a75c4fb8cc7b17a398b698488a939903/aiohttp-3.12.15-cp312-cp312-win_amd64.whl", hash = "sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34", size = 450266, upload-time = "2025-07-29T05:51:17.239Z" }, + { url = "https://files.pythonhosted.org/packages/f2/33/918091abcf102e39d15aba2476ad9e7bd35ddb190dcdd43a854000d3da0d/aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315", size = 696741, upload-time = "2025-07-29T05:51:19.021Z" }, + { url = "https://files.pythonhosted.org/packages/b5/2a/7495a81e39a998e400f3ecdd44a62107254803d1681d9189be5c2e4530cd/aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd", size = 474407, upload-time = "2025-07-29T05:51:21.165Z" }, + { url = "https://files.pythonhosted.org/packages/49/fc/a9576ab4be2dcbd0f73ee8675d16c707cfc12d5ee80ccf4015ba543480c9/aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4", size = 466703, upload-time = "2025-07-29T05:51:22.948Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/2f/d4bcc8448cf536b2b54eed48f19682031ad182faa3a3fee54ebe5b156387/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7", size = 1705532, upload-time = "2025-07-29T05:51:25.211Z" }, + { url = "https://files.pythonhosted.org/packages/f1/f3/59406396083f8b489261e3c011aa8aee9df360a96ac8fa5c2e7e1b8f0466/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d", size = 1686794, upload-time = "2025-07-29T05:51:27.145Z" }, + { url = "https://files.pythonhosted.org/packages/dc/71/164d194993a8d114ee5656c3b7ae9c12ceee7040d076bf7b32fb98a8c5c6/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b", size = 1738865, upload-time = "2025-07-29T05:51:29.366Z" }, + { url = "https://files.pythonhosted.org/packages/1c/00/d198461b699188a93ead39cb458554d9f0f69879b95078dce416d3209b54/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d", size = 1788238, upload-time = "2025-07-29T05:51:31.285Z" }, + { url = "https://files.pythonhosted.org/packages/85/b8/9e7175e1fa0ac8e56baa83bf3c214823ce250d0028955dfb23f43d5e61fd/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d", size = 1710566, upload-time = "2025-07-29T05:51:33.219Z" }, + { url = "https://files.pythonhosted.org/packages/59/e4/16a8eac9df39b48ae102ec030fa9f726d3570732e46ba0c592aeeb507b93/aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645", size = 1624270, upload-time = "2025-07-29T05:51:35.195Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f8/cd84dee7b6ace0740908fd0af170f9fab50c2a41ccbc3806aabcb1050141/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461", size = 1677294, upload-time = "2025-07-29T05:51:37.215Z" }, + { url = "https://files.pythonhosted.org/packages/ce/42/d0f1f85e50d401eccd12bf85c46ba84f947a84839c8a1c2c5f6e8ab1eb50/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9", size = 1708958, upload-time = "2025-07-29T05:51:39.328Z" }, + { url = "https://files.pythonhosted.org/packages/d5/6b/f6fa6c5790fb602538483aa5a1b86fcbad66244997e5230d88f9412ef24c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d", size = 1651553, upload-time = "2025-07-29T05:51:41.356Z" }, + { url = "https://files.pythonhosted.org/packages/04/36/a6d36ad545fa12e61d11d1932eef273928b0495e6a576eb2af04297fdd3c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693", size = 1727688, upload-time = "2025-07-29T05:51:43.452Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c8/f195e5e06608a97a4e52c5d41c7927301bf757a8e8bb5bbf8cef6c314961/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64", size = 1761157, upload-time = "2025-07-29T05:51:45.643Z" }, + { url = "https://files.pythonhosted.org/packages/05/6a/ea199e61b67f25ba688d3ce93f63b49b0a4e3b3d380f03971b4646412fc6/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51", size = 1710050, upload-time = 
"2025-07-29T05:51:48.203Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2e/ffeb7f6256b33635c29dbed29a22a723ff2dd7401fff42ea60cf2060abfb/aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0", size = 422647, upload-time = "2025-07-29T05:51:50.718Z" }, + { url = "https://files.pythonhosted.org/packages/1b/8e/78ee35774201f38d5e1ba079c9958f7629b1fd079459aea9467441dbfbf5/aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84", size = 449067, upload-time = "2025-07-29T05:51:52.549Z" }, ] [[package]] @@ -137,14 +135,15 @@ wheels = [ [[package]] name = "aiosignal" -version = "1.3.2" +version = "1.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "frozenlist" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ba/b5/6d55e80f6d8a08ce22b982eafa278d823b541c925f11ee774b0b9c43473d/aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54", size = 19424, upload-time = "2024-12-13T17:10:40.86Z" } +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597, upload-time = "2024-12-13T17:10:38.469Z" }, + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = 
"sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, ] [[package]] @@ -196,16 +195,16 @@ sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d [[package]] name = "anyio" -version = "4.9.0" +version = "4.10.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "idna" }, { name = "sniffio" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949, upload-time = "2025-03-17T00:02:54.77Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, + { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" }, ] [[package]] @@ -219,11 +218,11 @@ wheels = [ [[package]] name = "astroid" -version = "3.3.10" +version = "3.3.11" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/00/c2/9b2de9ed027f9fe5734a6c0c0a601289d796b3caaf1e372e23fa88a73047/astroid-3.3.10.tar.gz", hash = 
"sha256:c332157953060c6deb9caa57303ae0d20b0fbdb2e59b4a4f2a6ba49d0a7961ce", size = 398941, upload-time = "2025-05-10T13:33:10.405Z" } +sdist = { url = "https://files.pythonhosted.org/packages/18/74/dfb75f9ccd592bbedb175d4a32fc643cf569d7c218508bfbd6ea7ef9c091/astroid-3.3.11.tar.gz", hash = "sha256:1e5a5011af2920c7c67a53f65d536d65bfa7116feeaf2354d8b94f29573bb0ce", size = 400439, upload-time = "2025-07-13T18:04:23.177Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/15/58/5260205b9968c20b6457ed82f48f9e3d6edf2f1f95103161798b73aeccf0/astroid-3.3.10-py3-none-any.whl", hash = "sha256:104fb9cb9b27ea95e847a94c003be03a9e039334a8ebca5ee27dafaf5c5711eb", size = 275388, upload-time = "2025-05-10T13:33:08.391Z" }, + { url = "https://files.pythonhosted.org/packages/af/0f/3b8fdc946b4d9cc8cc1e8af42c4e409468c84441b933d037e101b3d72d86/astroid-3.3.11-py3-none-any.whl", hash = "sha256:54c760ae8322ece1abd213057c4b5bba7c49818853fc901ef09719a60dbf9dec", size = 275612, upload-time = "2025-07-13T18:04:21.07Z" }, ] [[package]] @@ -235,6 +234,32 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, ] +[[package]] +name = "av" +version = "15.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/17/89/940a509ee7e9449f0c877fa984b37b7cc485546035cc67bbc353f2ac20f3/av-15.0.0.tar.gz", hash = "sha256:871c1a9becddf00b60b1294dc0bff9ff193ac31286aeec1a34039bd27e650183", size = 3833128, upload-time = "2025-07-03T16:23:48.455Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/81/c5d009ea9c01a513b7af6aac2ac49c0f2f7193345071cd6dd4d91bef3ab9/av-15.0.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:84e2ede9459e64e768f4bc56d9df65da9e94b704ee3eccfe2e5b1da1da754313", size = 
21782026, upload-time = "2025-07-03T16:22:18.41Z" }, + { url = "https://files.pythonhosted.org/packages/16/8a/ffe9fcac35a07efc6aa0d765015efa499d88823c01499f318760460f8088/av-15.0.0-cp312-cp312-macosx_13_0_x86_64.whl", hash = "sha256:9473ed92d6942c5a449a2c79d49f3425eb0272499d1a3559b32c1181ff736a08", size = 26974939, upload-time = "2025-07-03T16:22:21.493Z" }, + { url = "https://files.pythonhosted.org/packages/a0/e7/0816e52134dc2d0259bb1aaad78573eacaf2bebc1a643de34e3384b520d6/av-15.0.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:56a53fe4e09bebd99355eaa0ce221b681eaf205bdda114f5e17fb79f3c3746ad", size = 34573486, upload-time = "2025-07-03T16:22:24.684Z" }, + { url = "https://files.pythonhosted.org/packages/a3/f4/07cc05712e9824a4bb68beea44eb5a7369dee3f00fa258879190004b7fc5/av-15.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:247dd9a99d7ed3577b8c1e9977e811f423b04504ff36c9dcd7a4de3e6e5fe5ad", size = 38418908, upload-time = "2025-07-03T16:22:27.799Z" }, + { url = "https://files.pythonhosted.org/packages/19/48/7f3a21a41e291f8c5b8a98f95cfef308ce1b024a634413ce910c270efd7d/av-15.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:fc50a7d5f60109221ccf44f8fa4c56ce73f22948b7f19b1717fcc58f7fbc383e", size = 40010257, upload-time = "2025-07-03T16:22:31.15Z" }, + { url = "https://files.pythonhosted.org/packages/6d/c9/ced392e82d39084544d2d0c05decb36446028928eddf0d40ec3d8fe6c050/av-15.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:77deaec8943abfebd4e262924f2f452d6594cf0bc67d8d98aac0462b476e4182", size = 40381801, upload-time = "2025-07-03T16:22:34.254Z" }, + { url = "https://files.pythonhosted.org/packages/d2/73/a23ad111200e27f5773e94b0b6f9e2ea492a72ded7f4787a358d9d504a8b/av-15.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:601d9b0740e47a17ec96ba2a537ebfd4d6edc859ae6f298475c06caa51f0a019", size = 37219417, upload-time = "2025-07-03T16:22:37.497Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/0c/2ac20143b74e3792ede40bfd397ce72fa4e76a03999c2fd0aee3997b6971/av-15.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e021f67e0db7256c9f5d3d6a2a4237a4a4a804b131b33e7f2778981070519b20", size = 41242077, upload-time = "2025-07-03T16:22:40.86Z" }, + { url = "https://files.pythonhosted.org/packages/bd/30/40452705dffbfef0f5505d36218970dfeff0a86048689910219c8717b310/av-15.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:383f1b57520d790069d85fc75f43cfa32fca07f5fb3fb842be37bd596638602c", size = 31357617, upload-time = "2025-07-03T16:22:43.934Z" }, + { url = "https://files.pythonhosted.org/packages/a6/27/c2e248498ce78dd504b0b1818ce88e71e30a7e26c348bdf5d6467d7b06f7/av-15.0.0-cp313-cp313-macosx_13_0_arm64.whl", hash = "sha256:0701c116f32bd9478023f610722f6371d15ca0c068ff228d355f54a7cf23d9cb", size = 21746400, upload-time = "2025-07-03T16:22:46.604Z" }, + { url = "https://files.pythonhosted.org/packages/1d/d8/11f8452f19f4ddc189e978b215420131db40e3919135c14a0d13520f7c94/av-15.0.0-cp313-cp313-macosx_13_0_x86_64.whl", hash = "sha256:57fb6232494ec575b8e78e5a9ef9b811d78f8d67324476ec8430ca3146751124", size = 26939576, upload-time = "2025-07-03T16:22:49.255Z" }, + { url = "https://files.pythonhosted.org/packages/00/1c/b109fd41487d91b8843f9e199b65e89ca533a612ec788b11ed0ba9812ea3/av-15.0.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:801a3e0afd5c36df70d012d083bfca67ab22d0ebd2c860c0d9432ac875bc0ad6", size = 34284344, upload-time = "2025-07-03T16:22:52.373Z" }, + { url = "https://files.pythonhosted.org/packages/99/71/aee35fa182d0a41227fbd3f4250fd94c54acdd2995025ee59dd948bba930/av-15.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:d5e97791b96741b344bf6dbea4fb14481c117b1f7fe8113721e8d80e26cbb388", size = 38130346, upload-time = "2025-07-03T16:22:56.755Z" }, + { url = 
"https://files.pythonhosted.org/packages/b7/c4/2d9bbc9c42a804c99bc571eeacb2fe1582fe9cfdb726616876cada937d6a/av-15.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:acb4e4aa6bb394d3a9e60feb4cb7a856fc7bac01f3c99019b1d0f11c898c682c", size = 39728857, upload-time = "2025-07-03T16:23:00.392Z" }, + { url = "https://files.pythonhosted.org/packages/7c/d6/a5746e9fb4fdf326e9897abd7538413210e66f35ad4793fe30f87859249d/av-15.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:02d2d80bdbe184f1f3f49b3f5eae7f0ff7cba0a62ab3b18be0505715e586ad29", size = 40109012, upload-time = "2025-07-03T16:23:04.1Z" }, + { url = "https://files.pythonhosted.org/packages/77/1f/da89798231ad0feacfaaea4efec4f1779060226986f97498eabe2c7c54a8/av-15.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:603f3ae751f6678df5d8b949f92c6f8257064bba8b3e8db606a24c29d31b4e25", size = 36929211, upload-time = "2025-07-03T16:23:07.694Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4c/2bcabe65a1c19e552f03540f16155a0d02cb9b7a90d31242ab3e0c7ea0d8/av-15.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:682686a9ea2745e63c8878641ec26b1787b9210533f3e945a6e07e24ab788c2e", size = 40967172, upload-time = "2025-07-03T16:23:13.488Z" }, + { url = "https://files.pythonhosted.org/packages/c9/f0/fe14adaa670ab7a3f709805a8494fd0a2eeb6a5b18b8c59dc6014639a5b1/av-15.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:5758231163b5486dfbf664036be010b7f5ebb24564aaeb62577464be5ea996e0", size = 31332650, upload-time = "2025-07-03T16:23:16.558Z" }, +] + [[package]] name = "babel" version = "2.17.0" @@ -244,6 +269,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, ] +[[package]] +name = "backoff" +version = "2.2.1" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/d7/5bbeb12c44d7c4f2fb5b56abce497eb5ed9f34d85701de869acedd602619/backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba", size = 17001, upload-time = "2022-10-05T19:19:32.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148, upload-time = "2022-10-05T19:19:30.546Z" }, +] + [[package]] name = "bcrypt" version = "4.3.0" @@ -307,6 +341,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285, upload-time = "2025-04-15T17:05:12.221Z" }, ] +[[package]] +name = "bitsandbytes" +version = "0.45.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, + { name = "torch", marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/b7/cb5ce4d1a382cf53c19ef06c5fc29e85f5e129b4da6527dd207d90a5b8ad/bitsandbytes-0.45.5-py3-none-manylinux_2_24_x86_64.whl", hash = "sha256:a5453f30cc6aab6ccaac364e6bf51a7808d3da5f71763dffeb6d9694c59136e4", size = 76059261, upload-time = "2025-04-07T13:32:52.573Z" }, + { url = "https://files.pythonhosted.org/packages/a6/4c/77b535e025ce780d2ada8271c1e481fb7337c1df2588a52fe1c9bd87d2e8/bitsandbytes-0.45.5-py3-none-win_amd64.whl", hash = "sha256:ed1c61b91d989d6a33fd05737d6edbf5086d8ebc89235ee632c7a19144085da2", size = 75430204, upload-time = "2025-04-07T13:32:57.553Z" }, +] + [[package]] name = "blake3" version = "1.0.5" 
@@ -402,36 +449,56 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/96/d32b941a501ab566a16358d68b6eb4e4acc373fab3c3c4d7d9e649f7b4bb/catalogue-2.0.10-py3-none-any.whl", hash = "sha256:58c2de0020aa90f4a2da7dfad161bf7b3b054c86a5f09fcedc0b2b740c109a9f", size = 17325, upload-time = "2023-09-25T06:29:23.337Z" }, ] +[[package]] +name = "causal-conv1d" +version = "1.5.0.post8" +source = { git = "https://github.com/Dao-AILab/causal-conv1d?tag=v1.5.0.post8#82867a9d2e6907cc0f637ac6aff318f696838548" } +dependencies = [ + { name = "ninja" }, + { name = "packaging" }, + { name = "torch" }, +] + [[package]] name = "cbor2" -version = "5.6.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/aa/ba55b47d51d27911981a18743b4d3cebfabccbb0598c09801b734cec4184/cbor2-5.6.5.tar.gz", hash = "sha256:b682820677ee1dbba45f7da11898d2720f92e06be36acec290867d5ebf3d7e09", size = 100886, upload-time = "2024-10-09T12:26:24.106Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/57/af/84ced14c541451696825b7b8ccbb7668f688372ad8ee74aaca4311e79672/cbor2-5.6.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e25c2aebc9db99af7190e2261168cdde8ed3d639ca06868e4f477cf3a228a8e9", size = 67553, upload-time = "2024-10-09T12:25:45.767Z" }, - { url = "https://files.pythonhosted.org/packages/f2/d6/f63a840c68fed4de67d5441947af2dc695152cc488bb0e57312832fb923a/cbor2-5.6.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fde21ac1cf29336a31615a2c469a9cb03cf0add3ae480672d4d38cda467d07fc", size = 67569, upload-time = "2024-10-09T12:25:46.665Z" }, - { url = "https://files.pythonhosted.org/packages/77/ac/5fb79db6e882ec29680f4a974d35c098020a1b4709cad077667a8c3f4676/cbor2-5.6.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8947c102cac79d049eadbd5e2ffb8189952890df7cbc3ee262bbc2f95b011a9", size = 276610, upload-time = "2024-10-09T12:25:48.14Z" }, - { url = 
"https://files.pythonhosted.org/packages/cf/cb/70751377d94112001d46c311b5c40b45f34863dfa78a6bc71b71f40c8c7f/cbor2-5.6.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38886c41bebcd7dca57739439455bce759f1e4c551b511f618b8e9c1295b431b", size = 270004, upload-time = "2024-10-09T12:25:49.769Z" }, - { url = "https://files.pythonhosted.org/packages/f1/90/08800367e920aef31b93bd7b0cd6fadcb3a3f2243f4ed77a0d1c76f22b99/cbor2-5.6.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ae2b49226224e92851c333b91d83292ec62eba53a19c68a79890ce35f1230d70", size = 264913, upload-time = "2024-10-09T12:25:50.92Z" }, - { url = "https://files.pythonhosted.org/packages/a8/9c/76b11a5ea7548bccb0dfef3e8fb3ede48bfeb39348f0c217519e0c40d33a/cbor2-5.6.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f2764804ffb6553283fc4afb10a280715905a4cea4d6dc7c90d3e89c4a93bc8d", size = 266751, upload-time = "2024-10-09T12:25:52.777Z" }, - { url = "https://files.pythonhosted.org/packages/10/18/3866693a87c90cb12f7942e791d0f03a40ba44887dde7b7fc85319647efe/cbor2-5.6.5-cp312-cp312-win_amd64.whl", hash = "sha256:a3ac50485cf67dfaab170a3e7b527630e93cb0a6af8cdaa403054215dff93adf", size = 66739, upload-time = "2024-10-09T12:25:54.606Z" }, - { url = "https://files.pythonhosted.org/packages/2b/69/77e93caae71d1baee927c9762e702c464715d88073133052c74ecc9d37d4/cbor2-5.6.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f0d0a9c5aabd48ecb17acf56004a7542a0b8d8212be52f3102b8218284bd881e", size = 67647, upload-time = "2024-10-09T12:25:55.637Z" }, - { url = "https://files.pythonhosted.org/packages/84/83/cb941d4fd10e4696b2c0f6fb2e3056d9a296e5765b2000a69e29a507f819/cbor2-5.6.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:61ceb77e6aa25c11c814d4fe8ec9e3bac0094a1f5bd8a2a8c95694596ea01e08", size = 67657, upload-time = "2024-10-09T12:25:56.528Z" }, - { url = 
"https://files.pythonhosted.org/packages/5c/3f/e16a1e29994483c751b714cdf61d2956290b0b30e94690fa714a9f155c5c/cbor2-5.6.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97a7e409b864fecf68b2ace8978eb5df1738799a333ec3ea2b9597bfcdd6d7d2", size = 275863, upload-time = "2024-10-09T12:25:57.462Z" }, - { url = "https://files.pythonhosted.org/packages/64/04/f64bda3eea649fe6644c59f13d0e1f4666d975ce305cadf13835233b2a26/cbor2-5.6.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f6d69f38f7d788b04c09ef2b06747536624b452b3c8b371ab78ad43b0296fab", size = 269131, upload-time = "2024-10-09T12:25:59.635Z" }, - { url = "https://files.pythonhosted.org/packages/f4/8d/0d5ad3467f70578b032b3f52eb0f01f0327d5ae6b1f9e7d4d4e01a73aa95/cbor2-5.6.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f91e6d74fa6917df31f8757fdd0e154203b0dd0609ec53eb957016a2b474896a", size = 264728, upload-time = "2024-10-09T12:26:01.407Z" }, - { url = "https://files.pythonhosted.org/packages/77/cb/9b4f7890325eaa374c21fcccfee61a099ccb9ea0bc0f606acf7495f9568c/cbor2-5.6.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5ce13a27ef8fddf643fc17a753fe34aa72b251d03c23da6a560c005dc171085b", size = 266314, upload-time = "2024-10-09T12:26:02.451Z" }, - { url = "https://files.pythonhosted.org/packages/a8/cd/793dc041395609f5dd1edfdf0aecde504dc0fd35ed67eb3b2db79fb8ef4d/cbor2-5.6.5-cp313-cp313-win_amd64.whl", hash = "sha256:54c72a3207bb2d4480c2c39dad12d7971ce0853a99e3f9b8d559ce6eac84f66f", size = 66792, upload-time = "2024-10-09T12:26:03.615Z" }, - { url = "https://files.pythonhosted.org/packages/9b/ef/1c4698cac96d792005ef0611832f38eaee477c275ab4b02cbfc4daba7ad3/cbor2-5.6.5-py3-none-any.whl", hash = "sha256:3038523b8fc7de312bb9cdcbbbd599987e64307c4db357cd2030c472a6c7d468", size = 23752, upload-time = "2024-10-09T12:26:23.167Z" }, +version = "5.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/3a/89/01df16cdc9c60c07956756c90fe92c684021003079e358a78e213bce45a2/cbor2-5.7.0.tar.gz", hash = "sha256:3f6d843f4db4d0ec501c46453c22a4fbebb1abfb5b740e1bcab34c615cd7406b", size = 102374, upload-time = "2025-08-14T08:59:47.294Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/b1/d54c41b1bc71b8dea0bad3409d2a497df35f7b5ae5db70c1cc9ebc8d556d/cbor2-5.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7ad36f0537b75c1aa2c7a462cbdbeec5e8ba02802ea985e0b9fe5deee3b946f4", size = 69020, upload-time = "2025-08-14T08:59:02.276Z" }, + { url = "https://files.pythonhosted.org/packages/f4/e0/45368d5d78b520caaa9ca5a09f55365bc9933d43bce978a528922654ca9f/cbor2-5.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5fc9b335cf28e63d9eed4ae03d1e8f90f1a6b287cabc8d29bfddf73fa70643e9", size = 68950, upload-time = "2025-08-14T08:59:03.882Z" }, + { url = "https://files.pythonhosted.org/packages/1e/6a/9aed5b716407c1d48425ba55c6022a01a9abdbf58a691f50416461fa371d/cbor2-5.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:16bea83598a1eeedbd50c2e9fdf3685bae78ca9d9ec8cd8010777db14a315578", size = 285685, upload-time = "2025-08-14T08:59:05.165Z" }, + { url = "https://files.pythonhosted.org/packages/a8/6e/3499eaa2b858c7695a447b6311303f06ffc90fc2c45851337121661f1f5c/cbor2-5.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8e837825a16e60ace6e284095aa9fbe504bf87a8f4494bf7d95931e37fb01a70", size = 284948, upload-time = "2025-08-14T08:59:06.64Z" }, + { url = "https://files.pythonhosted.org/packages/d1/3e/ae67866ef65717665e0acf2873d466c5d4a1d965b0d0348f2269b73f28fb/cbor2-5.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:27396c5e275ff7c7cd87fe8aaadf781e6194903921f250934af7c86d5efec82e", size = 276375, upload-time = "2025-08-14T08:59:08.845Z" }, + { url = 
"https://files.pythonhosted.org/packages/b6/3d/2f8e9671111661dd571de206344ecd7706f6d458aab191e06834c89aa58e/cbor2-5.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c84bfef78c4e9c81eb0a10cec340222ba4e39498a63fc2e3d5f982a3f4efa4a7", size = 277680, upload-time = "2025-08-14T08:59:10.292Z" }, + { url = "https://files.pythonhosted.org/packages/85/03/27a9fefa4e084c1129d7180727791a166629fdae39e0609508401d322626/cbor2-5.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:f64270a24aaadb15dd31cbd64a98d99fca8e0398a65b1570ba07f3c259eb5516", size = 68354, upload-time = "2025-08-14T08:59:11.561Z" }, + { url = "https://files.pythonhosted.org/packages/25/d9/b856d078696542a0d7486d1ece5c936e937bebe5b114674db18d76feb131/cbor2-5.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:73ef321d7b580f08c9fadc41c3d2a218aa3f01e163be9793c6969aadee07f57a", size = 63896, upload-time = "2025-08-14T08:59:12.977Z" }, + { url = "https://files.pythonhosted.org/packages/5c/2f/25da2b08f7a3d7b3f72e678a373092619821ab706f3f720d29e567a426df/cbor2-5.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7654e77b7f6be029fb37a074b175483a4a8ae3fe5e2a91008926625aa91aef2c", size = 69046, upload-time = "2025-08-14T08:59:14.123Z" }, + { url = "https://files.pythonhosted.org/packages/4b/b5/d324166a5a1feed61aeb32fed70182306796b67cedaf65c91671c8674ea2/cbor2-5.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9bd76624b090faa6900739025d798a4e3130da80dbae15391b42b3d4672a4022", size = 69061, upload-time = "2025-08-14T08:59:15.228Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f9/180e953da537602d8530910f5a5f76c3d7215829d145d93f97fa43324dd7/cbor2-5.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:428d58b54a7b32ede869e79c294d686f826dcfdab9de7f92135dd3ce12e313b8", size = 284642, upload-time = "2025-08-14T08:59:16.511Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/eb/7d79831a5081d25002e36a1b2685210ae8783582d1a99fae350b2b1b899c/cbor2-5.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a91b6912e2ff64f33464f67ec6528cf2e26c06a5f3cc3fb1954f94aa58d68670", size = 283690, upload-time = "2025-08-14T08:59:17.989Z" }, + { url = "https://files.pythonhosted.org/packages/38/43/1403610711ea6b9b957d86bd15fd0585a3917a3d9f8bafbb2cb1ad016361/cbor2-5.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9faeec4525fe3103a71f0fd3d6fe9a49ea6ff4ade8cb7cf1c395001b906a01e5", size = 276305, upload-time = "2025-08-14T08:59:19.43Z" }, + { url = "https://files.pythonhosted.org/packages/77/06/df4a5c7c16df3b604bd560234aff686da443bf70a124c5e3f80dff954e5a/cbor2-5.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:361315ccd8022c44bb501877fd9b236479c975f1a7aed69c8541bd609c0a8908", size = 277416, upload-time = "2025-08-14T08:59:20.798Z" }, + { url = "https://files.pythonhosted.org/packages/84/aa/62288bac4e501e25d04d50bb79ac46d4a6678ff9545941436a702c654eba/cbor2-5.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:293c4a5d6a9a69fcecf595a47430dc3b11f4a3355089b1fe300d0ac48c5776c5", size = 68378, upload-time = "2025-08-14T08:59:22.227Z" }, + { url = "https://files.pythonhosted.org/packages/b6/d6/8358c144767731ffa03c16bb1222b59cb3be632833c70a2132cbe2ed8300/cbor2-5.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:52d6e1a9b2f4475540063d7b966b1b2e93ac497e08ab9a1514fd6330f8db5b4c", size = 63966, upload-time = "2025-08-14T08:59:23.369Z" }, + { url = "https://files.pythonhosted.org/packages/99/32/b653a2a3cfb283bdf0539dbd79d3bafa528aaa26fbe44796897d167e733d/cbor2-5.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f4f0464425ff809b1dd737db8c65a937516aba5eb3794cb1433f7eb8eb7a6535", size = 68993, upload-time = "2025-08-14T08:59:24.497Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/90/79d38f7f645a33e44b87f9333f74c04d01006a11f5291d2e8686815fe731/cbor2-5.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:866d993ebc9c4e4018ab001503dafc4145bb6ec91e1eddf12b8d7b6898021201", size = 69248, upload-time = "2025-08-14T08:59:25.63Z" }, + { url = "https://files.pythonhosted.org/packages/46/ca/59d65f12ef14c54c564f0e4363d9dd049a90d5b0e2a0dab0183062268a36/cbor2-5.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc7a11433ea1c45b6d47484bef99e822fd8a40b4cfbcdc1e00378a7e8704e317", size = 283739, upload-time = "2025-08-14T08:59:26.856Z" }, + { url = "https://files.pythonhosted.org/packages/19/51/5da8661b1aa7a4b7afe06724994b23eca6f7912d2cca705721dbd4aa764a/cbor2-5.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e33242570cb4542302dcb6cf429cc9abe315ff7ebb370de2828eed22a8b00fe8", size = 281246, upload-time = "2025-08-14T08:59:28.261Z" }, + { url = "https://files.pythonhosted.org/packages/d4/2f/565f5f215a9d4211c23e94c5b1761d697d248603ae11ecf83a9a70e99382/cbor2-5.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:855fe80517071028a5804a29b607864b8d90bbb2223331ab2d8cae94b979d61f", size = 275442, upload-time = "2025-08-14T08:59:29.794Z" }, + { url = "https://files.pythonhosted.org/packages/84/11/307a558f6ddc3bd0fc539ac65696acb0253554c88bab5da7d459706eb20e/cbor2-5.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:87170260845c2ea3d74288f667e0bc81c8a6bbc72ff60265d19c59b3e76be266", size = 275372, upload-time = "2025-08-14T08:59:31.589Z" }, + { url = "https://files.pythonhosted.org/packages/92/f0/960b7050a53b8d60f92e6e4c1ce670f9c50ab2ff48468e83b2bef0399b38/cbor2-5.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:a2b591904555e51843c95776df2d6b161226af045e655f464c101d8ad8708e99", size = 70188, upload-time = "2025-08-14T08:59:32.827Z" }, + { url = 
"https://files.pythonhosted.org/packages/a7/83/51805084b6208529f82e5a52261468a56b758728153ee2400c421fa845f4/cbor2-5.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:4460164ffd0ceaf8cc3f5597e73dd99fd781541c7bba0ea64ac93043bf08bb6a", size = 66162, upload-time = "2025-08-14T08:59:34.35Z" }, + { url = "https://files.pythonhosted.org/packages/41/cc/0ce73676d2a0c9e5a9330c301940c50eb325dacf5f6d9690fd43a8817fe9/cbor2-5.7.0-py3-none-any.whl", hash = "sha256:a871e7a6f7cba1ddb02503ea974f15f6524c95078fbfe0b860fd4193d7c8f27a", size = 23828, upload-time = "2025-08-14T08:59:46.129Z" }, ] [[package]] name = "certifi" -version = "2025.6.15" +version = "2025.8.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload-time = "2025-08-03T03:07:47.08Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, + { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload-time = "2025-08-03T03:07:45.777Z" }, ] [[package]] @@ -478,37 +545,44 @@ wheels = [ [[package]] name = "charset-normalizer" -version = "3.4.2" -source 
= { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, - { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, - { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, - { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, - { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, - { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, - { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, - { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, - { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, - { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, - { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, - { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, - { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, - { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, - { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, - { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, - { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, - { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, - { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, - { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, - { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, - { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, - { url = 
"https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, - { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, - { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, - { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, - { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +version = "3.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload-time = "2025-08-09T07:57:28.46Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655, upload-time = "2025-08-09T07:56:08.475Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223, upload-time = "2025-08-09T07:56:09.708Z" }, + { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366, upload-time = "2025-08-09T07:56:11.326Z" }, + { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104, upload-time = "2025-08-09T07:56:13.014Z" }, + { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830, upload-time = "2025-08-09T07:56:14.428Z" }, + { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854, upload-time = "2025-08-09T07:56:16.051Z" }, + { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670, upload-time = "2025-08-09T07:56:17.314Z" }, + { url = "https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501, upload-time = "2025-08-09T07:56:18.641Z" }, + { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173, upload-time = "2025-08-09T07:56:20.289Z" }, + { url = "https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822, upload-time = "2025-08-09T07:56:21.551Z" }, + { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543, upload-time = "2025-08-09T07:56:23.115Z" }, + { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 
205326, upload-time = "2025-08-09T07:56:24.721Z" }, + { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload-time = "2025-08-09T07:56:26.004Z" }, + { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload-time = "2025-08-09T07:56:27.25Z" }, + { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload-time = "2025-08-09T07:56:28.515Z" }, + { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload-time = "2025-08-09T07:56:29.716Z" }, + { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload-time = "2025-08-09T07:56:30.984Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload-time = "2025-08-09T07:56:32.252Z" }, + { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload-time = "2025-08-09T07:56:33.481Z" }, + { url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload-time = "2025-08-09T07:56:34.739Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload-time = "2025-08-09T07:56:35.981Z" }, + { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload-time = "2025-08-09T07:56:37.339Z" }, + { url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342, upload-time = "2025-08-09T07:56:38.687Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995, upload-time = "2025-08-09T07:56:40.048Z" }, + { url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640, upload-time = "2025-08-09T07:56:41.311Z" }, + { url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636, upload-time = "2025-08-09T07:56:43.195Z" }, + { url = "https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939, upload-time = "2025-08-09T07:56:44.819Z" }, + { url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580, upload-time = "2025-08-09T07:56:46.684Z" }, + { url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870, upload-time = "2025-08-09T07:56:47.941Z" }, + { url = "https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797, upload-time = "2025-08-09T07:56:49.756Z" }, + { url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224, upload-time = "2025-08-09T07:56:51.369Z" }, + { url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086, upload-time = "2025-08-09T07:56:52.722Z" }, + { url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400, upload-time = "2025-08-09T07:56:55.172Z" }, + { url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload-time = "2025-08-09T07:57:26.864Z" }, ] [[package]] @@ -552,14 +626,14 @@ wheels = [ [[package]] name = "colorful" -version = "0.5.6" +version = "0.5.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/fa/5f/38e40c3bc4107c39e4062d943026b8ee25154cb4b185b882f274a1ab65da/colorful-0.5.6.tar.gz", hash = "sha256:b56d5c01db1dac4898308ea889edcb113fbee3e6ec5df4bacffd61d5241b5b8d", size = 209280, upload-time = "2024-01-07T19:38:54.904Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0c/0c/d180ebf230b771907f46981023a80f62cf592d49673cc5f8a5993aa67bb6/colorful-0.5.7.tar.gz", hash = "sha256:c5452179b56601c178b03d468a5326cc1fe37d9be81d24d0d6bdab36c4b93ad8", size = 209487, upload-time = "2025-06-30T15:24:03.936Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/61/39e7db0cb326c9c8f6a49fad4fc9c2f1241f05a4e10f0643fc31ce26a7e0/colorful-0.5.6-py2.py3-none-any.whl", hash = "sha256:eab8c1c809f5025ad2b5238a50bd691e26850da8cac8f90d660ede6ea1af9f1e", size = 201369, upload-time = "2024-01-07T19:38:53.29Z" }, + { url = "https://files.pythonhosted.org/packages/e2/98/0d791b3d1eaed89d7d370b5cf9b8079b124da0545559417f394ba21b5532/colorful-0.5.7-py2.py3-none-any.whl", hash = "sha256:495dd3a23151a9568cee8a90fc1174c902ad7ef06655f50b6bddf9e80008da69", size = 201475, upload-time = "2025-06-30T15:24:02.693Z" }, ] [[package]] @@ -578,85 +652,132 @@ wheels = [ [[package]] name = "contourpy" -version = "1.3.2" +version = "1.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/66/54/eb9bfc647b19f2009dd5c7f5ec51c4e6ca831725f1aea7a993034f483147/contourpy-1.3.2.tar.gz", hash = "sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54", size = 13466130, upload-time = "2025-04-15T17:47:53.79Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/34/f7/44785876384eff370c251d58fd65f6ad7f39adce4a093c934d4a67a7c6b6/contourpy-1.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4caf2bcd2969402bf77edc4cb6034c7dd7c0803213b3523f111eb7460a51b8d2", size = 271580, upload-time = "2025-04-15T17:37:03.105Z" }, - { url = 
"https://files.pythonhosted.org/packages/93/3b/0004767622a9826ea3d95f0e9d98cd8729015768075d61f9fea8eeca42a8/contourpy-1.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82199cb78276249796419fe36b7386bd8d2cc3f28b3bc19fe2454fe2e26c4c15", size = 255530, upload-time = "2025-04-15T17:37:07.026Z" }, - { url = "https://files.pythonhosted.org/packages/e7/bb/7bd49e1f4fa805772d9fd130e0d375554ebc771ed7172f48dfcd4ca61549/contourpy-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:106fab697af11456fcba3e352ad50effe493a90f893fca6c2ca5c033820cea92", size = 307688, upload-time = "2025-04-15T17:37:11.481Z" }, - { url = "https://files.pythonhosted.org/packages/fc/97/e1d5dbbfa170725ef78357a9a0edc996b09ae4af170927ba8ce977e60a5f/contourpy-1.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d14f12932a8d620e307f715857107b1d1845cc44fdb5da2bc8e850f5ceba9f87", size = 347331, upload-time = "2025-04-15T17:37:18.212Z" }, - { url = "https://files.pythonhosted.org/packages/6f/66/e69e6e904f5ecf6901be3dd16e7e54d41b6ec6ae3405a535286d4418ffb4/contourpy-1.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:532fd26e715560721bb0d5fc7610fce279b3699b018600ab999d1be895b09415", size = 318963, upload-time = "2025-04-15T17:37:22.76Z" }, - { url = "https://files.pythonhosted.org/packages/a8/32/b8a1c8965e4f72482ff2d1ac2cd670ce0b542f203c8e1d34e7c3e6925da7/contourpy-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b383144cf2d2c29f01a1e8170f50dacf0eac02d64139dcd709a8ac4eb3cfe", size = 323681, upload-time = "2025-04-15T17:37:33.001Z" }, - { url = "https://files.pythonhosted.org/packages/30/c6/12a7e6811d08757c7162a541ca4c5c6a34c0f4e98ef2b338791093518e40/contourpy-1.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c49f73e61f1f774650a55d221803b101d966ca0c5a2d6d5e4320ec3997489441", size = 1308674, upload-time = "2025-04-15T17:37:48.64Z" }, - { url = 
"https://files.pythonhosted.org/packages/2a/8a/bebe5a3f68b484d3a2b8ffaf84704b3e343ef1addea528132ef148e22b3b/contourpy-1.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3d80b2c0300583228ac98d0a927a1ba6a2ba6b8a742463c564f1d419ee5b211e", size = 1380480, upload-time = "2025-04-15T17:38:06.7Z" }, - { url = "https://files.pythonhosted.org/packages/34/db/fcd325f19b5978fb509a7d55e06d99f5f856294c1991097534360b307cf1/contourpy-1.3.2-cp312-cp312-win32.whl", hash = "sha256:90df94c89a91b7362e1142cbee7568f86514412ab8a2c0d0fca72d7e91b62912", size = 178489, upload-time = "2025-04-15T17:38:10.338Z" }, - { url = "https://files.pythonhosted.org/packages/01/c8/fadd0b92ffa7b5eb5949bf340a63a4a496a6930a6c37a7ba0f12acb076d6/contourpy-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c942a01d9163e2e5cfb05cb66110121b8d07ad438a17f9e766317bcb62abf73", size = 223042, upload-time = "2025-04-15T17:38:14.239Z" }, - { url = "https://files.pythonhosted.org/packages/2e/61/5673f7e364b31e4e7ef6f61a4b5121c5f170f941895912f773d95270f3a2/contourpy-1.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:de39db2604ae755316cb5967728f4bea92685884b1e767b7c24e983ef5f771cb", size = 271630, upload-time = "2025-04-15T17:38:19.142Z" }, - { url = "https://files.pythonhosted.org/packages/ff/66/a40badddd1223822c95798c55292844b7e871e50f6bfd9f158cb25e0bd39/contourpy-1.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f9e896f447c5c8618f1edb2bafa9a4030f22a575ec418ad70611450720b5b08", size = 255670, upload-time = "2025-04-15T17:38:23.688Z" }, - { url = "https://files.pythonhosted.org/packages/1e/c7/cf9fdee8200805c9bc3b148f49cb9482a4e3ea2719e772602a425c9b09f8/contourpy-1.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71e2bd4a1c4188f5c2b8d274da78faab884b59df20df63c34f74aa1813c4427c", size = 306694, upload-time = "2025-04-15T17:38:28.238Z" }, - { url = 
"https://files.pythonhosted.org/packages/dd/e7/ccb9bec80e1ba121efbffad7f38021021cda5be87532ec16fd96533bb2e0/contourpy-1.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de425af81b6cea33101ae95ece1f696af39446db9682a0b56daaa48cfc29f38f", size = 345986, upload-time = "2025-04-15T17:38:33.502Z" }, - { url = "https://files.pythonhosted.org/packages/dc/49/ca13bb2da90391fa4219fdb23b078d6065ada886658ac7818e5441448b78/contourpy-1.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:977e98a0e0480d3fe292246417239d2d45435904afd6d7332d8455981c408b85", size = 318060, upload-time = "2025-04-15T17:38:38.672Z" }, - { url = "https://files.pythonhosted.org/packages/c8/65/5245ce8c548a8422236c13ffcdcdada6a2a812c361e9e0c70548bb40b661/contourpy-1.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:434f0adf84911c924519d2b08fc10491dd282b20bdd3fa8f60fd816ea0b48841", size = 322747, upload-time = "2025-04-15T17:38:43.712Z" }, - { url = "https://files.pythonhosted.org/packages/72/30/669b8eb48e0a01c660ead3752a25b44fdb2e5ebc13a55782f639170772f9/contourpy-1.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c66c4906cdbc50e9cba65978823e6e00b45682eb09adbb78c9775b74eb222422", size = 1308895, upload-time = "2025-04-15T17:39:00.224Z" }, - { url = "https://files.pythonhosted.org/packages/05/5a/b569f4250decee6e8d54498be7bdf29021a4c256e77fe8138c8319ef8eb3/contourpy-1.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8b7fc0cd78ba2f4695fd0a6ad81a19e7e3ab825c31b577f384aa9d7817dc3bef", size = 1379098, upload-time = "2025-04-15T17:43:29.649Z" }, - { url = "https://files.pythonhosted.org/packages/19/ba/b227c3886d120e60e41b28740ac3617b2f2b971b9f601c835661194579f1/contourpy-1.3.2-cp313-cp313-win32.whl", hash = "sha256:15ce6ab60957ca74cff444fe66d9045c1fd3e92c8936894ebd1f3eef2fff075f", size = 178535, upload-time = "2025-04-15T17:44:44.532Z" }, - { url = 
"https://files.pythonhosted.org/packages/12/6e/2fed56cd47ca739b43e892707ae9a13790a486a3173be063681ca67d2262/contourpy-1.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e1578f7eafce927b168752ed7e22646dad6cd9bca673c60bff55889fa236ebf9", size = 223096, upload-time = "2025-04-15T17:44:48.194Z" }, - { url = "https://files.pythonhosted.org/packages/54/4c/e76fe2a03014a7c767d79ea35c86a747e9325537a8b7627e0e5b3ba266b4/contourpy-1.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0475b1f6604896bc7c53bb070e355e9321e1bc0d381735421a2d2068ec56531f", size = 285090, upload-time = "2025-04-15T17:43:34.084Z" }, - { url = "https://files.pythonhosted.org/packages/7b/e2/5aba47debd55d668e00baf9651b721e7733975dc9fc27264a62b0dd26eb8/contourpy-1.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c85bb486e9be652314bb5b9e2e3b0d1b2e643d5eec4992c0fbe8ac71775da739", size = 268643, upload-time = "2025-04-15T17:43:38.626Z" }, - { url = "https://files.pythonhosted.org/packages/a1/37/cd45f1f051fe6230f751cc5cdd2728bb3a203f5619510ef11e732109593c/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:745b57db7758f3ffc05a10254edd3182a2a83402a89c00957a8e8a22f5582823", size = 310443, upload-time = "2025-04-15T17:43:44.522Z" }, - { url = "https://files.pythonhosted.org/packages/8b/a2/36ea6140c306c9ff6dd38e3bcec80b3b018474ef4d17eb68ceecd26675f4/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:970e9173dbd7eba9b4e01aab19215a48ee5dd3f43cef736eebde064a171f89a5", size = 349865, upload-time = "2025-04-15T17:43:49.545Z" }, - { url = "https://files.pythonhosted.org/packages/95/b7/2fc76bc539693180488f7b6cc518da7acbbb9e3b931fd9280504128bf956/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6c4639a9c22230276b7bffb6a850dfc8258a2521305e1faefe804d006b2e532", size = 321162, upload-time = "2025-04-15T17:43:54.203Z" }, - { url = 
"https://files.pythonhosted.org/packages/f4/10/76d4f778458b0aa83f96e59d65ece72a060bacb20cfbee46cf6cd5ceba41/contourpy-1.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc829960f34ba36aad4302e78eabf3ef16a3a100863f0d4eeddf30e8a485a03b", size = 327355, upload-time = "2025-04-15T17:44:01.025Z" }, - { url = "https://files.pythonhosted.org/packages/43/a3/10cf483ea683f9f8ab096c24bad3cce20e0d1dd9a4baa0e2093c1c962d9d/contourpy-1.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d32530b534e986374fc19eaa77fcb87e8a99e5431499949b828312bdcd20ac52", size = 1307935, upload-time = "2025-04-15T17:44:17.322Z" }, - { url = "https://files.pythonhosted.org/packages/78/73/69dd9a024444489e22d86108e7b913f3528f56cfc312b5c5727a44188471/contourpy-1.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e298e7e70cf4eb179cc1077be1c725b5fd131ebc81181bf0c03525c8abc297fd", size = 1372168, upload-time = "2025-04-15T17:44:33.43Z" }, - { url = "https://files.pythonhosted.org/packages/0f/1b/96d586ccf1b1a9d2004dd519b25fbf104a11589abfd05484ff12199cca21/contourpy-1.3.2-cp313-cp313t-win32.whl", hash = "sha256:d0e589ae0d55204991450bb5c23f571c64fe43adaa53f93fc902a84c96f52fe1", size = 189550, upload-time = "2025-04-15T17:44:37.092Z" }, - { url = "https://files.pythonhosted.org/packages/b0/e6/6000d0094e8a5e32ad62591c8609e269febb6e4db83a1c75ff8868b42731/contourpy-1.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:78e9253c3de756b3f6a5174d024c4835acd59eb3f8e2ca13e775dbffe1558f69", size = 238214, upload-time = "2025-04-15T17:44:40.827Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/be/45/adfee365d9ea3d853550b2e735f9d66366701c65db7855cd07621732ccfc/contourpy-1.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb", size = 293419, upload-time = "2025-07-26T12:01:21.16Z" }, + { url = "https://files.pythonhosted.org/packages/53/3e/405b59cfa13021a56bba395a6b3aca8cec012b45bf177b0eaf7a202cde2c/contourpy-1.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6", size = 273979, upload-time = "2025-07-26T12:01:22.448Z" }, + { url = "https://files.pythonhosted.org/packages/d4/1c/a12359b9b2ca3a845e8f7f9ac08bdf776114eb931392fcad91743e2ea17b/contourpy-1.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7", size = 332653, upload-time = "2025-07-26T12:01:24.155Z" }, + { url = "https://files.pythonhosted.org/packages/63/12/897aeebfb475b7748ea67b61e045accdfcf0d971f8a588b67108ed7f5512/contourpy-1.3.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8", size = 379536, upload-time = "2025-07-26T12:01:25.91Z" }, + { url = "https://files.pythonhosted.org/packages/43/8a/a8c584b82deb248930ce069e71576fc09bd7174bbd35183b7943fb1064fd/contourpy-1.3.3-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea", size = 384397, upload-time = "2025-07-26T12:01:27.152Z" }, + { url = "https://files.pythonhosted.org/packages/cc/8f/ec6289987824b29529d0dfda0d74a07cec60e54b9c92f3c9da4c0ac732de/contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1", size = 362601, upload-time = "2025-07-26T12:01:28.808Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/0a/a3fe3be3ee2dceb3e615ebb4df97ae6f3828aa915d3e10549ce016302bd1/contourpy-1.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7", size = 1331288, upload-time = "2025-07-26T12:01:31.198Z" }, + { url = "https://files.pythonhosted.org/packages/33/1d/acad9bd4e97f13f3e2b18a3977fe1b4a37ecf3d38d815333980c6c72e963/contourpy-1.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411", size = 1403386, upload-time = "2025-07-26T12:01:33.947Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8f/5847f44a7fddf859704217a99a23a4f6417b10e5ab1256a179264561540e/contourpy-1.3.3-cp312-cp312-win32.whl", hash = "sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69", size = 185018, upload-time = "2025-07-26T12:01:35.64Z" }, + { url = "https://files.pythonhosted.org/packages/19/e8/6026ed58a64563186a9ee3f29f41261fd1828f527dd93d33b60feca63352/contourpy-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b", size = 226567, upload-time = "2025-07-26T12:01:36.804Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e2/f05240d2c39a1ed228d8328a78b6f44cd695f7ef47beb3e684cf93604f86/contourpy-1.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc", size = 193655, upload-time = "2025-07-26T12:01:37.999Z" }, + { url = "https://files.pythonhosted.org/packages/68/35/0167aad910bbdb9599272bd96d01a9ec6852f36b9455cf2ca67bd4cc2d23/contourpy-1.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:177fb367556747a686509d6fef71d221a4b198a3905fe824430e5ea0fda54eb5", size = 293257, upload-time = "2025-07-26T12:01:39.367Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/e4/7adcd9c8362745b2210728f209bfbcf7d91ba868a2c5f40d8b58f54c509b/contourpy-1.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d002b6f00d73d69333dac9d0b8d5e84d9724ff9ef044fd63c5986e62b7c9e1b1", size = 274034, upload-time = "2025-07-26T12:01:40.645Z" }, + { url = "https://files.pythonhosted.org/packages/73/23/90e31ceeed1de63058a02cb04b12f2de4b40e3bef5e082a7c18d9c8ae281/contourpy-1.3.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:348ac1f5d4f1d66d3322420f01d42e43122f43616e0f194fc1c9f5d830c5b286", size = 334672, upload-time = "2025-07-26T12:01:41.942Z" }, + { url = "https://files.pythonhosted.org/packages/ed/93/b43d8acbe67392e659e1d984700e79eb67e2acb2bd7f62012b583a7f1b55/contourpy-1.3.3-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:655456777ff65c2c548b7c454af9c6f33f16c8884f11083244b5819cc214f1b5", size = 381234, upload-time = "2025-07-26T12:01:43.499Z" }, + { url = "https://files.pythonhosted.org/packages/46/3b/bec82a3ea06f66711520f75a40c8fc0b113b2a75edb36aa633eb11c4f50f/contourpy-1.3.3-cp313-cp313-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:644a6853d15b2512d67881586bd03f462c7ab755db95f16f14d7e238f2852c67", size = 385169, upload-time = "2025-07-26T12:01:45.219Z" }, + { url = "https://files.pythonhosted.org/packages/4b/32/e0f13a1c5b0f8572d0ec6ae2f6c677b7991fafd95da523159c19eff0696a/contourpy-1.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4debd64f124ca62069f313a9cb86656ff087786016d76927ae2cf37846b006c9", size = 362859, upload-time = "2025-07-26T12:01:46.519Z" }, + { url = "https://files.pythonhosted.org/packages/33/71/e2a7945b7de4e58af42d708a219f3b2f4cff7386e6b6ab0a0fa0033c49a9/contourpy-1.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a15459b0f4615b00bbd1e91f1b9e19b7e63aea7483d03d804186f278c0af2659", size = 1332062, upload-time = "2025-07-26T12:01:48.964Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/fc/4e87ac754220ccc0e807284f88e943d6d43b43843614f0a8afa469801db0/contourpy-1.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca0fdcd73925568ca027e0b17ab07aad764be4706d0a925b89227e447d9737b7", size = 1403932, upload-time = "2025-07-26T12:01:51.979Z" }, + { url = "https://files.pythonhosted.org/packages/a6/2e/adc197a37443f934594112222ac1aa7dc9a98faf9c3842884df9a9d8751d/contourpy-1.3.3-cp313-cp313-win32.whl", hash = "sha256:b20c7c9a3bf701366556e1b1984ed2d0cedf999903c51311417cf5f591d8c78d", size = 185024, upload-time = "2025-07-26T12:01:53.245Z" }, + { url = "https://files.pythonhosted.org/packages/18/0b/0098c214843213759692cc638fce7de5c289200a830e5035d1791d7a2338/contourpy-1.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:1cadd8b8969f060ba45ed7c1b714fe69185812ab43bd6b86a9123fe8f99c3263", size = 226578, upload-time = "2025-07-26T12:01:54.422Z" }, + { url = "https://files.pythonhosted.org/packages/8a/9a/2f6024a0c5995243cd63afdeb3651c984f0d2bc727fd98066d40e141ad73/contourpy-1.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9", size = 193524, upload-time = "2025-07-26T12:01:55.73Z" }, + { url = "https://files.pythonhosted.org/packages/c0/b3/f8a1a86bd3298513f500e5b1f5fd92b69896449f6cab6a146a5d52715479/contourpy-1.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:88df9880d507169449d434c293467418b9f6cbe82edd19284aa0409e7fdb933d", size = 306730, upload-time = "2025-07-26T12:01:57.051Z" }, + { url = "https://files.pythonhosted.org/packages/3f/11/4780db94ae62fc0c2053909b65dc3246bd7cecfc4f8a20d957ad43aa4ad8/contourpy-1.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d06bb1f751ba5d417047db62bca3c8fde202b8c11fb50742ab3ab962c81e8216", size = 287897, upload-time = "2025-07-26T12:01:58.663Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/15/e59f5f3ffdd6f3d4daa3e47114c53daabcb18574a26c21f03dc9e4e42ff0/contourpy-1.3.3-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e4e6b05a45525357e382909a4c1600444e2a45b4795163d3b22669285591c1ae", size = 326751, upload-time = "2025-07-26T12:02:00.343Z" }, + { url = "https://files.pythonhosted.org/packages/0f/81/03b45cfad088e4770b1dcf72ea78d3802d04200009fb364d18a493857210/contourpy-1.3.3-cp313-cp313t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ab3074b48c4e2cf1a960e6bbeb7f04566bf36b1861d5c9d4d8ac04b82e38ba20", size = 375486, upload-time = "2025-07-26T12:02:02.128Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ba/49923366492ffbdd4486e970d421b289a670ae8cf539c1ea9a09822b371a/contourpy-1.3.3-cp313-cp313t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c3d53c796f8647d6deb1abe867daeb66dcc8a97e8455efa729516b997b8ed99", size = 388106, upload-time = "2025-07-26T12:02:03.615Z" }, + { url = "https://files.pythonhosted.org/packages/9f/52/5b00ea89525f8f143651f9f03a0df371d3cbd2fccd21ca9b768c7a6500c2/contourpy-1.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50ed930df7289ff2a8d7afeb9603f8289e5704755c7e5c3bbd929c90c817164b", size = 352548, upload-time = "2025-07-26T12:02:05.165Z" }, + { url = "https://files.pythonhosted.org/packages/32/1d/a209ec1a3a3452d490f6b14dd92e72280c99ae3d1e73da74f8277d4ee08f/contourpy-1.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4feffb6537d64b84877da813a5c30f1422ea5739566abf0bd18065ac040e120a", size = 1322297, upload-time = "2025-07-26T12:02:07.379Z" }, + { url = "https://files.pythonhosted.org/packages/bc/9e/46f0e8ebdd884ca0e8877e46a3f4e633f6c9c8c4f3f6e72be3fe075994aa/contourpy-1.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2b7e9480ffe2b0cd2e787e4df64270e3a0440d9db8dc823312e2c940c167df7e", size = 1391023, upload-time = "2025-07-26T12:02:10.171Z" }, + { url = 
"https://files.pythonhosted.org/packages/b9/70/f308384a3ae9cd2209e0849f33c913f658d3326900d0ff5d378d6a1422d2/contourpy-1.3.3-cp313-cp313t-win32.whl", hash = "sha256:283edd842a01e3dcd435b1c5116798d661378d83d36d337b8dde1d16a5fc9ba3", size = 196157, upload-time = "2025-07-26T12:02:11.488Z" }, + { url = "https://files.pythonhosted.org/packages/b2/dd/880f890a6663b84d9e34a6f88cded89d78f0091e0045a284427cb6b18521/contourpy-1.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:87acf5963fc2b34825e5b6b048f40e3635dd547f590b04d2ab317c2619ef7ae8", size = 240570, upload-time = "2025-07-26T12:02:12.754Z" }, + { url = "https://files.pythonhosted.org/packages/80/99/2adc7d8ffead633234817ef8e9a87115c8a11927a94478f6bb3d3f4d4f7d/contourpy-1.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:3c30273eb2a55024ff31ba7d052dde990d7d8e5450f4bbb6e913558b3d6c2301", size = 199713, upload-time = "2025-07-26T12:02:14.4Z" }, + { url = "https://files.pythonhosted.org/packages/72/8b/4546f3ab60f78c514ffb7d01a0bd743f90de36f0019d1be84d0a708a580a/contourpy-1.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a", size = 292189, upload-time = "2025-07-26T12:02:16.095Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e1/3542a9cb596cadd76fcef413f19c79216e002623158befe6daa03dbfa88c/contourpy-1.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cbedb772ed74ff5be440fa8eee9bd49f64f6e3fc09436d9c7d8f1c287b121d77", size = 273251, upload-time = "2025-07-26T12:02:17.524Z" }, + { url = "https://files.pythonhosted.org/packages/b1/71/f93e1e9471d189f79d0ce2497007731c1e6bf9ef6d1d61b911430c3db4e5/contourpy-1.3.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:22e9b1bd7a9b1d652cd77388465dc358dafcd2e217d35552424aa4f996f524f5", size = 335810, upload-time = "2025-07-26T12:02:18.9Z" }, + { url = 
"https://files.pythonhosted.org/packages/91/f9/e35f4c1c93f9275d4e38681a80506b5510e9327350c51f8d4a5a724d178c/contourpy-1.3.3-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a22738912262aa3e254e4f3cb079a95a67132fc5a063890e224393596902f5a4", size = 382871, upload-time = "2025-07-26T12:02:20.418Z" }, + { url = "https://files.pythonhosted.org/packages/b5/71/47b512f936f66a0a900d81c396a7e60d73419868fba959c61efed7a8ab46/contourpy-1.3.3-cp314-cp314-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:afe5a512f31ee6bd7d0dda52ec9864c984ca3d66664444f2d72e0dc4eb832e36", size = 386264, upload-time = "2025-07-26T12:02:21.916Z" }, + { url = "https://files.pythonhosted.org/packages/04/5f/9ff93450ba96b09c7c2b3f81c94de31c89f92292f1380261bd7195bea4ea/contourpy-1.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f64836de09927cba6f79dcd00fdd7d5329f3fccc633468507079c829ca4db4e3", size = 363819, upload-time = "2025-07-26T12:02:23.759Z" }, + { url = "https://files.pythonhosted.org/packages/3e/a6/0b185d4cc480ee494945cde102cb0149ae830b5fa17bf855b95f2e70ad13/contourpy-1.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1fd43c3be4c8e5fd6e4f2baeae35ae18176cf2e5cced681cca908addf1cdd53b", size = 1333650, upload-time = "2025-07-26T12:02:26.181Z" }, + { url = "https://files.pythonhosted.org/packages/43/d7/afdc95580ca56f30fbcd3060250f66cedbde69b4547028863abd8aa3b47e/contourpy-1.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6afc576f7b33cf00996e5c1102dc2a8f7cc89e39c0b55df93a0b78c1bd992b36", size = 1404833, upload-time = "2025-07-26T12:02:28.782Z" }, + { url = "https://files.pythonhosted.org/packages/e2/e2/366af18a6d386f41132a48f033cbd2102e9b0cf6345d35ff0826cd984566/contourpy-1.3.3-cp314-cp314-win32.whl", hash = "sha256:66c8a43a4f7b8df8b71ee1840e4211a3c8d93b214b213f590e18a1beca458f7d", size = 189692, upload-time = "2025-07-26T12:02:30.128Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/c2/57f54b03d0f22d4044b8afb9ca0e184f8b1afd57b4f735c2fa70883dc601/contourpy-1.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:cf9022ef053f2694e31d630feaacb21ea24224be1c3ad0520b13d844274614fd", size = 232424, upload-time = "2025-07-26T12:02:31.395Z" }, + { url = "https://files.pythonhosted.org/packages/18/79/a9416650df9b525737ab521aa181ccc42d56016d2123ddcb7b58e926a42c/contourpy-1.3.3-cp314-cp314-win_arm64.whl", hash = "sha256:95b181891b4c71de4bb404c6621e7e2390745f887f2a026b2d99e92c17892339", size = 198300, upload-time = "2025-07-26T12:02:32.956Z" }, + { url = "https://files.pythonhosted.org/packages/1f/42/38c159a7d0f2b7b9c04c64ab317042bb6952b713ba875c1681529a2932fe/contourpy-1.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:33c82d0138c0a062380332c861387650c82e4cf1747aaa6938b9b6516762e772", size = 306769, upload-time = "2025-07-26T12:02:34.2Z" }, + { url = "https://files.pythonhosted.org/packages/c3/6c/26a8205f24bca10974e77460de68d3d7c63e282e23782f1239f226fcae6f/contourpy-1.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ea37e7b45949df430fe649e5de8351c423430046a2af20b1c1961cae3afcda77", size = 287892, upload-time = "2025-07-26T12:02:35.807Z" }, + { url = "https://files.pythonhosted.org/packages/66/06/8a475c8ab718ebfd7925661747dbb3c3ee9c82ac834ccb3570be49d129f4/contourpy-1.3.3-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d304906ecc71672e9c89e87c4675dc5c2645e1f4269a5063b99b0bb29f232d13", size = 326748, upload-time = "2025-07-26T12:02:37.193Z" }, + { url = "https://files.pythonhosted.org/packages/b4/a3/c5ca9f010a44c223f098fccd8b158bb1cb287378a31ac141f04730dc49be/contourpy-1.3.3-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca658cd1a680a5c9ea96dc61cdbae1e85c8f25849843aa799dfd3cb370ad4fbe", size = 375554, upload-time = "2025-07-26T12:02:38.894Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/5b/68bd33ae63fac658a4145088c1e894405e07584a316738710b636c6d0333/contourpy-1.3.3-cp314-cp314t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ab2fd90904c503739a75b7c8c5c01160130ba67944a7b77bbf36ef8054576e7f", size = 388118, upload-time = "2025-07-26T12:02:40.642Z" }, + { url = "https://files.pythonhosted.org/packages/40/52/4c285a6435940ae25d7410a6c36bda5145839bc3f0beb20c707cda18b9d2/contourpy-1.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7301b89040075c30e5768810bc96a8e8d78085b47d8be6e4c3f5a0b4ed478a0", size = 352555, upload-time = "2025-07-26T12:02:42.25Z" }, + { url = "https://files.pythonhosted.org/packages/24/ee/3e81e1dd174f5c7fefe50e85d0892de05ca4e26ef1c9a59c2a57e43b865a/contourpy-1.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2a2a8b627d5cc6b7c41a4beff6c5ad5eb848c88255fda4a8745f7e901b32d8e4", size = 1322295, upload-time = "2025-07-26T12:02:44.668Z" }, + { url = "https://files.pythonhosted.org/packages/3c/b2/6d913d4d04e14379de429057cd169e5e00f6c2af3bb13e1710bcbdb5da12/contourpy-1.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fd6ec6be509c787f1caf6b247f0b1ca598bef13f4ddeaa126b7658215529ba0f", size = 1391027, upload-time = "2025-07-26T12:02:47.09Z" }, + { url = "https://files.pythonhosted.org/packages/93/8a/68a4ec5c55a2971213d29a9374913f7e9f18581945a7a31d1a39b5d2dfe5/contourpy-1.3.3-cp314-cp314t-win32.whl", hash = "sha256:e74a9a0f5e3fff48fb5a7f2fd2b9b70a3fe014a67522f79b7cca4c0c7e43c9ae", size = 202428, upload-time = "2025-07-26T12:02:48.691Z" }, + { url = "https://files.pythonhosted.org/packages/fa/96/fd9f641ffedc4fa3ace923af73b9d07e869496c9cc7a459103e6e978992f/contourpy-1.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:13b68d6a62db8eafaebb8039218921399baf6e47bf85006fd8529f2a08ef33fc", size = 250331, upload-time = "2025-07-26T12:02:50.137Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/8c/469afb6465b853afff216f9528ffda78a915ff880ed58813ba4faf4ba0b6/contourpy-1.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:b7448cb5a725bb1e35ce88771b86fba35ef418952474492cf7c764059933ff8b", size = 203831, upload-time = "2025-07-26T12:02:51.449Z" }, ] [[package]] name = "coverage" -version = "7.9.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/e0/98670a80884f64578f0c22cd70c5e81a6e07b08167721c7487b4d70a7ca0/coverage-7.9.1.tar.gz", hash = "sha256:6cf43c78c4282708a28e466316935ec7489a9c487518a77fa68f716c67909cec", size = 813650, upload-time = "2025-06-13T13:02:28.627Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/68/d9/7f66eb0a8f2fce222de7bdc2046ec41cb31fe33fb55a330037833fb88afc/coverage-7.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8de12b4b87c20de895f10567639c0797b621b22897b0af3ce4b4e204a743626", size = 212336, upload-time = "2025-06-13T13:01:10.909Z" }, - { url = "https://files.pythonhosted.org/packages/20/20/e07cb920ef3addf20f052ee3d54906e57407b6aeee3227a9c91eea38a665/coverage-7.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5add197315a054e92cee1b5f686a2bcba60c4c3e66ee3de77ace6c867bdee7cb", size = 212571, upload-time = "2025-06-13T13:01:12.518Z" }, - { url = "https://files.pythonhosted.org/packages/78/f8/96f155de7e9e248ca9c8ff1a40a521d944ba48bec65352da9be2463745bf/coverage-7.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600a1d4106fe66f41e5d0136dfbc68fe7200a5cbe85610ddf094f8f22e1b0300", size = 246377, upload-time = "2025-06-13T13:01:14.87Z" }, - { url = "https://files.pythonhosted.org/packages/3e/cf/1d783bd05b7bca5c10ded5f946068909372e94615a4416afadfe3f63492d/coverage-7.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a876e4c3e5a2a1715a6608906aa5a2e0475b9c0f68343c2ada98110512ab1d8", size = 243394, upload-time = 
"2025-06-13T13:01:16.23Z" }, - { url = "https://files.pythonhosted.org/packages/02/dd/e7b20afd35b0a1abea09fb3998e1abc9f9bd953bee548f235aebd2b11401/coverage-7.9.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81f34346dd63010453922c8e628a52ea2d2ccd73cb2487f7700ac531b247c8a5", size = 245586, upload-time = "2025-06-13T13:01:17.532Z" }, - { url = "https://files.pythonhosted.org/packages/4e/38/b30b0006fea9d617d1cb8e43b1bc9a96af11eff42b87eb8c716cf4d37469/coverage-7.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:888f8eee13f2377ce86d44f338968eedec3291876b0b8a7289247ba52cb984cd", size = 245396, upload-time = "2025-06-13T13:01:19.164Z" }, - { url = "https://files.pythonhosted.org/packages/31/e4/4d8ec1dc826e16791f3daf1b50943e8e7e1eb70e8efa7abb03936ff48418/coverage-7.9.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9969ef1e69b8c8e1e70d591f91bbc37fc9a3621e447525d1602801a24ceda898", size = 243577, upload-time = "2025-06-13T13:01:22.433Z" }, - { url = "https://files.pythonhosted.org/packages/25/f4/b0e96c5c38e6e40ef465c4bc7f138863e2909c00e54a331da335faf0d81a/coverage-7.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:60c458224331ee3f1a5b472773e4a085cc27a86a0b48205409d364272d67140d", size = 244809, upload-time = "2025-06-13T13:01:24.143Z" }, - { url = "https://files.pythonhosted.org/packages/8a/65/27e0a1fa5e2e5079bdca4521be2f5dabf516f94e29a0defed35ac2382eb2/coverage-7.9.1-cp312-cp312-win32.whl", hash = "sha256:5f646a99a8c2b3ff4c6a6e081f78fad0dde275cd59f8f49dc4eab2e394332e74", size = 214724, upload-time = "2025-06-13T13:01:25.435Z" }, - { url = "https://files.pythonhosted.org/packages/9b/a8/d5b128633fd1a5e0401a4160d02fa15986209a9e47717174f99dc2f7166d/coverage-7.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:30f445f85c353090b83e552dcbbdad3ec84c7967e108c3ae54556ca69955563e", size = 215535, upload-time = "2025-06-13T13:01:27.861Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/37/84bba9d2afabc3611f3e4325ee2c6a47cd449b580d4a606b240ce5a6f9bf/coverage-7.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:af41da5dca398d3474129c58cb2b106a5d93bbb196be0d307ac82311ca234342", size = 213904, upload-time = "2025-06-13T13:01:29.202Z" }, - { url = "https://files.pythonhosted.org/packages/d0/a7/a027970c991ca90f24e968999f7d509332daf6b8c3533d68633930aaebac/coverage-7.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:31324f18d5969feef7344a932c32428a2d1a3e50b15a6404e97cba1cc9b2c631", size = 212358, upload-time = "2025-06-13T13:01:30.909Z" }, - { url = "https://files.pythonhosted.org/packages/f2/48/6aaed3651ae83b231556750280682528fea8ac7f1232834573472d83e459/coverage-7.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0c804506d624e8a20fb3108764c52e0eef664e29d21692afa375e0dd98dc384f", size = 212620, upload-time = "2025-06-13T13:01:32.256Z" }, - { url = "https://files.pythonhosted.org/packages/6c/2a/f4b613f3b44d8b9f144847c89151992b2b6b79cbc506dee89ad0c35f209d/coverage-7.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef64c27bc40189f36fcc50c3fb8f16ccda73b6a0b80d9bd6e6ce4cffcd810bbd", size = 245788, upload-time = "2025-06-13T13:01:33.948Z" }, - { url = "https://files.pythonhosted.org/packages/04/d2/de4fdc03af5e4e035ef420ed26a703c6ad3d7a07aff2e959eb84e3b19ca8/coverage-7.9.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4fe2348cc6ec372e25adec0219ee2334a68d2f5222e0cba9c0d613394e12d86", size = 243001, upload-time = "2025-06-13T13:01:35.285Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e8/eed18aa5583b0423ab7f04e34659e51101135c41cd1dcb33ac1d7013a6d6/coverage-7.9.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34ed2186fe52fcc24d4561041979a0dec69adae7bce2ae8d1c49eace13e55c43", size = 244985, upload-time = "2025-06-13T13:01:36.712Z" }, - { url 
= "https://files.pythonhosted.org/packages/17/f8/ae9e5cce8885728c934eaa58ebfa8281d488ef2afa81c3dbc8ee9e6d80db/coverage-7.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:25308bd3d00d5eedd5ae7d4357161f4df743e3c0240fa773ee1b0f75e6c7c0f1", size = 245152, upload-time = "2025-06-13T13:01:39.303Z" }, - { url = "https://files.pythonhosted.org/packages/5a/c8/272c01ae792bb3af9b30fac14d71d63371db227980682836ec388e2c57c0/coverage-7.9.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73e9439310f65d55a5a1e0564b48e34f5369bee943d72c88378f2d576f5a5751", size = 243123, upload-time = "2025-06-13T13:01:40.727Z" }, - { url = "https://files.pythonhosted.org/packages/8c/d0/2819a1e3086143c094ab446e3bdf07138527a7b88cb235c488e78150ba7a/coverage-7.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ab6be0859141b53aa89412a82454b482c81cf750de4f29223d52268a86de67", size = 244506, upload-time = "2025-06-13T13:01:42.184Z" }, - { url = "https://files.pythonhosted.org/packages/8b/4e/9f6117b89152df7b6112f65c7a4ed1f2f5ec8e60c4be8f351d91e7acc848/coverage-7.9.1-cp313-cp313-win32.whl", hash = "sha256:64bdd969456e2d02a8b08aa047a92d269c7ac1f47e0c977675d550c9a0863643", size = 214766, upload-time = "2025-06-13T13:01:44.482Z" }, - { url = "https://files.pythonhosted.org/packages/27/0f/4b59f7c93b52c2c4ce7387c5a4e135e49891bb3b7408dcc98fe44033bbe0/coverage-7.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:be9e3f68ca9edb897c2184ad0eee815c635565dbe7a0e7e814dc1f7cbab92c0a", size = 215568, upload-time = "2025-06-13T13:01:45.772Z" }, - { url = "https://files.pythonhosted.org/packages/09/1e/9679826336f8c67b9c39a359352882b24a8a7aee48d4c9cad08d38d7510f/coverage-7.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:1c503289ffef1d5105d91bbb4d62cbe4b14bec4d13ca225f9c73cde9bb46207d", size = 213939, upload-time = "2025-06-13T13:01:47.087Z" }, - { url = 
"https://files.pythonhosted.org/packages/bb/5b/5c6b4e7a407359a2e3b27bf9c8a7b658127975def62077d441b93a30dbe8/coverage-7.9.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0b3496922cb5f4215bf5caaef4cf12364a26b0be82e9ed6d050f3352cf2d7ef0", size = 213079, upload-time = "2025-06-13T13:01:48.554Z" }, - { url = "https://files.pythonhosted.org/packages/a2/22/1e2e07279fd2fd97ae26c01cc2186e2258850e9ec125ae87184225662e89/coverage-7.9.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9565c3ab1c93310569ec0d86b017f128f027cab0b622b7af288696d7ed43a16d", size = 213299, upload-time = "2025-06-13T13:01:49.997Z" }, - { url = "https://files.pythonhosted.org/packages/14/c0/4c5125a4b69d66b8c85986d3321520f628756cf524af810baab0790c7647/coverage-7.9.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2241ad5dbf79ae1d9c08fe52b36d03ca122fb9ac6bca0f34439e99f8327ac89f", size = 256535, upload-time = "2025-06-13T13:01:51.314Z" }, - { url = "https://files.pythonhosted.org/packages/81/8b/e36a04889dda9960be4263e95e777e7b46f1bb4fc32202612c130a20c4da/coverage-7.9.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bb5838701ca68b10ebc0937dbd0eb81974bac54447c55cd58dea5bca8451029", size = 252756, upload-time = "2025-06-13T13:01:54.403Z" }, - { url = "https://files.pythonhosted.org/packages/98/82/be04eff8083a09a4622ecd0e1f31a2c563dbea3ed848069e7b0445043a70/coverage-7.9.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a25f814591a8c0c5372c11ac8967f669b97444c47fd794926e175c4047ece", size = 254912, upload-time = "2025-06-13T13:01:56.769Z" }, - { url = "https://files.pythonhosted.org/packages/0f/25/c26610a2c7f018508a5ab958e5b3202d900422cf7cdca7670b6b8ca4e8df/coverage-7.9.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2d04b16a6062516df97969f1ae7efd0de9c31eb6ebdceaa0d213b21c0ca1a683", size = 256144, upload-time = 
"2025-06-13T13:01:58.19Z" }, - { url = "https://files.pythonhosted.org/packages/c5/8b/fb9425c4684066c79e863f1e6e7ecebb49e3a64d9f7f7860ef1688c56f4a/coverage-7.9.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7931b9e249edefb07cd6ae10c702788546341d5fe44db5b6108a25da4dca513f", size = 254257, upload-time = "2025-06-13T13:01:59.645Z" }, - { url = "https://files.pythonhosted.org/packages/93/df/27b882f54157fc1131e0e215b0da3b8d608d9b8ef79a045280118a8f98fe/coverage-7.9.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52e92b01041151bf607ee858e5a56c62d4b70f4dac85b8c8cb7fb8a351ab2c10", size = 255094, upload-time = "2025-06-13T13:02:01.37Z" }, - { url = "https://files.pythonhosted.org/packages/41/5f/cad1c3dbed8b3ee9e16fa832afe365b4e3eeab1fb6edb65ebbf745eabc92/coverage-7.9.1-cp313-cp313t-win32.whl", hash = "sha256:684e2110ed84fd1ca5f40e89aa44adf1729dc85444004111aa01866507adf363", size = 215437, upload-time = "2025-06-13T13:02:02.905Z" }, - { url = "https://files.pythonhosted.org/packages/99/4d/fad293bf081c0e43331ca745ff63673badc20afea2104b431cdd8c278b4c/coverage-7.9.1-cp313-cp313t-win_amd64.whl", hash = "sha256:437c576979e4db840539674e68c84b3cda82bc824dd138d56bead1435f1cb5d7", size = 216605, upload-time = "2025-06-13T13:02:05.638Z" }, - { url = "https://files.pythonhosted.org/packages/1f/56/4ee027d5965fc7fc126d7ec1187529cc30cc7d740846e1ecb5e92d31b224/coverage-7.9.1-cp313-cp313t-win_arm64.whl", hash = "sha256:18a0912944d70aaf5f399e350445738a1a20b50fbea788f640751c2ed9208b6c", size = 214392, upload-time = "2025-06-13T13:02:07.642Z" }, - { url = "https://files.pythonhosted.org/packages/08/b8/7ddd1e8ba9701dea08ce22029917140e6f66a859427406579fd8d0ca7274/coverage-7.9.1-py3-none-any.whl", hash = "sha256:66b974b145aa189516b6bf2d8423e888b742517d37872f6ee4c5be0073bd9a3c", size = 204000, upload-time = "2025-06-13T13:02:27.173Z" }, +version = "7.10.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/d6/4e/08b493f1f1d8a5182df0044acc970799b58a8d289608e0d891a03e9d269a/coverage-7.10.4.tar.gz", hash = "sha256:25f5130af6c8e7297fd14634955ba9e1697f47143f289e2a23284177c0061d27", size = 823798, upload-time = "2025-08-17T00:26:43.314Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/4a/781c9e4dd57cabda2a28e2ce5b00b6be416015265851060945a5ed4bd85e/coverage-7.10.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a1f0264abcabd4853d4cb9b3d164adbf1565da7dab1da1669e93f3ea60162d79", size = 216706, upload-time = "2025-08-17T00:24:51.528Z" }, + { url = "https://files.pythonhosted.org/packages/6a/8c/51255202ca03d2e7b664770289f80db6f47b05138e06cce112b3957d5dfd/coverage-7.10.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:536cbe6b118a4df231b11af3e0f974a72a095182ff8ec5f4868c931e8043ef3e", size = 216939, upload-time = "2025-08-17T00:24:53.171Z" }, + { url = "https://files.pythonhosted.org/packages/06/7f/df11131483698660f94d3c847dc76461369782d7a7644fcd72ac90da8fd0/coverage-7.10.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9a4c0d84134797b7bf3f080599d0cd501471f6c98b715405166860d79cfaa97e", size = 248429, upload-time = "2025-08-17T00:24:54.934Z" }, + { url = "https://files.pythonhosted.org/packages/eb/fa/13ac5eda7300e160bf98f082e75f5c5b4189bf3a883dd1ee42dbedfdc617/coverage-7.10.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7c155fc0f9cee8c9803ea0ad153ab6a3b956baa5d4cd993405dc0b45b2a0b9e0", size = 251178, upload-time = "2025-08-17T00:24:56.353Z" }, + { url = "https://files.pythonhosted.org/packages/9a/bc/f63b56a58ad0bec68a840e7be6b7ed9d6f6288d790760647bb88f5fea41e/coverage-7.10.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5f2ab6e451d4b07855d8bcf063adf11e199bff421a4ba57f5bb95b7444ca62", size = 252313, upload-time = "2025-08-17T00:24:57.692Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/b6/79338f1ea27b01266f845afb4485976211264ab92407d1c307babe3592a7/coverage-7.10.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:685b67d99b945b0c221be0780c336b303a7753b3e0ec0d618c795aada25d5e7a", size = 250230, upload-time = "2025-08-17T00:24:59.293Z" }, + { url = "https://files.pythonhosted.org/packages/bc/93/3b24f1da3e0286a4dc5832427e1d448d5296f8287464b1ff4a222abeeeb5/coverage-7.10.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0c079027e50c2ae44da51c2e294596cbc9dbb58f7ca45b30651c7e411060fc23", size = 248351, upload-time = "2025-08-17T00:25:00.676Z" }, + { url = "https://files.pythonhosted.org/packages/de/5f/d59412f869e49dcc5b89398ef3146c8bfaec870b179cc344d27932e0554b/coverage-7.10.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3749aa72b93ce516f77cf5034d8e3c0dfd45c6e8a163a602ede2dc5f9a0bb927", size = 249788, upload-time = "2025-08-17T00:25:02.354Z" }, + { url = "https://files.pythonhosted.org/packages/cc/52/04a3b733f40a0cc7c4a5b9b010844111dbf906df3e868b13e1ce7b39ac31/coverage-7.10.4-cp312-cp312-win32.whl", hash = "sha256:fecb97b3a52fa9bcd5a7375e72fae209088faf671d39fae67261f37772d5559a", size = 219131, upload-time = "2025-08-17T00:25:03.79Z" }, + { url = "https://files.pythonhosted.org/packages/83/dd/12909fc0b83888197b3ec43a4ac7753589591c08d00d9deda4158df2734e/coverage-7.10.4-cp312-cp312-win_amd64.whl", hash = "sha256:26de58f355626628a21fe6a70e1e1fad95702dafebfb0685280962ae1449f17b", size = 219939, upload-time = "2025-08-17T00:25:05.494Z" }, + { url = "https://files.pythonhosted.org/packages/83/c7/058bb3220fdd6821bada9685eadac2940429ab3c97025ce53549ff423cc1/coverage-7.10.4-cp312-cp312-win_arm64.whl", hash = "sha256:67e8885408f8325198862bc487038a4980c9277d753cb8812510927f2176437a", size = 218572, upload-time = "2025-08-17T00:25:06.897Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/b0/4a3662de81f2ed792a4e425d59c4ae50d8dd1d844de252838c200beed65a/coverage-7.10.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b8e1d2015d5dfdbf964ecef12944c0c8c55b885bb5c0467ae8ef55e0e151233", size = 216735, upload-time = "2025-08-17T00:25:08.617Z" }, + { url = "https://files.pythonhosted.org/packages/c5/e8/e2dcffea01921bfffc6170fb4406cffb763a3b43a047bbd7923566708193/coverage-7.10.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:25735c299439018d66eb2dccf54f625aceb78645687a05f9f848f6e6c751e169", size = 216982, upload-time = "2025-08-17T00:25:10.384Z" }, + { url = "https://files.pythonhosted.org/packages/9d/59/cc89bb6ac869704d2781c2f5f7957d07097c77da0e8fdd4fd50dbf2ac9c0/coverage-7.10.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:715c06cb5eceac4d9b7cdf783ce04aa495f6aff657543fea75c30215b28ddb74", size = 247981, upload-time = "2025-08-17T00:25:11.854Z" }, + { url = "https://files.pythonhosted.org/packages/aa/23/3da089aa177ceaf0d3f96754ebc1318597822e6387560914cc480086e730/coverage-7.10.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e017ac69fac9aacd7df6dc464c05833e834dc5b00c914d7af9a5249fcccf07ef", size = 250584, upload-time = "2025-08-17T00:25:13.483Z" }, + { url = "https://files.pythonhosted.org/packages/ad/82/e8693c368535b4e5fad05252a366a1794d481c79ae0333ed943472fd778d/coverage-7.10.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bad180cc40b3fccb0f0e8c702d781492654ac2580d468e3ffc8065e38c6c2408", size = 251856, upload-time = "2025-08-17T00:25:15.27Z" }, + { url = "https://files.pythonhosted.org/packages/56/19/8b9cb13292e602fa4135b10a26ac4ce169a7fc7c285ff08bedd42ff6acca/coverage-7.10.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:becbdcd14f685fada010a5f792bf0895675ecf7481304fe159f0cd3f289550bd", size = 250015, upload-time = "2025-08-17T00:25:16.759Z" }, + { url 
= "https://files.pythonhosted.org/packages/10/e7/e5903990ce089527cf1c4f88b702985bd65c61ac245923f1ff1257dbcc02/coverage-7.10.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0b485ca21e16a76f68060911f97ebbe3e0d891da1dbbce6af7ca1ab3f98b9097", size = 247908, upload-time = "2025-08-17T00:25:18.232Z" }, + { url = "https://files.pythonhosted.org/packages/dd/c9/7d464f116df1df7fe340669af1ddbe1a371fc60f3082ff3dc837c4f1f2ab/coverage-7.10.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6c1d098ccfe8e1e0a1ed9a0249138899948afd2978cbf48eb1cc3fcd38469690", size = 249525, upload-time = "2025-08-17T00:25:20.141Z" }, + { url = "https://files.pythonhosted.org/packages/ce/42/722e0cdbf6c19e7235c2020837d4e00f3b07820fd012201a983238cc3a30/coverage-7.10.4-cp313-cp313-win32.whl", hash = "sha256:8630f8af2ca84b5c367c3df907b1706621abe06d6929f5045fd628968d421e6e", size = 219173, upload-time = "2025-08-17T00:25:21.56Z" }, + { url = "https://files.pythonhosted.org/packages/97/7e/aa70366f8275955cd51fa1ed52a521c7fcebcc0fc279f53c8c1ee6006dfe/coverage-7.10.4-cp313-cp313-win_amd64.whl", hash = "sha256:f68835d31c421736be367d32f179e14ca932978293fe1b4c7a6a49b555dff5b2", size = 219969, upload-time = "2025-08-17T00:25:23.501Z" }, + { url = "https://files.pythonhosted.org/packages/ac/96/c39d92d5aad8fec28d4606556bfc92b6fee0ab51e4a548d9b49fb15a777c/coverage-7.10.4-cp313-cp313-win_arm64.whl", hash = "sha256:6eaa61ff6724ca7ebc5326d1fae062d85e19b38dd922d50903702e6078370ae7", size = 218601, upload-time = "2025-08-17T00:25:25.295Z" }, + { url = "https://files.pythonhosted.org/packages/79/13/34d549a6177bd80fa5db758cb6fd3057b7ad9296d8707d4ab7f480b0135f/coverage-7.10.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:702978108876bfb3d997604930b05fe769462cc3000150b0e607b7b444f2fd84", size = 217445, upload-time = "2025-08-17T00:25:27.129Z" }, + { url = 
"https://files.pythonhosted.org/packages/6a/c0/433da866359bf39bf595f46d134ff2d6b4293aeea7f3328b6898733b0633/coverage-7.10.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e8f978e8c5521d9c8f2086ac60d931d583fab0a16f382f6eb89453fe998e2484", size = 217676, upload-time = "2025-08-17T00:25:28.641Z" }, + { url = "https://files.pythonhosted.org/packages/7e/d7/2b99aa8737f7801fd95222c79a4ebc8c5dd4460d4bed7ef26b17a60c8d74/coverage-7.10.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:df0ac2ccfd19351411c45e43ab60932b74472e4648b0a9edf6a3b58846e246a9", size = 259002, upload-time = "2025-08-17T00:25:30.065Z" }, + { url = "https://files.pythonhosted.org/packages/08/cf/86432b69d57debaef5abf19aae661ba8f4fcd2882fa762e14added4bd334/coverage-7.10.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73a0d1aaaa3796179f336448e1576a3de6fc95ff4f07c2d7251d4caf5d18cf8d", size = 261178, upload-time = "2025-08-17T00:25:31.517Z" }, + { url = "https://files.pythonhosted.org/packages/23/78/85176593f4aa6e869cbed7a8098da3448a50e3fac5cb2ecba57729a5220d/coverage-7.10.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:873da6d0ed6b3ffc0bc01f2c7e3ad7e2023751c0d8d86c26fe7322c314b031dc", size = 263402, upload-time = "2025-08-17T00:25:33.339Z" }, + { url = "https://files.pythonhosted.org/packages/88/1d/57a27b6789b79abcac0cc5805b31320d7a97fa20f728a6a7c562db9a3733/coverage-7.10.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c6446c75b0e7dda5daa876a1c87b480b2b52affb972fedd6c22edf1aaf2e00ec", size = 260957, upload-time = "2025-08-17T00:25:34.795Z" }, + { url = "https://files.pythonhosted.org/packages/fa/e5/3e5ddfd42835c6def6cd5b2bdb3348da2e34c08d9c1211e91a49e9fd709d/coverage-7.10.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6e73933e296634e520390c44758d553d3b573b321608118363e52113790633b9", size = 258718, upload-time = "2025-08-17T00:25:36.259Z" }, + 
{ url = "https://files.pythonhosted.org/packages/1a/0b/d364f0f7ef111615dc4e05a6ed02cac7b6f2ac169884aa57faeae9eb5fa0/coverage-7.10.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52073d4b08d2cb571234c8a71eb32af3c6923149cf644a51d5957ac128cf6aa4", size = 259848, upload-time = "2025-08-17T00:25:37.754Z" }, + { url = "https://files.pythonhosted.org/packages/10/c6/bbea60a3b309621162e53faf7fac740daaf083048ea22077418e1ecaba3f/coverage-7.10.4-cp313-cp313t-win32.whl", hash = "sha256:e24afb178f21f9ceb1aefbc73eb524769aa9b504a42b26857243f881af56880c", size = 219833, upload-time = "2025-08-17T00:25:39.252Z" }, + { url = "https://files.pythonhosted.org/packages/44/a5/f9f080d49cfb117ddffe672f21eab41bd23a46179a907820743afac7c021/coverage-7.10.4-cp313-cp313t-win_amd64.whl", hash = "sha256:be04507ff1ad206f4be3d156a674e3fb84bbb751ea1b23b142979ac9eebaa15f", size = 220897, upload-time = "2025-08-17T00:25:40.772Z" }, + { url = "https://files.pythonhosted.org/packages/46/89/49a3fc784fa73d707f603e586d84a18c2e7796707044e9d73d13260930b7/coverage-7.10.4-cp313-cp313t-win_arm64.whl", hash = "sha256:f3e3ff3f69d02b5dad67a6eac68cc9c71ae343b6328aae96e914f9f2f23a22e2", size = 219160, upload-time = "2025-08-17T00:25:42.229Z" }, + { url = "https://files.pythonhosted.org/packages/b5/22/525f84b4cbcff66024d29f6909d7ecde97223f998116d3677cfba0d115b5/coverage-7.10.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a59fe0af7dd7211ba595cf7e2867458381f7e5d7b4cffe46274e0b2f5b9f4eb4", size = 216717, upload-time = "2025-08-17T00:25:43.875Z" }, + { url = "https://files.pythonhosted.org/packages/a6/58/213577f77efe44333a416d4bcb251471e7f64b19b5886bb515561b5ce389/coverage-7.10.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3a6c35c5b70f569ee38dc3350cd14fdd0347a8b389a18bb37538cc43e6f730e6", size = 216994, upload-time = "2025-08-17T00:25:45.405Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/85/34ac02d0985a09472f41b609a1d7babc32df87c726c7612dc93d30679b5a/coverage-7.10.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:acb7baf49f513554c4af6ef8e2bd6e8ac74e6ea0c7386df8b3eb586d82ccccc4", size = 248038, upload-time = "2025-08-17T00:25:46.981Z" }, + { url = "https://files.pythonhosted.org/packages/47/4f/2140305ec93642fdaf988f139813629cbb6d8efa661b30a04b6f7c67c31e/coverage-7.10.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a89afecec1ed12ac13ed203238b560cbfad3522bae37d91c102e690b8b1dc46c", size = 250575, upload-time = "2025-08-17T00:25:48.613Z" }, + { url = "https://files.pythonhosted.org/packages/f2/b5/41b5784180b82a083c76aeba8f2c72ea1cb789e5382157b7dc852832aea2/coverage-7.10.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:480442727f464407d8ade6e677b7f21f3b96a9838ab541b9a28ce9e44123c14e", size = 251927, upload-time = "2025-08-17T00:25:50.881Z" }, + { url = "https://files.pythonhosted.org/packages/78/ca/c1dd063e50b71f5aea2ebb27a1c404e7b5ecf5714c8b5301f20e4e8831ac/coverage-7.10.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a89bf193707f4a17f1ed461504031074d87f035153239f16ce86dfb8f8c7ac76", size = 249930, upload-time = "2025-08-17T00:25:52.422Z" }, + { url = "https://files.pythonhosted.org/packages/8d/66/d8907408612ffee100d731798e6090aedb3ba766ecf929df296c1a7ee4fb/coverage-7.10.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:3ddd912c2fc440f0fb3229e764feec85669d5d80a988ff1b336a27d73f63c818", size = 247862, upload-time = "2025-08-17T00:25:54.316Z" }, + { url = "https://files.pythonhosted.org/packages/29/db/53cd8ec8b1c9c52d8e22a25434785bfc2d1e70c0cfb4d278a1326c87f741/coverage-7.10.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a538944ee3a42265e61c7298aeba9ea43f31c01271cf028f437a7b4075592cf", size = 249360, upload-time = "2025-08-17T00:25:55.833Z" }, + { 
url = "https://files.pythonhosted.org/packages/4f/75/5ec0a28ae4a0804124ea5a5becd2b0fa3adf30967ac656711fb5cdf67c60/coverage-7.10.4-cp314-cp314-win32.whl", hash = "sha256:fd2e6002be1c62476eb862b8514b1ba7e7684c50165f2a8d389e77da6c9a2ebd", size = 219449, upload-time = "2025-08-17T00:25:57.984Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ab/66e2ee085ec60672bf5250f11101ad8143b81f24989e8c0e575d16bb1e53/coverage-7.10.4-cp314-cp314-win_amd64.whl", hash = "sha256:ec113277f2b5cf188d95fb66a65c7431f2b9192ee7e6ec9b72b30bbfb53c244a", size = 220246, upload-time = "2025-08-17T00:25:59.868Z" }, + { url = "https://files.pythonhosted.org/packages/37/3b/00b448d385f149143190846217797d730b973c3c0ec2045a7e0f5db3a7d0/coverage-7.10.4-cp314-cp314-win_arm64.whl", hash = "sha256:9744954bfd387796c6a091b50d55ca7cac3d08767795b5eec69ad0f7dbf12d38", size = 218825, upload-time = "2025-08-17T00:26:01.44Z" }, + { url = "https://files.pythonhosted.org/packages/ee/2e/55e20d3d1ce00b513efb6fd35f13899e1c6d4f76c6cbcc9851c7227cd469/coverage-7.10.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5af4829904dda6aabb54a23879f0f4412094ba9ef153aaa464e3c1b1c9bc98e6", size = 217462, upload-time = "2025-08-17T00:26:03.014Z" }, + { url = "https://files.pythonhosted.org/packages/47/b3/aab1260df5876f5921e2c57519e73a6f6eeacc0ae451e109d44ee747563e/coverage-7.10.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7bba5ed85e034831fac761ae506c0644d24fd5594727e174b5a73aff343a7508", size = 217675, upload-time = "2025-08-17T00:26:04.606Z" }, + { url = "https://files.pythonhosted.org/packages/67/23/1cfe2aa50c7026180989f0bfc242168ac7c8399ccc66eb816b171e0ab05e/coverage-7.10.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d57d555b0719834b55ad35045de6cc80fc2b28e05adb6b03c98479f9553b387f", size = 259176, upload-time = "2025-08-17T00:26:06.159Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/72/5882b6aeed3f9de7fc4049874fd7d24213bf1d06882f5c754c8a682606ec/coverage-7.10.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ba62c51a72048bb1ea72db265e6bd8beaabf9809cd2125bbb5306c6ce105f214", size = 261341, upload-time = "2025-08-17T00:26:08.137Z" }, + { url = "https://files.pythonhosted.org/packages/1b/70/a0c76e3087596ae155f8e71a49c2c534c58b92aeacaf4d9d0cbbf2dde53b/coverage-7.10.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0acf0c62a6095f07e9db4ec365cc58c0ef5babb757e54745a1aa2ea2a2564af1", size = 263600, upload-time = "2025-08-17T00:26:11.045Z" }, + { url = "https://files.pythonhosted.org/packages/cb/5f/27e4cd4505b9a3c05257fb7fc509acbc778c830c450cb4ace00bf2b7bda7/coverage-7.10.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e1033bf0f763f5cf49ffe6594314b11027dcc1073ac590b415ea93463466deec", size = 261036, upload-time = "2025-08-17T00:26:12.693Z" }, + { url = "https://files.pythonhosted.org/packages/02/d6/cf2ae3a7f90ab226ea765a104c4e76c5126f73c93a92eaea41e1dc6a1892/coverage-7.10.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:92c29eff894832b6a40da1789b1f252305af921750b03ee4535919db9179453d", size = 258794, upload-time = "2025-08-17T00:26:14.261Z" }, + { url = "https://files.pythonhosted.org/packages/9e/b1/39f222eab0d78aa2001cdb7852aa1140bba632db23a5cfd832218b496d6c/coverage-7.10.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:822c4c830989c2093527e92acd97be4638a44eb042b1bdc0e7a278d84a070bd3", size = 259946, upload-time = "2025-08-17T00:26:15.899Z" }, + { url = "https://files.pythonhosted.org/packages/74/b2/49d82acefe2fe7c777436a3097f928c7242a842538b190f66aac01f29321/coverage-7.10.4-cp314-cp314t-win32.whl", hash = "sha256:e694d855dac2e7cf194ba33653e4ba7aad7267a802a7b3fc4347d0517d5d65cd", size = 220226, upload-time = "2025-08-17T00:26:17.566Z" }, + { url = 
"https://files.pythonhosted.org/packages/06/b0/afb942b6b2fc30bdbc7b05b087beae11c2b0daaa08e160586cf012b6ad70/coverage-7.10.4-cp314-cp314t-win_amd64.whl", hash = "sha256:efcc54b38ef7d5bfa98050f220b415bc5bb3d432bd6350a861cf6da0ede2cdcd", size = 221346, upload-time = "2025-08-17T00:26:19.311Z" }, + { url = "https://files.pythonhosted.org/packages/d8/66/e0531c9d1525cb6eac5b5733c76f27f3053ee92665f83f8899516fea6e76/coverage-7.10.4-cp314-cp314t-win_arm64.whl", hash = "sha256:6f3a3496c0fa26bfac4ebc458747b778cff201c8ae94fa05e1391bab0dbc473c", size = 219368, upload-time = "2025-08-17T00:26:21.011Z" }, + { url = "https://files.pythonhosted.org/packages/bb/78/983efd23200921d9edb6bd40512e1aa04af553d7d5a171e50f9b2b45d109/coverage-7.10.4-py3-none-any.whl", hash = "sha256:065d75447228d05121e5c938ca8f0e91eed60a1eb2d1258d42d5084fecfc3302", size = 208365, upload-time = "2025-08-17T00:26:41.479Z" }, ] [[package]] @@ -734,21 +855,68 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a2/7b/b0d330852dd5953daee6b15f742f15d9f18e9c0154eb4cfcc8718f0436da/cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a", size = 2886038, upload-time = "2024-06-04T19:54:18.707Z" }, ] +[[package]] +name = "cuda-bindings" +version = "13.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cuda-pathfinder" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/b5/e90add0eb01d1ceaaae38c944c8a968090eb25dfbe3c81f5300e39c71739/cuda_bindings-13.0.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a06268a4226c867a7234f12ca183e186e7962a4971b53983c8de182dd62878a3", size = 11929946, upload-time = "2025-08-18T15:29:36.485Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/2e/51f77c396bb54128a63da74e299edf2c6c4c08ebfb15d48e43665b5fe3b3/cuda_bindings-13.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:12dd61b782b1558ac3e3790a02e3d9dc4827c6702a3315a9b79b5e1f6bed30f2", size = 12302099, upload-time = "2025-08-18T15:29:38.756Z" }, + { url = "https://files.pythonhosted.org/packages/f0/84/e1ccf4e52d60da76ae538f86c6e73425ae1dc226b4a528893ea2012e0646/cuda_bindings-13.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:06b4533e43c65bf2422db25eb86cd0813a818a2e3cb4b793f4afbdb2f801d894", size = 12046683, upload-time = "2025-08-18T15:29:41.267Z" }, + { url = "https://files.pythonhosted.org/packages/b5/8a/4112bc04f110a89d751b3d580189debe64240c94f6609b6d9e1bd16db16b/cuda_bindings-13.0.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a38090fa452f71c30b93b9007c26f1874c28abc5662b37c5caefbd41506a64f7", size = 11937804, upload-time = "2025-08-18T15:29:43.262Z" }, + { url = "https://files.pythonhosted.org/packages/53/43/c3aa3637458edd10014cf16a4152faca17d8fb6cc233fec23d469eb042aa/cuda_bindings-13.0.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4bdea32e9ede085c72dbc670d2c6ecd68451aa06d0f1cfe597374483dcdd1657", size = 12316943, upload-time = "2025-08-18T15:29:45.261Z" }, + { url = "https://files.pythonhosted.org/packages/ed/fd/033f669fdda93bab7c342d9e08b8bd97b9a3670bd8f5ee5dbc51054d54db/cuda_bindings-13.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:448bf908d17b29e3c5dfa55f848e37f3d4170e5c8644536323bf54a0785e6b98", size = 12009034, upload-time = "2025-08-18T15:29:47.343Z" }, +] + +[[package]] +name = "cuda-pathfinder" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/07/7978a4c4d8e70620170aa247ce16241a72d4cf6e4336bd3b296926baf7df/cuda_pathfinder-1.1.0-py3-none-any.whl", hash = 
"sha256:3e66fe0af8ead20eca25e077d2e0cb2dcc027d4297d550a74f99a0211e610799", size = 17673, upload-time = "2025-08-07T01:34:08.562Z" }, +] + +[[package]] +name = "cuda-python" +version = "13.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cuda-bindings" }, + { name = "cuda-pathfinder" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/02/078f4cba58349faad5597306ca54bf0bf129f8c713b261e1def59468a505/cuda_python-13.0.1-py3-none-any.whl", hash = "sha256:9d8c021953cfbb2c1916a3977c04ad23846cc8ac7647916cb6a1bf4f3280412c", size = 7611, upload-time = "2025-08-18T15:39:40.456Z" }, +] + [[package]] name = "cupy-cuda12x" -version = "13.4.1" +version = "13.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastrlock", marker = "sys_platform != 'darwin'" }, - { name = "numpy", marker = "sys_platform != 'darwin'" }, + { name = "fastrlock" }, + { name = "numpy" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/6d/a5e08d225b1664b400fb4a87262878d315267c310b93d43efd5b7b0b1f64/cupy_cuda12x-13.4.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a714db3dae534b9d869951366ae2431f3e72036b07827927ffccd24076507ca8", size = 118354020, upload-time = "2025-03-21T07:25:10.378Z" }, - { url = "https://files.pythonhosted.org/packages/56/58/5bfc83265455ff783d5be65451392a6920a90fe8996a091006ba02512848/cupy_cuda12x-13.4.1-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:06103dd2dc2ff7f36c67d2d01cb658befd68da350fae78a0e113fbab6895755f", size = 105273045, upload-time = "2025-03-21T07:25:17.966Z" }, - { url = "https://files.pythonhosted.org/packages/6f/e9/abc5ae5d8f6e05fb44c83105f8663d46c1bdfc9d0039fbaf21e79f51a985/cupy_cuda12x-13.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:7d73a32b3b49311cf384f2dd9c686cc9244435b2288d628568af6a77262964ad", size = 82066008, upload-time = "2025-03-21T07:25:24.372Z" }, - { url = 
"https://files.pythonhosted.org/packages/cd/59/c5200651fc3c0e1e92393d4e582e7812d5f76f26607c1fb310399c335b21/cupy_cuda12x-13.4.1-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:43f97bedd6e2385f61b939ee37faadff0e1fa701d35f2a328cdc13d5b1b74b48", size = 117957759, upload-time = "2025-03-21T07:25:31.363Z" }, - { url = "https://files.pythonhosted.org/packages/13/33/de71853fcd28aaf961092d895d126bfe5ebecc56d89865ea41ad8e48e559/cupy_cuda12x-13.4.1-cp313-cp313-manylinux2014_x86_64.whl", hash = "sha256:d0d153ac5b24ad183a7bcbe83693a6df06840355bf94b30c1606c519added468", size = 105047230, upload-time = "2025-03-21T07:25:38.084Z" }, - { url = "https://files.pythonhosted.org/packages/08/f6/38f02f85d6062868425180d9b36097bac05a3d222973be5b90aa3a8fd580/cupy_cuda12x-13.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:4ca400146ab1c5f65dad180bc2562b58b91e239b322d33689fafed7b6399e229", size = 82031139, upload-time = "2025-03-21T07:25:44.085Z" }, + { url = "https://files.pythonhosted.org/packages/12/c5/7e7fc4816d0de0154e5d9053242c3a08a0ca8b43ee656a6f7b3b95055a7b/cupy_cuda12x-13.6.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a6970ceefe40f9acbede41d7fe17416bd277b1bd2093adcde457b23b578c5a59", size = 127334633, upload-time = "2025-08-18T08:24:43.065Z" }, + { url = "https://files.pythonhosted.org/packages/e0/95/d7e1295141e7d530674a3cc567e13ed0eb6b81524cb122d797ed996b5bea/cupy_cuda12x-13.6.0-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:79b0cacb5e8b190ef409f9e03f06ac8de1b021b0c0dda47674d446f5557e0eb1", size = 112886268, upload-time = "2025-08-18T08:24:49.294Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8c/14555b63fd78cfac7b88af0094cea0a3cb845d243661ec7da69f7b3ea0de/cupy_cuda12x-13.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:ca06fede7b8b83ca9ad80062544ef2e5bb8d4762d1c4fc3ac8349376de9c8a5e", size = 89785108, upload-time = "2025-08-18T08:24:54.527Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/ec/f62cb991f11fb41291c4c15b6936d7b67ffa71ddb344ad6e8894e06ce58d/cupy_cuda12x-13.6.0-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e5426ae3b1b9cf59927481e457a89e3f0b50a35b114a8034ec9110e7a833434c", size = 126904601, upload-time = "2025-08-18T08:24:59.951Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b8/30127bcdac53a25f94ee201bf4802fcd8d012145567d77c54174d6d01c01/cupy_cuda12x-13.6.0-cp313-cp313-manylinux2014_x86_64.whl", hash = "sha256:52d9e7f83d920da7d81ec2e791c2c2c747fdaa1d7b811971b34865ce6371e98a", size = 112654824, upload-time = "2025-08-18T08:25:05.944Z" }, + { url = "https://files.pythonhosted.org/packages/72/36/c9e24acb19f039f814faea880b3704a3661edaa6739456b73b27540663e3/cupy_cuda12x-13.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:297b4268f839de67ef7865c2202d3f5a0fb8d20bd43360bc51b6e60cb4406447", size = 89750580, upload-time = "2025-08-18T08:25:10.972Z" }, +] + +[[package]] +name = "cut-cross-entropy" +version = "25.3.2" +source = { git = "https://github.com/apple/ml-cross-entropy.git?rev=87a86ab#87a86aba72cfd2f0d8abecaf81c13c4528ea07d8" } +dependencies = [ + { name = "setuptools" }, + { name = "torch" }, + { name = "triton", marker = "sys_platform != 'darwin'" }, ] [[package]] @@ -762,15 +930,15 @@ wheels = [ [[package]] name = "databricks-sdk" -version = "0.59.0" +version = "0.64.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-auth" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/d9/b48531b1b2caa3ed559ece34bf2abff2536048bf88447592621daeaec5d5/databricks_sdk-0.59.0.tar.gz", hash = "sha256:f60a27f00ccdf57d8496dd4a2e46ad17bb9557add09a6b2e23d46f29c0bca613", size = 719165, upload-time = "2025-07-17T11:13:57.847Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/31/18a655a4382851c897a84c94e547e3a8e1a0f2b51e4ee74227c982a53943/databricks_sdk-0.64.0.tar.gz", hash = 
"sha256:e21cce45bb4f1254ad5d22ea77fc30484378beb54b5b42db098d1f975c813e81", size = 746326, upload-time = "2025-08-20T11:47:22.469Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/ac/1d97e438f86c26314227f7b2f0711476db79522a137b60533c5181ae481b/databricks_sdk-0.59.0-py3-none-any.whl", hash = "sha256:2ae4baefd1f7360c8314e2ebdc0a0a6d7e76a88805a65d0415ff73631c1e4c0d", size = 676213, upload-time = "2025-07-17T11:13:56.088Z" }, + { url = "https://files.pythonhosted.org/packages/21/70/734d3b559e72c4231531c77685f204d8c14202ada640c4f16229a6456b57/databricks_sdk-0.64.0-py3-none-any.whl", hash = "sha256:3efb2a739deda3186d0380ad6ced7d4811ced7adcaf61cbf0f897eab52974a17", size = 703407, upload-time = "2025-08-20T11:47:20.509Z" }, ] [[package]] @@ -799,19 +967,19 @@ wheels = [ [[package]] name = "debugpy" -version = "1.8.14" +version = "1.8.16" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bd/75/087fe07d40f490a78782ff3b0a30e3968936854105487decdb33446d4b0e/debugpy-1.8.14.tar.gz", hash = "sha256:7cd287184318416850aa8b60ac90105837bb1e59531898c07569d197d2ed5322", size = 1641444, upload-time = "2025-04-10T19:46:10.981Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/d4/722d0bcc7986172ac2ef3c979ad56a1030e3afd44ced136d45f8142b1f4a/debugpy-1.8.16.tar.gz", hash = "sha256:31e69a1feb1cf6b51efbed3f6c9b0ef03bc46ff050679c4be7ea6d2e23540870", size = 1643809, upload-time = "2025-08-06T18:00:02.647Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d9/2a/ac2df0eda4898f29c46eb6713a5148e6f8b2b389c8ec9e425a4a1d67bf07/debugpy-1.8.14-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:8899c17920d089cfa23e6005ad9f22582fd86f144b23acb9feeda59e84405b84", size = 2501268, upload-time = "2025-04-10T19:46:26.044Z" }, - { url = 
"https://files.pythonhosted.org/packages/10/53/0a0cb5d79dd9f7039169f8bf94a144ad3efa52cc519940b3b7dde23bcb89/debugpy-1.8.14-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6bb5c0dcf80ad5dbc7b7d6eac484e2af34bdacdf81df09b6a3e62792b722826", size = 4221077, upload-time = "2025-04-10T19:46:27.464Z" }, - { url = "https://files.pythonhosted.org/packages/f8/d5/84e01821f362327bf4828728aa31e907a2eca7c78cd7c6ec062780d249f8/debugpy-1.8.14-cp312-cp312-win32.whl", hash = "sha256:281d44d248a0e1791ad0eafdbbd2912ff0de9eec48022a5bfbc332957487ed3f", size = 5255127, upload-time = "2025-04-10T19:46:29.467Z" }, - { url = "https://files.pythonhosted.org/packages/33/16/1ed929d812c758295cac7f9cf3dab5c73439c83d9091f2d91871e648093e/debugpy-1.8.14-cp312-cp312-win_amd64.whl", hash = "sha256:5aa56ef8538893e4502a7d79047fe39b1dae08d9ae257074c6464a7b290b806f", size = 5297249, upload-time = "2025-04-10T19:46:31.538Z" }, - { url = "https://files.pythonhosted.org/packages/4d/e4/395c792b243f2367d84202dc33689aa3d910fb9826a7491ba20fc9e261f5/debugpy-1.8.14-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:329a15d0660ee09fec6786acdb6e0443d595f64f5d096fc3e3ccf09a4259033f", size = 2485676, upload-time = "2025-04-10T19:46:32.96Z" }, - { url = "https://files.pythonhosted.org/packages/ba/f1/6f2ee3f991327ad9e4c2f8b82611a467052a0fb0e247390192580e89f7ff/debugpy-1.8.14-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f920c7f9af409d90f5fd26e313e119d908b0dd2952c2393cd3247a462331f15", size = 4217514, upload-time = "2025-04-10T19:46:34.336Z" }, - { url = "https://files.pythonhosted.org/packages/79/28/b9d146f8f2dc535c236ee09ad3e5ac899adb39d7a19b49f03ac95d216beb/debugpy-1.8.14-cp313-cp313-win32.whl", hash = "sha256:3784ec6e8600c66cbdd4ca2726c72d8ca781e94bce2f396cc606d458146f8f4e", size = 5254756, upload-time = "2025-04-10T19:46:36.199Z" }, - { url = 
"https://files.pythonhosted.org/packages/e0/62/a7b4a57013eac4ccaef6977966e6bec5c63906dd25a86e35f155952e29a1/debugpy-1.8.14-cp313-cp313-win_amd64.whl", hash = "sha256:684eaf43c95a3ec39a96f1f5195a7ff3d4144e4a18d69bb66beeb1a6de605d6e", size = 5297119, upload-time = "2025-04-10T19:46:38.141Z" }, - { url = "https://files.pythonhosted.org/packages/97/1a/481f33c37ee3ac8040d3d51fc4c4e4e7e61cb08b8bc8971d6032acc2279f/debugpy-1.8.14-py2.py3-none-any.whl", hash = "sha256:5cd9a579d553b6cb9759a7908a41988ee6280b961f24f63336835d9418216a20", size = 5256230, upload-time = "2025-04-10T19:46:54.077Z" }, + { url = "https://files.pythonhosted.org/packages/61/fb/0387c0e108d842c902801bc65ccc53e5b91d8c169702a9bbf4f7efcedf0c/debugpy-1.8.16-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:b202e2843e32e80b3b584bcebfe0e65e0392920dc70df11b2bfe1afcb7a085e4", size = 2511822, upload-time = "2025-08-06T18:00:18.526Z" }, + { url = "https://files.pythonhosted.org/packages/37/44/19e02745cae22bf96440141f94e15a69a1afaa3a64ddfc38004668fcdebf/debugpy-1.8.16-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64473c4a306ba11a99fe0bb14622ba4fbd943eb004847d9b69b107bde45aa9ea", size = 4230135, upload-time = "2025-08-06T18:00:19.997Z" }, + { url = "https://files.pythonhosted.org/packages/f3/0b/19b1ba5ee4412f303475a2c7ad5858efb99c90eae5ec627aa6275c439957/debugpy-1.8.16-cp312-cp312-win32.whl", hash = "sha256:833a61ed446426e38b0dd8be3e9d45ae285d424f5bf6cd5b2b559c8f12305508", size = 5281271, upload-time = "2025-08-06T18:00:21.281Z" }, + { url = "https://files.pythonhosted.org/packages/b1/e0/bc62e2dc141de53bd03e2c7cb9d7011de2e65e8bdcdaa26703e4d28656ba/debugpy-1.8.16-cp312-cp312-win_amd64.whl", hash = "sha256:75f204684581e9ef3dc2f67687c3c8c183fde2d6675ab131d94084baf8084121", size = 5323149, upload-time = "2025-08-06T18:00:23.033Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/66/607ab45cc79e60624df386e233ab64a6d8d39ea02e7f80e19c1d451345bb/debugpy-1.8.16-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:85df3adb1de5258dca910ae0bb185e48c98801ec15018a263a92bb06be1c8787", size = 2496157, upload-time = "2025-08-06T18:00:24.361Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a0/c95baae08a75bceabb79868d663a0736655e427ab9c81fb848da29edaeac/debugpy-1.8.16-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee89e948bc236a5c43c4214ac62d28b29388453f5fd328d739035e205365f0b", size = 4222491, upload-time = "2025-08-06T18:00:25.806Z" }, + { url = "https://files.pythonhosted.org/packages/5b/2f/1c8db6ddd8a257c3cd2c46413b267f1d5fa3df910401c899513ce30392d6/debugpy-1.8.16-cp313-cp313-win32.whl", hash = "sha256:cf358066650439847ec5ff3dae1da98b5461ea5da0173d93d5e10f477c94609a", size = 5281126, upload-time = "2025-08-06T18:00:27.207Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ba/c3e154ab307366d6c5a9c1b68de04914e2ce7fa2f50d578311d8cc5074b2/debugpy-1.8.16-cp313-cp313-win_amd64.whl", hash = "sha256:b5aea1083f6f50023e8509399d7dc6535a351cc9f2e8827d1e093175e4d9fa4c", size = 5323094, upload-time = "2025-08-06T18:00:29.03Z" }, + { url = "https://files.pythonhosted.org/packages/52/57/ecc9ae29fa5b2d90107cd1d9bf8ed19aacb74b2264d986ae9d44fe9bdf87/debugpy-1.8.16-py2.py3-none-any.whl", hash = "sha256:19c9521962475b87da6f673514f7fd610328757ec993bf7ec0d8c96f9a325f9e", size = 5287700, upload-time = "2025-08-06T18:00:42.333Z" }, ] [[package]] @@ -823,6 +991,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, ] +[[package]] +name = "decord" +version = "0.6.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/79/936af42edf90a7bd4e41a6cac89c913d4b47fa48a26b042d5129a9242ee3/decord-0.6.0-py3-none-manylinux2010_x86_64.whl", hash = "sha256:51997f20be8958e23b7c4061ba45d0efcd86bffd5fe81c695d0befee0d442976", size = 13602299, upload-time = "2021-06-14T21:30:55.486Z" }, + { url = "https://files.pythonhosted.org/packages/6c/be/e15b5b866da452e62635a7b27513f31cb581fa2ea9cc9b768b535d62a955/decord-0.6.0-py3-none-win_amd64.whl", hash = "sha256:02665d7c4f1193a330205a791bc128f7e108eb6ae5b67144437a02f700943bad", size = 24733380, upload-time = "2021-06-14T21:30:57.766Z" }, +] + +[[package]] +name = "deep-gemm" +version = "2.0.0+7b6b556" +source = { git = "https://github.com/deepseek-ai/DeepGEMM.git?rev=7b6b5563b9d4c1ae07ffbce7f78ad3ac9204827c#7b6b5563b9d4c1ae07ffbce7f78ad3ac9204827c" } + [[package]] name = "deprecated" version = "1.2.18" @@ -868,11 +1053,11 @@ wheels = [ [[package]] name = "distlib" -version = "0.3.9" +version = "0.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923, upload-time = "2024-10-09T18:35:47.551Z" } +sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973, upload-time = "2024-10-09T18:35:44.272Z" }, + { 
url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, ] [[package]] @@ -907,13 +1092,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" }, ] +[[package]] +name = "docopt" +version = "0.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491", size = 25901, upload-time = "2014-06-16T11:18:57.406Z" } + [[package]] name = "docstring-parser" -version = "0.16" +version = "0.17.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/08/12/9c22a58c0b1e29271051222d8906257616da84135af9ed167c9e28f85cb3/docstring_parser-0.16.tar.gz", hash = "sha256:538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e", size = 26565, upload-time = "2024-03-15T10:39:44.419Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/7c/e9fcff7623954d86bdc17782036cbf715ecab1bec4847c008557affe1ca8/docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637", size = 
36533, upload-time = "2024-03-15T10:39:41.527Z" }, + { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" }, ] [[package]] @@ -976,16 +1167,16 @@ wheels = [ [[package]] name = "fastapi" -version = "0.115.13" +version = "0.116.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic" }, { name = "starlette" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/20/64/ec0788201b5554e2a87c49af26b77a4d132f807a0fa9675257ac92c6aa0e/fastapi-0.115.13.tar.gz", hash = "sha256:55d1d25c2e1e0a0a50aceb1c8705cd932def273c102bff0b1c1da88b3c6eb307", size = 295680, upload-time = "2025-06-17T11:49:45.575Z" } +sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/59/4a/e17764385382062b0edbb35a26b7cf76d71e27e456546277a42ba6545c6e/fastapi-0.115.13-py3-none-any.whl", hash = "sha256:0a0cab59afa7bab22f5eb347f8c9864b681558c278395e94035a741fc10cd865", size = 95315, upload-time = "2025-06-17T11:49:44.106Z" }, + { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" }, ] [package.optional-dependencies] @@ -1000,23 +1191,42 @@ standard = [ [[package]] name = "fastapi-cli" -version = "0.0.7" +version = "0.0.8" source = { registry = "https://pypi.org/simple" } 
dependencies = [ { name = "rich-toolkit" }, { name = "typer" }, { name = "uvicorn", extra = ["standard"] }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fe/73/82a5831fbbf8ed75905bacf5b2d9d3dfd6f04d6968b29fe6f72a5ae9ceb1/fastapi_cli-0.0.7.tar.gz", hash = "sha256:02b3b65956f526412515907a0793c9094abd4bfb5457b389f645b0ea6ba3605e", size = 16753, upload-time = "2024-12-15T14:28:10.028Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c6/94/3ef75d9c7c32936ecb539b9750ccbdc3d2568efd73b1cb913278375f4533/fastapi_cli-0.0.8.tar.gz", hash = "sha256:2360f2989b1ab4a3d7fc8b3a0b20e8288680d8af2e31de7c38309934d7f8a0ee", size = 16884, upload-time = "2025-07-07T14:44:09.326Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/e6/5daefc851b514ce2287d8f5d358ae4341089185f78f3217a69d0ce3a390c/fastapi_cli-0.0.7-py3-none-any.whl", hash = "sha256:d549368ff584b2804336c61f192d86ddea080c11255f375959627911944804f4", size = 10705, upload-time = "2024-12-15T14:28:06.18Z" }, + { url = "https://files.pythonhosted.org/packages/e0/3f/6ad3103c5f59208baf4c798526daea6a74085bb35d1c161c501863470476/fastapi_cli-0.0.8-py3-none-any.whl", hash = "sha256:0ea95d882c85b9219a75a65ab27e8da17dac02873e456850fa0a726e96e985eb", size = 10770, upload-time = "2025-07-07T14:44:08.255Z" }, ] [package.optional-dependencies] standard = [ + { name = "fastapi-cloud-cli" }, { name = "uvicorn", extra = ["standard"] }, ] +[[package]] +name = "fastapi-cloud-cli" +version = "0.1.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "pydantic", extra = ["email"] }, + { name = "rich-toolkit" }, + { name = "rignore" }, + { name = "sentry-sdk" }, + { name = "typer" }, + { name = "uvicorn", extra = ["standard"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/2e/3b6e5016affc310e5109bc580f760586eabecea0c8a7ab067611cd849ac0/fastapi_cloud_cli-0.1.5.tar.gz", hash = 
"sha256:341ee585eb731a6d3c3656cb91ad38e5f39809bf1a16d41de1333e38635a7937", size = 22710, upload-time = "2025-07-28T13:30:48.216Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/a6/5aa862489a2918a096166fd98d9fe86b7fd53c607678b3fa9d8c432d88d5/fastapi_cloud_cli-0.1.5-py3-none-any.whl", hash = "sha256:d80525fb9c0e8af122370891f9fa83cf5d496e4ad47a8dd26c0496a6c85a012a", size = 18992, upload-time = "2025-07-28T13:30:47.427Z" }, +] + [[package]] name = "fastrlock" version = "0.8.3" @@ -1052,11 +1262,11 @@ wheels = [ [[package]] name = "filelock" -version = "3.18.0" +version = "3.19.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075, upload-time = "2025-03-14T07:11:40.47Z" } +sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = "2025-08-14T16:56:03.016Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215, upload-time = "2025-03-14T07:11:39.145Z" }, + { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" }, ] [[package]] @@ -1074,7 +1284,7 @@ sdist = { url = "https://files.pythonhosted.org/packages/11/34/9bf60e736ed7bbe15 [[package]] name = "flask" -version = "3.1.1" +version 
= "3.1.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "blinker" }, @@ -1084,9 +1294,9 @@ dependencies = [ { name = "markupsafe" }, { name = "werkzeug" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c0/de/e47735752347f4128bcf354e0da07ef311a78244eba9e3dc1d4a5ab21a98/flask-3.1.1.tar.gz", hash = "sha256:284c7b8f2f58cb737f0cf1c30fd7eaf0ccfcde196099d24ecede3fc2005aa59e", size = 753440, upload-time = "2025-05-13T15:01:17.447Z" } +sdist = { url = "https://files.pythonhosted.org/packages/dc/6d/cfe3c0fcc5e477df242b98bfe186a4c34357b4847e87ecaef04507332dab/flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", size = 720160, upload-time = "2025-08-19T21:03:21.205Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/68/9d4508e893976286d2ead7f8f571314af6c2037af34853a30fd769c02e9d/flask-3.1.1-py3-none-any.whl", hash = "sha256:07aae2bb5eaf77993ef57e357491839f5fd9f4dc281593a81a9e4d79a24f295c", size = 103305, upload-time = "2025-05-13T15:01:15.591Z" }, + { url = "https://files.pythonhosted.org/packages/ec/f9/7f9263c5695f4bd0023734af91bedb2ff8209e8de6ead162f35d8dc762fd/flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c", size = 103308, upload-time = "2025-08-19T21:03:19.499Z" }, ] [[package]] @@ -1106,27 +1316,43 @@ wheels = [ [[package]] name = "fonttools" -version = "4.58.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2e/5a/1124b2c8cb3a8015faf552e92714040bcdbc145dfa29928891b02d147a18/fonttools-4.58.4.tar.gz", hash = "sha256:928a8009b9884ed3aae17724b960987575155ca23c6f0b8146e400cc9e0d44ba", size = 3525026, upload-time = "2025-06-13T17:25:15.426Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/3c/1d1792bfe91ef46f22a3d23b4deb514c325e73c17d4f196b385b5e2faf1c/fonttools-4.58.4-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:462211c0f37a278494e74267a994f6be9a2023d0557aaa9ecbcbfce0f403b5a6", size = 2754082, upload-time = "2025-06-13T17:24:24.862Z" }, - { url = "https://files.pythonhosted.org/packages/2a/1f/2b261689c901a1c3bc57a6690b0b9fc21a9a93a8b0c83aae911d3149f34e/fonttools-4.58.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0c7a12fb6f769165547f00fcaa8d0df9517603ae7e04b625e5acb8639809b82d", size = 2321677, upload-time = "2025-06-13T17:24:26.815Z" }, - { url = "https://files.pythonhosted.org/packages/fe/6b/4607add1755a1e6581ae1fc0c9a640648e0d9cdd6591cc2d581c2e07b8c3/fonttools-4.58.4-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2d42c63020a922154add0a326388a60a55504629edc3274bc273cd3806b4659f", size = 4896354, upload-time = "2025-06-13T17:24:28.428Z" }, - { url = "https://files.pythonhosted.org/packages/cd/95/34b4f483643d0cb11a1f830b72c03fdd18dbd3792d77a2eb2e130a96fada/fonttools-4.58.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8f2b4e6fd45edc6805f5f2c355590b092ffc7e10a945bd6a569fc66c1d2ae7aa", size = 4941633, upload-time = "2025-06-13T17:24:30.568Z" }, - { url = "https://files.pythonhosted.org/packages/81/ac/9bafbdb7694059c960de523e643fa5a61dd2f698f3f72c0ca18ae99257c7/fonttools-4.58.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f155b927f6efb1213a79334e4cb9904d1e18973376ffc17a0d7cd43d31981f1e", size = 4886170, upload-time = "2025-06-13T17:24:32.724Z" }, - { url = "https://files.pythonhosted.org/packages/ae/44/a3a3b70d5709405f7525bb7cb497b4e46151e0c02e3c8a0e40e5e9fe030b/fonttools-4.58.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e38f687d5de97c7fb7da3e58169fb5ba349e464e141f83c3c2e2beb91d317816", size = 5037851, upload-time = "2025-06-13T17:24:35.034Z" }, - { url = "https://files.pythonhosted.org/packages/21/cb/e8923d197c78969454eb876a4a55a07b59c9c4c46598f02b02411dc3b45c/fonttools-4.58.4-cp312-cp312-win32.whl", hash = 
"sha256:636c073b4da9db053aa683db99580cac0f7c213a953b678f69acbca3443c12cc", size = 2187428, upload-time = "2025-06-13T17:24:36.996Z" }, - { url = "https://files.pythonhosted.org/packages/46/e6/fe50183b1a0e1018e7487ee740fa8bb127b9f5075a41e20d017201e8ab14/fonttools-4.58.4-cp312-cp312-win_amd64.whl", hash = "sha256:82e8470535743409b30913ba2822e20077acf9ea70acec40b10fcf5671dceb58", size = 2236649, upload-time = "2025-06-13T17:24:38.985Z" }, - { url = "https://files.pythonhosted.org/packages/d4/4f/c05cab5fc1a4293e6bc535c6cb272607155a0517700f5418a4165b7f9ec8/fonttools-4.58.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5f4a64846495c543796fa59b90b7a7a9dff6839bd852741ab35a71994d685c6d", size = 2745197, upload-time = "2025-06-13T17:24:40.645Z" }, - { url = "https://files.pythonhosted.org/packages/3e/d3/49211b1f96ae49308f4f78ca7664742377a6867f00f704cdb31b57e4b432/fonttools-4.58.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e80661793a5d4d7ad132a2aa1eae2e160fbdbb50831a0edf37c7c63b2ed36574", size = 2317272, upload-time = "2025-06-13T17:24:43.428Z" }, - { url = "https://files.pythonhosted.org/packages/b2/11/c9972e46a6abd752a40a46960e431c795ad1f306775fc1f9e8c3081a1274/fonttools-4.58.4-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fe5807fc64e4ba5130f1974c045a6e8d795f3b7fb6debfa511d1773290dbb76b", size = 4877184, upload-time = "2025-06-13T17:24:45.527Z" }, - { url = "https://files.pythonhosted.org/packages/ea/24/5017c01c9ef8df572cc9eaf9f12be83ad8ed722ff6dc67991d3d752956e4/fonttools-4.58.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b610b9bef841cb8f4b50472494158b1e347d15cad56eac414c722eda695a6cfd", size = 4939445, upload-time = "2025-06-13T17:24:47.647Z" }, - { url = "https://files.pythonhosted.org/packages/79/b0/538cc4d0284b5a8826b4abed93a69db52e358525d4b55c47c8cef3669767/fonttools-4.58.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:2daa7f0e213c38f05f054eb5e1730bd0424aebddbeac094489ea1585807dd187", size = 4878800, upload-time = "2025-06-13T17:24:49.766Z" }, - { url = "https://files.pythonhosted.org/packages/5a/9b/a891446b7a8250e65bffceb248508587958a94db467ffd33972723ab86c9/fonttools-4.58.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:66cccb6c0b944496b7f26450e9a66e997739c513ffaac728d24930df2fd9d35b", size = 5021259, upload-time = "2025-06-13T17:24:51.754Z" }, - { url = "https://files.pythonhosted.org/packages/17/b2/c4d2872cff3ace3ddd1388bf15b76a1d8d5313f0a61f234e9aed287e674d/fonttools-4.58.4-cp313-cp313-win32.whl", hash = "sha256:94d2aebb5ca59a5107825520fde596e344652c1f18170ef01dacbe48fa60c889", size = 2185824, upload-time = "2025-06-13T17:24:54.324Z" }, - { url = "https://files.pythonhosted.org/packages/98/57/cddf8bcc911d4f47dfca1956c1e3aeeb9f7c9b8e88b2a312fe8c22714e0b/fonttools-4.58.4-cp313-cp313-win_amd64.whl", hash = "sha256:b554bd6e80bba582fd326ddab296e563c20c64dca816d5e30489760e0c41529f", size = 2236382, upload-time = "2025-06-13T17:24:56.291Z" }, - { url = "https://files.pythonhosted.org/packages/0b/2f/c536b5b9bb3c071e91d536a4d11f969e911dbb6b227939f4c5b0bca090df/fonttools-4.58.4-py3-none-any.whl", hash = "sha256:a10ce13a13f26cbb9f37512a4346bb437ad7e002ff6fa966a7ce7ff5ac3528bd", size = 1114660, upload-time = "2025-06-13T17:25:13.321Z" }, +version = "4.59.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/7f/29c9c3fe4246f6ad96fee52b88d0dc3a863c7563b0afc959e36d78b965dc/fonttools-4.59.1.tar.gz", hash = "sha256:74995b402ad09822a4c8002438e54940d9f1ecda898d2bb057729d7da983e4cb", size = 3534394, upload-time = "2025-08-14T16:28:14.266Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/fe/6e069cc4cb8881d164a9bd956e9df555bc62d3eb36f6282e43440200009c/fonttools-4.59.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:43ab814bbba5f02a93a152ee61a04182bb5809bd2bc3609f7822e12c53ae2c91", size = 
2769172, upload-time = "2025-08-14T16:26:45.729Z" }, + { url = "https://files.pythonhosted.org/packages/b9/98/ec4e03f748fefa0dd72d9d95235aff6fef16601267f4a2340f0e16b9330f/fonttools-4.59.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4f04c3ffbfa0baafcbc550657cf83657034eb63304d27b05cff1653b448ccff6", size = 2337281, upload-time = "2025-08-14T16:26:47.921Z" }, + { url = "https://files.pythonhosted.org/packages/8b/b1/890360a7e3d04a30ba50b267aca2783f4c1364363797e892e78a4f036076/fonttools-4.59.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d601b153e51a5a6221f0d4ec077b6bfc6ac35bfe6c19aeaa233d8990b2b71726", size = 4909215, upload-time = "2025-08-14T16:26:49.682Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ec/2490599550d6c9c97a44c1e36ef4de52d6acf742359eaa385735e30c05c4/fonttools-4.59.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c735e385e30278c54f43a0d056736942023c9043f84ee1021eff9fd616d17693", size = 4951958, upload-time = "2025-08-14T16:26:51.616Z" }, + { url = "https://files.pythonhosted.org/packages/d1/40/bd053f6f7634234a9b9805ff8ae4f32df4f2168bee23cafd1271ba9915a9/fonttools-4.59.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1017413cdc8555dce7ee23720da490282ab7ec1cf022af90a241f33f9a49afc4", size = 4894738, upload-time = "2025-08-14T16:26:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/ac/a1/3cd12a010d288325a7cfcf298a84825f0f9c29b01dee1baba64edfe89257/fonttools-4.59.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5c6d8d773470a5107052874341ed3c487c16ecd179976d81afed89dea5cd7406", size = 5045983, upload-time = "2025-08-14T16:26:56.153Z" }, + { url = "https://files.pythonhosted.org/packages/a2/af/8a2c3f6619cc43cf87951405337cc8460d08a4e717bb05eaa94b335d11dc/fonttools-4.59.1-cp312-cp312-win32.whl", hash = "sha256:2a2d0d33307f6ad3a2086a95dd607c202ea8852fa9fb52af9b48811154d1428a", size = 2203407, 
upload-time = "2025-08-14T16:26:58.165Z" }, + { url = "https://files.pythonhosted.org/packages/8e/f2/a19b874ddbd3ebcf11d7e25188ef9ac3f68b9219c62263acb34aca8cde05/fonttools-4.59.1-cp312-cp312-win_amd64.whl", hash = "sha256:0b9e4fa7eaf046ed6ac470f6033d52c052481ff7a6e0a92373d14f556f298dc0", size = 2251561, upload-time = "2025-08-14T16:27:00.646Z" }, + { url = "https://files.pythonhosted.org/packages/19/5e/94a4d7f36c36e82f6a81e0064d148542e0ad3e6cf51fc5461ca128f3658d/fonttools-4.59.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:89d9957b54246c6251345297dddf77a84d2c19df96af30d2de24093bbdf0528b", size = 2760192, upload-time = "2025-08-14T16:27:03.024Z" }, + { url = "https://files.pythonhosted.org/packages/ee/a5/f50712fc33ef9d06953c660cefaf8c8fe4b8bc74fa21f44ee5e4f9739439/fonttools-4.59.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8156b11c0d5405810d216f53907bd0f8b982aa5f1e7e3127ab3be1a4062154ff", size = 2332694, upload-time = "2025-08-14T16:27:04.883Z" }, + { url = "https://files.pythonhosted.org/packages/e9/a2/5a9fc21c354bf8613215ce233ab0d933bd17d5ff4c29693636551adbc7b3/fonttools-4.59.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8387876a8011caec52d327d5e5bca705d9399ec4b17afb8b431ec50d47c17d23", size = 4889254, upload-time = "2025-08-14T16:27:07.02Z" }, + { url = "https://files.pythonhosted.org/packages/2d/e5/54a6dc811eba018d022ca2e8bd6f2969291f9586ccf9a22a05fc55f91250/fonttools-4.59.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb13823a74b3a9204a8ed76d3d6d5ec12e64cc5bc44914eb9ff1cdac04facd43", size = 4949109, upload-time = "2025-08-14T16:27:09.3Z" }, + { url = "https://files.pythonhosted.org/packages/db/15/b05c72a248a95bea0fd05fbd95acdf0742945942143fcf961343b7a3663a/fonttools-4.59.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e1ca10da138c300f768bb68e40e5b20b6ecfbd95f91aac4cc15010b6b9d65455", size = 4888428, 
upload-time = "2025-08-14T16:27:11.514Z" }, + { url = "https://files.pythonhosted.org/packages/63/71/c7d6840f858d695adc0c4371ec45e3fb1c8e060b276ba944e2800495aca4/fonttools-4.59.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2beb5bfc4887a3130f8625349605a3a45fe345655ce6031d1bac11017454b943", size = 5032668, upload-time = "2025-08-14T16:27:13.872Z" }, + { url = "https://files.pythonhosted.org/packages/90/54/57be4aca6f1312e2bc4d811200dd822325794e05bdb26eeff0976edca651/fonttools-4.59.1-cp313-cp313-win32.whl", hash = "sha256:419f16d750d78e6d704bfe97b48bba2f73b15c9418f817d0cb8a9ca87a5b94bf", size = 2201832, upload-time = "2025-08-14T16:27:16.126Z" }, + { url = "https://files.pythonhosted.org/packages/fc/1f/1899a6175a5f900ed8730a0d64f53ca1b596ed7609bfda033cf659114258/fonttools-4.59.1-cp313-cp313-win_amd64.whl", hash = "sha256:c536f8a852e8d3fa71dde1ec03892aee50be59f7154b533f0bf3c1174cfd5126", size = 2250673, upload-time = "2025-08-14T16:27:18.033Z" }, + { url = "https://files.pythonhosted.org/packages/15/07/f6ba82c22f118d9985c37fea65d8d715ca71300d78b6c6e90874dc59f11d/fonttools-4.59.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d5c3bfdc9663f3d4b565f9cb3b8c1efb3e178186435b45105bde7328cfddd7fe", size = 2758606, upload-time = "2025-08-14T16:27:20.064Z" }, + { url = "https://files.pythonhosted.org/packages/3a/81/84aa3d0ce27b0112c28b67b637ff7a47cf401cf5fbfee6476e4bc9777580/fonttools-4.59.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ea03f1da0d722fe3c2278a05957e6550175571a4894fbf9d178ceef4a3783d2b", size = 2330187, upload-time = "2025-08-14T16:27:22.42Z" }, + { url = "https://files.pythonhosted.org/packages/17/41/b3ba43f78afb321e2e50232c87304c8d0f5ab39b64389b8286cc39cdb824/fonttools-4.59.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:57a3708ca6bfccb790f585fa6d8f29432ec329618a09ff94c16bcb3c55994643", size = 4832020, upload-time = "2025-08-14T16:27:24.214Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/b1/3af871c7fb325a68938e7ce544ca48bfd2c6bb7b357f3c8252933b29100a/fonttools-4.59.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:729367c91eb1ee84e61a733acc485065a00590618ca31c438e7dd4d600c01486", size = 4930687, upload-time = "2025-08-14T16:27:26.484Z" }, + { url = "https://files.pythonhosted.org/packages/c5/4f/299fc44646b30d9ef03ffaa78b109c7bd32121f0d8f10009ee73ac4514bc/fonttools-4.59.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f8ef66ac6db450193ed150e10b3b45dde7aded10c5d279968bc63368027f62b", size = 4875794, upload-time = "2025-08-14T16:27:28.887Z" }, + { url = "https://files.pythonhosted.org/packages/90/cf/a0a3d763ab58f5f81ceff104ddb662fd9da94248694862b9c6cbd509fdd5/fonttools-4.59.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:075f745d539a998cd92cb84c339a82e53e49114ec62aaea8307c80d3ad3aef3a", size = 4985780, upload-time = "2025-08-14T16:27:30.858Z" }, + { url = "https://files.pythonhosted.org/packages/72/c5/ba76511aaae143d89c29cd32ce30bafb61c477e8759a1590b8483f8065f8/fonttools-4.59.1-cp314-cp314-win32.whl", hash = "sha256:c2b0597522d4c5bb18aa5cf258746a2d4a90f25878cbe865e4d35526abd1b9fc", size = 2205610, upload-time = "2025-08-14T16:27:32.578Z" }, + { url = "https://files.pythonhosted.org/packages/a9/65/b250e69d6caf35bc65cddbf608be0662d741c248f2e7503ab01081fc267e/fonttools-4.59.1-cp314-cp314-win_amd64.whl", hash = "sha256:e9ad4ce044e3236f0814c906ccce8647046cc557539661e35211faadf76f283b", size = 2255376, upload-time = "2025-08-14T16:27:34.653Z" }, + { url = "https://files.pythonhosted.org/packages/11/f3/0bc63a23ac0f8175e23d82f85d6ee693fbd849de7ad739f0a3622182ad29/fonttools-4.59.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:652159e8214eb4856e8387ebcd6b6bd336ee258cbeb639c8be52005b122b9609", size = 2826546, upload-time = "2025-08-14T16:27:36.783Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/46/a3968205590e068fdf60e926be329a207782576cb584d3b7dcd2d2844957/fonttools-4.59.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:43d177cd0e847ea026fedd9f099dc917da136ed8792d142298a252836390c478", size = 2359771, upload-time = "2025-08-14T16:27:39.678Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ff/d14b4c283879e8cb57862d9624a34fe6522b6fcdd46ccbfc58900958794a/fonttools-4.59.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e54437651e1440ee53a95e6ceb6ee440b67a3d348c76f45f4f48de1a5ecab019", size = 4831575, upload-time = "2025-08-14T16:27:41.885Z" }, + { url = "https://files.pythonhosted.org/packages/9c/04/a277d9a584a49d98ca12d3b2c6663bdf333ae97aaa83bd0cdabf7c5a6c84/fonttools-4.59.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6065fdec8ff44c32a483fd44abe5bcdb40dd5e2571a5034b555348f2b3a52cea", size = 5069962, upload-time = "2025-08-14T16:27:44.284Z" }, + { url = "https://files.pythonhosted.org/packages/16/6f/3d2ae69d96c4cdee6dfe7598ca5519a1514487700ca3d7c49c5a1ad65308/fonttools-4.59.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42052b56d176f8b315fbc09259439c013c0cb2109df72447148aeda677599612", size = 4942926, upload-time = "2025-08-14T16:27:46.523Z" }, + { url = "https://files.pythonhosted.org/packages/0c/d3/c17379e0048d03ce26b38e4ab0e9a98280395b00529e093fe2d663ac0658/fonttools-4.59.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:bcd52eaa5c4c593ae9f447c1d13e7e4a00ca21d755645efa660b6999425b3c88", size = 4958678, upload-time = "2025-08-14T16:27:48.555Z" }, + { url = "https://files.pythonhosted.org/packages/8c/3f/c5543a1540abdfb4d375e3ebeb84de365ab9b153ec14cb7db05f537dd1e7/fonttools-4.59.1-cp314-cp314t-win32.whl", hash = "sha256:02e4fdf27c550dded10fe038a5981c29f81cb9bc649ff2eaa48e80dab8998f97", size = 2266706, upload-time = "2025-08-14T16:27:50.556Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/99/85bff6e674226bc8402f983e365f07e76d990e7220ba72bcc738fef52391/fonttools-4.59.1-cp314-cp314t-win_amd64.whl", hash = "sha256:412a5fd6345872a7c249dac5bcce380393f40c1c316ac07f447bc17d51900922", size = 2329994, upload-time = "2025-08-14T16:27:52.36Z" }, + { url = "https://files.pythonhosted.org/packages/0f/64/9d606e66d498917cd7a2ff24f558010d42d6fd4576d9dd57f0bd98333f5a/fonttools-4.59.1-py3-none-any.whl", hash = "sha256:647db657073672a8330608970a984d51573557f328030566521bc03415535042", size = 1130094, upload-time = "2025-08-14T16:28:12.048Z" }, ] [[package]] @@ -1231,14 +1457,14 @@ wheels = [ [[package]] name = "gitpython" -version = "3.1.44" +version = "3.1.45" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c0/89/37df0b71473153574a5cdef8f242de422a0f5d26d7a9e231e6f169b4ad14/gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269", size = 214196, upload-time = "2025-01-02T07:32:43.59Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/9a/4114a9057db2f1462d5c8f8390ab7383925fe1ac012eaa42402ad65c2963/GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110", size = 207599, upload-time = "2025-01-02T07:32:40.731Z" }, + { url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" }, ] [[package]] @@ 
-1330,63 +1556,111 @@ wheels = [ [[package]] name = "greenlet" -version = "3.2.3" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/b8/704d753a5a45507a7aab61f18db9509302ed3d0a27ac7e0359ec2905b1a6/greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d", size = 188260, upload-time = "2025-08-07T13:24:33.51Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/69/9b804adb5fd0671f367781560eb5eb586c4d495277c93bde4307b9e28068/greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd", size = 274079, upload-time = "2025-08-07T13:15:45.033Z" }, + { url = "https://files.pythonhosted.org/packages/46/e9/d2a80c99f19a153eff70bc451ab78615583b8dac0754cfb942223d2c1a0d/greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb", size = 640997, upload-time = "2025-08-07T13:42:56.234Z" }, + { url = "https://files.pythonhosted.org/packages/3b/16/035dcfcc48715ccd345f3a93183267167cdd162ad123cd93067d86f27ce4/greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968", size = 655185, upload-time = "2025-08-07T13:45:27.624Z" }, + { url = "https://files.pythonhosted.org/packages/31/da/0386695eef69ffae1ad726881571dfe28b41970173947e7c558d9998de0f/greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9", size = 649926, upload-time = "2025-08-07T13:53:15.251Z" }, + { url = "https://files.pythonhosted.org/packages/68/88/69bf19fd4dc19981928ceacbc5fd4bb6bc2215d53199e367832e98d1d8fe/greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6", size = 651839, upload-time = "2025-08-07T13:18:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" }, + { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" }, + { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" }, + { url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" }, + { url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size 
= 641073, upload-time = "2025-08-07T13:42:57.23Z" }, + { url = "https://files.pythonhosted.org/packages/f7/0b/bc13f787394920b23073ca3b6c4a7a21396301ed75a655bcb47196b50e6e/greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc", size = 655191, upload-time = "2025-08-07T13:45:29.752Z" }, + { url = "https://files.pythonhosted.org/packages/f2/d6/6adde57d1345a8d0f14d31e4ab9c23cfe8e2cd39c3baf7674b4b0338d266/greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a", size = 649516, upload-time = "2025-08-07T13:53:16.314Z" }, + { url = "https://files.pythonhosted.org/packages/7f/3b/3a3328a788d4a473889a2d403199932be55b1b0060f4ddd96ee7cdfcad10/greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504", size = 652169, upload-time = "2025-08-07T13:18:32.861Z" }, + { url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" }, + { url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" }, + { url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = 
"2025-08-07T13:18:24.072Z" }, + { url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" }, + { url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" }, + { url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" }, + { url = "https://files.pythonhosted.org/packages/c0/aa/687d6b12ffb505a4447567d1f3abea23bd20e73a5bed63871178e0831b7a/greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5", size = 699218, upload-time = "2025-08-07T13:45:30.969Z" }, + { url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" }, + { url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" }, + { url = "https://files.pythonhosted.org/packages/e3/a5/6ddab2b4c112be95601c13428db1d8b6608a8b6039816f2ba09c346c08fc/greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01", size = 303425, upload-time = "2025-08-07T13:32:27.59Z" }, +] + +[[package]] +name = "grimp" +version = "3.9" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752, upload-time = "2025-06-05T16:16:09.955Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992, upload-time = "2025-06-05T16:11:23.467Z" }, - { url = "https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820, upload-time = "2025-06-05T16:38:52.882Z" }, - { url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046, upload-time = 
"2025-06-05T16:41:36.343Z" }, - { url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701, upload-time = "2025-06-05T16:48:19.604Z" }, - { url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747, upload-time = "2025-06-05T16:13:04.628Z" }, - { url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461, upload-time = "2025-06-05T16:12:50.792Z" }, - { url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190, upload-time = "2025-06-05T16:36:48.59Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e1/25297f70717abe8104c20ecf7af0a5b82d2f5a980eb1ac79f65654799f9f/greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163", size = 1149055, upload-time = "2025-06-05T16:12:40.457Z" }, - { url = "https://files.pythonhosted.org/packages/1f/8f/8f9e56c5e82eb2c26e8cde787962e66494312dc8cb261c460e1f3a9c88bc/greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849", size = 297817, upload-time = "2025-06-05T16:29:49.244Z" }, - { url = 
"https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" }, - { url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" }, - { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time = "2025-06-05T16:41:37.89Z" }, - { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" }, - { url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" }, - { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" }, - { url = 
"https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" }, - { url = "https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121, upload-time = "2025-06-05T16:12:42.527Z" }, - { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603, upload-time = "2025-06-05T16:20:12.651Z" }, - { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479, upload-time = "2025-06-05T16:10:47.525Z" }, - { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" }, - { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" }, - { url = 
"https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" }, - { url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" }, - { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = "2025-06-05T16:12:53.453Z" }, - { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" }, +dependencies = [ + { name = "joblib" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/a4/b5109e7457e647e859c3f68cab22c55139f30dbc5549f62b0f216a00e3f1/grimp-3.9.tar.gz", hash = "sha256:b677ac17301d7e0f1e19cc7057731bd7956a2121181eb5057e51efb44301fb0a", size = 840675, upload-time = "2025-05-05T13:46:49.069Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/dd/6b528f821d98d240f4654d7ad947be078e27e55f6d1128207b313213cdde/grimp-3.9-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c19a27aa7541b620df94ceafde89d6ebf9ee1b263e80d278ea45bdd504fec769", size = 1783791, upload-time = "2025-05-05T13:45:40.592Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/a6/646828c8afe6b30b4270b43f1a550f7d3a2334867a002bf3f6b035a37255/grimp-3.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f68e7a771c9eb4459106decd6cc4f11313202b10d943a1a8bed463b528889dd0", size = 1710400, upload-time = "2025-05-05T13:45:32.833Z" }, + { url = "https://files.pythonhosted.org/packages/99/62/b12ed166268e73d676b72accde5493ff6a7781b284f7830a596af2b7fb98/grimp-3.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8290eb4561dc29c590fc099f2bdac4827a9b86a018e146428854f9742ab480ef", size = 1858308, upload-time = "2025-05-05T13:44:13.816Z" }, + { url = "https://files.pythonhosted.org/packages/f0/6a/da220f9fdb4ceed9bd03f624b00c493e7357387257b695a0624be6d6cf11/grimp-3.9-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4574c0d135e6af8cddc31ac9617c00aac3181bb4d476f5aea173a5f2ac8c7479", size = 1823353, upload-time = "2025-05-05T13:44:28.538Z" }, + { url = "https://files.pythonhosted.org/packages/f0/93/1eb6615f9c12a4eb752ea29e3880c5313ad3d7c771150f544e53e10fa807/grimp-3.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5e4110bd0aedd7da899e44ec0d4a93529e93f2d03e5786e3469a5f7562e11e9", size = 1948889, upload-time = "2025-05-05T13:45:12.57Z" }, + { url = "https://files.pythonhosted.org/packages/86/7e/e5d3a2ee933e2c83b412a89efc4f939dbf5bf5098c78717e6a432401b206/grimp-3.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d098f6e10c0e42c6be0eca2726a7d7218e90ba020141fa3f88426a5f7d09d71", size = 2025587, upload-time = "2025-05-05T13:44:42.212Z" }, + { url = "https://files.pythonhosted.org/packages/fa/59/ead04d7658b977ffafcc3b382c54bc0231f03b5298343db9d4cc547edcde/grimp-3.9-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69573ecc5cc84bb175e5aa5af2fe09dfb2f33a399c59c025f5f3d7d2f6f202fe", size = 2119002, upload-time = "2025-05-05T13:44:57.901Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/80/790e40d77703f846082d6a7f2f37ceec481e9ebe2763551d591083c84e4d/grimp-3.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63e4bdb4382fb0afd52216e70a0e4da3f0500de8f9e40ee8d2b68a16a35c40c4", size = 1922590, upload-time = "2025-05-05T13:45:22.985Z" }, + { url = "https://files.pythonhosted.org/packages/eb/31/c490b387298540ef5fe1960df13879cab7a56b37af0f6b4a7d351e131c15/grimp-3.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ddde011e9bb2fa1abb816373bd8898d1a486cf4f4b13dc46a11ddcd57406e1b", size = 2032993, upload-time = "2025-05-05T13:45:48.831Z" }, + { url = "https://files.pythonhosted.org/packages/aa/46/f02ebadff9ddddbf9f930b78bf3011d038380c059a4b3e0395ed38894c42/grimp-3.9-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa32eed6fb383ec4e54b4073e8ce75a5b151bb1f1d11be66be18aee04d3c9c4b", size = 2087494, upload-time = "2025-05-05T13:46:04.07Z" }, + { url = "https://files.pythonhosted.org/packages/c2/10/93c4d705126c3978b247a28f90510489f3f3cb477cbcf8a2a851cd18a0ae/grimp-3.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e9cc09977f8688839e0c9873fd214e11c971f5df38bffb31d402d04803dfff92", size = 2069454, upload-time = "2025-05-05T13:46:20.056Z" }, + { url = "https://files.pythonhosted.org/packages/eb/ae/2afb75600941f6e09cfb91762704e85a420678f5de6b97e1e2a34ad53e60/grimp-3.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3a732b461db86403aa3c8154ffab85d1964c8c6adaa763803ce260abbc504b6f", size = 2092176, upload-time = "2025-05-05T13:46:35.619Z" }, + { url = "https://files.pythonhosted.org/packages/51/de/c5b12fd191e39c9888a57be8d5a62892ee25fa5e61017d2b5835fbf28076/grimp-3.9-cp312-cp312-win32.whl", hash = "sha256:829d60b4c1c8c6bfb1c7348cf3e30b87f462a7d9316ced9d8265146a2153a0cd", size = 1494790, upload-time = "2025-05-05T13:47:01.642Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/31/3faf755b0cde71f1d3e7f6069d873586f9293930fadd3fca5f21c4ee35b8/grimp-3.9-cp312-cp312-win_amd64.whl", hash = "sha256:556ab4fbf943299fd90e467d481803b8e1a57d28c24af5867012559f51435ceb", size = 1598355, upload-time = "2025-05-05T13:46:53.461Z" }, + { url = "https://files.pythonhosted.org/packages/47/51/469735ff46942adace8b5723d4d64e81c8c14ab429c49b75d0421cfde9ca/grimp-3.9-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:867b476677b1d2f89b6c9ca0d7c47b279fe9d0230087f621c6aba94331411690", size = 1783474, upload-time = "2025-05-05T13:45:42.151Z" }, + { url = "https://files.pythonhosted.org/packages/11/8c/5647fb256216f7f7fd960a29ece28a736f859a80cc36793e103602b81828/grimp-3.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:faf5dd2cc7012a6024e743976674d55e66c6e556eaffd30e5843a88cc4623c16", size = 1709699, upload-time = "2025-05-05T13:45:34.622Z" }, + { url = "https://files.pythonhosted.org/packages/26/40/b02a8226c80aa8130e583ae62e12563476d74b909944e80092fe73ba7f9b/grimp-3.9-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ff6c0de2e9cffed8f7ec1a9c80888f01017806cfb9acf9c3d8fc3137a629d51", size = 1857628, upload-time = "2025-05-05T13:44:15.268Z" }, + { url = "https://files.pythonhosted.org/packages/b8/a0/936147329ceb0398c848fdb80a96d32805afccdd382772a9cd553c91b5ed/grimp-3.9-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e38f92a650756f9b00198991cb60c5e3add9d68475425fb4fe0960d1586660ce", size = 1822818, upload-time = "2025-05-05T13:44:29.895Z" }, + { url = "https://files.pythonhosted.org/packages/d5/44/afdd11a6ece8f801a0af8653adb6bfaa64d2652da564e9f53137392f4e8c/grimp-3.9-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e1ef77c7841b15d9f5002c767da1060ec42cb477fa7ae33d7f9dffb4705dc0", size = 1948678, upload-time = "2025-05-05T13:45:14.026Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/44/2b9ba423068f88a3ea177e0c5633afb0154f677885647dd5b98737fa56f6/grimp-3.9-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:19a9bb0b05d1b0738920c604cdc544c9073df6edd71f31963054576647c8f897", size = 2025146, upload-time = "2025-05-05T13:44:44.044Z" }, + { url = "https://files.pythonhosted.org/packages/9b/7a/97fc0ecd9e91fe5bd18a01de7dc70c11fc8b06954ee83d82df306f14f644/grimp-3.9-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f9d5e6182859900610f15704847897115707b28ca2c9b5c754ef3bef9adb485", size = 2118665, upload-time = "2025-05-05T13:44:59.385Z" }, + { url = "https://files.pythonhosted.org/packages/37/c4/fa75d6ffc4b87d9d920ec912b24f6af61aff8b26b0ebb0d8f5d8b2a66cc4/grimp-3.9-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e63efe9c2df2e8efe98142fa754ef9140e3aa3ce942ef55f52bb7a177a0822", size = 1921756, upload-time = "2025-05-05T13:45:24.356Z" }, + { url = "https://files.pythonhosted.org/packages/c6/43/af4590755aab31ffa1227a6560f34bfa575d1dc606dff6d3dc15b7200ced/grimp-3.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e204b17675763a7091fd5e8b7c58c83c8383505d90b6aea6a5e0d5bb737cb856", size = 2032640, upload-time = "2025-05-05T13:45:50.304Z" }, + { url = "https://files.pythonhosted.org/packages/06/d3/d627d9678f6074cc6bb614cfaa5208f352e32523cd26c61a282d6c07aadf/grimp-3.9-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:15d23a90d34d3f94e5437c7bc29ad1b82d059ed9b039c84d6ef20d83b826ca88", size = 2086606, upload-time = "2025-05-05T13:46:06.064Z" }, + { url = "https://files.pythonhosted.org/packages/9e/ae/8ffa1377d45bca60a25d2120258b5d9738eb23c25eb8bb702dcffbe222d3/grimp-3.9-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:04ed7f682ac07aee6e8cd99c1ea3d0ba26ea8167b71b4b79f05640982c1b1fa3", size = 2069295, upload-time = "2025-05-05T13:46:21.513Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/5a/f42bd065775927d47e7281f49bc85ccc639e97fba5842e6f348da8249acc/grimp-3.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:75f33e7b98652ce17fc9a5d0dce0bc5f4ba68fd73a15f10dd4cd1ea511bab0c1", size = 2091251, upload-time = "2025-05-05T13:46:37.529Z" }, + { url = "https://files.pythonhosted.org/packages/4b/87/d35867fe1791450fe802d0dc6e04bfc7601c289357910455912c8c0e7a4b/grimp-3.9-cp313-cp313-win32.whl", hash = "sha256:72921d8727a508b34393a330748db91fca62fa506b86f5a4c457f713a6468c15", size = 1494320, upload-time = "2025-05-05T13:47:03.099Z" }, + { url = "https://files.pythonhosted.org/packages/95/c9/b25441ecb3b8a317d5cf5aee708a76adc7eb11e09ac2b7abf41a8e53effa/grimp-3.9-cp313-cp313-win_amd64.whl", hash = "sha256:cd65bc6d030d9d788a1794e01cdc3b4abce2971cc821e2e7dc02d09c45febc56", size = 1597627, upload-time = "2025-05-05T13:46:55.321Z" }, + { url = "https://files.pythonhosted.org/packages/86/e0/a906b3f8136b761b955e4a8b4576b648c53ae096d3af50ee3a69849df202/grimp-3.9-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:057d4f7e4b9f62909406701d5bab773b39e1fd8591043c6b19dba3ab3b275625", size = 1855680, upload-time = "2025-05-05T13:44:16.812Z" }, + { url = "https://files.pythonhosted.org/packages/14/ee/a9aa98f692feddee20463d2572d1ae7b7e274a2e66be9d8159e0c926fd8e/grimp-3.9-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c660f1222b7c11d725d298bce09b85376b0084d5515b8364a7a70c0547a0992", size = 1822232, upload-time = "2025-05-05T13:44:31.726Z" }, + { url = "https://files.pythonhosted.org/packages/6b/00/78c1cb3a2792d00ef3ecf5e2b4df92dc8faac92c71755c05ba160b1beddf/grimp-3.9-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78662f2c0ae4e7ff3eacff051e6b3110ed026135545a1825a53a858d4e966ebb", size = 2022814, upload-time = "2025-05-05T13:44:45.458Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/4f/2fde4f9b3cde995af35bef9b7496d8e76f661ac2b747caa69d5d62cc34a2/grimp-3.9-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1b57b20f51ce7765adaffd80b3a17a365b770a5d237a772a2a8a74cc19c186f2", size = 2118021, upload-time = "2025-05-05T13:45:00.758Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e0/9a7a56bc8b2789cae9d4fa32a809e060ddeb681dec84d8344a48f9b10298/grimp-3.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:335511ad698e2a7d6e15dccdb843afc6ad4bde79f213479c799f67c98ce36002", size = 2031477, upload-time = "2025-05-05T13:45:51.908Z" }, + { url = "https://files.pythonhosted.org/packages/89/fc/63bb580ccbd015a37ff3f0841f17957f14e3cfee096b94837e2f43f7c422/grimp-3.9-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:574c94895d4fcac2e5ae794636fe687fb80b9ca59fe3bb8458d7a64bc3b3ed9e", size = 2086058, upload-time = "2025-05-05T13:46:07.948Z" }, + { url = "https://files.pythonhosted.org/packages/02/ad/8a90b922b52525279c3eb22d578b6b2580fafffed9e48ff788cceb34ef62/grimp-3.9-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:84c95f9df61ddaffd8f41a4181aa652f3fdf9932b26634cd8273d4dcd926321e", size = 2068266, upload-time = "2025-05-05T13:46:22.971Z" }, + { url = "https://files.pythonhosted.org/packages/34/b2/056fd4642637cd4627d59ccf2be3f62dd41b8da98e49300eeecd8d4faaa5/grimp-3.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9ddcbfd11d6e6b813121db1116f6b3c4930ab433a949522b5e80542c5da3d805", size = 2092059, upload-time = "2025-05-05T13:46:41.095Z" }, ] [[package]] name = "grpcio" -version = "1.73.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8e/7b/ca3f561aeecf0c846d15e1b38921a60dffffd5d4113931198fbf455334ee/grpcio-1.73.0.tar.gz", hash = "sha256:3af4c30918a7f0d39de500d11255f8d9da4f30e94a2033e70fe2a720e184bd8e", size = 12786424, upload-time = "2025-06-09T10:08:23.365Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/9d/4d/e938f3a0e51a47f2ce7e55f12f19f316e7074770d56a7c2765e782ec76bc/grpcio-1.73.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:fb9d7c27089d9ba3746f18d2109eb530ef2a37452d2ff50f5a6696cd39167d3b", size = 5334911, upload-time = "2025-06-09T10:03:33.494Z" }, - { url = "https://files.pythonhosted.org/packages/13/56/f09c72c43aa8d6f15a71f2c63ebdfac9cf9314363dea2598dc501d8370db/grpcio-1.73.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:128ba2ebdac41e41554d492b82c34586a90ebd0766f8ebd72160c0e3a57b9155", size = 10601460, upload-time = "2025-06-09T10:03:36.613Z" }, - { url = "https://files.pythonhosted.org/packages/20/e3/85496edc81e41b3c44ebefffc7bce133bb531120066877df0f910eabfa19/grpcio-1.73.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:068ecc415f79408d57a7f146f54cdf9f0acb4b301a52a9e563973dc981e82f3d", size = 5759191, upload-time = "2025-06-09T10:03:39.838Z" }, - { url = "https://files.pythonhosted.org/packages/88/cc/fef74270a6d29f35ad744bfd8e6c05183f35074ff34c655a2c80f3b422b2/grpcio-1.73.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ddc1cfb2240f84d35d559ade18f69dcd4257dbaa5ba0de1a565d903aaab2968", size = 6409961, upload-time = "2025-06-09T10:03:42.706Z" }, - { url = "https://files.pythonhosted.org/packages/b0/e6/13cfea15e3b8f79c4ae7b676cb21fab70978b0fde1e1d28bb0e073291290/grpcio-1.73.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53007f70d9783f53b41b4cf38ed39a8e348011437e4c287eee7dd1d39d54b2f", size = 6003948, upload-time = "2025-06-09T10:03:44.96Z" }, - { url = "https://files.pythonhosted.org/packages/c2/ed/b1a36dad4cc0dbf1f83f6d7b58825fefd5cc9ff3a5036e46091335649473/grpcio-1.73.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4dd8d8d092efede7d6f48d695ba2592046acd04ccf421436dd7ed52677a9ad29", size = 6103788, upload-time = "2025-06-09T10:03:48.053Z" }, - { url = 
"https://files.pythonhosted.org/packages/e7/c8/d381433d3d46d10f6858126d2d2245ef329e30f3752ce4514c93b95ca6fc/grpcio-1.73.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:70176093d0a95b44d24baa9c034bb67bfe2b6b5f7ebc2836f4093c97010e17fd", size = 6749508, upload-time = "2025-06-09T10:03:51.185Z" }, - { url = "https://files.pythonhosted.org/packages/87/0a/ff0c31dbd15e63b34320efafac647270aa88c31aa19ff01154a73dc7ce86/grpcio-1.73.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:085ebe876373ca095e24ced95c8f440495ed0b574c491f7f4f714ff794bbcd10", size = 6284342, upload-time = "2025-06-09T10:03:54.467Z" }, - { url = "https://files.pythonhosted.org/packages/fd/73/f762430c0ba867403b9d6e463afe026bf019bd9206eee753785239719273/grpcio-1.73.0-cp312-cp312-win32.whl", hash = "sha256:cfc556c1d6aef02c727ec7d0016827a73bfe67193e47c546f7cadd3ee6bf1a60", size = 3669319, upload-time = "2025-06-09T10:03:56.751Z" }, - { url = "https://files.pythonhosted.org/packages/10/8b/3411609376b2830449cf416f457ad9d2aacb7f562e1b90fdd8bdedf26d63/grpcio-1.73.0-cp312-cp312-win_amd64.whl", hash = "sha256:bbf45d59d090bf69f1e4e1594832aaf40aa84b31659af3c5e2c3f6a35202791a", size = 4335596, upload-time = "2025-06-09T10:03:59.866Z" }, - { url = "https://files.pythonhosted.org/packages/60/da/6f3f7a78e5455c4cbe87c85063cc6da05d65d25264f9d4aed800ece46294/grpcio-1.73.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:da1d677018ef423202aca6d73a8d3b2cb245699eb7f50eb5f74cae15a8e1f724", size = 5335867, upload-time = "2025-06-09T10:04:03.153Z" }, - { url = "https://files.pythonhosted.org/packages/53/14/7d1f2526b98b9658d7be0bb163fd78d681587de6709d8b0c74b4b481b013/grpcio-1.73.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:36bf93f6a657f37c131d9dd2c391b867abf1426a86727c3575393e9e11dadb0d", size = 10595587, upload-time = "2025-06-09T10:04:05.694Z" }, - { url = 
"https://files.pythonhosted.org/packages/02/24/a293c398ae44e741da1ed4b29638edbb002258797b07a783f65506165b4c/grpcio-1.73.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:d84000367508ade791d90c2bafbd905574b5ced8056397027a77a215d601ba15", size = 5765793, upload-time = "2025-06-09T10:04:09.235Z" }, - { url = "https://files.pythonhosted.org/packages/e1/24/d84dbd0b5bf36fb44922798d525a85cefa2ffee7b7110e61406e9750ed15/grpcio-1.73.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c98ba1d928a178ce33f3425ff823318040a2b7ef875d30a0073565e5ceb058d9", size = 6415494, upload-time = "2025-06-09T10:04:12.377Z" }, - { url = "https://files.pythonhosted.org/packages/5e/85/c80dc65aed8e9dce3d54688864bac45331d9c7600985541f18bd5cb301d4/grpcio-1.73.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a73c72922dfd30b396a5f25bb3a4590195ee45ecde7ee068acb0892d2900cf07", size = 6007279, upload-time = "2025-06-09T10:04:14.878Z" }, - { url = "https://files.pythonhosted.org/packages/37/fc/207c00a4c6fa303d26e2cbd62fbdb0582facdfd08f55500fd83bf6b0f8db/grpcio-1.73.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:10e8edc035724aba0346a432060fd192b42bd03675d083c01553cab071a28da5", size = 6105505, upload-time = "2025-06-09T10:04:17.39Z" }, - { url = "https://files.pythonhosted.org/packages/72/35/8fe69af820667b87ebfcb24214e42a1d53da53cb39edd6b4f84f6b36da86/grpcio-1.73.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f5cdc332b503c33b1643b12ea933582c7b081957c8bc2ea4cc4bc58054a09288", size = 6753792, upload-time = "2025-06-09T10:04:19.989Z" }, - { url = "https://files.pythonhosted.org/packages/e2/d8/738c77c1e821e350da4a048849f695ff88a02b291f8c69db23908867aea6/grpcio-1.73.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:07ad7c57233c2109e4ac999cb9c2710c3b8e3f491a73b058b0ce431f31ed8145", size = 6287593, upload-time = "2025-06-09T10:04:22.878Z" }, - { url = 
"https://files.pythonhosted.org/packages/09/ec/8498eabc018fa39ae8efe5e47e3f4c1bc9ed6281056713871895dc998807/grpcio-1.73.0-cp313-cp313-win32.whl", hash = "sha256:0eb5df4f41ea10bda99a802b2a292d85be28958ede2a50f2beb8c7fc9a738419", size = 3668637, upload-time = "2025-06-09T10:04:25.787Z" }, - { url = "https://files.pythonhosted.org/packages/d7/35/347db7d2e7674b621afd21b12022e7f48c7b0861b5577134b4e939536141/grpcio-1.73.0-cp313-cp313-win_amd64.whl", hash = "sha256:38cf518cc54cd0c47c9539cefa8888549fcc067db0b0c66a46535ca8032020c4", size = 4335872, upload-time = "2025-06-09T10:04:29.032Z" }, +version = "1.74.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/b4/35feb8f7cab7239c5b94bd2db71abb3d6adb5f335ad8f131abb6060840b6/grpcio-1.74.0.tar.gz", hash = "sha256:80d1f4fbb35b0742d3e3d3bb654b7381cd5f015f8497279a1e9c21ba623e01b1", size = 12756048, upload-time = "2025-07-24T18:54:23.039Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/5d/e504d5d5c4469823504f65687d6c8fb97b7f7bf0b34873b7598f1df24630/grpcio-1.74.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8533e6e9c5bd630ca98062e3a1326249e6ada07d05acf191a77bc33f8948f3d8", size = 5445551, upload-time = "2025-07-24T18:53:23.641Z" }, + { url = "https://files.pythonhosted.org/packages/43/01/730e37056f96f2f6ce9f17999af1556df62ee8dab7fa48bceeaab5fd3008/grpcio-1.74.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:2918948864fec2a11721d91568effffbe0a02b23ecd57f281391d986847982f6", size = 10979810, upload-time = "2025-07-24T18:53:25.349Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/09fd100473ea5c47083889ca47ffd356576173ec134312f6aa0e13111dee/grpcio-1.74.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:60d2d48b0580e70d2e1954d0d19fa3c2e60dd7cbed826aca104fff518310d1c5", size = 5941946, upload-time = "2025-07-24T18:53:27.387Z" }, + { url = 
"https://files.pythonhosted.org/packages/8a/99/12d2cca0a63c874c6d3d195629dcd85cdf5d6f98a30d8db44271f8a97b93/grpcio-1.74.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3601274bc0523f6dc07666c0e01682c94472402ac2fd1226fd96e079863bfa49", size = 6621763, upload-time = "2025-07-24T18:53:29.193Z" }, + { url = "https://files.pythonhosted.org/packages/9d/2c/930b0e7a2f1029bbc193443c7bc4dc2a46fedb0203c8793dcd97081f1520/grpcio-1.74.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:176d60a5168d7948539def20b2a3adcce67d72454d9ae05969a2e73f3a0feee7", size = 6180664, upload-time = "2025-07-24T18:53:30.823Z" }, + { url = "https://files.pythonhosted.org/packages/db/d5/ff8a2442180ad0867717e670f5ec42bfd8d38b92158ad6bcd864e6d4b1ed/grpcio-1.74.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e759f9e8bc908aaae0412642afe5416c9f983a80499448fcc7fab8692ae044c3", size = 6301083, upload-time = "2025-07-24T18:53:32.454Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ba/b361d390451a37ca118e4ec7dccec690422e05bc85fba2ec72b06cefec9f/grpcio-1.74.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9e7c4389771855a92934b2846bd807fc25a3dfa820fd912fe6bd8136026b2707", size = 6994132, upload-time = "2025-07-24T18:53:34.506Z" }, + { url = "https://files.pythonhosted.org/packages/3b/0c/3a5fa47d2437a44ced74141795ac0251bbddeae74bf81df3447edd767d27/grpcio-1.74.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cce634b10aeab37010449124814b05a62fb5f18928ca878f1bf4750d1f0c815b", size = 6489616, upload-time = "2025-07-24T18:53:36.217Z" }, + { url = "https://files.pythonhosted.org/packages/ae/95/ab64703b436d99dc5217228babc76047d60e9ad14df129e307b5fec81fd0/grpcio-1.74.0-cp312-cp312-win32.whl", hash = "sha256:885912559974df35d92219e2dc98f51a16a48395f37b92865ad45186f294096c", size = 3807083, upload-time = "2025-07-24T18:53:37.911Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/59/900aa2445891fc47a33f7d2f76e00ca5d6ae6584b20d19af9c06fa09bf9a/grpcio-1.74.0-cp312-cp312-win_amd64.whl", hash = "sha256:42f8fee287427b94be63d916c90399ed310ed10aadbf9e2e5538b3e497d269bc", size = 4490123, upload-time = "2025-07-24T18:53:39.528Z" }, + { url = "https://files.pythonhosted.org/packages/d4/d8/1004a5f468715221450e66b051c839c2ce9a985aa3ee427422061fcbb6aa/grpcio-1.74.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:2bc2d7d8d184e2362b53905cb1708c84cb16354771c04b490485fa07ce3a1d89", size = 5449488, upload-time = "2025-07-24T18:53:41.174Z" }, + { url = "https://files.pythonhosted.org/packages/94/0e/33731a03f63740d7743dced423846c831d8e6da808fcd02821a4416df7fa/grpcio-1.74.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:c14e803037e572c177ba54a3e090d6eb12efd795d49327c5ee2b3bddb836bf01", size = 10974059, upload-time = "2025-07-24T18:53:43.066Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c6/3d2c14d87771a421205bdca991467cfe473ee4c6a1231c1ede5248c62ab8/grpcio-1.74.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f6ec94f0e50eb8fa1744a731088b966427575e40c2944a980049798b127a687e", size = 5945647, upload-time = "2025-07-24T18:53:45.269Z" }, + { url = "https://files.pythonhosted.org/packages/c5/83/5a354c8aaff58594eef7fffebae41a0f8995a6258bbc6809b800c33d4c13/grpcio-1.74.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:566b9395b90cc3d0d0c6404bc8572c7c18786ede549cdb540ae27b58afe0fb91", size = 6626101, upload-time = "2025-07-24T18:53:47.015Z" }, + { url = "https://files.pythonhosted.org/packages/3f/ca/4fdc7bf59bf6994aa45cbd4ef1055cd65e2884de6113dbd49f75498ddb08/grpcio-1.74.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1ea6176d7dfd5b941ea01c2ec34de9531ba494d541fe2057c904e601879f249", size = 6182562, upload-time = "2025-07-24T18:53:48.967Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/48/2869e5b2c1922583686f7ae674937986807c2f676d08be70d0a541316270/grpcio-1.74.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:64229c1e9cea079420527fa8ac45d80fc1e8d3f94deaa35643c381fa8d98f362", size = 6303425, upload-time = "2025-07-24T18:53:50.847Z" }, + { url = "https://files.pythonhosted.org/packages/a6/0e/bac93147b9a164f759497bc6913e74af1cb632c733c7af62c0336782bd38/grpcio-1.74.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:0f87bddd6e27fc776aacf7ebfec367b6d49cad0455123951e4488ea99d9b9b8f", size = 6996533, upload-time = "2025-07-24T18:53:52.747Z" }, + { url = "https://files.pythonhosted.org/packages/84/35/9f6b2503c1fd86d068b46818bbd7329db26a87cdd8c01e0d1a9abea1104c/grpcio-1.74.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3b03d8f2a07f0fea8c8f74deb59f8352b770e3900d143b3d1475effcb08eec20", size = 6491489, upload-time = "2025-07-24T18:53:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/75/33/a04e99be2a82c4cbc4039eb3a76f6c3632932b9d5d295221389d10ac9ca7/grpcio-1.74.0-cp313-cp313-win32.whl", hash = "sha256:b6a73b2ba83e663b2480a90b82fdae6a7aa6427f62bf43b29912c0cfd1aa2bfa", size = 3805811, upload-time = "2025-07-24T18:53:56.798Z" }, + { url = "https://files.pythonhosted.org/packages/34/80/de3eb55eb581815342d097214bed4c59e806b05f1b3110df03b2280d6dfd/grpcio-1.74.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd3c71aeee838299c5887230b8a1822795325ddfea635edd82954c1eaa831e24", size = 4489214, upload-time = "2025-07-24T18:53:59.771Z" }, ] [[package]] @@ -1448,17 +1722,17 @@ wheels = [ [[package]] name = "hf-xet" -version = "1.1.5" +version = "1.1.8" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ed/d4/7685999e85945ed0d7f0762b686ae7015035390de1161dcea9d5276c134c/hf_xet-1.1.5.tar.gz", hash = "sha256:69ebbcfd9ec44fdc2af73441619eeb06b94ee34511bbcf57cd423820090f5694", size = 495969, upload-time = "2025-06-20T21:48:38.007Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/7a/49/91010b59debc7c862a5fd426d343134dd9a68778dbe570234b6495a4e204/hf_xet-1.1.8.tar.gz", hash = "sha256:62a0043e441753bbc446dcb5a3fe40a4d03f5fb9f13589ef1df9ab19252beb53", size = 484065, upload-time = "2025-08-18T22:01:03.584Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/00/89/a1119eebe2836cb25758e7661d6410d3eae982e2b5e974bcc4d250be9012/hf_xet-1.1.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f52c2fa3635b8c37c7764d8796dfa72706cc4eded19d638331161e82b0792e23", size = 2687929, upload-time = "2025-06-20T21:48:32.284Z" }, - { url = "https://files.pythonhosted.org/packages/de/5f/2c78e28f309396e71ec8e4e9304a6483dcbc36172b5cea8f291994163425/hf_xet-1.1.5-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:9fa6e3ee5d61912c4a113e0708eaaef987047616465ac7aa30f7121a48fc1af8", size = 2556338, upload-time = "2025-06-20T21:48:30.079Z" }, - { url = "https://files.pythonhosted.org/packages/6d/2f/6cad7b5fe86b7652579346cb7f85156c11761df26435651cbba89376cd2c/hf_xet-1.1.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc874b5c843e642f45fd85cda1ce599e123308ad2901ead23d3510a47ff506d1", size = 3102894, upload-time = "2025-06-20T21:48:28.114Z" }, - { url = "https://files.pythonhosted.org/packages/d0/54/0fcf2b619720a26fbb6cc941e89f2472a522cd963a776c089b189559447f/hf_xet-1.1.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dbba1660e5d810bd0ea77c511a99e9242d920790d0e63c0e4673ed36c4022d18", size = 3002134, upload-time = "2025-06-20T21:48:25.906Z" }, - { url = "https://files.pythonhosted.org/packages/f3/92/1d351ac6cef7c4ba8c85744d37ffbfac2d53d0a6c04d2cabeba614640a78/hf_xet-1.1.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab34c4c3104133c495785d5d8bba3b1efc99de52c02e759cf711a91fd39d3a14", size = 3171009, upload-time = "2025-06-20T21:48:33.987Z" }, - { url = 
"https://files.pythonhosted.org/packages/c9/65/4b2ddb0e3e983f2508528eb4501288ae2f84963586fbdfae596836d5e57a/hf_xet-1.1.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:83088ecea236d5113de478acb2339f92c95b4fb0462acaa30621fac02f5a534a", size = 3279245, upload-time = "2025-06-20T21:48:36.051Z" }, - { url = "https://files.pythonhosted.org/packages/f0/55/ef77a85ee443ae05a9e9cba1c9f0dd9241eb42da2aeba1dc50f51154c81a/hf_xet-1.1.5-cp37-abi3-win_amd64.whl", hash = "sha256:73e167d9807d166596b4b2f0b585c6d5bd84a26dea32843665a8b58f6edba245", size = 2738931, upload-time = "2025-06-20T21:48:39.482Z" }, + { url = "https://files.pythonhosted.org/packages/9c/91/5814db3a0d4a65fb6a87f0931ae28073b87f06307701fe66e7c41513bfb4/hf_xet-1.1.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3d5f82e533fc51c7daad0f9b655d9c7811b5308e5890236828bd1dd3ed8fea74", size = 2752357, upload-time = "2025-08-18T22:00:58.777Z" }, + { url = "https://files.pythonhosted.org/packages/70/72/ce898516e97341a7a9d450609e130e108643389110261eaee6deb1ba8545/hf_xet-1.1.8-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:8e2dba5896bca3ab61d0bef4f01a1647004de59640701b37e37eaa57087bbd9d", size = 2613142, upload-time = "2025-08-18T22:00:57.252Z" }, + { url = "https://files.pythonhosted.org/packages/b7/d6/13af5f916cef795ac2b5e4cc1de31f2e0e375f4475d50799915835f301c2/hf_xet-1.1.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfe5700bc729be3d33d4e9a9b5cc17a951bf8c7ada7ba0c9198a6ab2053b7453", size = 3175859, upload-time = "2025-08-18T22:00:55.978Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ed/34a193c9d1d72b7c3901b3b5153b1be9b2736b832692e1c3f167af537102/hf_xet-1.1.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:09e86514c3c4284ed8a57d6b0f3d089f9836a0af0a1ceb3c9dd664f1f3eaefef", size = 3074178, upload-time = "2025-08-18T22:00:54.147Z" }, + { url = 
"https://files.pythonhosted.org/packages/4a/1b/de6817b4bf65385280252dff5c9cceeedfbcb27ddb93923639323c1034a4/hf_xet-1.1.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4a9b99ab721d385b83f4fc8ee4e0366b0b59dce03b5888a86029cc0ca634efbf", size = 3238122, upload-time = "2025-08-18T22:01:00.546Z" }, + { url = "https://files.pythonhosted.org/packages/b7/13/874c85c7ed519ec101deb654f06703d9e5e68d34416730f64c4755ada36a/hf_xet-1.1.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:25b9d43333bbef39aeae1616789ec329c21401a7fe30969d538791076227b591", size = 3344325, upload-time = "2025-08-18T22:01:02.013Z" }, + { url = "https://files.pythonhosted.org/packages/9e/d3/0aaf279f4f3dea58e99401b92c31c0f752924ba0e6c7d7bb07b1dbd7f35e/hf_xet-1.1.8-cp37-abi3-win_amd64.whl", hash = "sha256:4171f31d87b13da4af1ed86c98cf763292e4720c088b4957cf9d564f92904ca9", size = 2801689, upload-time = "2025-08-18T22:01:04.81Z" }, ] [[package]] @@ -1513,7 +1787,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "0.34.2" +version = "0.34.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -1525,9 +1799,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/11/53/572b9c03ca0cabb3d71e02b1750b595196332cfb8c4d74a90de383451171/huggingface_hub-0.34.2.tar.gz", hash = "sha256:a27c1ba3d2a70b378dce546c8be3a90349a64e6bd5d7a806679d4bf5e5d2d8fe", size = 456837, upload-time = "2025-07-28T10:12:09.32Z" } +sdist = { url = "https://files.pythonhosted.org/packages/45/c9/bdbe19339f76d12985bc03572f330a01a93c04dffecaaea3061bdd7fb892/huggingface_hub-0.34.4.tar.gz", hash = "sha256:a4228daa6fb001be3f4f4bdaf9a0db00e1739235702848df00885c9b5742c85c", size = 459768, upload-time = "2025-08-08T09:14:52.365Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/24/20/5ee412acef0af05bd3ccc78186ccb7ca672f9998a7cbc94c011df8f101f4/huggingface_hub-0.34.2-py3-none-any.whl", hash = 
"sha256:699843fc58d3d257dbd3cb014e0cd34066a56372246674322ba0909981ec239c", size = 558843, upload-time = "2025-07-28T10:12:07.064Z" }, + { url = "https://files.pythonhosted.org/packages/39/7b/bb06b061991107cd8783f300adff3e7b7f284e330fd82f507f2a1417b11d/huggingface_hub-0.34.4-py3-none-any.whl", hash = "sha256:9b365d781739c93ff90c359844221beef048403f1bc1f1c123c191257c3c890a", size = 561452, upload-time = "2025-08-08T09:14:50.159Z" }, ] [package.optional-dependencies] @@ -1551,11 +1825,11 @@ wheels = [ [[package]] name = "identify" -version = "2.6.12" +version = "2.6.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/88/d193a27416618628a5eea64e3223acd800b40749a96ffb322a9b55a49ed1/identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6", size = 99254, upload-time = "2025-05-23T20:37:53.3Z" } +sdist = { url = "https://files.pythonhosted.org/packages/82/ca/ffbabe3635bb839aa36b3a893c91a9b0d368cb4d8073e03a12896970af82/identify-2.6.13.tar.gz", hash = "sha256:da8d6c828e773620e13bfa86ea601c5a5310ba4bcd65edf378198b56a1f9fb32", size = 99243, upload-time = "2025-08-09T19:35:00.6Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/cd/18f8da995b658420625f7ef13f037be53ae04ec5ad33f9b718240dcfd48c/identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2", size = 99145, upload-time = "2025-05-23T20:37:51.495Z" }, + { url = "https://files.pythonhosted.org/packages/e7/ce/461b60a3ee109518c055953729bf9ed089a04db895d47e95444071dcdef2/identify-2.6.13-py2.py3-none-any.whl", hash = "sha256:60381139b3ae39447482ecc406944190f690d4a2997f2584062089848361b33b", size = 99153, upload-time = "2025-08-09T19:34:59.1Z" }, ] [[package]] @@ -1617,6 +1891,20 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769, upload-time = "2022-07-01T12:21:02.467Z" }, ] +[[package]] +name = "import-linter" +version = "2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "grimp" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/db/33/e3c29beb4d8a33cfacdbe2858a3a4533694a0c1d0c060daaa761eff6d929/import_linter-2.4.tar.gz", hash = "sha256:4888fde83dd18bdbecd57ea1a98a1f3d52c6b6507d700f89f8678b44306c0ab4", size = 29942, upload-time = "2025-08-15T06:57:23.423Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/11/2c108fc1138e506762db332c4a7ebc589cb379bc443939a81ec738b4cf73/import_linter-2.4-py3-none-any.whl", hash = "sha256:2ad6d5a164cdcd5ebdda4172cf0169f73dde1a8925ef7216672c321cd38f8499", size = 42355, upload-time = "2025-08-15T06:57:22.221Z" }, +] + [[package]] name = "importlib-metadata" version = "8.7.0" @@ -1749,7 +2037,7 @@ wheels = [ [[package]] name = "jsonschema" -version = "4.24.0" +version = "4.25.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -1757,9 +2045,9 @@ dependencies = [ { name = "referencing" }, { name = "rpds-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bf/d3/1cf5326b923a53515d8f3a2cd442e6d7e94fcc444716e879ea70a0ce3177/jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196", size = 353480, upload-time = "2025-05-26T18:48:10.459Z" } +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = 
"2025-08-18T17:03:50.038Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a2/3d/023389198f69c722d039351050738d6755376c8fd343e91dc493ea485905/jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d", size = 88709, upload-time = "2025-05-26T18:48:08.417Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, ] [[package]] @@ -1776,53 +2064,74 @@ wheels = [ [[package]] name = "kiwisolver" -version = "1.4.8" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/82/59/7c91426a8ac292e1cdd53a63b6d9439abd573c875c3f92c146767dd33faf/kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e", size = 97538, upload-time = "2024-12-24T18:30:51.519Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/aa/cea685c4ab647f349c3bc92d2daf7ae34c8e8cf405a6dcd3a497f58a2ac3/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502", size = 124152, upload-time = "2024-12-24T18:29:16.85Z" }, - { url = "https://files.pythonhosted.org/packages/c5/0b/8db6d2e2452d60d5ebc4ce4b204feeb16176a851fd42462f66ade6808084/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31", size = 66555, upload-time = "2024-12-24T18:29:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/60/26/d6a0db6785dd35d3ba5bf2b2df0aedc5af089962c6eb2cbf67a15b81369e/kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb", size = 65067, upload-time = 
"2024-12-24T18:29:20.096Z" }, - { url = "https://files.pythonhosted.org/packages/c9/ed/1d97f7e3561e09757a196231edccc1bcf59d55ddccefa2afc9c615abd8e0/kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f", size = 1378443, upload-time = "2024-12-24T18:29:22.843Z" }, - { url = "https://files.pythonhosted.org/packages/29/61/39d30b99954e6b46f760e6289c12fede2ab96a254c443639052d1b573fbc/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc", size = 1472728, upload-time = "2024-12-24T18:29:24.463Z" }, - { url = "https://files.pythonhosted.org/packages/0c/3e/804163b932f7603ef256e4a715e5843a9600802bb23a68b4e08c8c0ff61d/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a", size = 1478388, upload-time = "2024-12-24T18:29:25.776Z" }, - { url = "https://files.pythonhosted.org/packages/8a/9e/60eaa75169a154700be74f875a4d9961b11ba048bef315fbe89cb6999056/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a", size = 1413849, upload-time = "2024-12-24T18:29:27.202Z" }, - { url = "https://files.pythonhosted.org/packages/bc/b3/9458adb9472e61a998c8c4d95cfdfec91c73c53a375b30b1428310f923e4/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a", size = 1475533, upload-time = "2024-12-24T18:29:28.638Z" }, - { url = "https://files.pythonhosted.org/packages/e4/7a/0a42d9571e35798de80aef4bb43a9b672aa7f8e58643d7bd1950398ffb0a/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3", size = 2268898, upload-time = "2024-12-24T18:29:30.368Z" }, - { url = "https://files.pythonhosted.org/packages/d9/07/1255dc8d80271400126ed8db35a1795b1a2c098ac3a72645075d06fe5c5d/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b", size = 2425605, upload-time = "2024-12-24T18:29:33.151Z" }, - { url = "https://files.pythonhosted.org/packages/84/df/5a3b4cf13780ef6f6942df67b138b03b7e79e9f1f08f57c49957d5867f6e/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4", size = 2375801, upload-time = "2024-12-24T18:29:34.584Z" }, - { url = "https://files.pythonhosted.org/packages/8f/10/2348d068e8b0f635c8c86892788dac7a6b5c0cb12356620ab575775aad89/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d", size = 2520077, upload-time = "2024-12-24T18:29:36.138Z" }, - { url = "https://files.pythonhosted.org/packages/32/d8/014b89fee5d4dce157d814303b0fce4d31385a2af4c41fed194b173b81ac/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8", size = 2338410, upload-time = "2024-12-24T18:29:39.991Z" }, - { url = "https://files.pythonhosted.org/packages/bd/72/dfff0cc97f2a0776e1c9eb5bef1ddfd45f46246c6533b0191887a427bca5/kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50", size = 71853, upload-time = "2024-12-24T18:29:42.006Z" }, - { url = "https://files.pythonhosted.org/packages/dc/85/220d13d914485c0948a00f0b9eb419efaf6da81b7d72e88ce2391f7aed8d/kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476", size = 65424, upload-time = "2024-12-24T18:29:44.38Z" 
}, - { url = "https://files.pythonhosted.org/packages/79/b3/e62464a652f4f8cd9006e13d07abad844a47df1e6537f73ddfbf1bc997ec/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09", size = 124156, upload-time = "2024-12-24T18:29:45.368Z" }, - { url = "https://files.pythonhosted.org/packages/8d/2d/f13d06998b546a2ad4f48607a146e045bbe48030774de29f90bdc573df15/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1", size = 66555, upload-time = "2024-12-24T18:29:46.37Z" }, - { url = "https://files.pythonhosted.org/packages/59/e3/b8bd14b0a54998a9fd1e8da591c60998dc003618cb19a3f94cb233ec1511/kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c", size = 65071, upload-time = "2024-12-24T18:29:47.333Z" }, - { url = "https://files.pythonhosted.org/packages/f0/1c/6c86f6d85ffe4d0ce04228d976f00674f1df5dc893bf2dd4f1928748f187/kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b", size = 1378053, upload-time = "2024-12-24T18:29:49.636Z" }, - { url = "https://files.pythonhosted.org/packages/4e/b9/1c6e9f6dcb103ac5cf87cb695845f5fa71379021500153566d8a8a9fc291/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47", size = 1472278, upload-time = "2024-12-24T18:29:51.164Z" }, - { url = "https://files.pythonhosted.org/packages/ee/81/aca1eb176de671f8bda479b11acdc42c132b61a2ac861c883907dde6debb/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16", size = 1478139, upload-time = "2024-12-24T18:29:52.594Z" }, - 
{ url = "https://files.pythonhosted.org/packages/49/f4/e081522473671c97b2687d380e9e4c26f748a86363ce5af48b4a28e48d06/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc", size = 1413517, upload-time = "2024-12-24T18:29:53.941Z" }, - { url = "https://files.pythonhosted.org/packages/8f/e9/6a7d025d8da8c4931522922cd706105aa32b3291d1add8c5427cdcd66e63/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246", size = 1474952, upload-time = "2024-12-24T18:29:56.523Z" }, - { url = "https://files.pythonhosted.org/packages/82/13/13fa685ae167bee5d94b415991c4fc7bb0a1b6ebea6e753a87044b209678/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794", size = 2269132, upload-time = "2024-12-24T18:29:57.989Z" }, - { url = "https://files.pythonhosted.org/packages/ef/92/bb7c9395489b99a6cb41d502d3686bac692586db2045adc19e45ee64ed23/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b", size = 2425997, upload-time = "2024-12-24T18:29:59.393Z" }, - { url = "https://files.pythonhosted.org/packages/ed/12/87f0e9271e2b63d35d0d8524954145837dd1a6c15b62a2d8c1ebe0f182b4/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3", size = 2376060, upload-time = "2024-12-24T18:30:01.338Z" }, - { url = "https://files.pythonhosted.org/packages/02/6e/c8af39288edbce8bf0fa35dee427b082758a4b71e9c91ef18fa667782138/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957", size = 2520471, upload-time = "2024-12-24T18:30:04.574Z" }, - { url = 
"https://files.pythonhosted.org/packages/13/78/df381bc7b26e535c91469f77f16adcd073beb3e2dd25042efd064af82323/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb", size = 2338793, upload-time = "2024-12-24T18:30:06.25Z" }, - { url = "https://files.pythonhosted.org/packages/d0/dc/c1abe38c37c071d0fc71c9a474fd0b9ede05d42f5a458d584619cfd2371a/kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2", size = 71855, upload-time = "2024-12-24T18:30:07.535Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b6/21529d595b126ac298fdd90b705d87d4c5693de60023e0efcb4f387ed99e/kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30", size = 65430, upload-time = "2024-12-24T18:30:08.504Z" }, - { url = "https://files.pythonhosted.org/packages/34/bd/b89380b7298e3af9b39f49334e3e2a4af0e04819789f04b43d560516c0c8/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c", size = 126294, upload-time = "2024-12-24T18:30:09.508Z" }, - { url = "https://files.pythonhosted.org/packages/83/41/5857dc72e5e4148eaac5aa76e0703e594e4465f8ab7ec0fc60e3a9bb8fea/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc", size = 67736, upload-time = "2024-12-24T18:30:11.039Z" }, - { url = "https://files.pythonhosted.org/packages/e1/d1/be059b8db56ac270489fb0b3297fd1e53d195ba76e9bbb30e5401fa6b759/kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712", size = 66194, upload-time = "2024-12-24T18:30:14.886Z" }, - { url = 
"https://files.pythonhosted.org/packages/e1/83/4b73975f149819eb7dcf9299ed467eba068ecb16439a98990dcb12e63fdd/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e", size = 1465942, upload-time = "2024-12-24T18:30:18.927Z" }, - { url = "https://files.pythonhosted.org/packages/c7/2c/30a5cdde5102958e602c07466bce058b9d7cb48734aa7a4327261ac8e002/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880", size = 1595341, upload-time = "2024-12-24T18:30:22.102Z" }, - { url = "https://files.pythonhosted.org/packages/ff/9b/1e71db1c000385aa069704f5990574b8244cce854ecd83119c19e83c9586/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062", size = 1598455, upload-time = "2024-12-24T18:30:24.947Z" }, - { url = "https://files.pythonhosted.org/packages/85/92/c8fec52ddf06231b31cbb779af77e99b8253cd96bd135250b9498144c78b/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7", size = 1522138, upload-time = "2024-12-24T18:30:26.286Z" }, - { url = "https://files.pythonhosted.org/packages/0b/51/9eb7e2cd07a15d8bdd976f6190c0164f92ce1904e5c0c79198c4972926b7/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed", size = 1582857, upload-time = "2024-12-24T18:30:28.86Z" }, - { url = "https://files.pythonhosted.org/packages/0f/95/c5a00387a5405e68ba32cc64af65ce881a39b98d73cc394b24143bebc5b8/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d", size = 2293129, 
upload-time = "2024-12-24T18:30:30.34Z" }, - { url = "https://files.pythonhosted.org/packages/44/83/eeb7af7d706b8347548313fa3a3a15931f404533cc54fe01f39e830dd231/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165", size = 2421538, upload-time = "2024-12-24T18:30:33.334Z" }, - { url = "https://files.pythonhosted.org/packages/05/f9/27e94c1b3eb29e6933b6986ffc5fa1177d2cd1f0c8efc5f02c91c9ac61de/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6", size = 2390661, upload-time = "2024-12-24T18:30:34.939Z" }, - { url = "https://files.pythonhosted.org/packages/d9/d4/3c9735faa36ac591a4afcc2980d2691000506050b7a7e80bcfe44048daa7/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90", size = 2546710, upload-time = "2024-12-24T18:30:37.281Z" }, - { url = "https://files.pythonhosted.org/packages/4c/fa/be89a49c640930180657482a74970cdcf6f7072c8d2471e1babe17a222dc/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85", size = 2349213, upload-time = "2024-12-24T18:30:40.019Z" }, +version = "1.4.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/3c/85844f1b0feb11ee581ac23fe5fce65cd049a200c1446708cc1b7f922875/kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d", size = 97564, upload-time = "2025-08-10T21:27:49.279Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/c9/13573a747838aeb1c76e3267620daa054f4152444d1f3d1a2324b78255b5/kiwisolver-1.4.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999", size = 123686, upload-time = "2025-08-10T21:26:10.034Z" }, 
+ { url = "https://files.pythonhosted.org/packages/51/ea/2ecf727927f103ffd1739271ca19c424d0e65ea473fbaeea1c014aea93f6/kiwisolver-1.4.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2", size = 66460, upload-time = "2025-08-10T21:26:11.083Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/51f5464373ce2aeb5194508298a508b6f21d3867f499556263c64c621914/kiwisolver-1.4.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14", size = 64952, upload-time = "2025-08-10T21:26:12.058Z" }, + { url = "https://files.pythonhosted.org/packages/70/90/6d240beb0f24b74371762873e9b7f499f1e02166a2d9c5801f4dbf8fa12e/kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04", size = 1474756, upload-time = "2025-08-10T21:26:13.096Z" }, + { url = "https://files.pythonhosted.org/packages/12/42/f36816eaf465220f683fb711efdd1bbf7a7005a2473d0e4ed421389bd26c/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752", size = 1276404, upload-time = "2025-08-10T21:26:14.457Z" }, + { url = "https://files.pythonhosted.org/packages/2e/64/bc2de94800adc830c476dce44e9b40fd0809cddeef1fde9fcf0f73da301f/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77", size = 1294410, upload-time = "2025-08-10T21:26:15.73Z" }, + { url = "https://files.pythonhosted.org/packages/5f/42/2dc82330a70aa8e55b6d395b11018045e58d0bb00834502bf11509f79091/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198", size = 1343631, upload-time = "2025-08-10T21:26:17.045Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/fd/f4c67a6ed1aab149ec5a8a401c323cee7a1cbe364381bb6c9c0d564e0e20/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d", size = 2224963, upload-time = "2025-08-10T21:26:18.737Z" }, + { url = "https://files.pythonhosted.org/packages/45/aa/76720bd4cb3713314677d9ec94dcc21ced3f1baf4830adde5bb9b2430a5f/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab", size = 2321295, upload-time = "2025-08-10T21:26:20.11Z" }, + { url = "https://files.pythonhosted.org/packages/80/19/d3ec0d9ab711242f56ae0dc2fc5d70e298bb4a1f9dfab44c027668c673a1/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2", size = 2487987, upload-time = "2025-08-10T21:26:21.49Z" }, + { url = "https://files.pythonhosted.org/packages/39/e9/61e4813b2c97e86b6fdbd4dd824bf72d28bcd8d4849b8084a357bc0dd64d/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145", size = 2291817, upload-time = "2025-08-10T21:26:22.812Z" }, + { url = "https://files.pythonhosted.org/packages/a0/41/85d82b0291db7504da3c2defe35c9a8a5c9803a730f297bd823d11d5fb77/kiwisolver-1.4.9-cp312-cp312-win_amd64.whl", hash = "sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54", size = 73895, upload-time = "2025-08-10T21:26:24.37Z" }, + { url = "https://files.pythonhosted.org/packages/e2/92/5f3068cf15ee5cb624a0c7596e67e2a0bb2adee33f71c379054a491d07da/kiwisolver-1.4.9-cp312-cp312-win_arm64.whl", hash = "sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60", size = 64992, upload-time = "2025-08-10T21:26:25.732Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/c1/c2686cda909742ab66c7388e9a1a8521a59eb89f8bcfbee28fc980d07e24/kiwisolver-1.4.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5d0432ccf1c7ab14f9949eec60c5d1f924f17c037e9f8b33352fa05799359b8", size = 123681, upload-time = "2025-08-10T21:26:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/ca/f0/f44f50c9f5b1a1860261092e3bc91ecdc9acda848a8b8c6abfda4a24dd5c/kiwisolver-1.4.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efb3a45b35622bb6c16dbfab491a8f5a391fe0e9d45ef32f4df85658232ca0e2", size = 66464, upload-time = "2025-08-10T21:26:27.733Z" }, + { url = "https://files.pythonhosted.org/packages/2d/7a/9d90a151f558e29c3936b8a47ac770235f436f2120aca41a6d5f3d62ae8d/kiwisolver-1.4.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a12cf6398e8a0a001a059747a1cbf24705e18fe413bc22de7b3d15c67cffe3f", size = 64961, upload-time = "2025-08-10T21:26:28.729Z" }, + { url = "https://files.pythonhosted.org/packages/e9/e9/f218a2cb3a9ffbe324ca29a9e399fa2d2866d7f348ec3a88df87fc248fc5/kiwisolver-1.4.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b67e6efbf68e077dd71d1a6b37e43e1a99d0bff1a3d51867d45ee8908b931098", size = 1474607, upload-time = "2025-08-10T21:26:29.798Z" }, + { url = "https://files.pythonhosted.org/packages/d9/28/aac26d4c882f14de59041636292bc838db8961373825df23b8eeb807e198/kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5656aa670507437af0207645273ccdfee4f14bacd7f7c67a4306d0dcaeaf6eed", size = 1276546, upload-time = "2025-08-10T21:26:31.401Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ad/8bfc1c93d4cc565e5069162f610ba2f48ff39b7de4b5b8d93f69f30c4bed/kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bfc08add558155345129c7803b3671cf195e6a56e7a12f3dde7c57d9b417f525", size = 1294482, upload-time = "2025-08-10T21:26:32.721Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/f1/6aca55ff798901d8ce403206d00e033191f63d82dd708a186e0ed2067e9c/kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:40092754720b174e6ccf9e845d0d8c7d8e12c3d71e7fc35f55f3813e96376f78", size = 1343720, upload-time = "2025-08-10T21:26:34.032Z" }, + { url = "https://files.pythonhosted.org/packages/d1/91/eed031876c595c81d90d0f6fc681ece250e14bf6998c3d7c419466b523b7/kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:497d05f29a1300d14e02e6441cf0f5ee81c1ff5a304b0d9fb77423974684e08b", size = 2224907, upload-time = "2025-08-10T21:26:35.824Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ec/4d1925f2e49617b9cca9c34bfa11adefad49d00db038e692a559454dfb2e/kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdd1a81a1860476eb41ac4bc1e07b3f07259e6d55bbf739b79c8aaedcf512799", size = 2321334, upload-time = "2025-08-10T21:26:37.534Z" }, + { url = "https://files.pythonhosted.org/packages/43/cb/450cd4499356f68802750c6ddc18647b8ea01ffa28f50d20598e0befe6e9/kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e6b93f13371d341afee3be9f7c5964e3fe61d5fa30f6a30eb49856935dfe4fc3", size = 2488313, upload-time = "2025-08-10T21:26:39.191Z" }, + { url = "https://files.pythonhosted.org/packages/71/67/fc76242bd99f885651128a5d4fa6083e5524694b7c88b489b1b55fdc491d/kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d75aa530ccfaa593da12834b86a0724f58bff12706659baa9227c2ccaa06264c", size = 2291970, upload-time = "2025-08-10T21:26:40.828Z" }, + { url = "https://files.pythonhosted.org/packages/75/bd/f1a5d894000941739f2ae1b65a32892349423ad49c2e6d0771d0bad3fae4/kiwisolver-1.4.9-cp313-cp313-win_amd64.whl", hash = "sha256:dd0a578400839256df88c16abddf9ba14813ec5f21362e1fe65022e00c883d4d", size = 73894, upload-time = "2025-08-10T21:26:42.33Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/38/dce480814d25b99a391abbddadc78f7c117c6da34be68ca8b02d5848b424/kiwisolver-1.4.9-cp313-cp313-win_arm64.whl", hash = "sha256:d4188e73af84ca82468f09cadc5ac4db578109e52acb4518d8154698d3a87ca2", size = 64995, upload-time = "2025-08-10T21:26:43.889Z" }, + { url = "https://files.pythonhosted.org/packages/e2/37/7d218ce5d92dadc5ebdd9070d903e0c7cf7edfe03f179433ac4d13ce659c/kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5a0f2724dfd4e3b3ac5a82436a8e6fd16baa7d507117e4279b660fe8ca38a3a1", size = 126510, upload-time = "2025-08-10T21:26:44.915Z" }, + { url = "https://files.pythonhosted.org/packages/23/b0/e85a2b48233daef4b648fb657ebbb6f8367696a2d9548a00b4ee0eb67803/kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b11d6a633e4ed84fc0ddafd4ebfd8ea49b3f25082c04ad12b8315c11d504dc1", size = 67903, upload-time = "2025-08-10T21:26:45.934Z" }, + { url = "https://files.pythonhosted.org/packages/44/98/f2425bc0113ad7de24da6bb4dae1343476e95e1d738be7c04d31a5d037fd/kiwisolver-1.4.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61874cdb0a36016354853593cffc38e56fc9ca5aa97d2c05d3dcf6922cd55a11", size = 66402, upload-time = "2025-08-10T21:26:47.101Z" }, + { url = "https://files.pythonhosted.org/packages/98/d8/594657886df9f34c4177cc353cc28ca7e6e5eb562d37ccc233bff43bbe2a/kiwisolver-1.4.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:60c439763a969a6af93b4881db0eed8fadf93ee98e18cbc35bc8da868d0c4f0c", size = 1582135, upload-time = "2025-08-10T21:26:48.665Z" }, + { url = "https://files.pythonhosted.org/packages/5c/c6/38a115b7170f8b306fc929e166340c24958347308ea3012c2b44e7e295db/kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92a2f997387a1b79a75e7803aa7ded2cfbe2823852ccf1ba3bcf613b62ae3197", size = 1389409, upload-time = "2025-08-10T21:26:50.335Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/3b/e04883dace81f24a568bcee6eb3001da4ba05114afa622ec9b6fafdc1f5e/kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a31d512c812daea6d8b3be3b2bfcbeb091dbb09177706569bcfc6240dcf8b41c", size = 1401763, upload-time = "2025-08-10T21:26:51.867Z" }, + { url = "https://files.pythonhosted.org/packages/9f/80/20ace48e33408947af49d7d15c341eaee69e4e0304aab4b7660e234d6288/kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:52a15b0f35dad39862d376df10c5230155243a2c1a436e39eb55623ccbd68185", size = 1453643, upload-time = "2025-08-10T21:26:53.592Z" }, + { url = "https://files.pythonhosted.org/packages/64/31/6ce4380a4cd1f515bdda976a1e90e547ccd47b67a1546d63884463c92ca9/kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a30fd6fdef1430fd9e1ba7b3398b5ee4e2887783917a687d86ba69985fb08748", size = 2330818, upload-time = "2025-08-10T21:26:55.051Z" }, + { url = "https://files.pythonhosted.org/packages/fa/e9/3f3fcba3bcc7432c795b82646306e822f3fd74df0ee81f0fa067a1f95668/kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cc9617b46837c6468197b5945e196ee9ca43057bb7d9d1ae688101e4e1dddf64", size = 2419963, upload-time = "2025-08-10T21:26:56.421Z" }, + { url = "https://files.pythonhosted.org/packages/99/43/7320c50e4133575c66e9f7dadead35ab22d7c012a3b09bb35647792b2a6d/kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:0ab74e19f6a2b027ea4f845a78827969af45ce790e6cb3e1ebab71bdf9f215ff", size = 2594639, upload-time = "2025-08-10T21:26:57.882Z" }, + { url = "https://files.pythonhosted.org/packages/65/d6/17ae4a270d4a987ef8a385b906d2bdfc9fce502d6dc0d3aea865b47f548c/kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dba5ee5d3981160c28d5490f0d1b7ed730c22470ff7f6cc26cfcfaacb9896a07", size = 2391741, upload-time = "2025-08-10T21:26:59.237Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/8f/8f6f491d595a9e5912971f3f863d81baddccc8a4d0c3749d6a0dd9ffc9df/kiwisolver-1.4.9-cp313-cp313t-win_arm64.whl", hash = "sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c", size = 68646, upload-time = "2025-08-10T21:27:00.52Z" }, + { url = "https://files.pythonhosted.org/packages/6b/32/6cc0fbc9c54d06c2969faa9c1d29f5751a2e51809dd55c69055e62d9b426/kiwisolver-1.4.9-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9928fe1eb816d11ae170885a74d074f57af3a0d65777ca47e9aeb854a1fba386", size = 123806, upload-time = "2025-08-10T21:27:01.537Z" }, + { url = "https://files.pythonhosted.org/packages/b2/dd/2bfb1d4a4823d92e8cbb420fe024b8d2167f72079b3bb941207c42570bdf/kiwisolver-1.4.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d0005b053977e7b43388ddec89fa567f43d4f6d5c2c0affe57de5ebf290dc552", size = 66605, upload-time = "2025-08-10T21:27:03.335Z" }, + { url = "https://files.pythonhosted.org/packages/f7/69/00aafdb4e4509c2ca6064646cba9cd4b37933898f426756adb2cb92ebbed/kiwisolver-1.4.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2635d352d67458b66fd0667c14cb1d4145e9560d503219034a18a87e971ce4f3", size = 64925, upload-time = "2025-08-10T21:27:04.339Z" }, + { url = "https://files.pythonhosted.org/packages/43/dc/51acc6791aa14e5cb6d8a2e28cefb0dc2886d8862795449d021334c0df20/kiwisolver-1.4.9-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:767c23ad1c58c9e827b649a9ab7809fd5fd9db266a9cf02b0e926ddc2c680d58", size = 1472414, upload-time = "2025-08-10T21:27:05.437Z" }, + { url = "https://files.pythonhosted.org/packages/3d/bb/93fa64a81db304ac8a246f834d5094fae4b13baf53c839d6bb6e81177129/kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72d0eb9fba308b8311685c2268cf7d0a0639a6cd027d8128659f72bdd8a024b4", size = 1281272, upload-time = "2025-08-10T21:27:07.063Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/e6/6df102916960fb8d05069d4bd92d6d9a8202d5a3e2444494e7cd50f65b7a/kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f68e4f3eeca8fb22cc3d731f9715a13b652795ef657a13df1ad0c7dc0e9731df", size = 1298578, upload-time = "2025-08-10T21:27:08.452Z" }, + { url = "https://files.pythonhosted.org/packages/7c/47/e142aaa612f5343736b087864dbaebc53ea8831453fb47e7521fa8658f30/kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d84cd4061ae292d8ac367b2c3fa3aad11cb8625a95d135fe93f286f914f3f5a6", size = 1345607, upload-time = "2025-08-10T21:27:10.125Z" }, + { url = "https://files.pythonhosted.org/packages/54/89/d641a746194a0f4d1a3670fb900d0dbaa786fb98341056814bc3f058fa52/kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a60ea74330b91bd22a29638940d115df9dc00af5035a9a2a6ad9399ffb4ceca5", size = 2230150, upload-time = "2025-08-10T21:27:11.484Z" }, + { url = "https://files.pythonhosted.org/packages/aa/6b/5ee1207198febdf16ac11f78c5ae40861b809cbe0e6d2a8d5b0b3044b199/kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ce6a3a4e106cf35c2d9c4fa17c05ce0b180db622736845d4315519397a77beaf", size = 2325979, upload-time = "2025-08-10T21:27:12.917Z" }, + { url = "https://files.pythonhosted.org/packages/fc/ff/b269eefd90f4ae14dcc74973d5a0f6d28d3b9bb1afd8c0340513afe6b39a/kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:77937e5e2a38a7b48eef0585114fe7930346993a88060d0bf886086d2aa49ef5", size = 2491456, upload-time = "2025-08-10T21:27:14.353Z" }, + { url = "https://files.pythonhosted.org/packages/fc/d4/10303190bd4d30de547534601e259a4fbf014eed94aae3e5521129215086/kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:24c175051354f4a28c5d6a31c93906dc653e2bf234e8a4bbfb964892078898ce", size = 2294621, upload-time = "2025-08-10T21:27:15.808Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/e0/a9a90416fce5c0be25742729c2ea52105d62eda6c4be4d803c2a7be1fa50/kiwisolver-1.4.9-cp314-cp314-win_amd64.whl", hash = "sha256:0763515d4df10edf6d06a3c19734e2566368980d21ebec439f33f9eb936c07b7", size = 75417, upload-time = "2025-08-10T21:27:17.436Z" }, + { url = "https://files.pythonhosted.org/packages/1f/10/6949958215b7a9a264299a7db195564e87900f709db9245e4ebdd3c70779/kiwisolver-1.4.9-cp314-cp314-win_arm64.whl", hash = "sha256:0e4e2bf29574a6a7b7f6cb5fa69293b9f96c928949ac4a53ba3f525dffb87f9c", size = 66582, upload-time = "2025-08-10T21:27:18.436Z" }, + { url = "https://files.pythonhosted.org/packages/ec/79/60e53067903d3bc5469b369fe0dfc6b3482e2133e85dae9daa9527535991/kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:d976bbb382b202f71c67f77b0ac11244021cfa3f7dfd9e562eefcea2df711548", size = 126514, upload-time = "2025-08-10T21:27:19.465Z" }, + { url = "https://files.pythonhosted.org/packages/25/d1/4843d3e8d46b072c12a38c97c57fab4608d36e13fe47d47ee96b4d61ba6f/kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2489e4e5d7ef9a1c300a5e0196e43d9c739f066ef23270607d45aba368b91f2d", size = 67905, upload-time = "2025-08-10T21:27:20.51Z" }, + { url = "https://files.pythonhosted.org/packages/8c/ae/29ffcbd239aea8b93108de1278271ae764dfc0d803a5693914975f200596/kiwisolver-1.4.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e2ea9f7ab7fbf18fffb1b5434ce7c69a07582f7acc7717720f1d69f3e806f90c", size = 66399, upload-time = "2025-08-10T21:27:21.496Z" }, + { url = "https://files.pythonhosted.org/packages/a1/ae/d7ba902aa604152c2ceba5d352d7b62106bedbccc8e95c3934d94472bfa3/kiwisolver-1.4.9-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b34e51affded8faee0dfdb705416153819d8ea9250bbbf7ea1b249bdeb5f1122", size = 1582197, upload-time = "2025-08-10T21:27:22.604Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/41/27c70d427eddb8bc7e4f16420a20fefc6f480312122a59a959fdfe0445ad/kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8aacd3d4b33b772542b2e01beb50187536967b514b00003bdda7589722d2a64", size = 1390125, upload-time = "2025-08-10T21:27:24.036Z" }, + { url = "https://files.pythonhosted.org/packages/41/42/b3799a12bafc76d962ad69083f8b43b12bf4fe78b097b12e105d75c9b8f1/kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7cf974dd4e35fa315563ac99d6287a1024e4dc2077b8a7d7cd3d2fb65d283134", size = 1402612, upload-time = "2025-08-10T21:27:25.773Z" }, + { url = "https://files.pythonhosted.org/packages/d2/b5/a210ea073ea1cfaca1bb5c55a62307d8252f531beb364e18aa1e0888b5a0/kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:85bd218b5ecfbee8c8a82e121802dcb519a86044c9c3b2e4aef02fa05c6da370", size = 1453990, upload-time = "2025-08-10T21:27:27.089Z" }, + { url = "https://files.pythonhosted.org/packages/5f/ce/a829eb8c033e977d7ea03ed32fb3c1781b4fa0433fbadfff29e39c676f32/kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0856e241c2d3df4efef7c04a1e46b1936b6120c9bcf36dd216e3acd84bc4fb21", size = 2331601, upload-time = "2025-08-10T21:27:29.343Z" }, + { url = "https://files.pythonhosted.org/packages/e0/4b/b5e97eb142eb9cd0072dacfcdcd31b1c66dc7352b0f7c7255d339c0edf00/kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9af39d6551f97d31a4deebeac6f45b156f9755ddc59c07b402c148f5dbb6482a", size = 2422041, upload-time = "2025-08-10T21:27:30.754Z" }, + { url = "https://files.pythonhosted.org/packages/40/be/8eb4cd53e1b85ba4edc3a9321666f12b83113a178845593307a3e7891f44/kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:bb4ae2b57fc1d8cbd1cf7b1d9913803681ffa903e7488012be5b76dedf49297f", size = 2594897, upload-time = "2025-08-10T21:27:32.803Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/dd/841e9a66c4715477ea0abc78da039832fbb09dac5c35c58dc4c41a407b8a/kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:aedff62918805fb62d43a4aa2ecd4482c380dc76cd31bd7c8878588a61bd0369", size = 2391835, upload-time = "2025-08-10T21:27:34.23Z" }, + { url = "https://files.pythonhosted.org/packages/0c/28/4b2e5c47a0da96896fdfdb006340ade064afa1e63675d01ea5ac222b6d52/kiwisolver-1.4.9-cp314-cp314t-win_amd64.whl", hash = "sha256:1fa333e8b2ce4d9660f2cda9c0e1b6bafcfb2457a9d259faa82289e73ec24891", size = 79988, upload-time = "2025-08-10T21:27:35.587Z" }, + { url = "https://files.pythonhosted.org/packages/80/be/3578e8afd18c88cdf9cb4cffde75a96d2be38c5a903f1ed0ceec061bd09e/kiwisolver-1.4.9-cp314-cp314t-win_arm64.whl", hash = "sha256:4a48a2ce79d65d363597ef7b567ce3d14d68783d2b2263d98db3d9477805ba32", size = 70260, upload-time = "2025-08-10T21:27:36.606Z" }, ] [[package]] @@ -1836,15 +2145,15 @@ wheels = [ [[package]] name = "latex2sympy2-extended" -version = "1.10.1" +version = "1.10.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "antlr4-python3-runtime" }, { name = "sympy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/14/8d/de5cc53dcf8a98396094b81a6478eb7234ef2e5c5fa4900dcc335d3b7fc2/latex2sympy2_extended-1.10.1.tar.gz", hash = "sha256:e365f0cb8807296c9d1f1661be926859bebd922a6139ce4be1e6572ca183ca57", size = 205808, upload-time = "2025-02-27T16:02:48.124Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f4/de/472f9115c14c6f6d8a5889cabe3418283d708bde62ce00402c29441deed4/latex2sympy2_extended-1.10.2.tar.gz", hash = "sha256:41a517ffcc5a140e910a7d1646ce6ff440817e5f9d48fc8279d88bd0925bc389", size = 206188, upload-time = "2025-07-02T15:26:06.225Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6d/eb/a5e8b06b924b4149cf498e1598116bad1e91ab23046c2dfc2c498154d393/latex2sympy2_extended-1.10.1-py3-none-any.whl", hash = 
"sha256:917a23e8f3b6edea88a56978fbbe87ed9fca4197f8277646be57b4660710347c", size = 207460, upload-time = "2025-02-27T16:02:45.941Z" }, + { url = "https://files.pythonhosted.org/packages/ab/60/dfbbf40e3a371388c0e03ff65b01319b7d4023e883df6d7261125772ffdc/latex2sympy2_extended-1.10.2-py3-none-any.whl", hash = "sha256:f910442c5b02a466c1046f47d05cc5285181068b882399281f30102715337fb7", size = 207855, upload-time = "2025-07-02T15:26:04.88Z" }, ] [[package]] @@ -1889,9 +2198,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d3/8f/da755d6d517eb8ec9664afae967b00a9b8dd567bbbb350e261359c1b47fc/libcst-1.8.2-cp313-cp313t-win_arm64.whl", hash = "sha256:4f14f5045766646ed9e8826b959c6d07194788babed1e0ba08c94ea4f39517e3", size = 1974355, upload-time = "2025-06-13T20:56:18.064Z" }, ] +[[package]] +name = "liger-kernel" +version = "0.5.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "torch", marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, + { name = "triton", marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a2/55/3a703f337110e2a121a04e503abfeec2c191529cbee18bb1fb630d65642a/liger_kernel-0.5.8.tar.gz", hash = "sha256:3246d7dced89e0f982a52de259d4f78fd10eb9171246b28ae52b63ad09fc0732", size = 3593097, upload-time = "2025-04-12T16:44:32.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/40/75d82d90062b60e2aedd0b1741fe5b3dfbfd250aedd25933ef0b177b640e/liger_kernel-0.5.8-py3-none-any.whl", hash = "sha256:3102f99f89e9b9da249c83ea3f12b68680a8e8df0e477d4513e232da9af7d1a0", size = 150758, upload-time = "2025-04-12T16:44:30.791Z" }, +] + [[package]] name = "lightning" -version = "2.5.2" +version = "2.5.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fsspec", extra = ["http"] }, @@ -1904,23 +2226,23 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/42/3c/6a930ac7c64fb896adbe560a9141570732d9ca890a11e6d158edd5aece76/lightning-2.5.2.tar.gz", hash = "sha256:9550df613cfb22358ebf77b4a8ad45f3767cd7d26ba2d52b7f036bd3cdd701c4", size = 633391, upload-time = "2025-06-20T15:58:22.065Z" } +sdist = { url = "https://files.pythonhosted.org/packages/01/80/dddb5a382aa0ff18045aee6491f81e40371102cb05da2ad5a8436a51c475/lightning-2.5.3.tar.gz", hash = "sha256:4ed3e12369a1e0f928beecf5c9f5efdabda60a9216057954851e2d89f1abecde", size = 636577, upload-time = "2025-08-13T20:29:32.361Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/71/a9/5d39280e55dc5df9e98be074029f6b48f86fe3db4929cb9ada6401234b47/lightning-2.5.2-py3-none-any.whl", hash = "sha256:7e7f23245e214c8ec14d5d8119d3856c25cfe96f9856296fd5df4e29c2ff88a7", size = 821145, upload-time = "2025-06-20T15:58:18.609Z" }, + { url = "https://files.pythonhosted.org/packages/00/6b/00e9c2b03a449c21d7a4d73a7104ac94f56c37a1e6eae77b1c702d8dddf0/lightning-2.5.3-py3-none-any.whl", hash = "sha256:c551111fda0db0bce267791f9a90cd4f9cf94bc327d36348af0ef79ec752d666", size = 824181, upload-time = "2025-08-13T20:29:30.244Z" }, ] [[package]] name = "lightning-utilities" -version = "0.14.3" +version = "0.15.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "packaging" }, { name = "setuptools" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0f/bb/63a6a8c9e7a96b6ba92647fa5b1595c2dbee29f8178705adb4704d82ecba/lightning_utilities-0.14.3.tar.gz", hash = "sha256:37e2f83f273890052955a44054382c211a303012ee577619efbaa5df9e65e9f5", size = 30346, upload-time = "2025-04-03T15:59:56.928Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b8/39/6fc58ca81492db047149b4b8fd385aa1bfb8c28cd7cacb0c7eb0c44d842f/lightning_utilities-0.15.2.tar.gz", hash = "sha256:cdf12f530214a63dacefd713f180d1ecf5d165338101617b4742e8f22c032e24", size = 31090, upload-time = "2025-08-06T13:57:39.242Z" } 
wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/c1/31b3184cba7b257a4a3b5ca5b88b9204ccb7aa02fe3c992280899293ed54/lightning_utilities-0.14.3-py3-none-any.whl", hash = "sha256:4ab9066aa36cd7b93a05713808901909e96cc3f187ea6fd3052b2fd91313b468", size = 28894, upload-time = "2025-04-03T15:59:55.658Z" }, + { url = "https://files.pythonhosted.org/packages/de/73/3d757cb3fc16f0f9794dd289bcd0c4a031d9cf54d8137d6b984b2d02edf3/lightning_utilities-0.15.2-py3-none-any.whl", hash = "sha256:ad3ab1703775044bbf880dbf7ddaaac899396c96315f3aa1779cec9d618a9841", size = 29431, upload-time = "2025-08-06T13:57:38.046Z" }, ] [[package]] @@ -1956,7 +2278,7 @@ wheels = [ [[package]] name = "lm-format-enforcer" -version = "0.10.11" +version = "0.10.12" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "interegular" }, @@ -1964,51 +2286,49 @@ dependencies = [ { name = "pydantic" }, { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5b/cc/8a5bf6706385c89474161081d2eeec4dd9cef12dc29cca6acc872685ceb6/lm_format_enforcer-0.10.11.tar.gz", hash = "sha256:8ab371924e166a1df68f243aca73a8a647bea5909f37edd6a53a694e7e7c3274", size = 39390, upload-time = "2025-02-26T22:18:45.338Z" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/e0/bdbfad8f5d319de5d05cc2b70d579b49eb8ce3a09989cd0999b8c138c068/lm_format_enforcer-0.10.12.tar.gz", hash = "sha256:130bd7ce8a6b224f25b6314ba9ae78ee4b48594db1767c74391c9182e2902a6c", size = 39481, upload-time = "2025-08-04T21:13:45.727Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/06/cb/bf172960241842e953b3354247f792aae2fc5221552a0741a1c98f35b6f7/lm_format_enforcer-0.10.11-py3-none-any.whl", hash = "sha256:563e0dbc930a6d50fb687951506c5de098c6e962601be0ce723f3b7d0b916a1b", size = 44229, upload-time = "2025-02-26T22:18:42.543Z" }, + { url = 
"https://files.pythonhosted.org/packages/57/1c/7bb80fe2dff9a9c38b180571ca867f518eb9110f79d4b670ea124e153680/lm_format_enforcer-0.10.12-py3-none-any.whl", hash = "sha256:267c2b421c77f7cd51ac2e0e3af8db278a373704d834b49ff55f18a2c05e9800", size = 44327, upload-time = "2025-08-04T21:13:44.492Z" }, ] [[package]] name = "lxml" -version = "5.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/76/3d/14e82fc7c8fb1b7761f7e748fd47e2ec8276d137b6acfe5a4bb73853e08f/lxml-5.4.0.tar.gz", hash = "sha256:d12832e1dbea4be280b22fd0ea7c9b87f0d8fc51ba06e92dc62d52f804f78ebd", size = 3679479, upload-time = "2025-04-23T01:50:29.322Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f8/4c/d101ace719ca6a4ec043eb516fcfcb1b396a9fccc4fcd9ef593df34ba0d5/lxml-5.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b5aff6f3e818e6bdbbb38e5967520f174b18f539c2b9de867b1e7fde6f8d95a4", size = 8127392, upload-time = "2025-04-23T01:46:04.09Z" }, - { url = "https://files.pythonhosted.org/packages/11/84/beddae0cec4dd9ddf46abf156f0af451c13019a0fa25d7445b655ba5ccb7/lxml-5.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942a5d73f739ad7c452bf739a62a0f83e2578afd6b8e5406308731f4ce78b16d", size = 4415103, upload-time = "2025-04-23T01:46:07.227Z" }, - { url = "https://files.pythonhosted.org/packages/d0/25/d0d93a4e763f0462cccd2b8a665bf1e4343dd788c76dcfefa289d46a38a9/lxml-5.4.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:460508a4b07364d6abf53acaa0a90b6d370fafde5693ef37602566613a9b0779", size = 5024224, upload-time = "2025-04-23T01:46:10.237Z" }, - { url = "https://files.pythonhosted.org/packages/31/ce/1df18fb8f7946e7f3388af378b1f34fcf253b94b9feedb2cec5969da8012/lxml-5.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529024ab3a505fed78fe3cc5ddc079464e709f6c892733e3f5842007cec8ac6e", size = 4769913, upload-time = 
"2025-04-23T01:46:12.757Z" }, - { url = "https://files.pythonhosted.org/packages/4e/62/f4a6c60ae7c40d43657f552f3045df05118636be1165b906d3423790447f/lxml-5.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ca56ebc2c474e8f3d5761debfd9283b8b18c76c4fc0967b74aeafba1f5647f9", size = 5290441, upload-time = "2025-04-23T01:46:16.037Z" }, - { url = "https://files.pythonhosted.org/packages/9e/aa/04f00009e1e3a77838c7fc948f161b5d2d5de1136b2b81c712a263829ea4/lxml-5.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a81e1196f0a5b4167a8dafe3a66aa67c4addac1b22dc47947abd5d5c7a3f24b5", size = 4820165, upload-time = "2025-04-23T01:46:19.137Z" }, - { url = "https://files.pythonhosted.org/packages/c9/1f/e0b2f61fa2404bf0f1fdf1898377e5bd1b74cc9b2cf2c6ba8509b8f27990/lxml-5.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00b8686694423ddae324cf614e1b9659c2edb754de617703c3d29ff568448df5", size = 4932580, upload-time = "2025-04-23T01:46:21.963Z" }, - { url = "https://files.pythonhosted.org/packages/24/a2/8263f351b4ffe0ed3e32ea7b7830f845c795349034f912f490180d88a877/lxml-5.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:c5681160758d3f6ac5b4fea370495c48aac0989d6a0f01bb9a72ad8ef5ab75c4", size = 4759493, upload-time = "2025-04-23T01:46:24.316Z" }, - { url = "https://files.pythonhosted.org/packages/05/00/41db052f279995c0e35c79d0f0fc9f8122d5b5e9630139c592a0b58c71b4/lxml-5.4.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:2dc191e60425ad70e75a68c9fd90ab284df64d9cd410ba8d2b641c0c45bc006e", size = 5324679, upload-time = "2025-04-23T01:46:27.097Z" }, - { url = "https://files.pythonhosted.org/packages/1d/be/ee99e6314cdef4587617d3b3b745f9356d9b7dd12a9663c5f3b5734b64ba/lxml-5.4.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:67f779374c6b9753ae0a0195a892a1c234ce8416e4448fe1e9f34746482070a7", size = 4890691, upload-time = "2025-04-23T01:46:30.009Z" }, - { url = 
"https://files.pythonhosted.org/packages/ad/36/239820114bf1d71f38f12208b9c58dec033cbcf80101cde006b9bde5cffd/lxml-5.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:79d5bfa9c1b455336f52343130b2067164040604e41f6dc4d8313867ed540079", size = 4955075, upload-time = "2025-04-23T01:46:32.33Z" }, - { url = "https://files.pythonhosted.org/packages/d4/e1/1b795cc0b174efc9e13dbd078a9ff79a58728a033142bc6d70a1ee8fc34d/lxml-5.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3d3c30ba1c9b48c68489dc1829a6eede9873f52edca1dda900066542528d6b20", size = 4838680, upload-time = "2025-04-23T01:46:34.852Z" }, - { url = "https://files.pythonhosted.org/packages/72/48/3c198455ca108cec5ae3662ae8acd7fd99476812fd712bb17f1b39a0b589/lxml-5.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1af80c6316ae68aded77e91cd9d80648f7dd40406cef73df841aa3c36f6907c8", size = 5391253, upload-time = "2025-04-23T01:46:37.608Z" }, - { url = "https://files.pythonhosted.org/packages/d6/10/5bf51858971c51ec96cfc13e800a9951f3fd501686f4c18d7d84fe2d6352/lxml-5.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4d885698f5019abe0de3d352caf9466d5de2baded00a06ef3f1216c1a58ae78f", size = 5261651, upload-time = "2025-04-23T01:46:40.183Z" }, - { url = "https://files.pythonhosted.org/packages/2b/11/06710dd809205377da380546f91d2ac94bad9ff735a72b64ec029f706c85/lxml-5.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea53d51859b6c64e7c51d522c03cc2c48b9b5d6172126854cc7f01aa11f52bc", size = 5024315, upload-time = "2025-04-23T01:46:43.333Z" }, - { url = "https://files.pythonhosted.org/packages/f5/b0/15b6217834b5e3a59ebf7f53125e08e318030e8cc0d7310355e6edac98ef/lxml-5.4.0-cp312-cp312-win32.whl", hash = "sha256:d90b729fd2732df28130c064aac9bb8aff14ba20baa4aee7bd0795ff1187545f", size = 3486149, upload-time = "2025-04-23T01:46:45.684Z" }, - { url = "https://files.pythonhosted.org/packages/91/1e/05ddcb57ad2f3069101611bd5f5084157d90861a2ef460bf42f45cced944/lxml-5.4.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:1dc4ca99e89c335a7ed47d38964abcb36c5910790f9bd106f2a8fa2ee0b909d2", size = 3817095, upload-time = "2025-04-23T01:46:48.521Z" }, - { url = "https://files.pythonhosted.org/packages/87/cb/2ba1e9dd953415f58548506fa5549a7f373ae55e80c61c9041b7fd09a38a/lxml-5.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:773e27b62920199c6197130632c18fb7ead3257fce1ffb7d286912e56ddb79e0", size = 8110086, upload-time = "2025-04-23T01:46:52.218Z" }, - { url = "https://files.pythonhosted.org/packages/b5/3e/6602a4dca3ae344e8609914d6ab22e52ce42e3e1638c10967568c5c1450d/lxml-5.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9c671845de9699904b1e9df95acfe8dfc183f2310f163cdaa91a3535af95de", size = 4404613, upload-time = "2025-04-23T01:46:55.281Z" }, - { url = "https://files.pythonhosted.org/packages/4c/72/bf00988477d3bb452bef9436e45aeea82bb40cdfb4684b83c967c53909c7/lxml-5.4.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9454b8d8200ec99a224df8854786262b1bd6461f4280064c807303c642c05e76", size = 5012008, upload-time = "2025-04-23T01:46:57.817Z" }, - { url = "https://files.pythonhosted.org/packages/92/1f/93e42d93e9e7a44b2d3354c462cd784dbaaf350f7976b5d7c3f85d68d1b1/lxml-5.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cccd007d5c95279e529c146d095f1d39ac05139de26c098166c4beb9374b0f4d", size = 4760915, upload-time = "2025-04-23T01:47:00.745Z" }, - { url = "https://files.pythonhosted.org/packages/45/0b/363009390d0b461cf9976a499e83b68f792e4c32ecef092f3f9ef9c4ba54/lxml-5.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fce1294a0497edb034cb416ad3e77ecc89b313cff7adbee5334e4dc0d11f422", size = 5283890, upload-time = "2025-04-23T01:47:04.702Z" }, - { url = "https://files.pythonhosted.org/packages/19/dc/6056c332f9378ab476c88e301e6549a0454dbee8f0ae16847414f0eccb74/lxml-5.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:24974f774f3a78ac12b95e3a20ef0931795ff04dbb16db81a90c37f589819551", size = 4812644, upload-time = "2025-04-23T01:47:07.833Z" }, - { url = "https://files.pythonhosted.org/packages/ee/8a/f8c66bbb23ecb9048a46a5ef9b495fd23f7543df642dabeebcb2eeb66592/lxml-5.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:497cab4d8254c2a90bf988f162ace2ddbfdd806fce3bda3f581b9d24c852e03c", size = 4921817, upload-time = "2025-04-23T01:47:10.317Z" }, - { url = "https://files.pythonhosted.org/packages/04/57/2e537083c3f381f83d05d9b176f0d838a9e8961f7ed8ddce3f0217179ce3/lxml-5.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:e794f698ae4c5084414efea0f5cc9f4ac562ec02d66e1484ff822ef97c2cadff", size = 4753916, upload-time = "2025-04-23T01:47:12.823Z" }, - { url = "https://files.pythonhosted.org/packages/d8/80/ea8c4072109a350848f1157ce83ccd9439601274035cd045ac31f47f3417/lxml-5.4.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:2c62891b1ea3094bb12097822b3d44b93fc6c325f2043c4d2736a8ff09e65f60", size = 5289274, upload-time = "2025-04-23T01:47:15.916Z" }, - { url = "https://files.pythonhosted.org/packages/b3/47/c4be287c48cdc304483457878a3f22999098b9a95f455e3c4bda7ec7fc72/lxml-5.4.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:142accb3e4d1edae4b392bd165a9abdee8a3c432a2cca193df995bc3886249c8", size = 4874757, upload-time = "2025-04-23T01:47:19.793Z" }, - { url = "https://files.pythonhosted.org/packages/2f/04/6ef935dc74e729932e39478e44d8cfe6a83550552eaa072b7c05f6f22488/lxml-5.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1a42b3a19346e5601d1b8296ff6ef3d76038058f311902edd574461e9c036982", size = 4947028, upload-time = "2025-04-23T01:47:22.401Z" }, - { url = "https://files.pythonhosted.org/packages/cb/f9/c33fc8daa373ef8a7daddb53175289024512b6619bc9de36d77dca3df44b/lxml-5.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4291d3c409a17febf817259cb37bc62cb7eb398bcc95c1356947e2871911ae61", size = 4834487, upload-time = 
"2025-04-23T01:47:25.513Z" }, - { url = "https://files.pythonhosted.org/packages/8d/30/fc92bb595bcb878311e01b418b57d13900f84c2b94f6eca9e5073ea756e6/lxml-5.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4f5322cf38fe0e21c2d73901abf68e6329dc02a4994e483adbcf92b568a09a54", size = 5381688, upload-time = "2025-04-23T01:47:28.454Z" }, - { url = "https://files.pythonhosted.org/packages/43/d1/3ba7bd978ce28bba8e3da2c2e9d5ae3f8f521ad3f0ca6ea4788d086ba00d/lxml-5.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0be91891bdb06ebe65122aa6bf3fc94489960cf7e03033c6f83a90863b23c58b", size = 5242043, upload-time = "2025-04-23T01:47:31.208Z" }, - { url = "https://files.pythonhosted.org/packages/ee/cd/95fa2201041a610c4d08ddaf31d43b98ecc4b1d74b1e7245b1abdab443cb/lxml-5.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:15a665ad90054a3d4f397bc40f73948d48e36e4c09f9bcffc7d90c87410e478a", size = 5021569, upload-time = "2025-04-23T01:47:33.805Z" }, - { url = "https://files.pythonhosted.org/packages/2d/a6/31da006fead660b9512d08d23d31e93ad3477dd47cc42e3285f143443176/lxml-5.4.0-cp313-cp313-win32.whl", hash = "sha256:d5663bc1b471c79f5c833cffbc9b87d7bf13f87e055a5c86c363ccd2348d7e82", size = 3485270, upload-time = "2025-04-23T01:47:36.133Z" }, - { url = "https://files.pythonhosted.org/packages/fc/14/c115516c62a7d2499781d2d3d7215218c0731b2c940753bf9f9b7b73924d/lxml-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:bcb7a1096b4b6b24ce1ac24d4942ad98f983cd3810f9711bcd0293f43a9d8b9f", size = 3814606, upload-time = "2025-04-23T01:47:39.028Z" }, +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c5/ed/60eb6fa2923602fba988d9ca7c5cdbd7cf25faa795162ed538b527a35411/lxml-6.0.0.tar.gz", hash = "sha256:032e65120339d44cdc3efc326c9f660f5f7205f3a535c1fdbf898b29ea01fb72", size = 4096938, upload-time = "2025-06-26T16:28:19.373Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/89/c3/d01d735c298d7e0ddcedf6f028bf556577e5ab4f4da45175ecd909c79378/lxml-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:78718d8454a6e928470d511bf8ac93f469283a45c354995f7d19e77292f26108", size = 8429515, upload-time = "2025-06-26T16:26:06.776Z" }, + { url = "https://files.pythonhosted.org/packages/06/37/0e3eae3043d366b73da55a86274a590bae76dc45aa004b7042e6f97803b1/lxml-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:84ef591495ffd3f9dcabffd6391db7bb70d7230b5c35ef5148354a134f56f2be", size = 4601387, upload-time = "2025-06-26T16:26:09.511Z" }, + { url = "https://files.pythonhosted.org/packages/a3/28/e1a9a881e6d6e29dda13d633885d13acb0058f65e95da67841c8dd02b4a8/lxml-6.0.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:2930aa001a3776c3e2601cb8e0a15d21b8270528d89cc308be4843ade546b9ab", size = 5228928, upload-time = "2025-06-26T16:26:12.337Z" }, + { url = "https://files.pythonhosted.org/packages/9a/55/2cb24ea48aa30c99f805921c1c7860c1f45c0e811e44ee4e6a155668de06/lxml-6.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:219e0431ea8006e15005767f0351e3f7f9143e793e58519dc97fe9e07fae5563", size = 4952289, upload-time = "2025-06-28T18:47:25.602Z" }, + { url = "https://files.pythonhosted.org/packages/31/c0/b25d9528df296b9a3306ba21ff982fc5b698c45ab78b94d18c2d6ae71fd9/lxml-6.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bd5913b4972681ffc9718bc2d4c53cde39ef81415e1671ff93e9aa30b46595e7", size = 5111310, upload-time = "2025-06-28T18:47:28.136Z" }, + { url = "https://files.pythonhosted.org/packages/e9/af/681a8b3e4f668bea6e6514cbcb297beb6de2b641e70f09d3d78655f4f44c/lxml-6.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:390240baeb9f415a82eefc2e13285016f9c8b5ad71ec80574ae8fa9605093cd7", size = 5025457, upload-time = "2025-06-26T16:26:15.068Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/b6/3a7971aa05b7be7dfebc7ab57262ec527775c2c3c5b2f43675cac0458cad/lxml-6.0.0-cp312-cp312-manylinux_2_27_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d6e200909a119626744dd81bae409fc44134389e03fbf1d68ed2a55a2fb10991", size = 5657016, upload-time = "2025-07-03T19:19:06.008Z" }, + { url = "https://files.pythonhosted.org/packages/69/f8/693b1a10a891197143c0673fcce5b75fc69132afa81a36e4568c12c8faba/lxml-6.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ca50bd612438258a91b5b3788c6621c1f05c8c478e7951899f492be42defc0da", size = 5257565, upload-time = "2025-06-26T16:26:17.906Z" }, + { url = "https://files.pythonhosted.org/packages/a8/96/e08ff98f2c6426c98c8964513c5dab8d6eb81dadcd0af6f0c538ada78d33/lxml-6.0.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:c24b8efd9c0f62bad0439283c2c795ef916c5a6b75f03c17799775c7ae3c0c9e", size = 4713390, upload-time = "2025-06-26T16:26:20.292Z" }, + { url = "https://files.pythonhosted.org/packages/a8/83/6184aba6cc94d7413959f6f8f54807dc318fdcd4985c347fe3ea6937f772/lxml-6.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:afd27d8629ae94c5d863e32ab0e1d5590371d296b87dae0a751fb22bf3685741", size = 5066103, upload-time = "2025-06-26T16:26:22.765Z" }, + { url = "https://files.pythonhosted.org/packages/ee/01/8bf1f4035852d0ff2e36a4d9aacdbcc57e93a6cd35a54e05fa984cdf73ab/lxml-6.0.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:54c4855eabd9fc29707d30141be99e5cd1102e7d2258d2892314cf4c110726c3", size = 4791428, upload-time = "2025-06-26T16:26:26.461Z" }, + { url = "https://files.pythonhosted.org/packages/29/31/c0267d03b16954a85ed6b065116b621d37f559553d9339c7dcc4943a76f1/lxml-6.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c907516d49f77f6cd8ead1322198bdfd902003c3c330c77a1c5f3cc32a0e4d16", size = 5678523, upload-time = "2025-07-03T19:19:09.837Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/f7/5495829a864bc5f8b0798d2b52a807c89966523140f3d6fa3a58ab6720ea/lxml-6.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36531f81c8214e293097cd2b7873f178997dae33d3667caaae8bdfb9666b76c0", size = 5281290, upload-time = "2025-06-26T16:26:29.406Z" }, + { url = "https://files.pythonhosted.org/packages/79/56/6b8edb79d9ed294ccc4e881f4db1023af56ba451909b9ce79f2a2cd7c532/lxml-6.0.0-cp312-cp312-win32.whl", hash = "sha256:690b20e3388a7ec98e899fd54c924e50ba6693874aa65ef9cb53de7f7de9d64a", size = 3613495, upload-time = "2025-06-26T16:26:31.588Z" }, + { url = "https://files.pythonhosted.org/packages/0b/1e/cc32034b40ad6af80b6fd9b66301fc0f180f300002e5c3eb5a6110a93317/lxml-6.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:310b719b695b3dd442cdfbbe64936b2f2e231bb91d998e99e6f0daf991a3eba3", size = 4014711, upload-time = "2025-06-26T16:26:33.723Z" }, + { url = "https://files.pythonhosted.org/packages/55/10/dc8e5290ae4c94bdc1a4c55865be7e1f31dfd857a88b21cbba68b5fea61b/lxml-6.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:8cb26f51c82d77483cdcd2b4a53cda55bbee29b3c2f3ddeb47182a2a9064e4eb", size = 3674431, upload-time = "2025-06-26T16:26:35.959Z" }, + { url = "https://files.pythonhosted.org/packages/79/21/6e7c060822a3c954ff085e5e1b94b4a25757c06529eac91e550f3f5cd8b8/lxml-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6da7cd4f405fd7db56e51e96bff0865b9853ae70df0e6720624049da76bde2da", size = 8414372, upload-time = "2025-06-26T16:26:39.079Z" }, + { url = "https://files.pythonhosted.org/packages/a4/f6/051b1607a459db670fc3a244fa4f06f101a8adf86cda263d1a56b3a4f9d5/lxml-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b34339898bb556a2351a1830f88f751679f343eabf9cf05841c95b165152c9e7", size = 4593940, upload-time = "2025-06-26T16:26:41.891Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/74/dd595d92a40bda3c687d70d4487b2c7eff93fd63b568acd64fedd2ba00fe/lxml-6.0.0-cp313-cp313-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:51a5e4c61a4541bd1cd3ba74766d0c9b6c12d6a1a4964ef60026832aac8e79b3", size = 5214329, upload-time = "2025-06-26T16:26:44.669Z" }, + { url = "https://files.pythonhosted.org/packages/52/46/3572761efc1bd45fcafb44a63b3b0feeb5b3f0066886821e94b0254f9253/lxml-6.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d18a25b19ca7307045581b18b3ec9ead2b1db5ccd8719c291f0cd0a5cec6cb81", size = 4947559, upload-time = "2025-06-28T18:47:31.091Z" }, + { url = "https://files.pythonhosted.org/packages/94/8a/5e40de920e67c4f2eef9151097deb9b52d86c95762d8ee238134aff2125d/lxml-6.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d4f0c66df4386b75d2ab1e20a489f30dc7fd9a06a896d64980541506086be1f1", size = 5102143, upload-time = "2025-06-28T18:47:33.612Z" }, + { url = "https://files.pythonhosted.org/packages/7c/4b/20555bdd75d57945bdabfbc45fdb1a36a1a0ff9eae4653e951b2b79c9209/lxml-6.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f4b481b6cc3a897adb4279216695150bbe7a44c03daba3c894f49d2037e0a24", size = 5021931, upload-time = "2025-06-26T16:26:47.503Z" }, + { url = "https://files.pythonhosted.org/packages/b6/6e/cf03b412f3763d4ca23b25e70c96a74cfece64cec3addf1c4ec639586b13/lxml-6.0.0-cp313-cp313-manylinux_2_27_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8a78d6c9168f5bcb20971bf3329c2b83078611fbe1f807baadc64afc70523b3a", size = 5645469, upload-time = "2025-07-03T19:19:13.32Z" }, + { url = "https://files.pythonhosted.org/packages/d4/dd/39c8507c16db6031f8c1ddf70ed95dbb0a6d466a40002a3522c128aba472/lxml-6.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae06fbab4f1bb7db4f7c8ca9897dc8db4447d1a2b9bee78474ad403437bcc29", size = 5247467, upload-time = 
"2025-06-26T16:26:49.998Z" }, + { url = "https://files.pythonhosted.org/packages/4d/56/732d49def0631ad633844cfb2664563c830173a98d5efd9b172e89a4800d/lxml-6.0.0-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:1fa377b827ca2023244a06554c6e7dc6828a10aaf74ca41965c5d8a4925aebb4", size = 4720601, upload-time = "2025-06-26T16:26:52.564Z" }, + { url = "https://files.pythonhosted.org/packages/8f/7f/6b956fab95fa73462bca25d1ea7fc8274ddf68fb8e60b78d56c03b65278e/lxml-6.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1676b56d48048a62ef77a250428d1f31f610763636e0784ba67a9740823988ca", size = 5060227, upload-time = "2025-06-26T16:26:55.054Z" }, + { url = "https://files.pythonhosted.org/packages/97/06/e851ac2924447e8b15a294855caf3d543424364a143c001014d22c8ca94c/lxml-6.0.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:0e32698462aacc5c1cf6bdfebc9c781821b7e74c79f13e5ffc8bfe27c42b1abf", size = 4790637, upload-time = "2025-06-26T16:26:57.384Z" }, + { url = "https://files.pythonhosted.org/packages/06/d4/fd216f3cd6625022c25b336c7570d11f4a43adbaf0a56106d3d496f727a7/lxml-6.0.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4d6036c3a296707357efb375cfc24bb64cd955b9ec731abf11ebb1e40063949f", size = 5662049, upload-time = "2025-07-03T19:19:16.409Z" }, + { url = "https://files.pythonhosted.org/packages/52/03/0e764ce00b95e008d76b99d432f1807f3574fb2945b496a17807a1645dbd/lxml-6.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7488a43033c958637b1a08cddc9188eb06d3ad36582cebc7d4815980b47e27ef", size = 5272430, upload-time = "2025-06-26T16:27:00.031Z" }, + { url = "https://files.pythonhosted.org/packages/5f/01/d48cc141bc47bc1644d20fe97bbd5e8afb30415ec94f146f2f76d0d9d098/lxml-6.0.0-cp313-cp313-win32.whl", hash = "sha256:5fcd7d3b1d8ecb91445bd71b9c88bdbeae528fefee4f379895becfc72298d181", size = 3612896, upload-time = "2025-06-26T16:27:04.251Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/87/6456b9541d186ee7d4cb53bf1b9a0d7f3b1068532676940fdd594ac90865/lxml-6.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:2f34687222b78fff795feeb799a7d44eca2477c3d9d3a46ce17d51a4f383e32e", size = 4013132, upload-time = "2025-06-26T16:27:06.415Z" }, + { url = "https://files.pythonhosted.org/packages/b7/42/85b3aa8f06ca0d24962f8100f001828e1f1f1a38c954c16e71154ed7d53a/lxml-6.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:21db1ec5525780fd07251636eb5f7acb84003e9382c72c18c542a87c416ade03", size = 3672642, upload-time = "2025-06-26T16:27:09.888Z" }, ] [[package]] @@ -2023,6 +2343,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/87/fb/99f81ac72ae23375f22b7afdb7642aba97c00a713c217124420147681a2f/mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59", size = 78509, upload-time = "2025-04-10T12:50:53.297Z" }, ] +[[package]] +name = "mamba-ssm" +version = "2.2.4" +source = { git = "https://github.com/state-spaces/mamba.git?rev=2e16fc3062cdcd4ebef27a9aa4442676e1c7edf4#2e16fc3062cdcd4ebef27a9aa4442676e1c7edf4" } +dependencies = [ + { name = "einops" }, + { name = "ninja" }, + { name = "packaging" }, + { name = "setuptools" }, + { name = "torch" }, + { name = "transformers" }, + { name = "triton" }, +] + [[package]] name = "markdown" version = "3.8.2" @@ -2084,19 +2418,19 @@ wheels = [ [[package]] name = "math-verify" -version = "0.7.0" +version = "0.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "latex2sympy2-extended" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ab/59/7daf53e52593af0f7f455ea97c91dc34682f1c2e72f493490259a640836c/math_verify-0.7.0.tar.gz", hash = "sha256:b7459c7b9f1c503b7df32566a95f1e6a33a6cad8cbb759eaa6f100d401112024", size = 57582, upload-time = "2025-02-27T16:21:04.151Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/35/b5/b1db6fa6b6c28ebbe1889ee11a4703a72a2ca7750ec415f4559c758cf01a/math_verify-0.8.0.tar.gz", hash = "sha256:3295e0adb94bfe553ff6e3189c44f1916a85aa24ab5d1900f2086a706e28f7c4", size = 60191, upload-time = "2025-07-02T15:52:07.209Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/44/c5/e5de2e29f9db4c92956ce167e1aa534b1e1770ddc0b17600257cf5f8bd50/math_verify-0.7.0-py3-none-any.whl", hash = "sha256:bbdd491b511f6ceef27c5e08543affa1735807b6e3abb6f005bf493dc0eb485b", size = 28677, upload-time = "2025-02-27T16:20:56.612Z" }, + { url = "https://files.pythonhosted.org/packages/fe/9f/59979f699b5c97334298f1295bc9fcdc9904d98d2276479bffff863d23b1/math_verify-0.8.0-py3-none-any.whl", hash = "sha256:31ca651296d817a9bb3fd58ca1fd0d192dcea709b1e5ecf2d0a4514c16f89087", size = 29994, upload-time = "2025-07-02T15:52:05.023Z" }, ] [[package]] name = "matplotlib" -version = "3.10.3" +version = "3.10.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "contourpy" }, @@ -2109,38 +2443,55 @@ dependencies = [ { name = "pyparsing" }, { name = "python-dateutil" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/26/91/d49359a21893183ed2a5b6c76bec40e0b1dcbf8ca148f864d134897cfc75/matplotlib-3.10.3.tar.gz", hash = "sha256:2f82d2c5bb7ae93aaaa4cd42aca65d76ce6376f83304fa3a630b569aca274df0", size = 34799811, upload-time = "2025-05-08T19:10:54.39Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/eb/43/6b80eb47d1071f234ef0c96ca370c2ca621f91c12045f1401b5c9b28a639/matplotlib-3.10.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ab1affc11d1f495ab9e6362b8174a25afc19c081ba5b0775ef00533a4236eea", size = 8179689, upload-time = "2025-05-08T19:10:07.602Z" }, - { url = "https://files.pythonhosted.org/packages/0f/70/d61a591958325c357204870b5e7b164f93f2a8cca1dc6ce940f563909a13/matplotlib-3.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:2a818d8bdcafa7ed2eed74487fdb071c09c1ae24152d403952adad11fa3c65b4", size = 8050466, upload-time = "2025-05-08T19:10:09.383Z" }, - { url = "https://files.pythonhosted.org/packages/e7/75/70c9d2306203148cc7902a961240c5927dd8728afedf35e6a77e105a2985/matplotlib-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748ebc3470c253e770b17d8b0557f0aa85cf8c63fd52f1a61af5b27ec0b7ffee", size = 8456252, upload-time = "2025-05-08T19:10:11.958Z" }, - { url = "https://files.pythonhosted.org/packages/c4/91/ba0ae1ff4b3f30972ad01cd4a8029e70a0ec3b8ea5be04764b128b66f763/matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed70453fd99733293ace1aec568255bc51c6361cb0da94fa5ebf0649fdb2150a", size = 8601321, upload-time = "2025-05-08T19:10:14.47Z" }, - { url = "https://files.pythonhosted.org/packages/d2/88/d636041eb54a84b889e11872d91f7cbf036b3b0e194a70fa064eb8b04f7a/matplotlib-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dbed9917b44070e55640bd13419de83b4c918e52d97561544814ba463811cbc7", size = 9406972, upload-time = "2025-05-08T19:10:16.569Z" }, - { url = "https://files.pythonhosted.org/packages/b1/79/0d1c165eac44405a86478082e225fce87874f7198300bbebc55faaf6d28d/matplotlib-3.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:cf37d8c6ef1a48829443e8ba5227b44236d7fcaf7647caa3178a4ff9f7a5be05", size = 8067954, upload-time = "2025-05-08T19:10:18.663Z" }, - { url = "https://files.pythonhosted.org/packages/3b/c1/23cfb566a74c696a3b338d8955c549900d18fe2b898b6e94d682ca21e7c2/matplotlib-3.10.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9f2efccc8dcf2b86fc4ee849eea5dcaecedd0773b30f47980dc0cbeabf26ec84", size = 8180318, upload-time = "2025-05-08T19:10:20.426Z" }, - { url = "https://files.pythonhosted.org/packages/6c/0c/02f1c3b66b30da9ee343c343acbb6251bef5b01d34fad732446eaadcd108/matplotlib-3.10.3-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:3ddbba06a6c126e3301c3d272a99dcbe7f6c24c14024e80307ff03791a5f294e", size = 8051132, upload-time = "2025-05-08T19:10:22.569Z" }, - { url = "https://files.pythonhosted.org/packages/b4/ab/8db1a5ac9b3a7352fb914133001dae889f9fcecb3146541be46bed41339c/matplotlib-3.10.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748302b33ae9326995b238f606e9ed840bf5886ebafcb233775d946aa8107a15", size = 8457633, upload-time = "2025-05-08T19:10:24.749Z" }, - { url = "https://files.pythonhosted.org/packages/f5/64/41c4367bcaecbc03ef0d2a3ecee58a7065d0a36ae1aa817fe573a2da66d4/matplotlib-3.10.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80fcccbef63302c0efd78042ea3c2436104c5b1a4d3ae20f864593696364ac7", size = 8601031, upload-time = "2025-05-08T19:10:27.03Z" }, - { url = "https://files.pythonhosted.org/packages/12/6f/6cc79e9e5ab89d13ed64da28898e40fe5b105a9ab9c98f83abd24e46d7d7/matplotlib-3.10.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55e46cbfe1f8586adb34f7587c3e4f7dedc59d5226719faf6cb54fc24f2fd52d", size = 9406988, upload-time = "2025-05-08T19:10:29.056Z" }, - { url = "https://files.pythonhosted.org/packages/b1/0f/eed564407bd4d935ffabf561ed31099ed609e19287409a27b6d336848653/matplotlib-3.10.3-cp313-cp313-win_amd64.whl", hash = "sha256:151d89cb8d33cb23345cd12490c76fd5d18a56581a16d950b48c6ff19bb2ab93", size = 8068034, upload-time = "2025-05-08T19:10:31.221Z" }, - { url = "https://files.pythonhosted.org/packages/3e/e5/2f14791ff69b12b09e9975e1d116d9578ac684460860ce542c2588cb7a1c/matplotlib-3.10.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c26dd9834e74d164d06433dc7be5d75a1e9890b926b3e57e74fa446e1a62c3e2", size = 8218223, upload-time = "2025-05-08T19:10:33.114Z" }, - { url = "https://files.pythonhosted.org/packages/5c/08/30a94afd828b6e02d0a52cae4a29d6e9ccfcf4c8b56cc28b021d3588873e/matplotlib-3.10.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:24853dad5b8c84c8c2390fc31ce4858b6df504156893292ce8092d190ef8151d", size = 8094985, upload-time = "2025-05-08T19:10:35.337Z" }, - { url = "https://files.pythonhosted.org/packages/89/44/f3bc6b53066c889d7a1a3ea8094c13af6a667c5ca6220ec60ecceec2dabe/matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f7878214d369d7d4215e2a9075fef743be38fa401d32e6020bab2dfabaa566", size = 8483109, upload-time = "2025-05-08T19:10:37.611Z" }, - { url = "https://files.pythonhosted.org/packages/ba/c7/473bc559beec08ebee9f86ca77a844b65747e1a6c2691e8c92e40b9f42a8/matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6929fc618cb6db9cb75086f73b3219bbb25920cb24cee2ea7a12b04971a4158", size = 8618082, upload-time = "2025-05-08T19:10:39.892Z" }, - { url = "https://files.pythonhosted.org/packages/d8/e9/6ce8edd264c8819e37bbed8172e0ccdc7107fe86999b76ab5752276357a4/matplotlib-3.10.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c7818292a5cc372a2dc4c795e5c356942eb8350b98ef913f7fda51fe175ac5d", size = 9413699, upload-time = "2025-05-08T19:10:42.376Z" }, - { url = "https://files.pythonhosted.org/packages/1b/92/9a45c91089c3cf690b5badd4be81e392ff086ccca8a1d4e3a08463d8a966/matplotlib-3.10.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4f23ffe95c5667ef8a2b56eea9b53db7f43910fa4a2d5472ae0f72b64deab4d5", size = 8139044, upload-time = "2025-05-08T19:10:44.551Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/43/91/f2939bb60b7ebf12478b030e0d7f340247390f402b3b189616aad790c366/matplotlib-3.10.5.tar.gz", hash = "sha256:352ed6ccfb7998a00881692f38b4ca083c691d3e275b4145423704c34c909076", size = 34804044, upload-time = "2025-07-31T18:09:33.805Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/1e/c6f6bcd882d589410b475ca1fc22e34e34c82adff519caf18f3e6dd9d682/matplotlib-3.10.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:00b6feadc28a08bd3c65b2894f56cf3c94fc8f7adcbc6ab4516ae1e8ed8f62e2", size = 8253056, upload-time = "2025-07-31T18:08:05.385Z" }, + { url = "https://files.pythonhosted.org/packages/53/e6/d6f7d1b59413f233793dda14419776f5f443bcccb2dfc84b09f09fe05dbe/matplotlib-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee98a5c5344dc7f48dc261b6ba5d9900c008fc12beb3fa6ebda81273602cc389", size = 8110131, upload-time = "2025-07-31T18:08:07.293Z" }, + { url = "https://files.pythonhosted.org/packages/66/2b/bed8a45e74957549197a2ac2e1259671cd80b55ed9e1fe2b5c94d88a9202/matplotlib-3.10.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a17e57e33de901d221a07af32c08870ed4528db0b6059dce7d7e65c1122d4bea", size = 8669603, upload-time = "2025-07-31T18:08:09.064Z" }, + { url = "https://files.pythonhosted.org/packages/7e/a7/315e9435b10d057f5e52dfc603cd353167ae28bb1a4e033d41540c0067a4/matplotlib-3.10.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97b9d6443419085950ee4a5b1ee08c363e5c43d7176e55513479e53669e88468", size = 9508127, upload-time = "2025-07-31T18:08:10.845Z" }, + { url = "https://files.pythonhosted.org/packages/7f/d9/edcbb1f02ca99165365d2768d517898c22c6040187e2ae2ce7294437c413/matplotlib-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ceefe5d40807d29a66ae916c6a3915d60ef9f028ce1927b84e727be91d884369", size = 9566926, upload-time = "2025-07-31T18:08:13.186Z" }, + { url = "https://files.pythonhosted.org/packages/3b/d9/6dd924ad5616c97b7308e6320cf392c466237a82a2040381163b7500510a/matplotlib-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:c04cba0f93d40e45b3c187c6c52c17f24535b27d545f757a2fffebc06c12b98b", size = 8107599, upload-time = "2025-07-31T18:08:15.116Z" }, + { url = "https://files.pythonhosted.org/packages/0e/f3/522dc319a50f7b0279fbe74f86f7a3506ce414bc23172098e8d2bdf21894/matplotlib-3.10.5-cp312-cp312-win_arm64.whl", hash = "sha256:a41bcb6e2c8e79dc99c5511ae6f7787d2fb52efd3d805fff06d5d4f667db16b2", 
size = 7978173, upload-time = "2025-07-31T18:08:21.518Z" }, + { url = "https://files.pythonhosted.org/packages/8d/05/4f3c1f396075f108515e45cb8d334aff011a922350e502a7472e24c52d77/matplotlib-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:354204db3f7d5caaa10e5de74549ef6a05a4550fdd1c8f831ab9bca81efd39ed", size = 8253586, upload-time = "2025-07-31T18:08:23.107Z" }, + { url = "https://files.pythonhosted.org/packages/2f/2c/e084415775aac7016c3719fe7006cdb462582c6c99ac142f27303c56e243/matplotlib-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b072aac0c3ad563a2b3318124756cb6112157017f7431626600ecbe890df57a1", size = 8110715, upload-time = "2025-07-31T18:08:24.675Z" }, + { url = "https://files.pythonhosted.org/packages/52/1b/233e3094b749df16e3e6cd5a44849fd33852e692ad009cf7de00cf58ddf6/matplotlib-3.10.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d52fd5b684d541b5a51fb276b2b97b010c75bee9aa392f96b4a07aeb491e33c7", size = 8669397, upload-time = "2025-07-31T18:08:26.778Z" }, + { url = "https://files.pythonhosted.org/packages/e8/ec/03f9e003a798f907d9f772eed9b7c6a9775d5bd00648b643ebfb88e25414/matplotlib-3.10.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee7a09ae2f4676276f5a65bd9f2bd91b4f9fbaedf49f40267ce3f9b448de501f", size = 9508646, upload-time = "2025-07-31T18:08:28.848Z" }, + { url = "https://files.pythonhosted.org/packages/91/e7/c051a7a386680c28487bca27d23b02d84f63e3d2a9b4d2fc478e6a42e37e/matplotlib-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ba6c3c9c067b83481d647af88b4e441d532acdb5ef22178a14935b0b881188f4", size = 9567424, upload-time = "2025-07-31T18:08:30.726Z" }, + { url = "https://files.pythonhosted.org/packages/36/c2/24302e93ff431b8f4173ee1dd88976c8d80483cadbc5d3d777cef47b3a1c/matplotlib-3.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:07442d2692c9bd1cceaa4afb4bbe5b57b98a7599de4dabfcca92d3eea70f9ebe", size = 8107809, upload-time = "2025-07-31T18:08:33.928Z" }, + { 
url = "https://files.pythonhosted.org/packages/0b/33/423ec6a668d375dad825197557ed8fbdb74d62b432c1ed8235465945475f/matplotlib-3.10.5-cp313-cp313-win_arm64.whl", hash = "sha256:48fe6d47380b68a37ccfcc94f009530e84d41f71f5dae7eda7c4a5a84aa0a674", size = 7978078, upload-time = "2025-07-31T18:08:36.764Z" }, + { url = "https://files.pythonhosted.org/packages/51/17/521fc16ec766455c7bb52cc046550cf7652f6765ca8650ff120aa2d197b6/matplotlib-3.10.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b80eb8621331449fc519541a7461987f10afa4f9cfd91afcd2276ebe19bd56c", size = 8295590, upload-time = "2025-07-31T18:08:38.521Z" }, + { url = "https://files.pythonhosted.org/packages/f8/12/23c28b2c21114c63999bae129fce7fd34515641c517ae48ce7b7dcd33458/matplotlib-3.10.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47a388908e469d6ca2a6015858fa924e0e8a2345a37125948d8e93a91c47933e", size = 8158518, upload-time = "2025-07-31T18:08:40.195Z" }, + { url = "https://files.pythonhosted.org/packages/81/f8/aae4eb25e8e7190759f3cb91cbeaa344128159ac92bb6b409e24f8711f78/matplotlib-3.10.5-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8b6b49167d208358983ce26e43aa4196073b4702858670f2eb111f9a10652b4b", size = 8691815, upload-time = "2025-07-31T18:08:42.238Z" }, + { url = "https://files.pythonhosted.org/packages/d0/ba/450c39ebdd486bd33a359fc17365ade46c6a96bf637bbb0df7824de2886c/matplotlib-3.10.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a8da0453a7fd8e3da114234ba70c5ba9ef0e98f190309ddfde0f089accd46ea", size = 9522814, upload-time = "2025-07-31T18:08:44.914Z" }, + { url = "https://files.pythonhosted.org/packages/89/11/9c66f6a990e27bb9aa023f7988d2d5809cb98aa39c09cbf20fba75a542ef/matplotlib-3.10.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52c6573dfcb7726a9907b482cd5b92e6b5499b284ffacb04ffbfe06b3e568124", size = 9573917, upload-time = "2025-07-31T18:08:47.038Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/69/8b49394de92569419e5e05e82e83df9b749a0ff550d07631ea96ed2eb35a/matplotlib-3.10.5-cp313-cp313t-win_amd64.whl", hash = "sha256:a23193db2e9d64ece69cac0c8231849db7dd77ce59c7b89948cf9d0ce655a3ce", size = 8181034, upload-time = "2025-07-31T18:08:48.943Z" }, + { url = "https://files.pythonhosted.org/packages/47/23/82dc435bb98a2fc5c20dffcac8f0b083935ac28286413ed8835df40d0baa/matplotlib-3.10.5-cp313-cp313t-win_arm64.whl", hash = "sha256:56da3b102cf6da2776fef3e71cd96fcf22103a13594a18ac9a9b31314e0be154", size = 8023337, upload-time = "2025-07-31T18:08:50.791Z" }, + { url = "https://files.pythonhosted.org/packages/ac/e0/26b6cfde31f5383503ee45dcb7e691d45dadf0b3f54639332b59316a97f8/matplotlib-3.10.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:96ef8f5a3696f20f55597ffa91c28e2e73088df25c555f8d4754931515512715", size = 8253591, upload-time = "2025-07-31T18:08:53.254Z" }, + { url = "https://files.pythonhosted.org/packages/c1/89/98488c7ef7ea20ea659af7499628c240a608b337af4be2066d644cfd0a0f/matplotlib-3.10.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:77fab633e94b9da60512d4fa0213daeb76d5a7b05156840c4fd0399b4b818837", size = 8112566, upload-time = "2025-07-31T18:08:55.116Z" }, + { url = "https://files.pythonhosted.org/packages/52/67/42294dfedc82aea55e1a767daf3263aacfb5a125f44ba189e685bab41b6f/matplotlib-3.10.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27f52634315e96b1debbfdc5c416592edcd9c4221bc2f520fd39c33db5d9f202", size = 9513281, upload-time = "2025-07-31T18:08:56.885Z" }, + { url = "https://files.pythonhosted.org/packages/e7/68/f258239e0cf34c2cbc816781c7ab6fca768452e6bf1119aedd2bd4a882a3/matplotlib-3.10.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:525f6e28c485c769d1f07935b660c864de41c37fd716bfa64158ea646f7084bb", size = 9780873, upload-time = "2025-07-31T18:08:59.241Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/64/f4881554006bd12e4558bd66778bdd15d47b00a1f6c6e8b50f6208eda4b3/matplotlib-3.10.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1f5f3ec4c191253c5f2b7c07096a142c6a1c024d9f738247bfc8e3f9643fc975", size = 9568954, upload-time = "2025-07-31T18:09:01.244Z" }, + { url = "https://files.pythonhosted.org/packages/06/f8/42779d39c3f757e1f012f2dda3319a89fb602bd2ef98ce8faf0281f4febd/matplotlib-3.10.5-cp314-cp314-win_amd64.whl", hash = "sha256:707f9c292c4cd4716f19ab8a1f93f26598222cd931e0cd98fbbb1c5994bf7667", size = 8237465, upload-time = "2025-07-31T18:09:03.206Z" }, + { url = "https://files.pythonhosted.org/packages/cf/f8/153fd06b5160f0cd27c8b9dd797fcc9fb56ac6a0ebf3c1f765b6b68d3c8a/matplotlib-3.10.5-cp314-cp314-win_arm64.whl", hash = "sha256:21a95b9bf408178d372814de7baacd61c712a62cae560b5e6f35d791776f6516", size = 8108898, upload-time = "2025-07-31T18:09:05.231Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ee/c4b082a382a225fe0d2a73f1f57cf6f6f132308805b493a54c8641006238/matplotlib-3.10.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a6b310f95e1102a8c7c817ef17b60ee5d1851b8c71b63d9286b66b177963039e", size = 8295636, upload-time = "2025-07-31T18:09:07.306Z" }, + { url = "https://files.pythonhosted.org/packages/30/73/2195fa2099718b21a20da82dfc753bf2af58d596b51aefe93e359dd5915a/matplotlib-3.10.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:94986a242747a0605cb3ff1cb98691c736f28a59f8ffe5175acaeb7397c49a5a", size = 8158575, upload-time = "2025-07-31T18:09:09.083Z" }, + { url = "https://files.pythonhosted.org/packages/f6/e9/a08cdb34618a91fa08f75e6738541da5cacde7c307cea18ff10f0d03fcff/matplotlib-3.10.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ff10ea43288f0c8bab608a305dc6c918cc729d429c31dcbbecde3b9f4d5b569", size = 9522815, upload-time = "2025-07-31T18:09:11.191Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/bb/34d8b7e0d1bb6d06ef45db01dfa560d5a67b1c40c0b998ce9ccde934bb09/matplotlib-3.10.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f6adb644c9d040ffb0d3434e440490a66cf73dbfa118a6f79cd7568431f7a012", size = 9783514, upload-time = "2025-07-31T18:09:13.307Z" }, + { url = "https://files.pythonhosted.org/packages/12/09/d330d1e55dcca2e11b4d304cc5227f52e2512e46828d6249b88e0694176e/matplotlib-3.10.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4fa40a8f98428f789a9dcacd625f59b7bc4e3ef6c8c7c80187a7a709475cf592", size = 9573932, upload-time = "2025-07-31T18:09:15.335Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3b/f70258ac729aa004aca673800a53a2b0a26d49ca1df2eaa03289a1c40f81/matplotlib-3.10.5-cp314-cp314t-win_amd64.whl", hash = "sha256:95672a5d628b44207aab91ec20bf59c26da99de12b88f7e0b1fb0a84a86ff959", size = 8322003, upload-time = "2025-07-31T18:09:17.416Z" }, + { url = "https://files.pythonhosted.org/packages/5b/60/3601f8ce6d76a7c81c7f25a0e15fde0d6b66226dd187aa6d2838e6374161/matplotlib-3.10.5-cp314-cp314t-win_arm64.whl", hash = "sha256:2efaf97d72629e74252e0b5e3c46813e9eeaa94e011ecf8084a971a31a97f40b", size = 8153849, upload-time = "2025-07-31T18:09:19.673Z" }, ] [[package]] name = "mdit-py-plugins" -version = "0.4.2" +version = "0.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/19/03/a2ecab526543b152300717cf232bb4bb8605b6edb946c845016fa9c9c9fd/mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5", size = 43542, upload-time = "2024-09-09T20:27:49.564Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = 
"2025-08-11T07:25:49.083Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a7/f7/7782a043553ee469c1ff49cfa1cdace2d6bf99a1f333cf38676b3ddf30da/mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636", size = 55316, upload-time = "2024-09-09T20:27:48.397Z" }, + { url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" }, ] [[package]] @@ -2159,7 +2510,7 @@ dependencies = [ { name = "einops" }, { name = "flask-restful" }, { name = "nltk" }, - { name = "nvidia-modelopt", extra = ["torch"], marker = "sys_platform != 'darwin'" }, + { name = "nvidia-modelopt", marker = "sys_platform != 'darwin'" }, { name = "packaging" }, { name = "pytest" }, { name = "pytest-cov" }, @@ -2196,7 +2547,7 @@ requires-dist = [ [[package]] name = "mistral-common" -version = "1.8.3" +version = "1.8.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonschema" }, @@ -2205,13 +2556,12 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-extra-types", extra = ["pycountry"] }, { name = "requests" }, - { name = "sentencepiece" }, { name = "tiktoken" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3b/26/a537cf020b682f2af6927aa9180f29f0dbd542209890d7d2ebd00c004b25/mistral_common-1.8.3.tar.gz", hash = "sha256:0d1979d82227b625f6d71b3c828176f059da8d0f5a3307cdf53b48409a3970a4", size = 6331211, upload-time = "2025-07-25T15:55:40.899Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/dd/1beb1e3d56300f0e4b45ba975ffa7f4b07e6f96a6e06601483f58931893b/mistral_common-1.8.4.tar.gz", hash = "sha256:e611c16ef59c2b60ffdecef4d5e9158e1bf838fad6bad34aa050123601af703a", size = 6333167, upload-time = 
"2025-08-20T07:22:26.347Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/23/bfc9da018375ea1bf31cf94f325d98904003cd6891007ae900d70fc7bcf9/mistral_common-1.8.3-py3-none-any.whl", hash = "sha256:846b6e4bbe016dc2e64fd3169fa704a548f6c74467e0cb18dc165b7a7669abd6", size = 6516130, upload-time = "2025-07-25T15:55:38.424Z" }, + { url = "https://files.pythonhosted.org/packages/d6/4f/756a66c608a767c7af7010b23992343e97558ce7f86c5c15929f1215f6ef/mistral_common-1.8.4-py3-none-any.whl", hash = "sha256:bfaf2550046cebe8289946adc267ba807ac266e5325647af4c4f67292124bc2f", size = 6517094, upload-time = "2025-08-20T07:22:23.686Z" }, ] [package.optional-dependencies] @@ -2222,32 +2572,45 @@ audio = [ image = [ { name = "opencv-python-headless" }, ] +opencv = [ + { name = "opencv-python-headless" }, +] [[package]] name = "ml-dtypes" -version = "0.5.1" +version = "0.5.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/32/49/6e67c334872d2c114df3020e579f3718c333198f8312290e09ec0216703a/ml_dtypes-0.5.1.tar.gz", hash = "sha256:ac5b58559bb84a95848ed6984eb8013249f90b6bab62aa5acbad876e256002c9", size = 698772, upload-time = "2025-01-07T03:34:55.613Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/47/56/1bb21218e1e692506c220ffabd456af9733fba7aa1b14f73899979f4cc20/ml_dtypes-0.5.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:6f462f5eca22fb66d7ff9c4744a3db4463af06c49816c4b6ac89b16bfcdc592e", size = 670372, upload-time = "2025-01-07T03:34:15.258Z" }, - { url = "https://files.pythonhosted.org/packages/20/95/d8bd96a3b60e00bf31bd78ca4bdd2d6bbaf5acb09b42844432d719d34061/ml_dtypes-0.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f76232163b5b9c34291b54621ee60417601e2e4802a188a0ea7157cd9b323f4", size = 4635946, upload-time = "2025-01-07T03:34:20.412Z" }, - { url = 
"https://files.pythonhosted.org/packages/08/57/5d58fad4124192b1be42f68bd0c0ddaa26e44a730ff8c9337adade2f5632/ml_dtypes-0.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4953c5eb9c25a56d11a913c2011d7e580a435ef5145f804d98efa14477d390", size = 4694804, upload-time = "2025-01-07T03:34:23.608Z" }, - { url = "https://files.pythonhosted.org/packages/38/bc/c4260e4a6c6bf684d0313308de1c860467275221d5e7daf69b3fcddfdd0b/ml_dtypes-0.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:9626d0bca1fb387d5791ca36bacbba298c5ef554747b7ebeafefb4564fc83566", size = 210853, upload-time = "2025-01-07T03:34:26.027Z" }, - { url = "https://files.pythonhosted.org/packages/0f/92/bb6a3d18e16fddd18ce6d5f480e1919b33338c70e18cba831c6ae59812ee/ml_dtypes-0.5.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:12651420130ee7cc13059fc56dac6ad300c3af3848b802d475148c9defd27c23", size = 667696, upload-time = "2025-01-07T03:34:27.526Z" }, - { url = "https://files.pythonhosted.org/packages/6d/29/cfc89d842767e9a51146043b0fa18332c2b38f8831447e6cb1160e3c6102/ml_dtypes-0.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9945669d3dadf8acb40ec2e57d38c985d8c285ea73af57fc5b09872c516106d", size = 4638365, upload-time = "2025-01-07T03:34:30.43Z" }, - { url = "https://files.pythonhosted.org/packages/be/26/adc36e3ea09603d9f6d114894e1c1b7b8e8a9ef6d0b031cc270c6624a37c/ml_dtypes-0.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9975bda82a99dc935f2ae4c83846d86df8fd6ba179614acac8e686910851da", size = 4702722, upload-time = "2025-01-07T03:34:33.813Z" }, - { url = "https://files.pythonhosted.org/packages/da/8a/a2b9375c94077e5a488a624a195621407846f504068ce22ccf805c674156/ml_dtypes-0.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:fd918d4e6a4e0c110e2e05be7a7814d10dc1b95872accbf6512b80a109b71ae1", size = 210850, upload-time = "2025-01-07T03:34:36.897Z" }, - { url = 
"https://files.pythonhosted.org/packages/52/38/703169100fdde27957f061d4d0ea3e00525775a09acaccf7e655d9609d55/ml_dtypes-0.5.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:05f23447a1c20ddf4dc7c2c661aa9ed93fcb2658f1017c204d1e758714dc28a8", size = 693043, upload-time = "2025-01-07T03:34:38.457Z" }, - { url = "https://files.pythonhosted.org/packages/28/ff/4e234c9c23e0d456f5da5a326c103bf890c746d93351524d987e41f438b3/ml_dtypes-0.5.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b7fbe5571fdf28fd3aaab3ef4aafc847de9ebf263be959958c1ca58ec8eadf5", size = 4903946, upload-time = "2025-01-07T03:34:40.236Z" }, - { url = "https://files.pythonhosted.org/packages/b7/45/c1a1ccfdd02bc4173ca0f4a2d327683a27df85797b885eb1da1ca325b85c/ml_dtypes-0.5.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d13755f8e8445b3870114e5b6240facaa7cb0c3361e54beba3e07fa912a6e12b", size = 5052731, upload-time = "2025-01-07T03:34:45.308Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/78/a7/aad060393123cfb383956dca68402aff3db1e1caffd5764887ed5153f41b/ml_dtypes-0.5.3.tar.gz", hash = "sha256:95ce33057ba4d05df50b1f3cfefab22e351868a843b3b15a46c65836283670c9", size = 692316, upload-time = "2025-07-29T18:39:19.454Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/eb/bc07c88a6ab002b4635e44585d80fa0b350603f11a2097c9d1bfacc03357/ml_dtypes-0.5.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:156418abeeda48ea4797db6776db3c5bdab9ac7be197c1233771e0880c304057", size = 663864, upload-time = "2025-07-29T18:38:33.777Z" }, + { url = "https://files.pythonhosted.org/packages/cf/89/11af9b0f21b99e6386b6581ab40fb38d03225f9de5f55cf52097047e2826/ml_dtypes-0.5.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1db60c154989af253f6c4a34e8a540c2c9dce4d770784d426945e09908fbb177", size = 4951313, upload-time = "2025-07-29T18:38:36.45Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/a9/b98b86426c24900b0c754aad006dce2863df7ce0bb2bcc2c02f9cc7e8489/ml_dtypes-0.5.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1b255acada256d1fa8c35ed07b5f6d18bc21d1556f842fbc2d5718aea2cd9e55", size = 4928805, upload-time = "2025-07-29T18:38:38.29Z" }, + { url = "https://files.pythonhosted.org/packages/50/c1/85e6be4fc09c6175f36fb05a45917837f30af9a5146a5151cb3a3f0f9e09/ml_dtypes-0.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:da65e5fd3eea434ccb8984c3624bc234ddcc0d9f4c81864af611aaebcc08a50e", size = 208182, upload-time = "2025-07-29T18:38:39.72Z" }, + { url = "https://files.pythonhosted.org/packages/9e/17/cf5326d6867be057f232d0610de1458f70a8ce7b6290e4b4a277ea62b4cd/ml_dtypes-0.5.3-cp312-cp312-win_arm64.whl", hash = "sha256:8bb9cd1ce63096567f5f42851f5843b5a0ea11511e50039a7649619abfb4ba6d", size = 161560, upload-time = "2025-07-29T18:38:41.072Z" }, + { url = "https://files.pythonhosted.org/packages/2d/87/1bcc98a66de7b2455dfb292f271452cac9edc4e870796e0d87033524d790/ml_dtypes-0.5.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5103856a225465371fe119f2fef737402b705b810bd95ad5f348e6e1a6ae21af", size = 663781, upload-time = "2025-07-29T18:38:42.984Z" }, + { url = "https://files.pythonhosted.org/packages/fd/2c/bd2a79ba7c759ee192b5601b675b180a3fd6ccf48ffa27fe1782d280f1a7/ml_dtypes-0.5.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cae435a68861660af81fa3c5af16b70ca11a17275c5b662d9c6f58294e0f113", size = 4956217, upload-time = "2025-07-29T18:38:44.65Z" }, + { url = "https://files.pythonhosted.org/packages/14/f3/091ba84e5395d7fe5b30c081a44dec881cd84b408db1763ee50768b2ab63/ml_dtypes-0.5.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6936283b56d74fbec431ca57ce58a90a908fdbd14d4e2d22eea6d72bb208a7b7", size = 4933109, upload-time = "2025-07-29T18:38:46.405Z" }, + { url = 
"https://files.pythonhosted.org/packages/bc/24/054036dbe32c43295382c90a1363241684c4d6aaa1ecc3df26bd0c8d5053/ml_dtypes-0.5.3-cp313-cp313-win_amd64.whl", hash = "sha256:d0f730a17cf4f343b2c7ad50cee3bd19e969e793d2be6ed911f43086460096e4", size = 208187, upload-time = "2025-07-29T18:38:48.24Z" }, + { url = "https://files.pythonhosted.org/packages/a6/3d/7dc3ec6794a4a9004c765e0c341e32355840b698f73fd2daff46f128afc1/ml_dtypes-0.5.3-cp313-cp313-win_arm64.whl", hash = "sha256:2db74788fc01914a3c7f7da0763427280adfc9cd377e9604b6b64eb8097284bd", size = 161559, upload-time = "2025-07-29T18:38:50.493Z" }, + { url = "https://files.pythonhosted.org/packages/12/91/e6c7a0d67a152b9330445f9f0cf8ae6eee9b83f990b8c57fe74631e42a90/ml_dtypes-0.5.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:93c36a08a6d158db44f2eb9ce3258e53f24a9a4a695325a689494f0fdbc71770", size = 689321, upload-time = "2025-07-29T18:38:52.03Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6c/b7b94b84a104a5be1883305b87d4c6bd6ae781504474b4cca067cb2340ec/ml_dtypes-0.5.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0e44a3761f64bc009d71ddb6d6c71008ba21b53ab6ee588dadab65e2fa79eafc", size = 5274495, upload-time = "2025-07-29T18:38:53.797Z" }, + { url = "https://files.pythonhosted.org/packages/5b/38/6266604dffb43378055394ea110570cf261a49876fc48f548dfe876f34cc/ml_dtypes-0.5.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdf40d2aaabd3913dec11840f0d0ebb1b93134f99af6a0a4fd88ffe924928ab4", size = 5285422, upload-time = "2025-07-29T18:38:56.603Z" }, + { url = "https://files.pythonhosted.org/packages/7c/88/8612ff177d043a474b9408f0382605d881eeb4125ba89d4d4b3286573a83/ml_dtypes-0.5.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:aec640bd94c4c85c0d11e2733bd13cbb10438fb004852996ec0efbc6cacdaf70", size = 661182, upload-time = "2025-07-29T18:38:58.414Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/2b/0569a5e88b29240d373e835107c94ae9256fb2191d3156b43b2601859eff/ml_dtypes-0.5.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bda32ce212baa724e03c68771e5c69f39e584ea426bfe1a701cb01508ffc7035", size = 4956187, upload-time = "2025-07-29T18:39:00.611Z" }, + { url = "https://files.pythonhosted.org/packages/51/66/273c2a06ae44562b104b61e6b14444da00061fd87652506579d7eb2c40b1/ml_dtypes-0.5.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c205cac07d24a29840c163d6469f61069ce4b065518519216297fc2f261f8db9", size = 4930911, upload-time = "2025-07-29T18:39:02.405Z" }, + { url = "https://files.pythonhosted.org/packages/93/ab/606be3e87dc0821bd360c8c1ee46108025c31a4f96942b63907bb441b87d/ml_dtypes-0.5.3-cp314-cp314-win_amd64.whl", hash = "sha256:cd7c0bb22d4ff86d65ad61b5dd246812e8993fbc95b558553624c33e8b6903ea", size = 216664, upload-time = "2025-07-29T18:39:03.927Z" }, + { url = "https://files.pythonhosted.org/packages/30/a2/e900690ca47d01dffffd66375c5de8c4f8ced0f1ef809ccd3b25b3e6b8fa/ml_dtypes-0.5.3-cp314-cp314-win_arm64.whl", hash = "sha256:9d55ea7f7baf2aed61bf1872116cefc9d0c3693b45cae3916897ee27ef4b835e", size = 160203, upload-time = "2025-07-29T18:39:05.671Z" }, + { url = "https://files.pythonhosted.org/packages/53/21/783dfb51f40d2660afeb9bccf3612b99f6a803d980d2a09132b0f9d216ab/ml_dtypes-0.5.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:e12e29764a0e66a7a31e9b8bf1de5cc0423ea72979f45909acd4292de834ccd3", size = 689324, upload-time = "2025-07-29T18:39:07.567Z" }, + { url = "https://files.pythonhosted.org/packages/09/f7/a82d249c711abf411ac027b7163f285487f5e615c3e0716c61033ce996ab/ml_dtypes-0.5.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:19f6c3a4f635c2fc9e2aa7d91416bd7a3d649b48350c51f7f715a09370a90d93", size = 5275917, upload-time = "2025-07-29T18:39:09.339Z" }, + { url = 
"https://files.pythonhosted.org/packages/7f/3c/541c4b30815ab90ebfbb51df15d0b4254f2f9f1e2b4907ab229300d5e6f2/ml_dtypes-0.5.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ab039ffb40f3dc0aeeeba84fd6c3452781b5e15bef72e2d10bcb33e4bbffc39", size = 5285284, upload-time = "2025-07-29T18:39:11.532Z" }, ] [[package]] name = "mlflow" -version = "3.1.1" +version = "3.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "alembic" }, @@ -2257,6 +2620,7 @@ dependencies = [ { name = "gunicorn", marker = "sys_platform != 'win32'" }, { name = "matplotlib" }, { name = "mlflow-skinny" }, + { name = "mlflow-tracing" }, { name = "numpy" }, { name = "pandas" }, { name = "pyarrow" }, @@ -2265,14 +2629,14 @@ dependencies = [ { name = "sqlalchemy" }, { name = "waitress", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2b/e1/0cba7a8fc2c81078b4d31948f65fb1580cee1831e955a86028159724d057/mlflow-3.1.1.tar.gz", hash = "sha256:ee98fe929d61625b72ae5010fbf12a7c6d15527790397827191fd6e8246c33e5", size = 24098836, upload-time = "2025-06-25T09:12:56.416Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/84/c79bca3c13e6bc5a551411c8c253c43194fd109c2688194ffaf7771b0bed/mlflow-3.2.0.tar.gz", hash = "sha256:e96bd42238ea8b477691c8a8f6e8bdbf9247415ad7892e6e885994c6940bcf74", size = 25197246, upload-time = "2025-08-05T13:30:29.747Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a2/07/9f28e7e2b1c9552e64e6161cd3943b02349f8164176cea6b75e69d7df94a/mlflow-3.1.1-py3-none-any.whl", hash = "sha256:16853335292217fde203a645fd50f38d5567ce7818587ed5236040418918872e", size = 24673365, upload-time = "2025-06-25T09:12:53.482Z" }, + { url = "https://files.pythonhosted.org/packages/0a/24/f488e66c6f667c7468f439d48446b30adafdb81abfcc01262cf3a50267f5/mlflow-3.2.0-py3-none-any.whl", hash = "sha256:db97b925cc8afba15caf3749dcb4a95be83f9608e974f23253fbbc1d675247ea", size = 25803221, 
upload-time = "2025-08-05T13:30:26.089Z" }, ] [[package]] name = "mlflow-skinny" -version = "3.1.1" +version = "3.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cachetools" }, @@ -2293,52 +2657,70 @@ dependencies = [ { name = "typing-extensions" }, { name = "uvicorn" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/dd/52/e63c0244a24ed23b5f82b30efffce150c19f126b8ef977b78a56f6d192c9/mlflow_skinny-3.1.1.tar.gz", hash = "sha256:9c2ea510eef6c115c7241305b65f7090d7fdc02399de2a6e8ddae5f285bb7a99", size = 1603411, upload-time = "2025-06-25T05:52:22.717Z" } +sdist = { url = "https://files.pythonhosted.org/packages/75/0f/09f8a3eddf2585a3f21a18c4fc23fdc69fb6a1837e5d98a21841b861c51c/mlflow_skinny-3.2.0.tar.gz", hash = "sha256:b359ec082a0a966e4e8e80f03d850da7fa677ebe57e67b1c0877029e5eeee443", size = 1635555, upload-time = "2025-08-05T13:18:18.638Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/45/24d553e0f550f82aaadd8b9d08f1410a3d750c51733a5f43fcc6def1be00/mlflow_skinny-3.1.1-py3-none-any.whl", hash = "sha256:73b1be5d0ef3099c2d0e5ec3ca7fd0b85d4a6def7d7ab35feda9f06bf8bf7049", size = 1926660, upload-time = "2025-06-25T05:52:20.556Z" }, + { url = "https://files.pythonhosted.org/packages/8e/27/d643aff3652b665e2131b982752cd094b9efbd066a412f30d3e3af2e43a4/mlflow_skinny-3.2.0-py3-none-any.whl", hash = "sha256:ec33a6fc164973e3b4d208e4ab8bec118ea93ff890ffbd08817b66468235ed71", size = 1964743, upload-time = "2025-08-05T13:18:16.615Z" }, +] + +[[package]] +name = "mlflow-tracing" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "databricks-sdk" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/47/88/a4eac838bf4957994d636dd07cd114287b59c61369017af2d1bf8a5a948a/mlflow_tracing-3.2.0.tar.gz", 
hash = "sha256:6f3dd940752ca28871b09880e9426d1293460822faa8706b33af1d50c29a0355", size = 903660, upload-time = "2025-08-05T13:14:46.669Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/c9/748c70024375001b8840d00eb64c102d22fd3e808c2b4c2f7772dbf452f1/mlflow_tracing-3.2.0-py3-none-any.whl", hash = "sha256:4180d48b6b68a70b3e37987def3b0689d3f4ba722f5d2b98344c3717d2289b99", size = 1094770, upload-time = "2025-08-05T13:14:44.825Z" }, ] [[package]] name = "mlx" -version = "0.27.1" +version = "0.28.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mlx-metal", marker = "sys_platform == 'darwin'" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/65/43/125102bbb2be6825880ae2dc8d8702f99cfa7753407f574457b36e422218/mlx-0.27.1-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:0c570c9afb57c697bd864504115be8a7c4de97f0b80557a597d496ee426a6812", size = 549869, upload-time = "2025-07-25T22:55:32.698Z" }, - { url = "https://files.pythonhosted.org/packages/f3/79/0bf681700fc8b165517e907f9ec777b5a5d628004a65a777148f68c6baa0/mlx-0.27.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:ccff7bbbd9df302b26e79013ef6d0c3531c9ba5963ead521e2d85856811b86a0", size = 531671, upload-time = "2025-07-25T22:57:03.392Z" }, - { url = "https://files.pythonhosted.org/packages/ec/97/f1367b4892bef7f78e38737d3a28094e93124f11684a28a9e92ed5a13b2b/mlx-0.27.1-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:9ccadaed449c07dfeae484620992b904c17dfea7564f8df63095c60eed3af02b", size = 531672, upload-time = "2025-07-25T22:55:35.779Z" }, - { url = "https://files.pythonhosted.org/packages/86/f6/4324386b0764deb692e14a97282a348a9a938aa8b441bf8b6c7599f418d4/mlx-0.27.1-cp313-cp313-macosx_13_0_arm64.whl", hash = "sha256:803669a28031766c2b0fe94c0a3bfd030184e706092f0a831b33620c1e2ef865", size = 549847, upload-time = "2025-07-25T22:55:17.581Z" }, - { url = 
"https://files.pythonhosted.org/packages/cf/4b/3194ccb03527a050c04d837d731a11599f8620e6ce16d3971798caae1d44/mlx-0.27.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e9649b3d86ce564000797384510c9d07af38a9ce2a07df8e2f7c6a3e0f0f059e", size = 531664, upload-time = "2025-07-25T22:56:02.928Z" }, - { url = "https://files.pythonhosted.org/packages/cc/57/a6e0d8dc6e7ba08a64d71fb89d743e77446040113ea1dbb7950be8f60f39/mlx-0.27.1-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:5c501ceec7c6aa2ea1c850dd4e14e679f5416a142264b9d5d405a4e0aeb991b2", size = 531663, upload-time = "2025-07-25T22:55:38.62Z" }, + { url = "https://files.pythonhosted.org/packages/f2/c9/d12ed6a8393450e28eb1f552b50200f83f138b1268b5f4e8074a76d745a2/mlx-0.28.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:97866d5e454e8f2d7bc42aadcbfd7565d40f4755564785e4fb964812fbad604b", size = 564160, upload-time = "2025-08-07T07:50:34.652Z" }, + { url = "https://files.pythonhosted.org/packages/71/4f/3951766a5edb75c0d2d860381f592d271b4c3b7241e730e78dd63926f5b4/mlx-0.28.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5204ebf399439e5da374295f6c1b6961355824604eed7026c18edfe4c83e9243", size = 540098, upload-time = "2025-08-07T07:50:52.67Z" }, + { url = "https://files.pythonhosted.org/packages/f7/52/cb8eb03544eace055a500bd4a3b776a3ce48198d7b7b398e21a5a3256e89/mlx-0.28.0-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:34776bd3fe97bca7c6c76d77f6104e0d6b05b3626bb3cf9ed48d3a9bbd46c180", size = 540100, upload-time = "2025-08-07T07:50:49.095Z" }, + { url = "https://files.pythonhosted.org/packages/cd/fb/795f3540057642bcf3a95fe7d17c14ffaca2102511328eee6cd92d49223e/mlx-0.28.0-cp313-cp313-macosx_13_0_arm64.whl", hash = "sha256:78c88e5cc4188f538935b23803e10eaf084caa8bfeaa2a6de983038ecee3fd78", size = 564139, upload-time = "2025-08-07T07:50:31.487Z" }, + { url = "https://files.pythonhosted.org/packages/7e/4a/39609e5e3fea14c429e8a61f9754e61e4ed5289422223ad213df9116fd55/mlx-0.28.0-cp313-cp313-macosx_14_0_arm64.whl", hash = 
"sha256:0b7a57a584ea5e807ec0a17c4eb179a71e01eeff9f25dff6950abad1e30443c2", size = 540205, upload-time = "2025-08-07T07:50:47.284Z" }, + { url = "https://files.pythonhosted.org/packages/43/af/738ea855df6742a4ac4ee1c72f298ff6cf50f0af7e553e89a1a41060c12c/mlx-0.28.0-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:a7cdcbd3faff45c18e9f51f95e9aa9410c71bbb4d5d86878a97eb996a0467505", size = 540201, upload-time = "2025-08-07T07:50:45.122Z" }, ] [[package]] name = "mlx-lm" -version = "0.26.1" +version = "0.26.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "jinja2", marker = "sys_platform == 'darwin'" }, - { name = "mlx", marker = "sys_platform == 'darwin'" }, - { name = "numpy", marker = "sys_platform == 'darwin'" }, - { name = "protobuf", marker = "sys_platform == 'darwin'" }, - { name = "pyyaml", marker = "sys_platform == 'darwin'" }, - { name = "transformers", marker = "sys_platform == 'darwin'" }, + { name = "jinja2", marker = "sys_platform != 'linux'" }, + { name = "mlx", marker = "sys_platform != 'linux'" }, + { name = "numpy", marker = "sys_platform != 'linux'" }, + { name = "protobuf", marker = "sys_platform != 'linux'" }, + { name = "pyyaml", marker = "sys_platform != 'linux'" }, + { name = "transformers", marker = "sys_platform != 'linux'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2e/c7/dafbbb35755fe9bfaa9bf79bd620eb6080768cc4e62d1735c662206d7a61/mlx_lm-0.26.1.tar.gz", hash = "sha256:ef94eb148b738145af114b992860beca5db7cbff271e3c3c1bc4bf3a72868799", size = 162956, upload-time = "2025-07-26T04:39:28.404Z" } +sdist = { url = "https://files.pythonhosted.org/packages/af/4b/ed8ec01f182203b0897415a9d20f0cd8a141def77ad43deea18ffaba4c9c/mlx_lm-0.26.3.tar.gz", hash = "sha256:06cd74ee3eea920335c528e68feb854eede45fe4e5f149b464ac100c1dbeaded", size = 172096, upload-time = "2025-08-06T21:48:22.762Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/0b/70/baa1355e44ca993fdcc8caa2535b4e716c262e78d1caa94abd4f2c8a8412/mlx_lm-0.26.1-py3-none-any.whl", hash = "sha256:b26ab8ffde618fda885edc4177c2f06d4f056078a552f641450d0e19c8637f5d", size = 224564, upload-time = "2025-07-26T04:39:26.944Z" }, + { url = "https://files.pythonhosted.org/packages/4a/ff/142ba3ec53282e179bab3ba5608c5edec3b419bcc816df63c141bcc6e2e9/mlx_lm-0.26.3-py3-none-any.whl", hash = "sha256:c6a9e44bd707822bc165ce638723ab11252e8334b7b3bf79c7d399c8c3d6d48e", size = 235187, upload-time = "2025-08-06T21:48:21.73Z" }, ] [[package]] name = "mlx-metal" -version = "0.27.1" +version = "0.28.0" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/77/89fa3327011f018638c9e943e1edc081ce9ca0ed296fe64d6daf93c6ff51/mlx_metal-0.27.1-py3-none-macosx_13_0_arm64.whl", hash = "sha256:c66d9b1adb3c0ea19492fba6493f672bc7542e65dd65f7e2995918815fbeb907", size = 33523035, upload-time = "2025-07-25T22:58:02.533Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a8/ac706ad6ce834834762d5146d791f77710efc896c13ef47fd7d672099056/mlx_metal-0.27.1-py3-none-macosx_14_0_arm64.whl", hash = "sha256:fe4415ddd242974d91c7ca0699cd01507d17da8a5ba304122ef137cdb5e7fff4", size = 32926383, upload-time = "2025-07-25T22:57:54.194Z" }, - { url = "https://files.pythonhosted.org/packages/78/77/6963681fb54ecaa0ae5de4209c15504a803a0edd1a33fd074e6c558fd5e0/mlx_metal-0.27.1-py3-none-macosx_15_0_arm64.whl", hash = "sha256:d025dea30bda8baa32c928cfa333eac64a5adc8d07656f8fc55072d99403ebc9", size = 32897065, upload-time = "2025-07-25T22:58:26.362Z" }, + { url = "https://files.pythonhosted.org/packages/f1/71/879284c71dfb12ded986a6532a4ab7df5c2794385ccf2766c1b40aee74cb/mlx_metal-0.28.0-py3-none-macosx_13_0_arm64.whl", hash = "sha256:ce08d40f1fad4f0b3bc87bfff5d603c7fe7dd141c082ba9ce9328b41e8f8d46b", size = 33840007, upload-time = "2025-08-07T07:53:07.437Z" }, + { url = 
"https://files.pythonhosted.org/packages/06/90/44a261ccb9f6052c93c9da4faa4fc6d4f914938c51ecbbb68c546ab521b9/mlx_metal-0.28.0-py3-none-macosx_14_0_arm64.whl", hash = "sha256:424142ab843e2ac0b14edb58cf88d96723823c565291f46ddeeaa072abcc991e", size = 33196759, upload-time = "2025-08-07T07:52:59.436Z" }, + { url = "https://files.pythonhosted.org/packages/72/59/8e4dee2893a56fc68a27eec7ec7ed9559c7ea01099313a9b8196373bf3cf/mlx_metal-0.28.0-py3-none-macosx_15_0_arm64.whl", hash = "sha256:214ece3781d44f57eb9686561594b28915ec5568df4a5a73da59c66880b204ed", size = 33167706, upload-time = "2025-08-07T07:53:03.852Z" }, ] [[package]] @@ -2402,59 +2784,65 @@ wheels = [ [[package]] name = "multidict" -version = "6.5.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5c/43/2d90c414d9efc4587d6e7cebae9f2c2d8001bcb4f89ed514ae837e9dcbe6/multidict-6.5.1.tar.gz", hash = "sha256:a835ea8103f4723915d7d621529c80ef48db48ae0c818afcabe0f95aa1febc3a", size = 98690, upload-time = "2025-06-24T22:16:05.117Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/33/36/225fb9b890607d740f61957febf622f5c9cd9e641a93502c7877934d57ef/multidict-6.5.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:48f95fe064f63d9601ef7a3dce2fc2a437d5fcc11bca960bc8be720330b13b6a", size = 74287, upload-time = "2025-06-24T22:14:29.456Z" }, - { url = "https://files.pythonhosted.org/packages/70/e5/c9eabb16ecf77275664413263527ab169e08371dfa6b168025d8f67261fd/multidict-6.5.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7b7b6e1ce9b61f721417c68eeeb37599b769f3b631e6b25c21f50f8f619420b9", size = 44092, upload-time = "2025-06-24T22:14:30.686Z" }, - { url = "https://files.pythonhosted.org/packages/df/0b/dd9322a432c477a2e6d089bbb53acb68ed25515b8292dbc60f27e7e45d70/multidict-6.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8b83b055889bda09fc866c0a652cdb6c36eeeafc2858259c9a7171fe82df5773", size = 42565, upload-time = "2025-06-24T22:14:31.8Z" }, 
- { url = "https://files.pythonhosted.org/packages/f9/ac/22f5b4e55a4bc99f9622de280f7da366c1d7f29ec4eec9d339cb2ba62019/multidict-6.5.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b7bd4d655dc460c7aebb73b58ed1c074e85f7286105b012556cf0f25c6d1dba3", size = 254896, upload-time = "2025-06-24T22:14:32.865Z" }, - { url = "https://files.pythonhosted.org/packages/09/dc/2f6d96d4a80ec731579cb69532fac33cbbda2a838079ae0c47c6e8f5545b/multidict-6.5.1-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:aa6dcf25ced31cdce10f004506dbc26129f28a911b32ed10e54453a0842a6173", size = 236854, upload-time = "2025-06-24T22:14:34.185Z" }, - { url = "https://files.pythonhosted.org/packages/4a/cb/ef38a69ee75e8b72e5cff9ed4cff92379eadd057a99eaf4893494bf6ab64/multidict-6.5.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:059fb556c3e6ce1a168496f92ef139ad839a47f898eaa512b1d43e5e05d78c6b", size = 265131, upload-time = "2025-06-24T22:14:35.534Z" }, - { url = "https://files.pythonhosted.org/packages/c0/9e/85d9fe9e658e0edf566c02181248fa2aaf5e53134df0c80f7231ce5fc689/multidict-6.5.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f97680c839dd9fa208e9584b1c2a5f1224bd01d31961f7f7d94984408c4a6b9e", size = 262187, upload-time = "2025-06-24T22:14:36.891Z" }, - { url = "https://files.pythonhosted.org/packages/2b/1c/b46ec1dd78c3faa55bffb354410c48fadd81029a144cd056828c82ca15b4/multidict-6.5.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7710c716243525cc05cd038c6e09f1807ee0fef2510a6e484450712c389c8d7f", size = 251220, upload-time = "2025-06-24T22:14:38.584Z" }, - { url = "https://files.pythonhosted.org/packages/6b/6b/481ec5179ddc7da8b05077ebae2dd51da3df3ae3e5842020fbfa939167c1/multidict-6.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:83eb172b4856ffff2814bdcf9c7792c0439302faab1b31376817b067b26cd8f5", size = 249949, upload-time = "2025-06-24T22:14:40.033Z" }, - { url = "https://files.pythonhosted.org/packages/00/e3/642f63e12c1b8e6662c23626a98e9d764fe5a63c3a6cb59002f6fdcb920f/multidict-6.5.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:562d4714fa43f6ebc043a657535e4575e7d6141a818c9b3055f0868d29a1a41b", size = 244438, upload-time = "2025-06-24T22:14:41.464Z" }, - { url = "https://files.pythonhosted.org/packages/dc/cf/797397f6d38b011912504aef213a4be43ef4ec134859caa47f94d810bad8/multidict-6.5.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:2d7def2fc47695c46a427b8f298fb5ace03d635c1fb17f30d6192c9a8fb69e70", size = 259921, upload-time = "2025-06-24T22:14:43.248Z" }, - { url = "https://files.pythonhosted.org/packages/82/b2/ae914a2d84eba21e956fa3727060248ca23ed4a5bf1beb057df0d10f9de3/multidict-6.5.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:77bc8ab5c6bfe696eff564824e73a451fdeca22f3b960261750836cee02bcbfa", size = 252691, upload-time = "2025-06-24T22:14:45.57Z" }, - { url = "https://files.pythonhosted.org/packages/01/fa/1ab4d79a236b871cfd40d36a1f9942906c630bd2b7822287bd3927addb62/multidict-6.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9eec51891d3c210948ead894ec1483d48748abec08db5ce9af52cc13fef37aee", size = 246224, upload-time = "2025-06-24T22:14:47.316Z" }, - { url = "https://files.pythonhosted.org/packages/78/dd/bf002fe04e952db73cad8ce10a5b5347358d0d17221aef156e050aff690b/multidict-6.5.1-cp312-cp312-win32.whl", hash = "sha256:189f0c2bd1c0ae5509e453707d0e187e030c9e873a0116d1f32d1c870d0fc347", size = 41354, upload-time = "2025-06-24T22:14:48.567Z" }, - { url = "https://files.pythonhosted.org/packages/95/ce/508a8487d98fdc3e693755bc19c543a2af293f5ce96da398bd1974efb802/multidict-6.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:e81f23b4b6f2a588f15d5cb554b2d8b482bb6044223d64b86bc7079cae9ebaad", size = 45072, upload-time = "2025-06-24T22:14:50.898Z" }, - { url = 
"https://files.pythonhosted.org/packages/ae/da/4782cf2f274d0d56fff6c07fc5cc5a14acf821dec08350c17d66d0207a05/multidict-6.5.1-cp312-cp312-win_arm64.whl", hash = "sha256:79d13e06d5241f9c8479dfeaf0f7cce8f453a4a302c9a0b1fa9b1a6869ff7757", size = 42149, upload-time = "2025-06-24T22:14:53.138Z" }, - { url = "https://files.pythonhosted.org/packages/19/3f/c2e07031111d2513d260157933a8697ad52a935d8a2a2b8b7b317ddd9a96/multidict-6.5.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:98011312f36d1e496f15454a95578d1212bc2ffc25650a8484752b06d304fd9b", size = 73588, upload-time = "2025-06-24T22:14:54.332Z" }, - { url = "https://files.pythonhosted.org/packages/95/bb/f47aa21827202a9f889fd66de9a1db33d0e4bbaaa2567156e4efb3cc0e5e/multidict-6.5.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bae589fb902b47bd94e6f539b34eefe55a1736099f616f614ec1544a43f95b05", size = 43756, upload-time = "2025-06-24T22:14:55.748Z" }, - { url = "https://files.pythonhosted.org/packages/9f/ec/24549de092c9b0bc3167e0beb31a11be58e8595dbcfed2b7821795bb3923/multidict-6.5.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6eb3bf26cd94eb306e4bc776d0964cc67a7967e4ad9299309f0ff5beec3c62be", size = 42222, upload-time = "2025-06-24T22:14:57.418Z" }, - { url = "https://files.pythonhosted.org/packages/13/45/54452027ebc0ba660667aab67ae11afb9aaba91f4b5d63cddef045279d94/multidict-6.5.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e5e1a5a99c72d1531501406fcc06b6bf699ebd079dacd6807bb43fc0ff260e5c", size = 253014, upload-time = "2025-06-24T22:14:58.738Z" }, - { url = "https://files.pythonhosted.org/packages/97/3c/76e7b4c0ce3a8bb43efca679674fba421333fbc8429134072db80e13dcb8/multidict-6.5.1-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:38755bcba18720cb2338bea23a5afcff234445ee75fa11518f6130e22f2ab970", size = 235939, upload-time = "2025-06-24T22:15:00.138Z" }, - { url = 
"https://files.pythonhosted.org/packages/86/ce/48e3123a9af61ff2f60e3764b0b15cf4fca22b1299aac281252ac3a590d6/multidict-6.5.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f42fef9bcba3c32fd4e4a23c5757fc807d218b249573aaffa8634879f95feb73", size = 262940, upload-time = "2025-06-24T22:15:01.52Z" }, - { url = "https://files.pythonhosted.org/packages/b3/ab/bccd739faf87051b55df619a0967c8545b4d4a4b90258c5f564ab1752f15/multidict-6.5.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:071b962f4cc87469cda90c7cc1c077b76496878b39851d7417a3d994e27fe2c6", size = 260652, upload-time = "2025-06-24T22:15:02.988Z" }, - { url = "https://files.pythonhosted.org/packages/9a/9c/01f654aad28a5d0d74f2678c1541ae15e711f99603fd84c780078205966e/multidict-6.5.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:627ba4b7ce7c0115981f0fd91921f5d101dfb9972622178aeef84ccce1c2bbf3", size = 250011, upload-time = "2025-06-24T22:15:04.317Z" }, - { url = "https://files.pythonhosted.org/packages/5c/bc/edf08906e1db7385c6bf36e4179957307f50c44a889493e9b251255be79c/multidict-6.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05dcaed3e5e54f0d0f99a39762b0195274b75016cbf246f600900305581cf1a2", size = 248242, upload-time = "2025-06-24T22:15:06.035Z" }, - { url = "https://files.pythonhosted.org/packages/b7/c3/1ad054b88b889fda8b62ea9634ac7082567e8dc42b9b794a2c565ef102ab/multidict-6.5.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:11f5ecf3e741a18c578d118ad257c5588ca33cc7c46d51c0487d7ae76f072c32", size = 244683, upload-time = "2025-06-24T22:15:07.731Z" }, - { url = "https://files.pythonhosted.org/packages/57/63/119a76b2095e1bb765816175cafeac7b520f564691abef2572fb80f4f246/multidict-6.5.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b948eb625411c20b15088fca862c51a39140b9cf7875b5fb47a72bb249fa2f42", size = 257626, upload-time = 
"2025-06-24T22:15:09.013Z" }, - { url = "https://files.pythonhosted.org/packages/26/a9/b91a76af5ff49bd088ee76d11eb6134227f5ea50bcd5f6738443b2fe8e05/multidict-6.5.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fc993a96dfc8300befd03d03df46efdb1d8d5a46911b014e956a4443035f470d", size = 251077, upload-time = "2025-06-24T22:15:10.366Z" }, - { url = "https://files.pythonhosted.org/packages/2a/fe/b1dc57aaa4de9f5a27543e28bd1f8bff00a316888b7344b5d33258b14b0a/multidict-6.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ee2d333380f22d35a56c6461f4579cfe186e143cd0b010b9524ac027de2a34cd", size = 244715, upload-time = "2025-06-24T22:15:11.76Z" }, - { url = "https://files.pythonhosted.org/packages/51/55/47a82690f71d0141eea49a623bbcc00a4d28770efc7cba8ead75602c9b90/multidict-6.5.1-cp313-cp313-win32.whl", hash = "sha256:5891e3327e6a426ddd443c87339b967c84feb8c022dd425e0c025fa0fcd71e68", size = 41156, upload-time = "2025-06-24T22:15:13.139Z" }, - { url = "https://files.pythonhosted.org/packages/25/b3/43306e4d7d3a9898574d1dc156b9607540dad581b1d767c992030751b82d/multidict-6.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:fcdaa72261bff25fad93e7cb9bd7112bd4bac209148e698e380426489d8ed8a9", size = 44933, upload-time = "2025-06-24T22:15:14.639Z" }, - { url = "https://files.pythonhosted.org/packages/30/e2/34cb83c8a4e01b28e2abf30dc90178aa63c9db042be22fa02472cb744b86/multidict-6.5.1-cp313-cp313-win_arm64.whl", hash = "sha256:84292145303f354a35558e601c665cdf87059d87b12777417e2e57ba3eb98903", size = 41967, upload-time = "2025-06-24T22:15:15.856Z" }, - { url = "https://files.pythonhosted.org/packages/64/08/17d2de9cf749ea9589ecfb7532ab4988e8b113b7624826dba6b7527a58f3/multidict-6.5.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f8316e58db799a1972afbc46770dfaaf20b0847003ab80de6fcb9861194faa3f", size = 80513, upload-time = "2025-06-24T22:15:16.946Z" }, - { url = 
"https://files.pythonhosted.org/packages/3e/b9/c9392465a21f7dff164633348b4cf66eef55c4ee48bdcdc00f0a71792779/multidict-6.5.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3468f0db187aca59eb56e0aa9f7c8c5427bcb844ad1c86557b4886aeb4484d8", size = 46854, upload-time = "2025-06-24T22:15:18.116Z" }, - { url = "https://files.pythonhosted.org/packages/2e/24/d79cbed5d0573304bc907dff0e5ad8788a4de891eec832809812b319930e/multidict-6.5.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:228533a5f99f1248cd79f6470779c424d63bc3e10d47c82511c65cc294458445", size = 45724, upload-time = "2025-06-24T22:15:19.241Z" }, - { url = "https://files.pythonhosted.org/packages/ec/22/232be6c077183719c78131f0e3c3d7134eb2d839e6e50e1c1e69e5ef5965/multidict-6.5.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527076fdf5854901b1246c589af9a8a18b4a308375acb0020b585f696a10c794", size = 251895, upload-time = "2025-06-24T22:15:20.564Z" }, - { url = "https://files.pythonhosted.org/packages/57/80/85985e1441864b946e79538355b7b47f36206bf6bbaa2fa6d74d8232f2ab/multidict-6.5.1-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9a17a17bad5c22f43e6a6b285dd9c16b1e8f8428202cd9bc22adaac68d0bbfed", size = 229357, upload-time = "2025-06-24T22:15:21.949Z" }, - { url = "https://files.pythonhosted.org/packages/b1/14/0024d1428b05aedaeea211da232aa6b6ad5c556a8a38b0942df1e54e1fa5/multidict-6.5.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:efd1951edab4a6cb65108d411867811f2b283f4b972337fb4269e40142f7f6a6", size = 259262, upload-time = "2025-06-24T22:15:23.455Z" }, - { url = "https://files.pythonhosted.org/packages/b1/cc/3fe63d61ffc9a48d62f36249e228e330144d990ac01f61169b615a3be471/multidict-6.5.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c07d5f38b39acb4f8f61a7aa4166d140ed628245ff0441630df15340532e3b3c", size 
= 257998, upload-time = "2025-06-24T22:15:24.907Z" }, - { url = "https://files.pythonhosted.org/packages/e8/e4/46b38b9a565ccc5d86f55787090670582d51ab0a0d37cfeaf4313b053f7b/multidict-6.5.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a6605dc74cd333be279e1fcb568ea24f7bdf1cf09f83a77360ce4dd32d67f14", size = 247951, upload-time = "2025-06-24T22:15:26.274Z" }, - { url = "https://files.pythonhosted.org/packages/af/78/58a9bc0674401f1f26418cd58a5ebf35ce91ead76a22b578908acfe0f4e2/multidict-6.5.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d64e30ae9ba66ce303a567548a06d64455d97c5dff7052fe428d154274d7174", size = 246786, upload-time = "2025-06-24T22:15:27.695Z" }, - { url = "https://files.pythonhosted.org/packages/66/24/51142ccee295992e22881cccc54b291308423bbcc836fcf4d2edef1a88d0/multidict-6.5.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2fb5dde79a7f6d98ac5e26a4c9de77ccd2c5224a7ce89aeac6d99df7bbe06464", size = 235030, upload-time = "2025-06-24T22:15:29.391Z" }, - { url = "https://files.pythonhosted.org/packages/4b/9a/a6f7b75460d3e35b16bf7745c9e3ebb3293324a4295e586563bf50d361f4/multidict-6.5.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:8a0d22e8b07cf620e9aeb1582340d00f0031e6a1f3e39d9c2dcbefa8691443b4", size = 253964, upload-time = "2025-06-24T22:15:31.689Z" }, - { url = "https://files.pythonhosted.org/packages/3d/f8/0b690674bf8f78604eb0a2b0a85d1380ff3003f270440d40def2a3de8cf4/multidict-6.5.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:0120ed5cff2082c7a0ed62a8f80f4f6ac266010c722381816462f279bfa19487", size = 247370, upload-time = "2025-06-24T22:15:33.114Z" }, - { url = "https://files.pythonhosted.org/packages/7f/7d/ca55049d1041c517f294c1755c786539cb7a8dc5033361f20ce3a3d817be/multidict-6.5.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3dea06ba27401c4b54317aa04791182dc9295e7aa623732dd459071a0e0f65db", size = 242920, upload-time = "2025-06-24T22:15:34.669Z" }, - { url = 
"https://files.pythonhosted.org/packages/1e/65/f4afa14f0921751864bb3ef80267f15ecae423483e8da9bc5d3757632bfa/multidict-6.5.1-cp313-cp313t-win32.whl", hash = "sha256:93b21be44f3cfee3be68ed5cd8848a3c0420d76dbd12d74f7776bde6b29e5f33", size = 46968, upload-time = "2025-06-24T22:15:36.023Z" }, - { url = "https://files.pythonhosted.org/packages/00/0a/13d08be1ca1523df515fb4efd3cf10f153e62d533f55c53f543cd73041e8/multidict-6.5.1-cp313-cp313t-win_amd64.whl", hash = "sha256:c5c18f8646a520cc34d00f65f9f6f77782b8a8c59fd8de10713e0de7f470b5d0", size = 52353, upload-time = "2025-06-24T22:15:37.247Z" }, - { url = "https://files.pythonhosted.org/packages/4b/dd/84aaf725b236677597a9570d8c1c99af0ba03712149852347969e014d826/multidict-6.5.1-cp313-cp313t-win_arm64.whl", hash = "sha256:eb27128141474a1d545f0531b496c7c2f1c4beff50cb5a828f36eb62fef16c67", size = 44500, upload-time = "2025-06-24T22:15:38.445Z" }, - { url = "https://files.pythonhosted.org/packages/07/9f/d4719ce55a1d8bf6619e8bb92f1e2e7399026ea85ae0c324ec77ee06c050/multidict-6.5.1-py3-none-any.whl", hash = "sha256:895354f4a38f53a1df2cc3fa2223fa714cff2b079a9f018a76cad35e7f0f044c", size = 12185, upload-time = "2025-06-24T22:16:03.816Z" }, +version = "6.6.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/69/7f/0652e6ed47ab288e3756ea9c0df8b14950781184d4bd7883f4d87dd41245/multidict-6.6.4.tar.gz", hash = "sha256:d2d4e4787672911b48350df02ed3fa3fffdc2f2e8ca06dd6afdf34189b76a9dd", size = 101843, upload-time = "2025-08-11T12:08:48.217Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/f6/512ffd8fd8b37fb2680e5ac35d788f1d71bbaf37789d21a820bdc441e565/multidict-6.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0ffb87be160942d56d7b87b0fdf098e81ed565add09eaa1294268c7f3caac4c8", size = 76516, upload-time = "2025-08-11T12:06:53.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/58/45c3e75deb8855c36bd66cc1658007589662ba584dbf423d01df478dd1c5/multidict-6.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d191de6cbab2aff5de6c5723101705fd044b3e4c7cfd587a1929b5028b9714b3", size = 45394, upload-time = "2025-08-11T12:06:54.555Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/e8c4472a93a26e4507c0b8e1f0762c0d8a32de1328ef72fd704ef9cc5447/multidict-6.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38a0956dd92d918ad5feff3db8fcb4a5eb7dba114da917e1a88475619781b57b", size = 43591, upload-time = "2025-08-11T12:06:55.672Z" }, + { url = "https://files.pythonhosted.org/packages/05/51/edf414f4df058574a7265034d04c935aa84a89e79ce90fcf4df211f47b16/multidict-6.6.4-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:6865f6d3b7900ae020b495d599fcf3765653bc927951c1abb959017f81ae8287", size = 237215, upload-time = "2025-08-11T12:06:57.213Z" }, + { url = "https://files.pythonhosted.org/packages/c8/45/8b3d6dbad8cf3252553cc41abea09ad527b33ce47a5e199072620b296902/multidict-6.6.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a2088c126b6f72db6c9212ad827d0ba088c01d951cee25e758c450da732c138", size = 258299, upload-time = "2025-08-11T12:06:58.946Z" }, + { url = "https://files.pythonhosted.org/packages/3c/e8/8ca2e9a9f5a435fc6db40438a55730a4bf4956b554e487fa1b9ae920f825/multidict-6.6.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0f37bed7319b848097085d7d48116f545985db988e2256b2e6f00563a3416ee6", size = 242357, upload-time = "2025-08-11T12:07:00.301Z" }, + { url = "https://files.pythonhosted.org/packages/0f/84/80c77c99df05a75c28490b2af8f7cba2a12621186e0a8b0865d8e745c104/multidict-6.6.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:01368e3c94032ba6ca0b78e7ccb099643466cf24f8dc8eefcfdc0571d56e58f9", 
size = 268369, upload-time = "2025-08-11T12:07:01.638Z" }, + { url = "https://files.pythonhosted.org/packages/0d/e9/920bfa46c27b05fb3e1ad85121fd49f441492dca2449c5bcfe42e4565d8a/multidict-6.6.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fe323540c255db0bffee79ad7f048c909f2ab0edb87a597e1c17da6a54e493c", size = 269341, upload-time = "2025-08-11T12:07:02.943Z" }, + { url = "https://files.pythonhosted.org/packages/af/65/753a2d8b05daf496f4a9c367fe844e90a1b2cac78e2be2c844200d10cc4c/multidict-6.6.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8eb3025f17b0a4c3cd08cda49acf312a19ad6e8a4edd9dbd591e6506d999402", size = 256100, upload-time = "2025-08-11T12:07:04.564Z" }, + { url = "https://files.pythonhosted.org/packages/09/54/655be13ae324212bf0bc15d665a4e34844f34c206f78801be42f7a0a8aaa/multidict-6.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bbc14f0365534d35a06970d6a83478b249752e922d662dc24d489af1aa0d1be7", size = 253584, upload-time = "2025-08-11T12:07:05.914Z" }, + { url = "https://files.pythonhosted.org/packages/5c/74/ab2039ecc05264b5cec73eb018ce417af3ebb384ae9c0e9ed42cb33f8151/multidict-6.6.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:75aa52fba2d96bf972e85451b99d8e19cc37ce26fd016f6d4aa60da9ab2b005f", size = 251018, upload-time = "2025-08-11T12:07:08.301Z" }, + { url = "https://files.pythonhosted.org/packages/af/0a/ccbb244ac848e56c6427f2392741c06302bbfba49c0042f1eb3c5b606497/multidict-6.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fefd4a815e362d4f011919d97d7b4a1e566f1dde83dc4ad8cfb5b41de1df68d", size = 251477, upload-time = "2025-08-11T12:07:10.248Z" }, + { url = "https://files.pythonhosted.org/packages/0e/b0/0ed49bba775b135937f52fe13922bc64a7eaf0a3ead84a36e8e4e446e096/multidict-6.6.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:db9801fe021f59a5b375ab778973127ca0ac52429a26e2fd86aa9508f4d26eb7", size = 263575, upload-time = 
"2025-08-11T12:07:11.928Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d9/7fb85a85e14de2e44dfb6a24f03c41e2af8697a6df83daddb0e9b7569f73/multidict-6.6.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a650629970fa21ac1fb06ba25dabfc5b8a2054fcbf6ae97c758aa956b8dba802", size = 259649, upload-time = "2025-08-11T12:07:13.244Z" }, + { url = "https://files.pythonhosted.org/packages/03/9e/b3a459bcf9b6e74fa461a5222a10ff9b544cb1cd52fd482fb1b75ecda2a2/multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:452ff5da78d4720d7516a3a2abd804957532dd69296cb77319c193e3ffb87e24", size = 251505, upload-time = "2025-08-11T12:07:14.57Z" }, + { url = "https://files.pythonhosted.org/packages/86/a2/8022f78f041dfe6d71e364001a5cf987c30edfc83c8a5fb7a3f0974cff39/multidict-6.6.4-cp312-cp312-win32.whl", hash = "sha256:8c2fcb12136530ed19572bbba61b407f655e3953ba669b96a35036a11a485793", size = 41888, upload-time = "2025-08-11T12:07:15.904Z" }, + { url = "https://files.pythonhosted.org/packages/c7/eb/d88b1780d43a56db2cba24289fa744a9d216c1a8546a0dc3956563fd53ea/multidict-6.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:047d9425860a8c9544fed1b9584f0c8bcd31bcde9568b047c5e567a1025ecd6e", size = 46072, upload-time = "2025-08-11T12:07:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/9f/16/b929320bf5750e2d9d4931835a4c638a19d2494a5b519caaaa7492ebe105/multidict-6.6.4-cp312-cp312-win_arm64.whl", hash = "sha256:14754eb72feaa1e8ae528468f24250dd997b8e2188c3d2f593f9eba259e4b364", size = 43222, upload-time = "2025-08-11T12:07:18.328Z" }, + { url = "https://files.pythonhosted.org/packages/3a/5d/e1db626f64f60008320aab00fbe4f23fc3300d75892a3381275b3d284580/multidict-6.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f46a6e8597f9bd71b31cc708195d42b634c8527fecbcf93febf1052cacc1f16e", size = 75848, upload-time = "2025-08-11T12:07:19.912Z" }, + { url = 
"https://files.pythonhosted.org/packages/4c/aa/8b6f548d839b6c13887253af4e29c939af22a18591bfb5d0ee6f1931dae8/multidict-6.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:22e38b2bc176c5eb9c0a0e379f9d188ae4cd8b28c0f53b52bce7ab0a9e534657", size = 45060, upload-time = "2025-08-11T12:07:21.163Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c6/f5e97e5d99a729bc2aa58eb3ebfa9f1e56a9b517cc38c60537c81834a73f/multidict-6.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5df8afd26f162da59e218ac0eefaa01b01b2e6cd606cffa46608f699539246da", size = 43269, upload-time = "2025-08-11T12:07:22.392Z" }, + { url = "https://files.pythonhosted.org/packages/dc/31/d54eb0c62516776f36fe67f84a732f97e0b0e12f98d5685bebcc6d396910/multidict-6.6.4-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:49517449b58d043023720aa58e62b2f74ce9b28f740a0b5d33971149553d72aa", size = 237158, upload-time = "2025-08-11T12:07:23.636Z" }, + { url = "https://files.pythonhosted.org/packages/c4/1c/8a10c1c25b23156e63b12165a929d8eb49a6ed769fdbefb06e6f07c1e50d/multidict-6.6.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9408439537c5afdca05edd128a63f56a62680f4b3c234301055d7a2000220f", size = 257076, upload-time = "2025-08-11T12:07:25.049Z" }, + { url = "https://files.pythonhosted.org/packages/ad/86/90e20b5771d6805a119e483fd3d1e8393e745a11511aebca41f0da38c3e2/multidict-6.6.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:87a32d20759dc52a9e850fe1061b6e41ab28e2998d44168a8a341b99ded1dba0", size = 240694, upload-time = "2025-08-11T12:07:26.458Z" }, + { url = "https://files.pythonhosted.org/packages/e7/49/484d3e6b535bc0555b52a0a26ba86e4d8d03fd5587d4936dc59ba7583221/multidict-6.6.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:52e3c8d43cdfff587ceedce9deb25e6ae77daba560b626e97a56ddcad3756879", 
size = 266350, upload-time = "2025-08-11T12:07:27.94Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b4/aa4c5c379b11895083d50021e229e90c408d7d875471cb3abf721e4670d6/multidict-6.6.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ad8850921d3a8d8ff6fbef790e773cecfc260bbfa0566998980d3fa8f520bc4a", size = 267250, upload-time = "2025-08-11T12:07:29.303Z" }, + { url = "https://files.pythonhosted.org/packages/80/e5/5e22c5bf96a64bdd43518b1834c6d95a4922cc2066b7d8e467dae9b6cee6/multidict-6.6.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:497a2954adc25c08daff36f795077f63ad33e13f19bfff7736e72c785391534f", size = 254900, upload-time = "2025-08-11T12:07:30.764Z" }, + { url = "https://files.pythonhosted.org/packages/17/38/58b27fed927c07035abc02befacab42491e7388ca105e087e6e0215ead64/multidict-6.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:024ce601f92d780ca1617ad4be5ac15b501cc2414970ffa2bb2bbc2bd5a68fa5", size = 252355, upload-time = "2025-08-11T12:07:32.205Z" }, + { url = "https://files.pythonhosted.org/packages/d0/a1/dad75d23a90c29c02b5d6f3d7c10ab36c3197613be5d07ec49c7791e186c/multidict-6.6.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a693fc5ed9bdd1c9e898013e0da4dcc640de7963a371c0bd458e50e046bf6438", size = 250061, upload-time = "2025-08-11T12:07:33.623Z" }, + { url = "https://files.pythonhosted.org/packages/b8/1a/ac2216b61c7f116edab6dc3378cca6c70dc019c9a457ff0d754067c58b20/multidict-6.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:190766dac95aab54cae5b152a56520fd99298f32a1266d66d27fdd1b5ac00f4e", size = 249675, upload-time = "2025-08-11T12:07:34.958Z" }, + { url = "https://files.pythonhosted.org/packages/d4/79/1916af833b800d13883e452e8e0977c065c4ee3ab7a26941fbfdebc11895/multidict-6.6.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8f2a5ffdceab9dcd97c7a016deb2308531d5f0fced2bb0c9e1df45b3363d7", size = 261247, upload-time = 
"2025-08-11T12:07:36.588Z" }, + { url = "https://files.pythonhosted.org/packages/c5/65/d1f84fe08ac44a5fc7391cbc20a7cedc433ea616b266284413fd86062f8c/multidict-6.6.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:59e8d40ab1f5a8597abcef00d04845155a5693b5da00d2c93dbe88f2050f2812", size = 257960, upload-time = "2025-08-11T12:07:39.735Z" }, + { url = "https://files.pythonhosted.org/packages/13/b5/29ec78057d377b195ac2c5248c773703a6b602e132a763e20ec0457e7440/multidict-6.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:467fe64138cfac771f0e949b938c2e1ada2b5af22f39692aa9258715e9ea613a", size = 250078, upload-time = "2025-08-11T12:07:41.525Z" }, + { url = "https://files.pythonhosted.org/packages/c4/0e/7e79d38f70a872cae32e29b0d77024bef7834b0afb406ddae6558d9e2414/multidict-6.6.4-cp313-cp313-win32.whl", hash = "sha256:14616a30fe6d0a48d0a48d1a633ab3b8bec4cf293aac65f32ed116f620adfd69", size = 41708, upload-time = "2025-08-11T12:07:43.405Z" }, + { url = "https://files.pythonhosted.org/packages/9d/34/746696dffff742e97cd6a23da953e55d0ea51fa601fa2ff387b3edcfaa2c/multidict-6.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:40cd05eaeb39e2bc8939451f033e57feaa2ac99e07dbca8afe2be450a4a3b6cf", size = 45912, upload-time = "2025-08-11T12:07:45.082Z" }, + { url = "https://files.pythonhosted.org/packages/c7/87/3bac136181e271e29170d8d71929cdeddeb77f3e8b6a0c08da3a8e9da114/multidict-6.6.4-cp313-cp313-win_arm64.whl", hash = "sha256:f6eb37d511bfae9e13e82cb4d1af36b91150466f24d9b2b8a9785816deb16605", size = 43076, upload-time = "2025-08-11T12:07:46.746Z" }, + { url = "https://files.pythonhosted.org/packages/64/94/0a8e63e36c049b571c9ae41ee301ada29c3fee9643d9c2548d7d558a1d99/multidict-6.6.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6c84378acd4f37d1b507dfa0d459b449e2321b3ba5f2338f9b085cf7a7ba95eb", size = 82812, upload-time = "2025-08-11T12:07:48.402Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/1a/be8e369dfcd260d2070a67e65dd3990dd635cbd735b98da31e00ea84cd4e/multidict-6.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0e0558693063c75f3d952abf645c78f3c5dfdd825a41d8c4d8156fc0b0da6e7e", size = 48313, upload-time = "2025-08-11T12:07:49.679Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/dd4ade298674b2f9a7b06a32c94ffbc0497354df8285f27317c66433ce3b/multidict-6.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3f8e2384cb83ebd23fd07e9eada8ba64afc4c759cd94817433ab8c81ee4b403f", size = 46777, upload-time = "2025-08-11T12:07:51.318Z" }, + { url = "https://files.pythonhosted.org/packages/89/db/98aa28bc7e071bfba611ac2ae803c24e96dd3a452b4118c587d3d872c64c/multidict-6.6.4-cp313-cp313t-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:f996b87b420995a9174b2a7c1a8daf7db4750be6848b03eb5e639674f7963773", size = 229321, upload-time = "2025-08-11T12:07:52.965Z" }, + { url = "https://files.pythonhosted.org/packages/c7/bc/01ddda2a73dd9d167bd85d0e8ef4293836a8f82b786c63fb1a429bc3e678/multidict-6.6.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc356250cffd6e78416cf5b40dc6a74f1edf3be8e834cf8862d9ed5265cf9b0e", size = 249954, upload-time = "2025-08-11T12:07:54.423Z" }, + { url = "https://files.pythonhosted.org/packages/06/78/6b7c0f020f9aa0acf66d0ab4eb9f08375bac9a50ff5e3edb1c4ccd59eafc/multidict-6.6.4-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:dadf95aa862714ea468a49ad1e09fe00fcc9ec67d122f6596a8d40caf6cec7d0", size = 228612, upload-time = "2025-08-11T12:07:55.914Z" }, + { url = "https://files.pythonhosted.org/packages/00/44/3faa416f89b2d5d76e9d447296a81521e1c832ad6e40b92f990697b43192/multidict-6.6.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:7dd57515bebffd8ebd714d101d4c434063322e4fe24042e90ced41f18b6d3395", size = 257528, upload-time = "2025-08-11T12:07:57.371Z" }, + { url = "https://files.pythonhosted.org/packages/05/5f/77c03b89af0fcb16f018f668207768191fb9dcfb5e3361a5e706a11db2c9/multidict-6.6.4-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:967af5f238ebc2eb1da4e77af5492219fbd9b4b812347da39a7b5f5c72c0fa45", size = 256329, upload-time = "2025-08-11T12:07:58.844Z" }, + { url = "https://files.pythonhosted.org/packages/cf/e9/ed750a2a9afb4f8dc6f13dc5b67b514832101b95714f1211cd42e0aafc26/multidict-6.6.4-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a4c6875c37aae9794308ec43e3530e4aa0d36579ce38d89979bbf89582002bb", size = 247928, upload-time = "2025-08-11T12:08:01.037Z" }, + { url = "https://files.pythonhosted.org/packages/1f/b5/e0571bc13cda277db7e6e8a532791d4403dacc9850006cb66d2556e649c0/multidict-6.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f683a551e92bdb7fac545b9c6f9fa2aebdeefa61d607510b3533286fcab67f5", size = 245228, upload-time = "2025-08-11T12:08:02.96Z" }, + { url = "https://files.pythonhosted.org/packages/f3/a3/69a84b0eccb9824491f06368f5b86e72e4af54c3067c37c39099b6687109/multidict-6.6.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:3ba5aaf600edaf2a868a391779f7a85d93bed147854925f34edd24cc70a3e141", size = 235869, upload-time = "2025-08-11T12:08:04.746Z" }, + { url = "https://files.pythonhosted.org/packages/a9/9d/28802e8f9121a6a0804fa009debf4e753d0a59969ea9f70be5f5fdfcb18f/multidict-6.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:580b643b7fd2c295d83cad90d78419081f53fd532d1f1eb67ceb7060f61cff0d", size = 243446, upload-time = "2025-08-11T12:08:06.332Z" }, + { url = "https://files.pythonhosted.org/packages/38/ea/6c98add069b4878c1d66428a5f5149ddb6d32b1f9836a826ac764b9940be/multidict-6.6.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:37b7187197da6af3ee0b044dbc9625afd0c885f2800815b228a0e70f9a7f473d", size = 252299, upload-time = "2025-08-11T12:08:07.931Z" }, + { url = "https://files.pythonhosted.org/packages/3a/09/8fe02d204473e14c0af3affd50af9078839dfca1742f025cca765435d6b4/multidict-6.6.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e1b93790ed0bc26feb72e2f08299691ceb6da5e9e14a0d13cc74f1869af327a0", size = 246926, upload-time = "2025-08-11T12:08:09.467Z" }, + { url = "https://files.pythonhosted.org/packages/37/3d/7b1e10d774a6df5175ecd3c92bff069e77bed9ec2a927fdd4ff5fe182f67/multidict-6.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a506a77ddee1efcca81ecbeae27ade3e09cdf21a8ae854d766c2bb4f14053f92", size = 243383, upload-time = "2025-08-11T12:08:10.981Z" }, + { url = "https://files.pythonhosted.org/packages/50/b0/a6fae46071b645ae98786ab738447de1ef53742eaad949f27e960864bb49/multidict-6.6.4-cp313-cp313t-win32.whl", hash = "sha256:f93b2b2279883d1d0a9e1bd01f312d6fc315c5e4c1f09e112e4736e2f650bc4e", size = 47775, upload-time = "2025-08-11T12:08:12.439Z" }, + { url = "https://files.pythonhosted.org/packages/b2/0a/2436550b1520091af0600dff547913cb2d66fbac27a8c33bc1b1bccd8d98/multidict-6.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:6d46a180acdf6e87cc41dc15d8f5c2986e1e8739dc25dbb7dac826731ef381a4", size = 53100, upload-time = "2025-08-11T12:08:13.823Z" }, + { url = "https://files.pythonhosted.org/packages/97/ea/43ac51faff934086db9c072a94d327d71b7d8b40cd5dcb47311330929ef0/multidict-6.6.4-cp313-cp313t-win_arm64.whl", hash = "sha256:756989334015e3335d087a27331659820d53ba432befdef6a718398b0a8493ad", size = 45501, upload-time = "2025-08-11T12:08:15.173Z" }, + { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" }, ] [[package]] @@ -2501,11 +2889,118 @@ 
wheels = [ [[package]] name = "narwhals" -version = "1.44.0" +version = "2.1.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/56/e5/0b875d29e2a4d112c58fef6aac2ed3a73bbdd4d8d0dce722fd154357248a/narwhals-1.44.0.tar.gz", hash = "sha256:8cf0616d4f6f21225b3b56fcde96ccab6d05023561a0f162402aa9b8c33ad31d", size = 499250, upload-time = "2025-06-23T08:28:08.653Z" } +sdist = { url = "https://files.pythonhosted.org/packages/37/f0/b0550d9b84759f4d045fd43da2f811e8b23dc2001e38c3254456da7f3adb/narwhals-2.1.2.tar.gz", hash = "sha256:afb9597e76d5b38c2c4b7c37d27a2418b8cc8049a66b8a5aca9581c92ae8f8bf", size = 533772, upload-time = "2025-08-15T08:24:50.916Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/fb/12f4a971467aac3cb7cbccbbfca5d0f05e23722068112c1ac4a393613ebe/narwhals-1.44.0-py3-none-any.whl", hash = "sha256:a170ea0bab4cf1f323d9f8bf17f2d7042c3d73802bea321996b39bf075d57de5", size = 365240, upload-time = "2025-06-23T08:28:06.314Z" }, + { url = "https://files.pythonhosted.org/packages/a8/01/824fff6789ce92a53242d24b6f5f3a982df2f610c51020f934bf878d2a99/narwhals-2.1.2-py3-none-any.whl", hash = "sha256:136b2f533a4eb3245c54254f137c5d14cef5c4668cff67dc6e911a602acd3547", size = 392064, upload-time = "2025-08-15T08:24:48.788Z" }, +] + +[[package]] +name = "nemo-automodel" +source = { editable = "3rdparty/Automodel-workspace/Automodel" } +dependencies = [ + { name = "bitsandbytes", marker = "platform_machine == 'x86_64' and sys_platform != 'darwin'" }, + { name = "datasets" }, + { name = "liger-kernel", marker = "platform_machine == 'x86_64' and sys_platform != 'darwin'" }, + { name = "pyyaml" }, + { name = "torch" }, + { name = "torchao" }, + { name = "torchdata" }, + { name = "transformers" }, + { name = "wandb" }, +] + +[package.optional-dependencies] +fa = [ + { name = "flash-attn" }, +] +vlm = [ + { name = "backoff" }, + { name = "mistral-common", extra = ["opencv"] }, + { name = "numba" }, + { name = 
"numpy" }, + { name = "pillow" }, + { name = "qwen-vl-utils", extra = ["decord"] }, + { name = "timm" }, + { name = "torchcodec" }, + { name = "transformers" }, +] + +[package.dev-dependencies] +build = [ + { name = "setuptools" }, + { name = "torch" }, +] +dev = [ + { name = "cut-cross-entropy" }, +] +docs = [ + { name = "myst-parser" }, + { name = "nvidia-sphinx-theme" }, + { name = "sphinx" }, + { name = "sphinx-autobuild" }, + { name = "sphinx-autodoc2" }, + { name = "sphinx-copybutton" }, +] +linting = [ + { name = "import-linter" }, + { name = "pre-commit" }, + { name = "ruff" }, +] +test = [ + { name = "coverage" }, + { name = "peft" }, + { name = "pytest" }, +] + +[package.metadata] +requires-dist = [ + { name = "backoff", marker = "extra == 'vlm'" }, + { name = "bitsandbytes", marker = "platform_machine == 'x86_64' and sys_platform != 'darwin'", specifier = "==0.45.5" }, + { name = "datasets", specifier = ">=4.0.0" }, + { name = "flash-attn", marker = "extra == 'fa'" }, + { name = "liger-kernel", marker = "platform_machine == 'x86_64' and sys_platform != 'darwin'", specifier = "==0.5.8" }, + { name = "mistral-common", extras = ["opencv"], marker = "extra == 'vlm'" }, + { name = "numba", marker = "extra == 'vlm'" }, + { name = "numpy", marker = "extra == 'vlm'" }, + { name = "pillow", marker = "extra == 'vlm'" }, + { name = "pyyaml" }, + { name = "qwen-vl-utils", extras = ["decord"], marker = "extra == 'vlm'" }, + { name = "timm", marker = "extra == 'vlm'", specifier = "==1.0.16" }, + { name = "torch", index = "https://download.pytorch.org/whl/cu128" }, + { name = "torchao" }, + { name = "torchcodec", marker = "extra == 'vlm'" }, + { name = "torchdata" }, + { name = "transformers" }, + { name = "transformers", marker = "extra == 'vlm'", specifier = ">=4.51.0,<4.54.0" }, + { name = "wandb" }, +] +provides-extras = ["vlm", "fa"] + +[package.metadata.requires-dev] +build = [ + { name = "setuptools" }, + { name = "torch", index = 
"https://download.pytorch.org/whl/cu128" }, +] +dev = [{ name = "cut-cross-entropy", git = "https://github.com/apple/ml-cross-entropy.git?rev=87a86ab" }] +docs = [ + { name = "myst-parser" }, + { name = "nvidia-sphinx-theme" }, + { name = "sphinx" }, + { name = "sphinx-autobuild" }, + { name = "sphinx-autodoc2" }, + { name = "sphinx-copybutton" }, +] +linting = [ + { name = "import-linter", specifier = "~=2.4" }, + { name = "pre-commit", specifier = ">=4.2.0" }, + { name = "ruff", specifier = "~=0.9.0" }, +] +test = [ + { name = "coverage" }, + { name = "peft" }, + { name = "pytest" }, ] [[package]] @@ -2522,36 +3017,50 @@ dependencies = [ { name = "matplotlib" }, { name = "mlflow" }, { name = "ninja" }, + { name = "num2words" }, { name = "numpy" }, { name = "nvidia-ml-py" }, { name = "nvtx" }, { name = "omegaconf" }, + { name = "pillow" }, { name = "plotly" }, { name = "ray", extra = ["default"] }, { name = "rich" }, { name = "setuptools" }, + { name = "sympy" }, { name = "tensorboard" }, { name = "tiktoken" }, { name = "torch" }, { name = "torchdata" }, + { name = "torchvision", version = "0.22.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "torchvision", version = "0.22.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, { name = "transformers" }, - { name = "triton", version = "3.3.0", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform != 'linux'" }, - { name = "triton", version = "3.3.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform == 'linux'" }, + { name = "triton" }, { name = "wandb" }, ] [package.optional-dependencies] automodel = [ + { name = "causal-conv1d" }, { name = "flash-attn" }, + { name = "mamba-ssm" }, + { name = "nemo-automodel" }, + { name = "vllm" }, ] mcore = [ { 
name = "flash-attn" }, { name = "megatron-core" }, { name = "nemo-tron" }, { name = "transformer-engine", extra = ["pytorch"] }, + { name = "vllm" }, ] vllm = [ + { name = "causal-conv1d" }, + { name = "cuda-python" }, + { name = "deep-gemm" }, { name = "flash-attn" }, + { name = "mamba-ssm" }, + { name = "num2words" }, { name = "vllm" }, ] @@ -2591,34 +3100,48 @@ test = [ requires-dist = [ { name = "accelerate", specifier = ">=0.26" }, { name = "blobfile" }, + { name = "causal-conv1d", marker = "extra == 'automodel'", git = "https://github.com/Dao-AILab/causal-conv1d?tag=v1.5.0.post8" }, + { name = "causal-conv1d", marker = "extra == 'vllm'", git = "https://github.com/Dao-AILab/causal-conv1d?tag=v1.5.0.post8" }, { name = "colored", specifier = "==2.2.3" }, + { name = "cuda-python", marker = "extra == 'vllm'" }, { name = "datasets", specifier = ">=4.0.0" }, { name = "debugpy" }, + { name = "deep-gemm", marker = "extra == 'vllm'", git = "https://github.com/deepseek-ai/DeepGEMM.git?rev=7b6b5563b9d4c1ae07ffbce7f78ad3ac9204827c" }, { name = "flash-attn", marker = "extra == 'automodel'", specifier = "==2.7.4.post1" }, { name = "flash-attn", marker = "extra == 'mcore'", specifier = "==2.7.4.post1" }, { name = "flash-attn", marker = "extra == 'vllm'", specifier = "==2.7.4.post1" }, { name = "hydra-core" }, + { name = "mamba-ssm", marker = "extra == 'automodel'", git = "https://github.com/state-spaces/mamba.git?rev=2e16fc3062cdcd4ebef27a9aa4442676e1c7edf4" }, + { name = "mamba-ssm", marker = "extra == 'vllm'", git = "https://github.com/state-spaces/mamba.git?rev=2e16fc3062cdcd4ebef27a9aa4442676e1c7edf4" }, { name = "math-verify" }, { name = "matplotlib" }, { name = "megatron-core", marker = "extra == 'mcore'", editable = "3rdparty/Megatron-LM-workspace" }, { name = "mlflow" }, + { name = "nemo-automodel", marker = "extra == 'automodel'", editable = "3rdparty/Automodel-workspace/Automodel" }, { name = "nemo-tron", marker = "extra == 'mcore'", editable = 
"3rdparty/NeMo-workspace" }, { name = "ninja" }, + { name = "num2words", specifier = ">=0.5.14" }, + { name = "num2words", marker = "extra == 'vllm'", specifier = ">=0.5.14" }, { name = "numpy" }, { name = "nvidia-ml-py" }, { name = "nvtx" }, { name = "omegaconf" }, + { name = "pillow", specifier = ">=11.3.0" }, { name = "plotly" }, { name = "ray", extras = ["default"], specifier = "==2.46.0" }, { name = "rich" }, { name = "setuptools" }, + { name = "sympy", specifier = ">=1.14.0" }, { name = "tensorboard" }, { name = "tiktoken" }, { name = "torch", specifier = "==2.7.1", index = "https://download.pytorch.org/whl/cu128" }, { name = "torchdata" }, + { name = "torchvision", specifier = ">=0.22.0", index = "https://download.pytorch.org/whl/cu128" }, { name = "transformer-engine", extras = ["pytorch"], marker = "extra == 'mcore'", specifier = "==2.3.0" }, { name = "transformers", specifier = ">=4.51.0,<4.54.0" }, { name = "triton", index = "https://download.pytorch.org/whl/cu128" }, + { name = "vllm", marker = "extra == 'automodel'", specifier = "==0.10.0" }, + { name = "vllm", marker = "extra == 'mcore'", specifier = "==0.10.0" }, { name = "vllm", marker = "extra == 'vllm'", specifier = "==0.10.0" }, { name = "wandb" }, ] @@ -2635,7 +3158,7 @@ build = [ { name = "torch", specifier = "==2.7.1", index = "https://download.pytorch.org/whl/cu128" }, ] dev = [ - { name = "pre-commit", specifier = "==3.6.0" }, + { name = "pre-commit", specifier = ">=4.2.0" }, { name = "pyrefly", specifier = "==0.24.2" }, { name = "ruff", specifier = "==0.9.9" }, { name = "types-pyyaml" }, @@ -2723,26 +3246,28 @@ wheels = [ [[package]] name = "ninja" -version = "1.11.1.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/95/d4/6b0324541018561c5e73e617bd16f20a4fc17d1179bb3b3520b6ca8beb7b/ninja-1.11.1.4.tar.gz", hash = "sha256:6aa39f6e894e0452e5b297327db00019383ae55d5d9c57c73b04f13bf79d438a", size = 201256, upload-time = 
"2025-03-22T06:46:43.46Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4f/b1/3a61b348936b62a386465b1937cd778fa3a5748582e26d832dbab844ff27/ninja-1.11.1.4-py3-none-macosx_10_9_universal2.whl", hash = "sha256:b33923c8da88e8da20b6053e38deb433f53656441614207e01d283ad02c5e8e7", size = 279071, upload-time = "2025-03-22T06:46:17.806Z" }, - { url = "https://files.pythonhosted.org/packages/12/42/4c94fdad51fcf1f039a156e97de9e4d564c2a8cc0303782d36f9bd893a4b/ninja-1.11.1.4-py3-none-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cede0af00b58e27b31f2482ba83292a8e9171cdb9acc2c867a3b6e40b3353e43", size = 472026, upload-time = "2025-03-22T06:46:19.974Z" }, - { url = "https://files.pythonhosted.org/packages/eb/7a/455d2877fe6cf99886849c7f9755d897df32eaf3a0fba47b56e615f880f7/ninja-1.11.1.4-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:096487995473320de7f65d622c3f1d16c3ad174797602218ca8c967f51ec38a0", size = 422814, upload-time = "2025-03-22T06:46:21.235Z" }, - { url = "https://files.pythonhosted.org/packages/e3/ad/fb6cca942528e25e8e0ab0f0cf98fe007319bf05cf69d726c564b815c4af/ninja-1.11.1.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3090d4488fadf6047d0d7a1db0c9643a8d391f0d94729554dbb89b5bdc769d7", size = 156965, upload-time = "2025-03-22T06:46:23.45Z" }, - { url = "https://files.pythonhosted.org/packages/a8/e7/d94a1b60031b115dd88526834b3da69eaacdc3c1a6769773ca8e2b1386b5/ninja-1.11.1.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecce44a00325a93631792974659cf253a815cc6da4ec96f89742925dfc295a0d", size = 179937, upload-time = "2025-03-22T06:46:24.728Z" }, - { url = "https://files.pythonhosted.org/packages/08/cc/e9316a28235409e9363794fc3d0b3083e48dd80d441006de66421e55f364/ninja-1.11.1.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c29bb66d2aa46a2409ab369ea804c730faec7652e8c22c1e428cc09216543e5", size = 157020, upload-time = 
"2025-03-22T06:46:26.046Z" }, - { url = "https://files.pythonhosted.org/packages/e3/30/389b22300541aa5f2e9dad322c4de2f84be4e32aa4e8babd9160d620b5f1/ninja-1.11.1.4-py3-none-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:055f386fb550c2c9d6157e45e20a84d29c47968876b9c5794ae2aec46f952306", size = 130389, upload-time = "2025-03-22T06:46:27.174Z" }, - { url = "https://files.pythonhosted.org/packages/a9/10/e27f35cb92813aabbb7ae771b1685b45be1cc8a0798ce7d4bfd08d142b93/ninja-1.11.1.4-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:f6186d7607bb090c3be1e10c8a56b690be238f953616626f5032238c66e56867", size = 372435, upload-time = "2025-03-22T06:46:28.637Z" }, - { url = "https://files.pythonhosted.org/packages/c2/26/e3559619756739aae124c6abf7fe41f7e546ab1209cfbffb13137bff2d2e/ninja-1.11.1.4-py3-none-musllinux_1_1_i686.whl", hash = "sha256:cf4453679d15babc04ba023d68d091bb613091b67101c88f85d2171c6621c6eb", size = 419300, upload-time = "2025-03-22T06:46:30.392Z" }, - { url = "https://files.pythonhosted.org/packages/35/46/809e4e9572570991b8e6f88f3583807d017371ab4cb09171cbc72a7eb3e4/ninja-1.11.1.4-py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:d4a6f159b08b0ac4aca5ee1572e3e402f969139e71d85d37c0e2872129098749", size = 420239, upload-time = "2025-03-22T06:46:32.442Z" }, - { url = "https://files.pythonhosted.org/packages/e6/64/5cb5710d15f844edf02ada577f8eddfdcd116f47eec15850f3371a3a4b33/ninja-1.11.1.4-py3-none-musllinux_1_1_s390x.whl", hash = "sha256:c3b96bd875f3ef1db782470e9e41d7508905a0986571f219d20ffed238befa15", size = 415986, upload-time = "2025-03-22T06:46:33.821Z" }, - { url = "https://files.pythonhosted.org/packages/95/b2/0e9ab1d926f423b12b09925f78afcc5e48b3c22e7121be3ddf6c35bf06a3/ninja-1.11.1.4-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:cf554e73f72c04deb04d0cf51f5fdb1903d9c9ca3d2344249c8ce3bd616ebc02", size = 379657, upload-time = "2025-03-22T06:46:36.166Z" }, - { url = 
"https://files.pythonhosted.org/packages/c8/3e/fd6d330d0434168e7fe070d414b57dd99c4c133faa69c05b42a3cbdc6c13/ninja-1.11.1.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:cfdd09776436a1ff3c4a2558d3fc50a689fb9d7f1bdbc3e6f7b8c2991341ddb3", size = 454466, upload-time = "2025-03-22T06:46:37.413Z" }, - { url = "https://files.pythonhosted.org/packages/e6/df/a25f3ad0b1c59d1b90564096e4fd89a6ca30d562b1e942f23880c3000b89/ninja-1.11.1.4-py3-none-win32.whl", hash = "sha256:2ab67a41c90bea5ec4b795bab084bc0b3b3bb69d3cd21ca0294fc0fc15a111eb", size = 255931, upload-time = "2025-03-22T06:46:39.171Z" }, - { url = "https://files.pythonhosted.org/packages/5b/10/9b8fe9ac004847490cc7b54896124c01ce2d87d95dc60aabd0b8591addff/ninja-1.11.1.4-py3-none-win_amd64.whl", hash = "sha256:4617b3c12ff64b611a7d93fd9e378275512bb36eff8babff7c83f5116b4f8d66", size = 296461, upload-time = "2025-03-22T06:46:40.532Z" }, - { url = "https://files.pythonhosted.org/packages/b9/58/612a17593c2d117f96c7f6b7f1e6570246bddc4b1e808519403a1417f217/ninja-1.11.1.4-py3-none-win_arm64.whl", hash = "sha256:5713cf50c5be50084a8693308a63ecf9e55c3132a78a41ab1363a28b6caaaee1", size = 271441, upload-time = "2025-03-22T06:46:42.147Z" }, +version = "1.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/73/79a0b22fc731989c708068427579e840a6cf4e937fe7ae5c5d0b7356ac22/ninja-1.13.0.tar.gz", hash = "sha256:4a40ce995ded54d9dc24f8ea37ff3bf62ad192b547f6c7126e7e25045e76f978", size = 242558, upload-time = "2025-08-11T15:10:19.421Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/74/d02409ed2aa865e051b7edda22ad416a39d81a84980f544f8de717cab133/ninja-1.13.0-py3-none-macosx_10_9_universal2.whl", hash = "sha256:fa2a8bfc62e31b08f83127d1613d10821775a0eb334197154c4d6067b7068ff1", size = 310125, upload-time = "2025-08-11T15:09:50.971Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/de/6e1cd6b84b412ac1ef327b76f0641aeb5dcc01e9d3f9eee0286d0c34fd93/ninja-1.13.0-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3d00c692fb717fd511abeb44b8c5d00340c36938c12d6538ba989fe764e79630", size = 177467, upload-time = "2025-08-11T15:09:52.767Z" }, + { url = "https://files.pythonhosted.org/packages/c8/83/49320fb6e58ae3c079381e333575fdbcf1cca3506ee160a2dcce775046fa/ninja-1.13.0-py3-none-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:be7f478ff9f96a128b599a964fc60a6a87b9fa332ee1bd44fa243ac88d50291c", size = 187834, upload-time = "2025-08-11T15:09:54.115Z" }, + { url = "https://files.pythonhosted.org/packages/56/c7/ba22748fb59f7f896b609cd3e568d28a0a367a6d953c24c461fe04fc4433/ninja-1.13.0-py3-none-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:60056592cf495e9a6a4bea3cd178903056ecb0943e4de45a2ea825edb6dc8d3e", size = 202736, upload-time = "2025-08-11T15:09:55.745Z" }, + { url = "https://files.pythonhosted.org/packages/79/22/d1de07632b78ac8e6b785f41fa9aad7a978ec8c0a1bf15772def36d77aac/ninja-1.13.0-py3-none-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:1c97223cdda0417f414bf864cfb73b72d8777e57ebb279c5f6de368de0062988", size = 179034, upload-time = "2025-08-11T15:09:57.394Z" }, + { url = "https://files.pythonhosted.org/packages/ed/de/0e6edf44d6a04dabd0318a519125ed0415ce437ad5a1ec9b9be03d9048cf/ninja-1.13.0-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fb46acf6b93b8dd0322adc3a4945452a4e774b75b91293bafcc7b7f8e6517dfa", size = 180716, upload-time = "2025-08-11T15:09:58.696Z" }, + { url = "https://files.pythonhosted.org/packages/54/28/938b562f9057aaa4d6bfbeaa05e81899a47aebb3ba6751e36c027a7f5ff7/ninja-1.13.0-py3-none-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4be9c1b082d244b1ad7ef41eb8ab088aae8c109a9f3f0b3e56a252d3e00f42c1", size = 146843, upload-time = "2025-08-11T15:10:00.046Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/fb/d06a3838de4f8ab866e44ee52a797b5491df823901c54943b2adb0389fbb/ninja-1.13.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:6739d3352073341ad284246f81339a384eec091d9851a886dfa5b00a6d48b3e2", size = 154402, upload-time = "2025-08-11T15:10:01.657Z" }, + { url = "https://files.pythonhosted.org/packages/31/bf/0d7808af695ceddc763cf251b84a9892cd7f51622dc8b4c89d5012779f06/ninja-1.13.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:11be2d22027bde06f14c343f01d31446747dbb51e72d00decca2eb99be911e2f", size = 552388, upload-time = "2025-08-11T15:10:03.349Z" }, + { url = "https://files.pythonhosted.org/packages/9d/70/c99d0c2c809f992752453cce312848abb3b1607e56d4cd1b6cded317351a/ninja-1.13.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:aa45b4037b313c2f698bc13306239b8b93b4680eb47e287773156ac9e9304714", size = 472501, upload-time = "2025-08-11T15:10:04.735Z" }, + { url = "https://files.pythonhosted.org/packages/9f/43/c217b1153f0e499652f5e0766da8523ce3480f0a951039c7af115e224d55/ninja-1.13.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5f8e1e8a1a30835eeb51db05cf5a67151ad37542f5a4af2a438e9490915e5b72", size = 638280, upload-time = "2025-08-11T15:10:06.512Z" }, + { url = "https://files.pythonhosted.org/packages/8c/45/9151bba2c8d0ae2b6260f71696330590de5850e5574b7b5694dce6023e20/ninja-1.13.0-py3-none-musllinux_1_2_ppc64le.whl", hash = "sha256:3d7d7779d12cb20c6d054c61b702139fd23a7a964ec8f2c823f1ab1b084150db", size = 642420, upload-time = "2025-08-11T15:10:08.35Z" }, + { url = "https://files.pythonhosted.org/packages/3c/fb/95752eb635bb8ad27d101d71bef15bc63049de23f299e312878fc21cb2da/ninja-1.13.0-py3-none-musllinux_1_2_riscv64.whl", hash = "sha256:d741a5e6754e0bda767e3274a0f0deeef4807f1fec6c0d7921a0244018926ae5", size = 585106, upload-time = "2025-08-11T15:10:09.818Z" }, + { url = 
"https://files.pythonhosted.org/packages/c1/31/aa56a1a286703800c0cbe39fb4e82811c277772dc8cd084f442dd8e2938a/ninja-1.13.0-py3-none-musllinux_1_2_s390x.whl", hash = "sha256:e8bad11f8a00b64137e9b315b137d8bb6cbf3086fbdc43bf1f90fd33324d2e96", size = 707138, upload-time = "2025-08-11T15:10:11.366Z" }, + { url = "https://files.pythonhosted.org/packages/34/6f/5f5a54a1041af945130abdb2b8529cbef0cdcbbf9bcf3f4195378319d29a/ninja-1.13.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b4f2a072db3c0f944c32793e91532d8948d20d9ab83da9c0c7c15b5768072200", size = 581758, upload-time = "2025-08-11T15:10:13.295Z" }, + { url = "https://files.pythonhosted.org/packages/95/97/51359c77527d45943fe7a94d00a3843b81162e6c4244b3579fe8fc54cb9c/ninja-1.13.0-py3-none-win32.whl", hash = "sha256:8cfbb80b4a53456ae8a39f90ae3d7a2129f45ea164f43fadfa15dc38c4aef1c9", size = 267201, upload-time = "2025-08-11T15:10:15.158Z" }, + { url = "https://files.pythonhosted.org/packages/29/45/c0adfbfb0b5895aa18cec400c535b4f7ff3e52536e0403602fc1a23f7de9/ninja-1.13.0-py3-none-win_amd64.whl", hash = "sha256:fb8ee8719f8af47fed145cced4a85f0755dd55d45b2bddaf7431fa89803c5f3e", size = 309975, upload-time = "2025-08-11T15:10:16.697Z" }, + { url = "https://files.pythonhosted.org/packages/df/93/a7b983643d1253bb223234b5b226e69de6cda02b76cdca7770f684b795f5/ninja-1.13.0-py3-none-win_arm64.whl", hash = "sha256:3c0b40b1f0bba764644385319028650087b4c1b18cdfa6f45cb39a3669b81aa9", size = 290806, upload-time = "2025-08-11T15:10:18.018Z" }, ] [[package]] @@ -2769,6 +3294,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, ] +[[package]] +name = "num2words" +version = "0.5.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docopt" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/f6/58/ad645bd38b4b648eb2fc2ba1b909398e54eb0cbb6a7dbd2b4953e38c9621/num2words-0.5.14.tar.gz", hash = "sha256:b066ec18e56b6616a3b38086b5747daafbaa8868b226a36127e0451c0cf379c6", size = 218213, upload-time = "2024-12-17T20:17:10.191Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/5b/545e9267a1cc080c8a1be2746113a063e34bcdd0f5173fd665a5c13cb234/num2words-0.5.14-py3-none-any.whl", hash = "sha256:1c8e5b00142fc2966fd8d685001e36c4a9911e070d1b120e1beb721fa1edb33d", size = 163525, upload-time = "2024-12-17T20:17:06.074Z" }, +] + [[package]] name = "numba" version = "0.61.2" @@ -2793,24 +3330,24 @@ wheels = [ [[package]] name = "numcodecs" -version = "0.16.1" +version = "0.16.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/00/35/49da850ce5371da3930d099da364a73ce9ae4fc64075e521674b48f4804d/numcodecs-0.16.1.tar.gz", hash = "sha256:c47f20d656454568c6b4697ce02081e6bbb512f198738c6a56fafe8029c97fb1", size = 6268134, upload-time = "2025-05-22T13:33:04.098Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/1d/837d946aab385abe1e472ec08a6816d84b00f4ceeae5445eb8f25c5c6ca9/numcodecs-0.16.2.tar.gz", hash = "sha256:9922dae0c3b01b5bed3b4bae239f4787e891daa3262c27971298669d029d10e9", size = 6271668, upload-time = "2025-08-13T16:09:26.125Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/ee/e2a903c88fed347dc74c70bbd7a8dab9aa22bb0dac68c5bc6393c2e9373b/numcodecs-0.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1abe0651ecb6f207656ebfc802effa55c4ae3136cf172c295a067749a2699122", size = 1663434, upload-time = "2025-05-22T13:32:47.26Z" }, - { url = "https://files.pythonhosted.org/packages/f2/f0/37819d4f6896b1ac43a164ffd3ab99d7cbf63bf63cb375fef97aedaef4f0/numcodecs-0.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:abb39b7102d0816c8563669cdddca40392d34d0cbf31e3e996706b244586a458", size = 1150402, upload-time = "2025-05-22T13:32:48.574Z" }, - { url = "https://files.pythonhosted.org/packages/60/3c/5059a29750305b80b7428b1e6695878dea9ea3b537d7fba57875e4bbc2c7/numcodecs-0.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3359a951f8b23317f12736a7ad1e7375ec3d735465f92049c76d032ebca4c40", size = 8237455, upload-time = "2025-05-22T13:32:50.052Z" }, - { url = "https://files.pythonhosted.org/packages/1b/f5/515f98d659ab0cbe3738da153eddae22186fd38f05a808511e10f04cf679/numcodecs-0.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82cc70592ec18060786b1bfa0da23afd2a7807d7975d766e626954d6628ec609", size = 8770711, upload-time = "2025-05-22T13:32:52.198Z" }, - { url = "https://files.pythonhosted.org/packages/a2/3a/9fc6104f888af11bad804ebd32dffe0bcb83337f4525b4fe5b379942fefd/numcodecs-0.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:4b48ddc8a7d132b7808bc53eb2705342de5c1e39289d725f988bd143c0fd86df", size = 788701, upload-time = "2025-05-22T13:32:54.28Z" }, - { url = "https://files.pythonhosted.org/packages/5e/1e/73ffb1074f03d52cb1c4f4deaba26a2008ca45262f3622ed26dbec7a7362/numcodecs-0.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ad8ee940315f59188accfc3f2d39726a4ca0d76b49bf8d0018e121f01c49028", size = 1659453, upload-time = "2025-05-22T13:32:55.558Z" }, - { url = "https://files.pythonhosted.org/packages/42/72/5affb1ce92b7a6becee17921de7c6b521a48fa61fc3d36d9f1eea2cf83f5/numcodecs-0.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:179ca7bf3525a0f7379df7767d87dd495253de44597cb7e511198b28b09da633", size = 1143932, upload-time = "2025-05-22T13:32:56.908Z" }, - { url = "https://files.pythonhosted.org/packages/e3/f1/b092679d84c67c6ed62e4df5781d89bbb089f24a0df4187cbab9db51cf6b/numcodecs-0.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6e2babbb50bf348ae982818d5560af330eab0dcd925fb0e49509785ad57d11db", size = 8187716, upload-time = "2025-05-22T13:32:58.421Z" }, - { url = "https://files.pythonhosted.org/packages/a8/e8/86e7741adb43261aff409b53c53c8bac2797bfca055d64dd65dc731d5141/numcodecs-0.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4b29d8d3284b72bfad4fb83d672a17f497ae86ee1ef8087bac7222b620d3d91", size = 8728650, upload-time = "2025-05-22T13:33:00.337Z" }, - { url = "https://files.pythonhosted.org/packages/21/03/87c5c217232aa3515d350728c6dcefca252fa582246100ef68a51fbda456/numcodecs-0.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:06489635f43e1a959aea73cb830d78cf3adb07ac5f34daccb92091e4d9ac6b07", size = 785553, upload-time = "2025-05-22T13:33:02.587Z" }, + { url = "https://files.pythonhosted.org/packages/03/c3/5470273d6d5c986521140ccec6476664ea4e03c0cfc51b370fb03368bb41/numcodecs-0.16.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30f04c2b7bb802133866e7fb554d47943864f977dfe8a95c814eb801c797df3c", size = 1668488, upload-time = "2025-08-13T16:09:08.942Z" }, + { url = "https://files.pythonhosted.org/packages/db/bf/cc1aaea87371097d6b5236ec44f8eb96387b52204b4e671fac716e5de325/numcodecs-0.16.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ee0b2776cf47b7702ba0ccc0b6afaad28afbf8d5bb7b9a5274b5f08ecc651b2", size = 1155083, upload-time = "2025-08-13T16:09:10.641Z" }, + { url = "https://files.pythonhosted.org/packages/6b/c1/ba5ab0cf4c4d737635d20d8b72a61c26f8f99c0529606dfbfa3e5d3a4221/numcodecs-0.16.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c8e94f0e90aaf4d01b2e26476d3b081c2cf8c17036af45e04e735de9c0cf64f", size = 8260568, upload-time = "2025-08-13T16:09:12.063Z" }, + { url = "https://files.pythonhosted.org/packages/3b/64/7177bf632520705893683fa4ca202ed540450bf971c0453ad1351baa2007/numcodecs-0.16.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7b565b16366749011e290343617571db861b2b2e58b038697afde6d02f537c91", size = 8792262, upload-time = "2025-08-13T16:09:14.058Z" }, + { url = "https://files.pythonhosted.org/packages/10/90/df01799f4c1bb8618b842582d10d362829e980c4d5eb9701c1aeadf5c4e3/numcodecs-0.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:e4cfdde4e99bf47580f4eb3a876630c73ba14e4a1380fec5959ac727e22ce0d2", size = 803444, upload-time = "2025-08-13T16:09:16.09Z" }, + { url = "https://files.pythonhosted.org/packages/b6/e3/f61c422259a4b6c8c2496d284f85ed17f8686b3a53feb797d3bd66ef499c/numcodecs-0.16.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f965ccb2f6d215ffd2e3239ec02e33139d7ce311ff49704d340704b81dda653", size = 1664476, upload-time = "2025-08-13T16:09:17.327Z" }, + { url = "https://files.pythonhosted.org/packages/b9/a7/fa4d66b86e277643d135af263efc0dd1f98cf1228d3b4554b843c0c1a09b/numcodecs-0.16.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b4bc110b73d59de7f46310c680f075f9007ead915174c895368274c646c9ea74", size = 1148718, upload-time = "2025-08-13T16:09:19.075Z" }, + { url = "https://files.pythonhosted.org/packages/17/43/9656a6b0ed7250ca3a5c126a6077a29398c3dca9176224dba4634847a4a4/numcodecs-0.16.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51615cf2811343a8a3fa42953cb4120ac7187875a161260444f53ada5710063e", size = 8205330, upload-time = "2025-08-13T16:09:20.904Z" }, + { url = "https://files.pythonhosted.org/packages/55/7f/0ab8db32ef9b51c60f7b759c2b155e1edcb08febb508c22a9d04b19ec735/numcodecs-0.16.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a18ced1ecbdd0e9ee716820dbb3a094c896eed8005273bbcab9980bdac270ae", size = 8750769, upload-time = "2025-08-13T16:09:22.516Z" }, + { url = "https://files.pythonhosted.org/packages/f0/0c/25f96c7969bdbfcc1427dc82eba92f2ef4df84c63369c95ab99af6404c23/numcodecs-0.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:f640ed8406e1eb5806787a3e5be223d455b75c99eb2088a290947ed6dbd77e8e", size = 
800281, upload-time = "2025-08-13T16:09:24.691Z" }, ] [package.optional-dependencies] @@ -2893,7 +3430,7 @@ name = "nvidia-cudnn-cu12" version = "9.7.1.26" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/25/dc/dc825c4b1c83b538e207e34f48f86063c88deaa35d46c651c7c181364ba2/nvidia_cudnn_cu12-9.7.1.26-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:6d011159a158f3cfc47bf851aea79e31bcff60d530b70ef70474c84cac484d07", size = 726851421, upload-time = "2025-02-06T22:18:29.812Z" }, @@ -2904,7 +3441,7 @@ name = "nvidia-cufft-cu12" version = "11.3.3.41" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-nvjitlink-cu12", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/ac/26/b53c493c38dccb1f1a42e1a21dc12cba2a77fbe36c652f7726d9ec4aba28/nvidia_cufft_cu12-11.3.3.41-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:da650080ab79fcdf7a4b06aa1b460e99860646b176a43f6208099bdc17836b6a", size = 193118795, upload-time = "2025-01-23T17:56:30.536Z" }, @@ -2931,9 +3468,9 @@ name = "nvidia-cusolver-cu12" version = "11.7.2.55" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cublas-cu12", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and 
sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, - { name = "nvidia-cusparse-cu12", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, - { name = "nvidia-nvjitlink-cu12", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/c2/08/953675873a136d96bb12f93b49ba045d1107bc94d2551c52b12fa6c7dec3/nvidia_cusolver_cu12-11.7.2.55-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4d1354102f1e922cee9db51920dba9e2559877cf6ff5ad03a00d853adafb191b", size = 260373342, upload-time = "2025-01-23T17:58:56.406Z" }, @@ -2944,7 +3481,7 @@ name = "nvidia-cusparse-cu12" version = "12.5.7.53" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-nvjitlink-cu12", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/c2/ab/31e8149c66213b846c082a3b41b1365b831f41191f9f40c6ddbc8a7d550e/nvidia_cusparse_cu12-12.5.7.53-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:3c1b61eb8c85257ea07e9354606b26397612627fdcd327bfd91ccf6155e7c86d", size = 292064180, upload-time = "2025-01-23T18:00:23.233Z" }, @@ -2960,50 +3497,47 @@ wheels = [ [[package]] name = "nvidia-ml-py" -version = "12.575.51" +version = "13.580.65" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d2/4d/6f017814ed5ac28e08e1b8a62e3a258957da27582c89b7f8f8b15ac3d2e7/nvidia_ml_py-12.575.51.tar.gz", hash = "sha256:6490e93fea99eb4e966327ae18c6eec6256194c921f23459c8767aee28c54581", size = 46597, upload-time = "2025-05-06T20:46:37.962Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8f/76/ff4a297c82b463ef17e7d0100d1bee5dbe6d1416721a9170e51ffcb8ecf3/nvidia_ml_py-13.580.65.tar.gz", hash = "sha256:7bf18b03c7d3658727011cf5f0c6c2155b36ce439e65359a0a4a906214f6a3c9", size = 47864, upload-time = "2025-08-05T16:11:49.71Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/db/24/552ebea28f0570b9e65e62b50287a273804c9f997cc1c2dcd4e2d64b9e7d/nvidia_ml_py-12.575.51-py3-none-any.whl", hash = "sha256:eb8641800d98ce40a22f479873f34b482e214a7e80349c63be51c3919845446e", size = 47547, upload-time = "2025-05-06T20:46:36.457Z" }, + { url = "https://files.pythonhosted.org/packages/f9/96/88a5cb161c61cab2ee65b5aa61e612901fbcb1660024f0ccb26fcb02a17c/nvidia_ml_py-13.580.65-py3-none-any.whl", hash = "sha256:f0c65306ed999d2d4ff793918bfd17d1e30895d1c4606413ef95a0ea42460792", size = 48866, upload-time = "2025-08-05T16:11:48.387Z" }, ] [[package]] name = "nvidia-modelopt" -version = "0.31.0" +version = "0.33.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "ninja", marker = "sys_platform != 'darwin'" }, - { name = "numpy", marker = "sys_platform != 'darwin'" }, - { name = "nvidia-modelopt-core", marker = "sys_platform != 'darwin'" }, - { name = "packaging", marker = "sys_platform != 'darwin'" }, - { name = "pydantic", marker = "sys_platform != 'darwin'" }, - { name = "rich", marker = 
"sys_platform != 'darwin'" }, - { name = "scipy", marker = "sys_platform != 'darwin'" }, - { name = "tqdm", marker = "sys_platform != 'darwin'" }, + { name = "ninja" }, + { name = "numpy" }, + { name = "nvidia-ml-py" }, + { name = "nvidia-modelopt-core" }, + { name = "packaging" }, + { name = "pulp" }, + { name = "pydantic" }, + { name = "regex" }, + { name = "rich" }, + { name = "safetensors" }, + { name = "scipy" }, + { name = "torch" }, + { name = "torchprofile" }, + { name = "torchvision", version = "0.22.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "torchvision", version = "0.22.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, + { name = "tqdm" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/94/d7/9201b1618ccf6babea08b07fb3f3266f319f1993afc5f1812f4bf9603080/nvidia_modelopt-0.31.0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:77495c50700ef9ed1782f4999e17265751a0f4002a7f1185dee7bb46d5d05039", size = 717204, upload-time = "2025-06-05T19:35:29.397Z" }, - { url = "https://files.pythonhosted.org/packages/6f/6a/db5b41905cccc3f1d36b12cbb3f84dc40d0c352935d88f164047c6059f4d/nvidia_modelopt-0.31.0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:8b1905122b0615aeff78f65aa39920c3971d6ebd1966b7ac57ee8da271d49913", size = 717202, upload-time = "2025-06-05T19:35:53.512Z" }, -] - -[package.optional-dependencies] -torch = [ - { name = "nvidia-ml-py", marker = "sys_platform != 'darwin'" }, - { name = "pulp", marker = "sys_platform != 'darwin'" }, - { name = "regex", marker = "sys_platform != 'darwin'" }, - { name = "safetensors", marker = "sys_platform != 'darwin'" }, - { name = "torch", marker = "sys_platform != 'darwin'" }, - { name = "torchprofile", marker = "sys_platform != 'darwin'" }, - { name = "torchvision", marker = "sys_platform != 
'darwin'" }, + { url = "https://files.pythonhosted.org/packages/ca/cb/4af39357792a96f334c7877ea0380c9337aec210ff4794a7dd95beb7c349/nvidia_modelopt-0.33.1-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:6c51091683a117cd40fdb96a0ec28579f2276f6b627db7ccddc370df544e1dd7", size = 751683, upload-time = "2025-08-12T18:37:48.832Z" }, + { url = "https://files.pythonhosted.org/packages/0a/b1/fc2f468d140ef58e90fac584759d0cc449db9bc4f64668cdff750ef38fef/nvidia_modelopt-0.33.1-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:ef78a98901890f265596ec413dffac177d4a1865201d89a14f29f4fa0cf8e710", size = 751683, upload-time = "2025-08-12T18:36:59.964Z" }, ] [[package]] name = "nvidia-modelopt-core" -version = "0.31.0" +version = "0.33.1" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7d/9f/4114d67eeb2cb3abd8b955ebb73c654d73994f16b7bec0d12884764f5807/nvidia_modelopt_core-0.31.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3a03f3b081322bdad71982de37898e5407c6a85c65d02a26470b735d8a454e74", size = 1335144, upload-time = "2025-06-05T19:39:28.776Z" }, - { url = "https://files.pythonhosted.org/packages/60/0b/81540db9bac816fa814baec0a7df976101d756e154764494dad8850035cb/nvidia_modelopt_core-0.31.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4739f00f1797699fe4b9c256a5b75114b66e22749250dc87128491a8bdb2ce5a", size = 1359154, upload-time = "2025-06-05T19:41:04.928Z" }, + { url = "https://files.pythonhosted.org/packages/9b/b5/ba79b1c52b634b24e45dca409f133f947217a5c7ec5c256266e4ec5fa3eb/nvidia_modelopt_core-0.33.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:1ddd9279d8312f8e972b302692a26e6180f1c9fd277232f5925a5589f42b1b76", size = 1338081, upload-time = "2025-08-12T18:40:36.156Z" }, + { url = "https://files.pythonhosted.org/packages/13/40/4427583475dfd8eb1b8c7522d75d4d059f0512ff03dcc62d6986a22ab918/nvidia_modelopt_core-0.33.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:69d5ace564f2b056c916117be2023f2b7fc01cd1501073915e6b2ced2b8a5394", size = 1363366, upload-time = "2025-08-12T18:39:28.854Z" }, ] [[package]] @@ -3044,16 +3578,16 @@ wheels = [ [[package]] name = "nvtx" -version = "0.2.12" +version = "0.2.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/56/05/160dc24b6cd1e21e5b00d55a46abac5802ed7c15c675e6ce25febad2b0d7/nvtx-0.2.12.tar.gz", hash = "sha256:b871fae9b80b004e624b5755291799794287016fa6a0c8fd0fb3255393ae3bc8", size = 110848, upload-time = "2025-05-26T10:32:33.824Z" } +sdist = { url = "https://files.pythonhosted.org/packages/97/02/b3fd3da4ba51764cfc0e4d2b22d5a61511fa79d825344d4704f8429c0bd6/nvtx-0.2.13.tar.gz", hash = "sha256:9db7ba135168e14e1f038866100bf8ed42d3e00b404e9bc7b6280ee3af828b92", size = 112104, upload-time = "2025-08-05T03:27:16.383Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/08/e4/944e63039a0d652c843ecffb42700e2b4f596b745ac9ac6ebed937f1bce5/nvtx-0.2.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea22a86eca22fd52e3c2905654182da1fcebea6f0107e87d7dc4ec6871604ca", size = 539647, upload-time = "2025-05-25T08:52:12.911Z" }, - { url = "https://files.pythonhosted.org/packages/cf/7b/6e25716c92039a3ecc2f6f4e1380b5492b0d23af78ea862cb84e8ffe0d7b/nvtx-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a279d880c27ec8c72632a0685456c170e7b12da2839c861ee461c121692aea6", size = 543614, upload-time = "2025-05-25T08:44:44.678Z" }, - { url = "https://files.pythonhosted.org/packages/7d/96/eb1078d7509b72e3e4b6dd7ff12a698951e81dcc5f20a3ad7f35d7455700/nvtx-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:2f93e07add2544a85c202b3c710945b54b3abb6660a6a7e447395cb024938b35", size = 98894, upload-time = "2025-05-25T08:42:59.068Z" }, - { url = 
"https://files.pythonhosted.org/packages/55/78/88563935649f9202735ac5686fc451d3fa9f34e6592787ba224244c3570a/nvtx-0.2.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:871e54f95929a6c7c39b85d4111bf6af8ab43325bbc36c97a179270443896ef7", size = 520074, upload-time = "2025-05-25T08:52:50.144Z" }, - { url = "https://files.pythonhosted.org/packages/86/0c/62b1f76c84a8bed267421d11114953b5da631daeb0ec7894a91252f79b5d/nvtx-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b82ad84ac8d5408851947d1d2cef3e8e627627cc2290e5150c8af0dda1e3f63", size = 524516, upload-time = "2025-05-25T08:45:06.598Z" }, - { url = "https://files.pythonhosted.org/packages/eb/41/e74ec826e1585ad6d31f41de96f6faae8ffc712a45c2b880baca4ae87a64/nvtx-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:a37e063c3c745a4c6b561993a2dae2f67fcc26f2a2c2653f24eeae5810a2180d", size = 97070, upload-time = "2025-05-25T08:43:41.323Z" }, + { url = "https://files.pythonhosted.org/packages/c5/73/ad21e09dc2534f1e9723bbe5871fa5f03361ac51ca4d411fea6f765b5b6a/nvtx-0.2.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3435cbbffa132f6aaba3abdb01e71a1b961a20858b4cb791883895a25b9305d6", size = 539358, upload-time = "2025-08-04T19:33:16.494Z" }, + { url = "https://files.pythonhosted.org/packages/12/ab/762da984e7671f7c34ae87e5b70523c3eeb4563759268bfaea07c97f32a6/nvtx-0.2.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453d838dd1424a04303281ee57a73e2b8dca0e03039bc609a945861b8fe7d7d9", size = 545588, upload-time = "2025-08-04T19:37:40.64Z" }, + { url = "https://files.pythonhosted.org/packages/2a/b6/55bc5916386db70b93cbf543b1e880ead786d9ff0cdcfa262f5a2af46c74/nvtx-0.2.13-cp312-cp312-win_amd64.whl", hash = "sha256:0722d743e0e41e1fb866ebe6446e0cd0d268ca8671313f8da4f8c969956b74d3", size = 99123, upload-time = "2025-08-04T19:24:24.391Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/73/98c0669d5f9387a36d56b0e62ea3919124dd8dd7582d896ed1cae2998f57/nvtx-0.2.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1561d2111c698b1b1075899ff9c3fa7ba83603fc27c2e8ef567de6bbbe85ce1", size = 519840, upload-time = "2025-08-04T19:34:00.877Z" }, + { url = "https://files.pythonhosted.org/packages/14/4b/21e975997def8a387543ba2bbe227551ad466781c39fc67f37f53555f37e/nvtx-0.2.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:edd7b729ed0211350258a21dd13422f59bc521de2b2fd21feb6c177af492f4e1", size = 524711, upload-time = "2025-08-04T19:38:03.559Z" }, + { url = "https://files.pythonhosted.org/packages/21/d7/0ca146afd875f1e02636323840960071f768b5d8ba3e7d37f2ac9192bfd9/nvtx-0.2.13-cp313-cp313-win_amd64.whl", hash = "sha256:f0524bb71443d5a1f19a6409a9a81405fc437e53c5edfc4c44b6f4504ccf46e3", size = 97317, upload-time = "2025-08-04T19:24:46.391Z" }, ] [[package]] @@ -3140,59 +3674,59 @@ wheels = [ [[package]] name = "opencv-python-headless" -version = "4.11.0.86" +version = "4.12.0.88" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/36/2f/5b2b3ba52c864848885ba988f24b7f105052f68da9ab0e693cc7c25b0b30/opencv-python-headless-4.11.0.86.tar.gz", hash = "sha256:996eb282ca4b43ec6a3972414de0e2331f5d9cda2b41091a49739c19fb843798", size = 95177929, upload-time = "2025-01-16T13:53:40.22Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/63/6861102ec149c3cd298f4d1ea7ce9d6adbc7529221606ff1dab991a19adb/opencv-python-headless-4.12.0.88.tar.gz", hash = "sha256:cfdc017ddf2e59b6c2f53bc12d74b6b0be7ded4ec59083ea70763921af2b6c09", size = 95379675, upload-time = "2025-07-07T09:21:06.815Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/dc/53/2c50afa0b1e05ecdb4603818e85f7d174e683d874ef63a6abe3ac92220c8/opencv_python_headless-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash 
= "sha256:48128188ade4a7e517237c8e1e11a9cdf5c282761473383e77beb875bb1e61ca", size = 37326460, upload-time = "2025-01-16T13:52:57.015Z" }, - { url = "https://files.pythonhosted.org/packages/3b/43/68555327df94bb9b59a1fd645f63fafb0762515344d2046698762fc19d58/opencv_python_headless-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:a66c1b286a9de872c343ee7c3553b084244299714ebb50fbdcd76f07ebbe6c81", size = 56723330, upload-time = "2025-01-16T13:55:45.731Z" }, - { url = "https://files.pythonhosted.org/packages/45/be/1438ce43ebe65317344a87e4b150865c5585f4c0db880a34cdae5ac46881/opencv_python_headless-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6efabcaa9df731f29e5ea9051776715b1bdd1845d7c9530065c7951d2a2899eb", size = 29487060, upload-time = "2025-01-16T13:51:59.625Z" }, - { url = "https://files.pythonhosted.org/packages/dd/5c/c139a7876099916879609372bfa513b7f1257f7f1a908b0bdc1c2328241b/opencv_python_headless-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e0a27c19dd1f40ddff94976cfe43066fbbe9dfbb2ec1907d66c19caef42a57b", size = 49969856, upload-time = "2025-01-16T13:53:29.654Z" }, - { url = "https://files.pythonhosted.org/packages/95/dd/ed1191c9dc91abcc9f752b499b7928aacabf10567bb2c2535944d848af18/opencv_python_headless-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:f447d8acbb0b6f2808da71fddd29c1cdd448d2bc98f72d9bb78a7a898fc9621b", size = 29324425, upload-time = "2025-01-16T13:52:49.048Z" }, - { url = "https://files.pythonhosted.org/packages/86/8a/69176a64335aed183529207ba8bc3d329c2999d852b4f3818027203f50e6/opencv_python_headless-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:6c304df9caa7a6a5710b91709dd4786bf20a74d57672b3c31f7033cc638174ca", size = 39402386, upload-time = "2025-01-16T13:52:56.418Z" }, + { url = "https://files.pythonhosted.org/packages/f7/7d/414e243c5c8216a5277afd104a319cc1291c5e23f5eeef512db5629ee7f4/opencv_python_headless-4.12.0.88-cp37-abi3-macosx_13_0_arm64.whl", 
hash = "sha256:1e58d664809b3350c1123484dd441e1667cd7bed3086db1b9ea1b6f6cb20b50e", size = 37877864, upload-time = "2025-07-07T09:14:41.693Z" }, + { url = "https://files.pythonhosted.org/packages/05/14/7e162714beed1cd5e7b5eb66fcbcba2f065c51b1d9da2463024c84d2f7c0/opencv_python_headless-4.12.0.88-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:365bb2e486b50feffc2d07a405b953a8f3e8eaa63865bc650034e5c71e7a5154", size = 57326608, upload-time = "2025-07-07T09:14:51.885Z" }, + { url = "https://files.pythonhosted.org/packages/69/4e/116720df7f1f7f3b59abc608ca30fbec9d2b3ae810afe4e4d26483d9dfa0/opencv_python_headless-4.12.0.88-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:aeb4b13ecb8b4a0beb2668ea07928160ea7c2cd2d9b5ef571bbee6bafe9cc8d0", size = 33145800, upload-time = "2025-07-07T09:15:00.367Z" }, + { url = "https://files.pythonhosted.org/packages/89/53/e19c21e0c4eb1275c3e2c97b081103b6dfb3938172264d283a519bf728b9/opencv_python_headless-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:236c8df54a90f4d02076e6f9c1cc763d794542e886c576a6fee46ec8ff75a7a9", size = 54023419, upload-time = "2025-07-07T09:15:10.164Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9c/a76fd5414de6ec9f21f763a600058a0c3e290053cea87e0275692b1375c0/opencv_python_headless-4.12.0.88-cp37-abi3-win32.whl", hash = "sha256:fde2cf5c51e4def5f2132d78e0c08f9c14783cd67356922182c6845b9af87dbd", size = 30225230, upload-time = "2025-07-07T09:15:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/f2/35/0858e9e71b36948eafbc5e835874b63e515179dc3b742cbe3d76bc683439/opencv_python_headless-4.12.0.88-cp37-abi3-win_amd64.whl", hash = "sha256:86b413bdd6c6bf497832e346cd5371995de148e579b9774f8eba686dee3f5528", size = 38923559, upload-time = "2025-07-07T09:15:25.229Z" }, ] [[package]] name = "opentelemetry-api" -version = "1.34.1" +version = "1.36.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "importlib-metadata" }, { 
name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4d/5e/94a8cb759e4e409022229418294e098ca7feca00eb3c467bb20cbd329bda/opentelemetry_api-1.34.1.tar.gz", hash = "sha256:64f0bd06d42824843731d05beea88d4d4b6ae59f9fe347ff7dfa2cc14233bbb3", size = 64987, upload-time = "2025-06-10T08:55:19.818Z" } +sdist = { url = "https://files.pythonhosted.org/packages/27/d2/c782c88b8afbf961d6972428821c302bd1e9e7bc361352172f0ca31296e2/opentelemetry_api-1.36.0.tar.gz", hash = "sha256:9a72572b9c416d004d492cbc6e61962c0501eaf945ece9b5a0f56597d8348aa0", size = 64780, upload-time = "2025-07-29T15:12:06.02Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/3a/2ba85557e8dc024c0842ad22c570418dc02c36cbd1ab4b832a93edf071b8/opentelemetry_api-1.34.1-py3-none-any.whl", hash = "sha256:b7df4cb0830d5a6c29ad0c0691dbae874d8daefa934b8b1d642de48323d32a8c", size = 65767, upload-time = "2025-06-10T08:54:56.717Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ee/6b08dde0a022c463b88f55ae81149584b125a42183407dc1045c486cc870/opentelemetry_api-1.36.0-py3-none-any.whl", hash = "sha256:02f20bcacf666e1333b6b1f04e647dc1d5111f86b8e510238fcc56d7762cda8c", size = 65564, upload-time = "2025-07-29T15:11:47.998Z" }, ] [[package]] name = "opentelemetry-sdk" -version = "1.34.1" +version = "1.36.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6f/41/fe20f9036433da8e0fcef568984da4c1d1c771fa072ecd1a4d98779dccdd/opentelemetry_sdk-1.34.1.tar.gz", hash = "sha256:8091db0d763fcd6098d4781bbc80ff0971f94e260739aa6afe6fd379cdf3aa4d", size = 159441, upload-time = "2025-06-10T08:55:33.028Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/85/8567a966b85a2d3f971c4d42f781c305b2b91c043724fa08fd37d158e9dc/opentelemetry_sdk-1.36.0.tar.gz", hash = 
"sha256:19c8c81599f51b71670661ff7495c905d8fdf6976e41622d5245b791b06fa581", size = 162557, upload-time = "2025-07-29T15:12:16.76Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/07/1b/def4fe6aa73f483cabf4c748f4c25070d5f7604dcc8b52e962983491b29e/opentelemetry_sdk-1.34.1-py3-none-any.whl", hash = "sha256:308effad4059562f1d92163c61c8141df649da24ce361827812c40abb2a1e96e", size = 118477, upload-time = "2025-06-10T08:55:16.02Z" }, + { url = "https://files.pythonhosted.org/packages/0b/59/7bed362ad1137ba5886dac8439e84cd2df6d087be7c09574ece47ae9b22c/opentelemetry_sdk-1.36.0-py3-none-any.whl", hash = "sha256:19fe048b42e98c5c1ffe85b569b7073576ad4ce0bcb6e9b4c6a39e890a6c45fb", size = 119995, upload-time = "2025-07-29T15:12:03.181Z" }, ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.55b1" +version = "0.57b0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5d/f0/f33458486da911f47c4aa6db9bda308bb80f3236c111bf848bd870c16b16/opentelemetry_semantic_conventions-0.55b1.tar.gz", hash = "sha256:ef95b1f009159c28d7a7849f5cbc71c4c34c845bb514d66adfdf1b3fff3598b3", size = 119829, upload-time = "2025-06-10T08:55:33.881Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/31/67dfa252ee88476a29200b0255bda8dfc2cf07b56ad66dc9a6221f7dc787/opentelemetry_semantic_conventions-0.57b0.tar.gz", hash = "sha256:609a4a79c7891b4620d64c7aac6898f872d790d75f22019913a660756f27ff32", size = 124225, upload-time = "2025-07-29T15:12:17.873Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/89/267b0af1b1d0ba828f0e60642b6a5116ac1fd917cde7fc02821627029bd1/opentelemetry_semantic_conventions-0.55b1-py3-none-any.whl", hash = "sha256:5da81dfdf7d52e3d37f8fe88d5e771e191de924cfff5f550ab0b8f7b2409baed", size = 196223, upload-time = "2025-06-10T08:55:17.638Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/75/7d591371c6c39c73de5ce5da5a2cc7b72d1d1cd3f8f4638f553c01c37b11/opentelemetry_semantic_conventions-0.57b0-py3-none-any.whl", hash = "sha256:757f7e76293294f124c827e514c2a3144f191ef175b069ce8d1211e1e38e9e78", size = 201627, upload-time = "2025-07-29T15:12:04.174Z" }, ] [[package]] @@ -3230,7 +3764,7 @@ wheels = [ [[package]] name = "pandas" -version = "2.3.0" +version = "2.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, @@ -3238,42 +3772,43 @@ dependencies = [ { name = "pytz" }, { name = "tzdata" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/72/51/48f713c4c728d7c55ef7444ba5ea027c26998d96d1a40953b346438602fc/pandas-2.3.0.tar.gz", hash = "sha256:34600ab34ebf1131a7613a260a61dbe8b62c188ec0ea4c296da7c9a06b004133", size = 4484490, upload-time = "2025-06-05T03:27:54.133Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/94/46/24192607058dd607dbfacdd060a2370f6afb19c2ccb617406469b9aeb8e7/pandas-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2eb4728a18dcd2908c7fccf74a982e241b467d178724545a48d0caf534b38ebf", size = 11573865, upload-time = "2025-06-05T03:26:46.774Z" }, - { url = "https://files.pythonhosted.org/packages/9f/cc/ae8ea3b800757a70c9fdccc68b67dc0280a6e814efcf74e4211fd5dea1ca/pandas-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9d8c3187be7479ea5c3d30c32a5d73d62a621166675063b2edd21bc47614027", size = 10702154, upload-time = "2025-06-05T16:50:14.439Z" }, - { url = "https://files.pythonhosted.org/packages/d8/ba/a7883d7aab3d24c6540a2768f679e7414582cc389876d469b40ec749d78b/pandas-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ff730713d4c4f2f1c860e36c005c7cefc1c7c80c21c0688fd605aa43c9fcf09", size = 11262180, upload-time = "2025-06-05T16:50:17.453Z" }, - { url = 
"https://files.pythonhosted.org/packages/01/a5/931fc3ad333d9d87b10107d948d757d67ebcfc33b1988d5faccc39c6845c/pandas-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba24af48643b12ffe49b27065d3babd52702d95ab70f50e1b34f71ca703e2c0d", size = 11991493, upload-time = "2025-06-05T03:26:51.813Z" }, - { url = "https://files.pythonhosted.org/packages/d7/bf/0213986830a92d44d55153c1d69b509431a972eb73f204242988c4e66e86/pandas-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:404d681c698e3c8a40a61d0cd9412cc7364ab9a9cc6e144ae2992e11a2e77a20", size = 12470733, upload-time = "2025-06-06T00:00:18.651Z" }, - { url = "https://files.pythonhosted.org/packages/a4/0e/21eb48a3a34a7d4bac982afc2c4eb5ab09f2d988bdf29d92ba9ae8e90a79/pandas-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6021910b086b3ca756755e86ddc64e0ddafd5e58e076c72cb1585162e5ad259b", size = 13212406, upload-time = "2025-06-05T03:26:55.992Z" }, - { url = "https://files.pythonhosted.org/packages/1f/d9/74017c4eec7a28892d8d6e31ae9de3baef71f5a5286e74e6b7aad7f8c837/pandas-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:094e271a15b579650ebf4c5155c05dcd2a14fd4fdd72cf4854b2f7ad31ea30be", size = 10976199, upload-time = "2025-06-05T03:26:59.594Z" }, - { url = "https://files.pythonhosted.org/packages/d3/57/5cb75a56a4842bbd0511c3d1c79186d8315b82dac802118322b2de1194fe/pandas-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c7e2fc25f89a49a11599ec1e76821322439d90820108309bf42130d2f36c983", size = 11518913, upload-time = "2025-06-05T03:27:02.757Z" }, - { url = "https://files.pythonhosted.org/packages/05/01/0c8785610e465e4948a01a059562176e4c8088aa257e2e074db868f86d4e/pandas-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6da97aeb6a6d233fb6b17986234cc723b396b50a3c6804776351994f2a658fd", size = 10655249, upload-time = "2025-06-05T16:50:20.17Z" }, - { url = 
"https://files.pythonhosted.org/packages/e8/6a/47fd7517cd8abe72a58706aab2b99e9438360d36dcdb052cf917b7bf3bdc/pandas-2.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb32dc743b52467d488e7a7c8039b821da2826a9ba4f85b89ea95274f863280f", size = 11328359, upload-time = "2025-06-05T03:27:06.431Z" }, - { url = "https://files.pythonhosted.org/packages/2a/b3/463bfe819ed60fb7e7ddffb4ae2ee04b887b3444feee6c19437b8f834837/pandas-2.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:213cd63c43263dbb522c1f8a7c9d072e25900f6975596f883f4bebd77295d4f3", size = 12024789, upload-time = "2025-06-05T03:27:09.875Z" }, - { url = "https://files.pythonhosted.org/packages/04/0c/e0704ccdb0ac40aeb3434d1c641c43d05f75c92e67525df39575ace35468/pandas-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1d2b33e68d0ce64e26a4acc2e72d747292084f4e8db4c847c6f5f6cbe56ed6d8", size = 12480734, upload-time = "2025-06-06T00:00:22.246Z" }, - { url = "https://files.pythonhosted.org/packages/e9/df/815d6583967001153bb27f5cf075653d69d51ad887ebbf4cfe1173a1ac58/pandas-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:430a63bae10b5086995db1b02694996336e5a8ac9a96b4200572b413dfdfccb9", size = 13223381, upload-time = "2025-06-05T03:27:15.641Z" }, - { url = "https://files.pythonhosted.org/packages/79/88/ca5973ed07b7f484c493e941dbff990861ca55291ff7ac67c815ce347395/pandas-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4930255e28ff5545e2ca404637bcc56f031893142773b3468dc021c6c32a1390", size = 10970135, upload-time = "2025-06-05T03:27:24.131Z" }, - { url = "https://files.pythonhosted.org/packages/24/fb/0994c14d1f7909ce83f0b1fb27958135513c4f3f2528bde216180aa73bfc/pandas-2.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f925f1ef673b4bd0271b1809b72b3270384f2b7d9d14a189b12b7fc02574d575", size = 12141356, upload-time = "2025-06-05T03:27:34.547Z" }, - { url = 
"https://files.pythonhosted.org/packages/9d/a2/9b903e5962134497ac4f8a96f862ee3081cb2506f69f8e4778ce3d9c9d82/pandas-2.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78ad363ddb873a631e92a3c063ade1ecfb34cae71e9a2be6ad100f875ac1042", size = 11474674, upload-time = "2025-06-05T03:27:39.448Z" }, - { url = "https://files.pythonhosted.org/packages/81/3a/3806d041bce032f8de44380f866059437fb79e36d6b22c82c187e65f765b/pandas-2.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951805d146922aed8357e4cc5671b8b0b9be1027f0619cea132a9f3f65f2f09c", size = 11439876, upload-time = "2025-06-05T03:27:43.652Z" }, - { url = "https://files.pythonhosted.org/packages/15/aa/3fc3181d12b95da71f5c2537c3e3b3af6ab3a8c392ab41ebb766e0929bc6/pandas-2.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a881bc1309f3fce34696d07b00f13335c41f5f5a8770a33b09ebe23261cfc67", size = 11966182, upload-time = "2025-06-05T03:27:47.652Z" }, - { url = "https://files.pythonhosted.org/packages/37/e7/e12f2d9b0a2c4a2cc86e2aabff7ccfd24f03e597d770abfa2acd313ee46b/pandas-2.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e1991bbb96f4050b09b5f811253c4f3cf05ee89a589379aa36cd623f21a31d6f", size = 12547686, upload-time = "2025-06-06T00:00:26.142Z" }, - { url = "https://files.pythonhosted.org/packages/39/c2/646d2e93e0af70f4e5359d870a63584dacbc324b54d73e6b3267920ff117/pandas-2.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bb3be958022198531eb7ec2008cfc78c5b1eed51af8600c6c5d9160d89d8d249", size = 13231847, upload-time = "2025-06-05T03:27:51.465Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/d1/6f/75aa71f8a14267117adeeed5d21b204770189c0a0025acbdc03c337b28fc/pandas-2.3.1.tar.gz", hash = "sha256:0a95b9ac964fe83ce317827f80304d37388ea77616b1425f0ae41c9d2d0d7bb2", size = 4487493, upload-time = "2025-07-07T19:20:04.079Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/46/de/b8445e0f5d217a99fe0eeb2f4988070908979bec3587c0633e5428ab596c/pandas-2.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:689968e841136f9e542020698ee1c4fbe9caa2ed2213ae2388dc7b81721510d3", size = 11588172, upload-time = "2025-07-07T19:18:52.054Z" }, + { url = "https://files.pythonhosted.org/packages/1e/e0/801cdb3564e65a5ac041ab99ea6f1d802a6c325bb6e58c79c06a3f1cd010/pandas-2.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:025e92411c16cbe5bb2a4abc99732a6b132f439b8aab23a59fa593eb00704232", size = 10717365, upload-time = "2025-07-07T19:18:54.785Z" }, + { url = "https://files.pythonhosted.org/packages/51/a5/c76a8311833c24ae61a376dbf360eb1b1c9247a5d9c1e8b356563b31b80c/pandas-2.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b7ff55f31c4fcb3e316e8f7fa194566b286d6ac430afec0d461163312c5841e", size = 11280411, upload-time = "2025-07-07T19:18:57.045Z" }, + { url = "https://files.pythonhosted.org/packages/da/01/e383018feba0a1ead6cf5fe8728e5d767fee02f06a3d800e82c489e5daaf/pandas-2.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dcb79bf373a47d2a40cf7232928eb7540155abbc460925c2c96d2d30b006eb4", size = 11988013, upload-time = "2025-07-07T19:18:59.771Z" }, + { url = "https://files.pythonhosted.org/packages/5b/14/cec7760d7c9507f11c97d64f29022e12a6cc4fc03ac694535e89f88ad2ec/pandas-2.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:56a342b231e8862c96bdb6ab97170e203ce511f4d0429589c8ede1ee8ece48b8", size = 12767210, upload-time = "2025-07-07T19:19:02.944Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/6e2d2c6728ed29fb3d4d4d302504fb66f1a543e37eb2e43f352a86365cdf/pandas-2.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ca7ed14832bce68baef331f4d7f294411bed8efd032f8109d690df45e00c4679", size = 13440571, upload-time = "2025-07-07T19:19:06.82Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/a5/3a92893e7399a691bad7664d977cb5e7c81cf666c81f89ea76ba2bff483d/pandas-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:ac942bfd0aca577bef61f2bc8da8147c4ef6879965ef883d8e8d5d2dc3e744b8", size = 10987601, upload-time = "2025-07-07T19:19:09.589Z" }, + { url = "https://files.pythonhosted.org/packages/32/ed/ff0a67a2c5505e1854e6715586ac6693dd860fbf52ef9f81edee200266e7/pandas-2.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9026bd4a80108fac2239294a15ef9003c4ee191a0f64b90f170b40cfb7cf2d22", size = 11531393, upload-time = "2025-07-07T19:19:12.245Z" }, + { url = "https://files.pythonhosted.org/packages/c7/db/d8f24a7cc9fb0972adab0cc80b6817e8bef888cfd0024eeb5a21c0bb5c4a/pandas-2.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6de8547d4fdb12421e2d047a2c446c623ff4c11f47fddb6b9169eb98ffba485a", size = 10668750, upload-time = "2025-07-07T19:19:14.612Z" }, + { url = "https://files.pythonhosted.org/packages/0f/b0/80f6ec783313f1e2356b28b4fd8d2148c378370045da918c73145e6aab50/pandas-2.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:782647ddc63c83133b2506912cc6b108140a38a37292102aaa19c81c83db2928", size = 11342004, upload-time = "2025-07-07T19:19:16.857Z" }, + { url = "https://files.pythonhosted.org/packages/e9/e2/20a317688435470872885e7fc8f95109ae9683dec7c50be29b56911515a5/pandas-2.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ba6aff74075311fc88504b1db890187a3cd0f887a5b10f5525f8e2ef55bfdb9", size = 12050869, upload-time = "2025-07-07T19:19:19.265Z" }, + { url = "https://files.pythonhosted.org/packages/55/79/20d746b0a96c67203a5bee5fb4e00ac49c3e8009a39e1f78de264ecc5729/pandas-2.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e5635178b387bd2ba4ac040f82bc2ef6e6b500483975c4ebacd34bec945fda12", size = 12750218, upload-time = "2025-07-07T19:19:21.547Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/0f/145c8b41e48dbf03dd18fdd7f24f8ba95b8254a97a3379048378f33e7838/pandas-2.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6f3bf5ec947526106399a9e1d26d40ee2b259c66422efdf4de63c848492d91bb", size = 13416763, upload-time = "2025-07-07T19:19:23.939Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c0/54415af59db5cdd86a3d3bf79863e8cc3fa9ed265f0745254061ac09d5f2/pandas-2.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:1c78cf43c8fde236342a1cb2c34bcff89564a7bfed7e474ed2fffa6aed03a956", size = 10987482, upload-time = "2025-07-07T19:19:42.699Z" }, + { url = "https://files.pythonhosted.org/packages/48/64/2fd2e400073a1230e13b8cd604c9bc95d9e3b962e5d44088ead2e8f0cfec/pandas-2.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8dfc17328e8da77be3cf9f47509e5637ba8f137148ed0e9b5241e1baf526e20a", size = 12029159, upload-time = "2025-07-07T19:19:26.362Z" }, + { url = "https://files.pythonhosted.org/packages/d8/0a/d84fd79b0293b7ef88c760d7dca69828d867c89b6d9bc52d6a27e4d87316/pandas-2.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ec6c851509364c59a5344458ab935e6451b31b818be467eb24b0fe89bd05b6b9", size = 11393287, upload-time = "2025-07-07T19:19:29.157Z" }, + { url = "https://files.pythonhosted.org/packages/50/ae/ff885d2b6e88f3c7520bb74ba319268b42f05d7e583b5dded9837da2723f/pandas-2.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:911580460fc4884d9b05254b38a6bfadddfcc6aaef856fb5859e7ca202e45275", size = 11309381, upload-time = "2025-07-07T19:19:31.436Z" }, + { url = "https://files.pythonhosted.org/packages/85/86/1fa345fc17caf5d7780d2699985c03dbe186c68fee00b526813939062bb0/pandas-2.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f4d6feeba91744872a600e6edbbd5b033005b431d5ae8379abee5bcfa479fab", size = 11883998, upload-time = "2025-07-07T19:19:34.267Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/aa/e58541a49b5e6310d89474333e994ee57fea97c8aaa8fc7f00b873059bbf/pandas-2.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:fe37e757f462d31a9cd7580236a82f353f5713a80e059a29753cf938c6775d96", size = 12704705, upload-time = "2025-07-07T19:19:36.856Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f9/07086f5b0f2a19872554abeea7658200824f5835c58a106fa8f2ae96a46c/pandas-2.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5db9637dbc24b631ff3707269ae4559bce4b7fd75c1c4d7e13f40edc42df4444", size = 13189044, upload-time = "2025-07-07T19:19:39.999Z" }, ] [[package]] name = "paramiko" -version = "3.5.1" +version = "4.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "bcrypt" }, { name = "cryptography" }, + { name = "invoke" }, { name = "pynacl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/7d/15/ad6ce226e8138315f2451c2aeea985bf35ee910afb477bae7477dc3a8f3b/paramiko-3.5.1.tar.gz", hash = "sha256:b2c665bc45b2b215bd7d7f039901b14b067da00f3a11e6640995fd58f2664822", size = 1566110, upload-time = "2025-02-04T02:37:59.783Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/e7/81fdcbc7f190cdb058cffc9431587eb289833bdd633e2002455ca9bb13d4/paramiko-4.0.0.tar.gz", hash = "sha256:6a25f07b380cc9c9a88d2b920ad37167ac4667f8d9886ccebd8f90f654b5d69f", size = 1630743, upload-time = "2025-08-04T01:02:03.711Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/15/f8/c7bd0ef12954a81a1d3cea60a13946bd9a49a0036a5927770c461eade7ae/paramiko-3.5.1-py3-none-any.whl", hash = "sha256:43b9a0501fc2b5e70680388d9346cf252cfb7d00b0667c39e80eb43a408b8f61", size = 227298, upload-time = "2025-02-04T02:37:57.672Z" }, + { url = "https://files.pythonhosted.org/packages/a9/90/a744336f5af32c433bd09af7854599682a383b37cfd78f7de263de6ad6cb/paramiko-4.0.0-py3-none-any.whl", hash = "sha256:0e20e00ac666503bf0b4eda3b6d833465a2b7aff2e2b3d79a8bba5ef144ee3b9", size = 223932, upload-time = 
"2025-08-04T01:02:02.029Z" }, ] [[package]] @@ -3294,6 +3829,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, ] +[[package]] +name = "peft" +version = "0.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "accelerate" }, + { name = "huggingface-hub" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyyaml" }, + { name = "safetensors" }, + { name = "torch" }, + { name = "tqdm" }, + { name = "transformers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/b8/2e79377efaa1e5f0d70a497db7914ffd355846e760ffa2f7883ab0f600fb/peft-0.17.1.tar.gz", hash = "sha256:e6002b42517976c290b3b8bbb9829a33dd5d470676b2dec7cb4df8501b77eb9f", size = 568192, upload-time = "2025-08-21T09:25:22.703Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/fe/a2da1627aa9cb6310b6034598363bd26ac301c4a99d21f415b1b2855891e/peft-0.17.1-py3-none-any.whl", hash = "sha256:3d129d64def3d74779c32a080d2567e5f7b674e77d546e3585138216d903f99e", size = 504896, upload-time = "2025-08-21T09:25:18.974Z" }, +] + [[package]] name = "pfzy" version = "0.3.4" @@ -3305,43 +3861,68 @@ wheels = [ [[package]] name = "pillow" -version = "11.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/af/cb/bb5c01fcd2a69335b86c22142b2bccfc3464087efb7fd382eee5ffc7fdf7/pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6", size = 47026707, upload-time = "2025-04-12T17:50:03.289Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c7/40/052610b15a1b8961f52537cc8326ca6a881408bc2bdad0d852edeb6ed33b/pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f", size = 3190185, upload-time = "2025-04-12T17:48:00.417Z" }, - { url = "https://files.pythonhosted.org/packages/e5/7e/b86dbd35a5f938632093dc40d1682874c33dcfe832558fc80ca56bfcb774/pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b", size = 3030306, upload-time = "2025-04-12T17:48:02.391Z" }, - { url = "https://files.pythonhosted.org/packages/a4/5c/467a161f9ed53e5eab51a42923c33051bf8d1a2af4626ac04f5166e58e0c/pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d", size = 4416121, upload-time = "2025-04-12T17:48:04.554Z" }, - { url = "https://files.pythonhosted.org/packages/62/73/972b7742e38ae0e2ac76ab137ca6005dcf877480da0d9d61d93b613065b4/pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4", size = 4501707, upload-time = "2025-04-12T17:48:06.831Z" }, - { url = "https://files.pythonhosted.org/packages/e4/3a/427e4cb0b9e177efbc1a84798ed20498c4f233abde003c06d2650a6d60cb/pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d", size = 4522921, upload-time = "2025-04-12T17:48:09.229Z" }, - { url = "https://files.pythonhosted.org/packages/fe/7c/d8b1330458e4d2f3f45d9508796d7caf0c0d3764c00c823d10f6f1a3b76d/pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4", size = 4612523, upload-time = "2025-04-12T17:48:11.631Z" }, - { url = "https://files.pythonhosted.org/packages/b3/2f/65738384e0b1acf451de5a573d8153fe84103772d139e1e0bdf1596be2ea/pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443", 
size = 4587836, upload-time = "2025-04-12T17:48:13.592Z" }, - { url = "https://files.pythonhosted.org/packages/6a/c5/e795c9f2ddf3debb2dedd0df889f2fe4b053308bb59a3cc02a0cd144d641/pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c", size = 4669390, upload-time = "2025-04-12T17:48:15.938Z" }, - { url = "https://files.pythonhosted.org/packages/96/ae/ca0099a3995976a9fce2f423166f7bff9b12244afdc7520f6ed38911539a/pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3", size = 2332309, upload-time = "2025-04-12T17:48:17.885Z" }, - { url = "https://files.pythonhosted.org/packages/7c/18/24bff2ad716257fc03da964c5e8f05d9790a779a8895d6566e493ccf0189/pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941", size = 2676768, upload-time = "2025-04-12T17:48:19.655Z" }, - { url = "https://files.pythonhosted.org/packages/da/bb/e8d656c9543276517ee40184aaa39dcb41e683bca121022f9323ae11b39d/pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb", size = 2415087, upload-time = "2025-04-12T17:48:21.991Z" }, - { url = "https://files.pythonhosted.org/packages/36/9c/447528ee3776e7ab8897fe33697a7ff3f0475bb490c5ac1456a03dc57956/pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28", size = 3190098, upload-time = "2025-04-12T17:48:23.915Z" }, - { url = "https://files.pythonhosted.org/packages/b5/09/29d5cd052f7566a63e5b506fac9c60526e9ecc553825551333e1e18a4858/pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830", size = 3030166, upload-time = "2025-04-12T17:48:25.738Z" }, - { url = 
"https://files.pythonhosted.org/packages/71/5d/446ee132ad35e7600652133f9c2840b4799bbd8e4adba881284860da0a36/pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0", size = 4408674, upload-time = "2025-04-12T17:48:27.908Z" }, - { url = "https://files.pythonhosted.org/packages/69/5f/cbe509c0ddf91cc3a03bbacf40e5c2339c4912d16458fcb797bb47bcb269/pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1", size = 4496005, upload-time = "2025-04-12T17:48:29.888Z" }, - { url = "https://files.pythonhosted.org/packages/f9/b3/dd4338d8fb8a5f312021f2977fb8198a1184893f9b00b02b75d565c33b51/pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f", size = 4518707, upload-time = "2025-04-12T17:48:31.874Z" }, - { url = "https://files.pythonhosted.org/packages/13/eb/2552ecebc0b887f539111c2cd241f538b8ff5891b8903dfe672e997529be/pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155", size = 4610008, upload-time = "2025-04-12T17:48:34.422Z" }, - { url = "https://files.pythonhosted.org/packages/72/d1/924ce51bea494cb6e7959522d69d7b1c7e74f6821d84c63c3dc430cbbf3b/pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14", size = 4585420, upload-time = "2025-04-12T17:48:37.641Z" }, - { url = "https://files.pythonhosted.org/packages/43/ab/8f81312d255d713b99ca37479a4cb4b0f48195e530cdc1611990eb8fd04b/pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b", size = 4667655, upload-time = "2025-04-12T17:48:39.652Z" }, - { url = 
"https://files.pythonhosted.org/packages/94/86/8f2e9d2dc3d308dfd137a07fe1cc478df0a23d42a6c4093b087e738e4827/pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2", size = 2332329, upload-time = "2025-04-12T17:48:41.765Z" }, - { url = "https://files.pythonhosted.org/packages/6d/ec/1179083b8d6067a613e4d595359b5fdea65d0a3b7ad623fee906e1b3c4d2/pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691", size = 2676388, upload-time = "2025-04-12T17:48:43.625Z" }, - { url = "https://files.pythonhosted.org/packages/23/f1/2fc1e1e294de897df39fa8622d829b8828ddad938b0eaea256d65b84dd72/pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c", size = 2414950, upload-time = "2025-04-12T17:48:45.475Z" }, - { url = "https://files.pythonhosted.org/packages/c4/3e/c328c48b3f0ead7bab765a84b4977acb29f101d10e4ef57a5e3400447c03/pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22", size = 3192759, upload-time = "2025-04-12T17:48:47.866Z" }, - { url = "https://files.pythonhosted.org/packages/18/0e/1c68532d833fc8b9f404d3a642991441d9058eccd5606eab31617f29b6d4/pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7", size = 3033284, upload-time = "2025-04-12T17:48:50.189Z" }, - { url = "https://files.pythonhosted.org/packages/b7/cb/6faf3fb1e7705fd2db74e070f3bf6f88693601b0ed8e81049a8266de4754/pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16", size = 4445826, upload-time = "2025-04-12T17:48:52.346Z" }, - { url = 
"https://files.pythonhosted.org/packages/07/94/8be03d50b70ca47fb434a358919d6a8d6580f282bbb7af7e4aa40103461d/pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b", size = 4527329, upload-time = "2025-04-12T17:48:54.403Z" }, - { url = "https://files.pythonhosted.org/packages/fd/a4/bfe78777076dc405e3bd2080bc32da5ab3945b5a25dc5d8acaa9de64a162/pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406", size = 4549049, upload-time = "2025-04-12T17:48:56.383Z" }, - { url = "https://files.pythonhosted.org/packages/65/4d/eaf9068dc687c24979e977ce5677e253624bd8b616b286f543f0c1b91662/pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91", size = 4635408, upload-time = "2025-04-12T17:48:58.782Z" }, - { url = "https://files.pythonhosted.org/packages/1d/26/0fd443365d9c63bc79feb219f97d935cd4b93af28353cba78d8e77b61719/pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751", size = 4614863, upload-time = "2025-04-12T17:49:00.709Z" }, - { url = "https://files.pythonhosted.org/packages/49/65/dca4d2506be482c2c6641cacdba5c602bc76d8ceb618fd37de855653a419/pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9", size = 4692938, upload-time = "2025-04-12T17:49:02.946Z" }, - { url = "https://files.pythonhosted.org/packages/b3/92/1ca0c3f09233bd7decf8f7105a1c4e3162fb9142128c74adad0fb361b7eb/pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd", size = 2335774, upload-time = "2025-04-12T17:49:04.889Z" }, - { url = 
"https://files.pythonhosted.org/packages/a5/ac/77525347cb43b83ae905ffe257bbe2cc6fd23acb9796639a1f56aa59d191/pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e", size = 2681895, upload-time = "2025-04-12T17:49:06.635Z" }, - { url = "https://files.pythonhosted.org/packages/67/32/32dc030cfa91ca0fc52baebbba2e009bb001122a1daa8b6a79ad830b38d3/pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681", size = 2417234, upload-time = "2025-04-12T17:49:08.399Z" }, +version = "11.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" }, + { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" }, + { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" }, + { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" }, + { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" }, + { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" }, + { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" }, + { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" }, + { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" }, + { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" }, + { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" }, + { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" }, + { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" }, + { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" }, + { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" }, + { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" }, + { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" }, + { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" }, + { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" }, + { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" }, + { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" }, + { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" }, + { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" }, + { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" }, + { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" }, + { url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" }, + { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" }, + { url = 
"https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" }, + { url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" }, + { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" }, + { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" }, + { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" }, + { url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" }, + { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" }, + { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" }, + { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" }, + { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" }, + { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" }, + { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" }, ] [[package]] @@ -3355,15 +3936,15 @@ wheels = [ [[package]] name = "plotly" -version = "6.1.2" +version = "6.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "narwhals" }, { name = "packaging" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ae/77/431447616eda6a432dc3ce541b3f808ecb8803ea3d4ab2573b67f8eb4208/plotly-6.1.2.tar.gz", hash = "sha256:4fdaa228926ba3e3a213f4d1713287e69dcad1a7e66cf2025bd7d7026d5014b4", size = 7662971, upload-time = "2025-05-27T20:21:52.56Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a0/64/850de5076f4436410e1ce4f6a69f4313ef6215dfea155f3f6559335cad29/plotly-6.3.0.tar.gz", hash = "sha256:8840a184d18ccae0f9189c2b9a2943923fd5cae7717b723f36eef78f444e5a73", size = 6923926, upload-time = "2025-08-12T20:22:14.127Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/6f/759d5da0517547a5d38aabf05d04d9f8adf83391d2c7fc33f904417d3ba2/plotly-6.1.2-py3-none-any.whl", hash = "sha256:f1548a8ed9158d59e03d7fed548c7db5549f3130d9ae19293c8638c202648f6d", size = 16265530, upload-time = "2025-05-27T20:21:46.6Z" }, + { url = "https://files.pythonhosted.org/packages/95/a9/12e2dc726ba1ba775a2c6922d5d5b4488ad60bdab0888c337c194c8e6de8/plotly-6.3.0-py3-none-any.whl", hash = "sha256:7ad806edce9d3cdd882eaebaf97c0c9e252043ed1ed3d382c3e3520ec07806d4", size = 9791257, upload-time = "2025-08-12T20:22:09.205Z" }, ] [[package]] @@ -3377,7 +3958,7 @@ wheels = [ [[package]] name = "pre-commit" -version = "3.6.0" +version = "4.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cfgv" }, @@ -3386,9 +3967,9 @@ dependencies = [ { name = "pyyaml" }, { name = "virtualenv" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/88/e8/4330d06f2b00ad3a9c66e07a68fe23f70233a4e7e1aaba5a738a93d2cb5d/pre_commit-3.6.0.tar.gz", hash = "sha256:d30bad9abf165f7785c15a21a1f46da7d0677cb00ee7ff4c579fd38922efe15d", size = 177069, upload-time = "2023-12-09T21:25:31.535Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ff/29/7cf5bbc236333876e4b41f56e06857a87937ce4bf91e117a6991a2dbb02a/pre_commit-4.3.0.tar.gz", hash = "sha256:499fe450cc9d42e9d58e606262795ecb64dd05438943c62b66f6a8673da30b16", size = 193792, upload-time = "2025-08-09T18:56:14.651Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e2/e3/54cd906d377e1766299df14710ded125e195d5c685c8f1bafecec073e9c6/pre_commit-3.6.0-py2.py3-none-any.whl", hash = "sha256:c255039ef399049a5544b6ce13d135caba8f2c28c3b4033277a788f434308376", size = 204021, upload-time = "2023-12-09T21:25:28.932Z" }, + { url = "https://files.pythonhosted.org/packages/5b/a5/987a405322d78a73b66e39e4a90e4ef156fd7141bf71df987e50717c321b/pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8", size = 220965, upload-time = "2025-08-09T18:56:13.192Z" }, ] [[package]] @@ -3496,16 +4077,16 @@ wheels = [ [[package]] name = "protobuf" -version = "5.29.5" +version = "6.32.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/29/d09e70352e4e88c9c7a198d5645d7277811448d76c23b00345670f7c8a38/protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84", size = 425226, upload-time = "2025-05-28T23:51:59.82Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c0/df/fb4a8eeea482eca989b51cffd274aac2ee24e825f0bf3cbce5281fa1567b/protobuf-6.32.0.tar.gz", hash = "sha256:a81439049127067fc49ec1d36e25c6ee1d1a2b7be930675f919258d03c04e7d2", size = 440614, upload-time = "2025-08-14T21:21:25.015Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/5f/11/6e40e9fc5bba02988a214c07cf324595789ca7820160bfd1f8be96e48539/protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079", size = 422963, upload-time = "2025-05-28T23:51:41.204Z" }, - { url = "https://files.pythonhosted.org/packages/81/7f/73cefb093e1a2a7c3ffd839e6f9fcafb7a427d300c7f8aef9c64405d8ac6/protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc", size = 434818, upload-time = "2025-05-28T23:51:44.297Z" }, - { url = "https://files.pythonhosted.org/packages/dd/73/10e1661c21f139f2c6ad9b23040ff36fee624310dc28fba20d33fdae124c/protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671", size = 418091, upload-time = "2025-05-28T23:51:45.907Z" }, - { url = "https://files.pythonhosted.org/packages/6c/04/98f6f8cf5b07ab1294c13f34b4e69b3722bb609c5b701d6c169828f9f8aa/protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015", size = 319824, upload-time = "2025-05-28T23:51:47.545Z" }, - { url = "https://files.pythonhosted.org/packages/85/e4/07c80521879c2d15f321465ac24c70efe2381378c00bf5e56a0f4fbac8cd/protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61", size = 319942, upload-time = "2025-05-28T23:51:49.11Z" }, - { url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" }, + { url = "https://files.pythonhosted.org/packages/33/18/df8c87da2e47f4f1dcc5153a81cd6bca4e429803f4069a299e236e4dd510/protobuf-6.32.0-cp310-abi3-win32.whl", hash = 
"sha256:84f9e3c1ff6fb0308dbacb0950d8aa90694b0d0ee68e75719cb044b7078fe741", size = 424409, upload-time = "2025-08-14T21:21:12.366Z" }, + { url = "https://files.pythonhosted.org/packages/e1/59/0a820b7310f8139bd8d5a9388e6a38e1786d179d6f33998448609296c229/protobuf-6.32.0-cp310-abi3-win_amd64.whl", hash = "sha256:a8bdbb2f009cfc22a36d031f22a625a38b615b5e19e558a7b756b3279723e68e", size = 435735, upload-time = "2025-08-14T21:21:15.046Z" }, + { url = "https://files.pythonhosted.org/packages/cc/5b/0d421533c59c789e9c9894683efac582c06246bf24bb26b753b149bd88e4/protobuf-6.32.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d52691e5bee6c860fff9a1c86ad26a13afbeb4b168cd4445c922b7e2cf85aaf0", size = 426449, upload-time = "2025-08-14T21:21:16.687Z" }, + { url = "https://files.pythonhosted.org/packages/ec/7b/607764ebe6c7a23dcee06e054fd1de3d5841b7648a90fd6def9a3bb58c5e/protobuf-6.32.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:501fe6372fd1c8ea2a30b4d9be8f87955a64d6be9c88a973996cef5ef6f0abf1", size = 322869, upload-time = "2025-08-14T21:21:18.282Z" }, + { url = "https://files.pythonhosted.org/packages/40/01/2e730bd1c25392fc32e3268e02446f0d77cb51a2c3a8486b1798e34d5805/protobuf-6.32.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:75a2aab2bd1aeb1f5dc7c5f33bcb11d82ea8c055c9becbb41c26a8c43fd7092c", size = 322009, upload-time = "2025-08-14T21:21:19.893Z" }, + { url = "https://files.pythonhosted.org/packages/9c/f2/80ffc4677aac1bc3519b26bc7f7f5de7fce0ee2f7e36e59e27d8beb32dd1/protobuf-6.32.0-py3-none-any.whl", hash = "sha256:ba377e5b67b908c8f3072a57b63e2c6a4cbd18aea4ed98d2584350dbf46f2783", size = 169287, upload-time = "2025-08-14T21:21:23.515Z" }, ] [[package]] @@ -3525,11 +4106,11 @@ wheels = [ [[package]] name = "pulp" -version = "3.2.1" +version = "3.2.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2f/cd/cb1308632ad5b092ebbfe64d0cd0b9906caec6e52bff88f54ddd3d434694/pulp-3.2.1.tar.gz", hash = 
"sha256:fc6c02c47c06342c586b175924add753cad7638ff6149b3b43e87ac6709ac469", size = 16297436, upload-time = "2025-05-29T09:25:51.647Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/4f/11cfa283228b5f259bcfc913f731f7c6f68748d26711594e14cf2cb5e39a/pulp-3.2.2.tar.gz", hash = "sha256:389a6ff1dc34ec4b093f34f7a9fa3553743ff0ea99b2a423e9f0dd16940f63d2", size = 16299367, upload-time = "2025-07-29T11:42:04.109Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/84/45/2bb878df73b5545405faff0b0b30f72929222356387a41b50ca268951d5d/pulp-3.2.1-py3-none-any.whl", hash = "sha256:c6cf7fe84cef15795bc7c27e2f3c6784db5cf6ebf68e94d5a659b02415f982c5", size = 16383592, upload-time = "2025-05-29T09:25:49.262Z" }, + { url = "https://files.pythonhosted.org/packages/15/8d/a6a9d58c929a869f7f1b99b3d37b3f14ef63e2826eef581416338d686c3f/pulp-3.2.2-py3-none-any.whl", hash = "sha256:d3ca5ff11a28b3e7b2508a992d7e51f3533471d89305f0560b5fe3b6cc821043", size = 16385354, upload-time = "2025-07-29T11:42:01.829Z" }, ] [[package]] @@ -3543,52 +4124,46 @@ wheels = [ [[package]] name = "py-spy" -version = "0.4.0" +version = "0.4.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/cd/9dacc04604dc4398ce5bed77ed59918ad0940f15165954d4aaa651cc640c/py_spy-0.4.0.tar.gz", hash = "sha256:806602ce7972782cc9c1e383f339bfc27bfb822d42485e6a3e0530ae5040e1f0", size = 253236, upload-time = "2024-11-01T19:08:51.487Z" } +sdist = { url = "https://files.pythonhosted.org/packages/19/e2/ff811a367028b87e86714945bb9ecb5c1cc69114a8039a67b3a862cef921/py_spy-0.4.1.tar.gz", hash = "sha256:e53aa53daa2e47c2eef97dd2455b47bb3a7e7f962796a86cc3e7dbde8e6f4db4", size = 244726, upload-time = "2025-07-31T19:33:25.172Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/7e/02ca3ee68507db47afce769504060d71b4dc1455f0f9faa8d32fc7762221/py_spy-0.4.0-py2.py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = 
"sha256:f2cf3f7130e7d780471faa5957441d3b4e0ec39a79b2c00f4c33d494f7728428", size = 3617847, upload-time = "2024-11-01T19:08:37.44Z" }, - { url = "https://files.pythonhosted.org/packages/65/7c/d9e26cc4c8e91f96a3a65de04d2e2e4131fbcaf6830d10917d4fab9d6788/py_spy-0.4.0-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:47cdda4c34d9b6cb01f3aaeceb2e88faf57da880207fe72ff6ff97e9bb6cc8a9", size = 1761955, upload-time = "2024-11-01T19:08:39.632Z" }, - { url = "https://files.pythonhosted.org/packages/d2/e4/8fbfd219b7f282b80e6b2e74c9197850d2c51db8555705567bb65507b060/py_spy-0.4.0-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eee3d0bde85ca5cf4f01f012d461180ca76c24835a96f7b5c4ded64eb6a008ab", size = 2059471, upload-time = "2024-11-01T19:08:41.818Z" }, - { url = "https://files.pythonhosted.org/packages/a7/1d/79a94a5ace810c13b730ce96765ca465c171b4952034f1be7402d8accbc1/py_spy-0.4.0-py2.py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c5f06ffce4c9c98b7fc9f5e67e5e7db591173f1351837633f3f23d9378b1d18a", size = 2067486, upload-time = "2024-11-01T19:08:43.673Z" }, - { url = "https://files.pythonhosted.org/packages/6d/90/fbbb038f826a83ed15ebc4ae606815d6cad6c5c6399c86c7ab96f6c60817/py_spy-0.4.0-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87573e64dbfdfc89ba2e0f5e2f525aa84e0299c7eb6454b47ea335fde583a7a0", size = 2141433, upload-time = "2024-11-01T19:08:45.988Z" }, - { url = "https://files.pythonhosted.org/packages/c9/c1/5e012669ebb687e546dc99fcfc4861ebfcf3a337b7a41af945df23140bb5/py_spy-0.4.0-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8bf2f3702cef367a489faa45177b41a6c31b2a3e5bd78c978d44e29340152f5a", size = 2732951, upload-time = "2024-11-01T19:08:48.109Z" }, - { url = "https://files.pythonhosted.org/packages/74/8b/dd8490660019a6b0be28d9ffd2bf1db967604b19f3f2719c0e283a16ac7f/py_spy-0.4.0-py2.py3-none-win_amd64.whl", hash = 
"sha256:77d8f637ade38367d944874776f45b703b7ac5938b1f7be8891f3a5876ddbb96", size = 1810770, upload-time = "2024-11-01T19:08:50.229Z" }, + { url = "https://files.pythonhosted.org/packages/14/e3/3a32500d845bdd94f6a2b4ed6244982f42ec2bc64602ea8fcfe900678ae7/py_spy-0.4.1-py2.py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:809094208c6256c8f4ccadd31e9a513fe2429253f48e20066879239ba12cd8cc", size = 3682508, upload-time = "2025-07-31T19:33:13.753Z" }, + { url = "https://files.pythonhosted.org/packages/4f/bf/e4d280e9e0bec71d39fc646654097027d4bbe8e04af18fb68e49afcff404/py_spy-0.4.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:1fb8bf71ab8df95a95cc387deed6552934c50feef2cf6456bc06692a5508fd0c", size = 1796395, upload-time = "2025-07-31T19:33:15.325Z" }, + { url = "https://files.pythonhosted.org/packages/df/79/9ed50bb0a9de63ed023aa2db8b6265b04a7760d98c61eb54def6a5fddb68/py_spy-0.4.1-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee776b9d512a011d1ad3907ed53ae32ce2f3d9ff3e1782236554e22103b5c084", size = 2034938, upload-time = "2025-07-31T19:33:17.194Z" }, + { url = "https://files.pythonhosted.org/packages/53/a5/36862e3eea59f729dfb70ee6f9e14b051d8ddce1aa7e70e0b81d9fe18536/py_spy-0.4.1-py2.py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:532d3525538254d1859b49de1fbe9744df6b8865657c9f0e444bf36ce3f19226", size = 2658968, upload-time = "2025-07-31T19:33:18.916Z" }, + { url = "https://files.pythonhosted.org/packages/08/f8/9ea0b586b065a623f591e5e7961282ec944b5fbbdca33186c7c0296645b3/py_spy-0.4.1-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4972c21890b6814017e39ac233c22572c4a61fd874524ebc5ccab0f2237aee0a", size = 2147541, upload-time = "2025-07-31T19:33:20.565Z" }, + { url = "https://files.pythonhosted.org/packages/68/fb/bc7f639aed026bca6e7beb1e33f6951e16b7d315594e7635a4f7d21d63f4/py_spy-0.4.1-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:6a80ec05eb8a6883863a367c6a4d4f2d57de68466f7956b6367d4edd5c61bb29", size = 2763338, upload-time = "2025-07-31T19:33:22.202Z" }, + { url = "https://files.pythonhosted.org/packages/e1/da/fcc9a9fcd4ca946ff402cff20348e838b051d69f50f5d1f5dca4cd3c5eb8/py_spy-0.4.1-py2.py3-none-win_amd64.whl", hash = "sha256:d92e522bd40e9bf7d87c204033ce5bb5c828fca45fa28d970f58d71128069fdc", size = 1818784, upload-time = "2025-07-31T19:33:23.802Z" }, ] [[package]] name = "pyarrow" -version = "20.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/ee/a7810cb9f3d6e9238e61d312076a9859bf3668fd21c69744de9532383912/pyarrow-20.0.0.tar.gz", hash = "sha256:febc4a913592573c8d5805091a6c2b5064c8bd6e002131f01061797d91c783c1", size = 1125187, upload-time = "2025-04-27T12:34:23.264Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/d6/0c10e0d54f6c13eb464ee9b67a68b8c71bcf2f67760ef5b6fbcddd2ab05f/pyarrow-20.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:75a51a5b0eef32727a247707d4755322cb970be7e935172b6a3a9f9ae98404ba", size = 30815067, upload-time = "2025-04-27T12:29:44.384Z" }, - { url = "https://files.pythonhosted.org/packages/7e/e2/04e9874abe4094a06fd8b0cbb0f1312d8dd7d707f144c2ec1e5e8f452ffa/pyarrow-20.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:211d5e84cecc640c7a3ab900f930aaff5cd2702177e0d562d426fb7c4f737781", size = 32297128, upload-time = "2025-04-27T12:29:52.038Z" }, - { url = "https://files.pythonhosted.org/packages/31/fd/c565e5dcc906a3b471a83273039cb75cb79aad4a2d4a12f76cc5ae90a4b8/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ba3cf4182828be7a896cbd232aa8dd6a31bd1f9e32776cc3796c012855e1199", size = 41334890, upload-time = "2025-04-27T12:29:59.452Z" }, - { url = "https://files.pythonhosted.org/packages/af/a9/3bdd799e2c9b20c1ea6dc6fa8e83f29480a97711cf806e823f808c2316ac/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:2c3a01f313ffe27ac4126f4c2e5ea0f36a5fc6ab51f8726cf41fee4b256680bd", size = 42421775, upload-time = "2025-04-27T12:30:06.875Z" }, - { url = "https://files.pythonhosted.org/packages/10/f7/da98ccd86354c332f593218101ae56568d5dcedb460e342000bd89c49cc1/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a2791f69ad72addd33510fec7bb14ee06c2a448e06b649e264c094c5b5f7ce28", size = 40687231, upload-time = "2025-04-27T12:30:13.954Z" }, - { url = "https://files.pythonhosted.org/packages/bb/1b/2168d6050e52ff1e6cefc61d600723870bf569cbf41d13db939c8cf97a16/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4250e28a22302ce8692d3a0e8ec9d9dde54ec00d237cff4dfa9c1fbf79e472a8", size = 42295639, upload-time = "2025-04-27T12:30:21.949Z" }, - { url = "https://files.pythonhosted.org/packages/b2/66/2d976c0c7158fd25591c8ca55aee026e6d5745a021915a1835578707feb3/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:89e030dc58fc760e4010148e6ff164d2f44441490280ef1e97a542375e41058e", size = 42908549, upload-time = "2025-04-27T12:30:29.551Z" }, - { url = "https://files.pythonhosted.org/packages/31/a9/dfb999c2fc6911201dcbf348247f9cc382a8990f9ab45c12eabfd7243a38/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6102b4864d77102dbbb72965618e204e550135a940c2534711d5ffa787df2a5a", size = 44557216, upload-time = "2025-04-27T12:30:36.977Z" }, - { url = "https://files.pythonhosted.org/packages/a0/8e/9adee63dfa3911be2382fb4d92e4b2e7d82610f9d9f668493bebaa2af50f/pyarrow-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:96d6a0a37d9c98be08f5ed6a10831d88d52cac7b13f5287f1e0f625a0de8062b", size = 25660496, upload-time = "2025-04-27T12:30:42.809Z" }, - { url = "https://files.pythonhosted.org/packages/9b/aa/daa413b81446d20d4dad2944110dcf4cf4f4179ef7f685dd5a6d7570dc8e/pyarrow-20.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:a15532e77b94c61efadde86d10957950392999503b3616b2ffcef7621a002893", size = 30798501, upload-time = 
"2025-04-27T12:30:48.351Z" }, - { url = "https://files.pythonhosted.org/packages/ff/75/2303d1caa410925de902d32ac215dc80a7ce7dd8dfe95358c165f2adf107/pyarrow-20.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:dd43f58037443af715f34f1322c782ec463a3c8a94a85fdb2d987ceb5658e061", size = 32277895, upload-time = "2025-04-27T12:30:55.238Z" }, - { url = "https://files.pythonhosted.org/packages/92/41/fe18c7c0b38b20811b73d1bdd54b1fccba0dab0e51d2048878042d84afa8/pyarrow-20.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0d288143a8585806e3cc7c39566407aab646fb9ece164609dac1cfff45f6ae", size = 41327322, upload-time = "2025-04-27T12:31:05.587Z" }, - { url = "https://files.pythonhosted.org/packages/da/ab/7dbf3d11db67c72dbf36ae63dcbc9f30b866c153b3a22ef728523943eee6/pyarrow-20.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6953f0114f8d6f3d905d98e987d0924dabce59c3cda380bdfaa25a6201563b4", size = 42411441, upload-time = "2025-04-27T12:31:15.675Z" }, - { url = "https://files.pythonhosted.org/packages/90/c3/0c7da7b6dac863af75b64e2f827e4742161128c350bfe7955b426484e226/pyarrow-20.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:991f85b48a8a5e839b2128590ce07611fae48a904cae6cab1f089c5955b57eb5", size = 40677027, upload-time = "2025-04-27T12:31:24.631Z" }, - { url = "https://files.pythonhosted.org/packages/be/27/43a47fa0ff9053ab5203bb3faeec435d43c0d8bfa40179bfd076cdbd4e1c/pyarrow-20.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:97c8dc984ed09cb07d618d57d8d4b67a5100a30c3818c2fb0b04599f0da2de7b", size = 42281473, upload-time = "2025-04-27T12:31:31.311Z" }, - { url = "https://files.pythonhosted.org/packages/bc/0b/d56c63b078876da81bbb9ba695a596eabee9b085555ed12bf6eb3b7cab0e/pyarrow-20.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b71daf534f4745818f96c214dbc1e6124d7daf059167330b610fc69b6f3d3e3", size = 42893897, upload-time = "2025-04-27T12:31:39.406Z" }, - { url = 
"https://files.pythonhosted.org/packages/92/ac/7d4bd020ba9145f354012838692d48300c1b8fe5634bfda886abcada67ed/pyarrow-20.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8b88758f9303fa5a83d6c90e176714b2fd3852e776fc2d7e42a22dd6c2fb368", size = 44543847, upload-time = "2025-04-27T12:31:45.997Z" }, - { url = "https://files.pythonhosted.org/packages/9d/07/290f4abf9ca702c5df7b47739c1b2c83588641ddfa2cc75e34a301d42e55/pyarrow-20.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:30b3051b7975801c1e1d387e17c588d8ab05ced9b1e14eec57915f79869b5031", size = 25653219, upload-time = "2025-04-27T12:31:54.11Z" }, - { url = "https://files.pythonhosted.org/packages/95/df/720bb17704b10bd69dde086e1400b8eefb8f58df3f8ac9cff6c425bf57f1/pyarrow-20.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:ca151afa4f9b7bc45bcc791eb9a89e90a9eb2772767d0b1e5389609c7d03db63", size = 30853957, upload-time = "2025-04-27T12:31:59.215Z" }, - { url = "https://files.pythonhosted.org/packages/d9/72/0d5f875efc31baef742ba55a00a25213a19ea64d7176e0fe001c5d8b6e9a/pyarrow-20.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:4680f01ecd86e0dd63e39eb5cd59ef9ff24a9d166db328679e36c108dc993d4c", size = 32247972, upload-time = "2025-04-27T12:32:05.369Z" }, - { url = "https://files.pythonhosted.org/packages/d5/bc/e48b4fa544d2eea72f7844180eb77f83f2030b84c8dad860f199f94307ed/pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f4c8534e2ff059765647aa69b75d6543f9fef59e2cd4c6d18015192565d2b70", size = 41256434, upload-time = "2025-04-27T12:32:11.814Z" }, - { url = "https://files.pythonhosted.org/packages/c3/01/974043a29874aa2cf4f87fb07fd108828fc7362300265a2a64a94965e35b/pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1f8a47f4b4ae4c69c4d702cfbdfe4d41e18e5c7ef6f1bb1c50918c1e81c57b", size = 42353648, upload-time = "2025-04-27T12:32:20.766Z" }, - { url = 
"https://files.pythonhosted.org/packages/68/95/cc0d3634cde9ca69b0e51cbe830d8915ea32dda2157560dda27ff3b3337b/pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:a1f60dc14658efaa927f8214734f6a01a806d7690be4b3232ba526836d216122", size = 40619853, upload-time = "2025-04-27T12:32:28.1Z" }, - { url = "https://files.pythonhosted.org/packages/29/c2/3ad40e07e96a3e74e7ed7cc8285aadfa84eb848a798c98ec0ad009eb6bcc/pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:204a846dca751428991346976b914d6d2a82ae5b8316a6ed99789ebf976551e6", size = 42241743, upload-time = "2025-04-27T12:32:35.792Z" }, - { url = "https://files.pythonhosted.org/packages/eb/cb/65fa110b483339add6a9bc7b6373614166b14e20375d4daa73483755f830/pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f3b117b922af5e4c6b9a9115825726cac7d8b1421c37c2b5e24fbacc8930612c", size = 42839441, upload-time = "2025-04-27T12:32:46.64Z" }, - { url = "https://files.pythonhosted.org/packages/98/7b/f30b1954589243207d7a0fbc9997401044bf9a033eec78f6cb50da3f304a/pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e724a3fd23ae5b9c010e7be857f4405ed5e679db5c93e66204db1a69f733936a", size = 44503279, upload-time = "2025-04-27T12:32:56.503Z" }, - { url = "https://files.pythonhosted.org/packages/37/40/ad395740cd641869a13bcf60851296c89624662575621968dcfafabaa7f6/pyarrow-20.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:82f1ee5133bd8f49d31be1299dc07f585136679666b502540db854968576faf9", size = 25944982, upload-time = "2025-04-27T12:33:04.72Z" }, +version = "21.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ef/c2/ea068b8f00905c06329a3dfcd40d0fcc2b7d0f2e355bdb25b65e0a0e4cd4/pyarrow-21.0.0.tar.gz", hash = "sha256:5051f2dccf0e283ff56335760cbc8622cf52264d67e359d5569541ac11b6d5bc", size = 1133487, upload-time = "2025-07-18T00:57:31.761Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ca/d4/d4f817b21aacc30195cf6a46ba041dd1be827efa4a623cc8bf39a1c2a0c0/pyarrow-21.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3a302f0e0963db37e0a24a70c56cf91a4faa0bca51c23812279ca2e23481fccd", size = 31160305, upload-time = "2025-07-18T00:55:35.373Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9c/dcd38ce6e4b4d9a19e1d36914cb8e2b1da4e6003dd075474c4cfcdfe0601/pyarrow-21.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:b6b27cf01e243871390474a211a7922bfbe3bda21e39bc9160daf0da3fe48876", size = 32684264, upload-time = "2025-07-18T00:55:39.303Z" }, + { url = "https://files.pythonhosted.org/packages/4f/74/2a2d9f8d7a59b639523454bec12dba35ae3d0a07d8ab529dc0809f74b23c/pyarrow-21.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:e72a8ec6b868e258a2cd2672d91f2860ad532d590ce94cdf7d5e7ec674ccf03d", size = 41108099, upload-time = "2025-07-18T00:55:42.889Z" }, + { url = "https://files.pythonhosted.org/packages/ad/90/2660332eeb31303c13b653ea566a9918484b6e4d6b9d2d46879a33ab0622/pyarrow-21.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b7ae0bbdc8c6674259b25bef5d2a1d6af5d39d7200c819cf99e07f7dfef1c51e", size = 42829529, upload-time = "2025-07-18T00:55:47.069Z" }, + { url = "https://files.pythonhosted.org/packages/33/27/1a93a25c92717f6aa0fca06eb4700860577d016cd3ae51aad0e0488ac899/pyarrow-21.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:58c30a1729f82d201627c173d91bd431db88ea74dcaa3885855bc6203e433b82", size = 43367883, upload-time = "2025-07-18T00:55:53.069Z" }, + { url = "https://files.pythonhosted.org/packages/05/d9/4d09d919f35d599bc05c6950095e358c3e15148ead26292dfca1fb659b0c/pyarrow-21.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:072116f65604b822a7f22945a7a6e581cfa28e3454fdcc6939d4ff6090126623", size = 45133802, upload-time = "2025-07-18T00:55:57.714Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/30/f3795b6e192c3ab881325ffe172e526499eb3780e306a15103a2764916a2/pyarrow-21.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:cf56ec8b0a5c8c9d7021d6fd754e688104f9ebebf1bf4449613c9531f5346a18", size = 26203175, upload-time = "2025-07-18T00:56:01.364Z" }, + { url = "https://files.pythonhosted.org/packages/16/ca/c7eaa8e62db8fb37ce942b1ea0c6d7abfe3786ca193957afa25e71b81b66/pyarrow-21.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:e99310a4ebd4479bcd1964dff9e14af33746300cb014aa4a3781738ac63baf4a", size = 31154306, upload-time = "2025-07-18T00:56:04.42Z" }, + { url = "https://files.pythonhosted.org/packages/ce/e8/e87d9e3b2489302b3a1aea709aaca4b781c5252fcb812a17ab6275a9a484/pyarrow-21.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:d2fe8e7f3ce329a71b7ddd7498b3cfac0eeb200c2789bd840234f0dc271a8efe", size = 32680622, upload-time = "2025-07-18T00:56:07.505Z" }, + { url = "https://files.pythonhosted.org/packages/84/52/79095d73a742aa0aba370c7942b1b655f598069489ab387fe47261a849e1/pyarrow-21.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:f522e5709379d72fb3da7785aa489ff0bb87448a9dc5a75f45763a795a089ebd", size = 41104094, upload-time = "2025-07-18T00:56:10.994Z" }, + { url = "https://files.pythonhosted.org/packages/89/4b/7782438b551dbb0468892a276b8c789b8bbdb25ea5c5eb27faadd753e037/pyarrow-21.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:69cbbdf0631396e9925e048cfa5bce4e8c3d3b41562bbd70c685a8eb53a91e61", size = 42825576, upload-time = "2025-07-18T00:56:15.569Z" }, + { url = "https://files.pythonhosted.org/packages/b3/62/0f29de6e0a1e33518dec92c65be0351d32d7ca351e51ec5f4f837a9aab91/pyarrow-21.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:731c7022587006b755d0bdb27626a1a3bb004bb56b11fb30d98b6c1b4718579d", size = 43368342, upload-time = "2025-07-18T00:56:19.531Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/c7/0fa1f3f29cf75f339768cc698c8ad4ddd2481c1742e9741459911c9ac477/pyarrow-21.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dc56bc708f2d8ac71bd1dcb927e458c93cec10b98eb4120206a4091db7b67b99", size = 45131218, upload-time = "2025-07-18T00:56:23.347Z" }, + { url = "https://files.pythonhosted.org/packages/01/63/581f2076465e67b23bc5a37d4a2abff8362d389d29d8105832e82c9c811c/pyarrow-21.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:186aa00bca62139f75b7de8420f745f2af12941595bbbfa7ed3870ff63e25636", size = 26087551, upload-time = "2025-07-18T00:56:26.758Z" }, + { url = "https://files.pythonhosted.org/packages/c9/ab/357d0d9648bb8241ee7348e564f2479d206ebe6e1c47ac5027c2e31ecd39/pyarrow-21.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:a7a102574faa3f421141a64c10216e078df467ab9576684d5cd696952546e2da", size = 31290064, upload-time = "2025-07-18T00:56:30.214Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8a/5685d62a990e4cac2043fc76b4661bf38d06efed55cf45a334b455bd2759/pyarrow-21.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:1e005378c4a2c6db3ada3ad4c217b381f6c886f0a80d6a316fe586b90f77efd7", size = 32727837, upload-time = "2025-07-18T00:56:33.935Z" }, + { url = "https://files.pythonhosted.org/packages/fc/de/c0828ee09525c2bafefd3e736a248ebe764d07d0fd762d4f0929dbc516c9/pyarrow-21.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:65f8e85f79031449ec8706b74504a316805217b35b6099155dd7e227eef0d4b6", size = 41014158, upload-time = "2025-07-18T00:56:37.528Z" }, + { url = "https://files.pythonhosted.org/packages/6e/26/a2865c420c50b7a3748320b614f3484bfcde8347b2639b2b903b21ce6a72/pyarrow-21.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:3a81486adc665c7eb1a2bde0224cfca6ceaba344a82a971ef059678417880eb8", size = 42667885, upload-time = "2025-07-18T00:56:41.483Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/f9/4ee798dc902533159250fb4321267730bc0a107d8c6889e07c3add4fe3a5/pyarrow-21.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:fc0d2f88b81dcf3ccf9a6ae17f89183762c8a94a5bdcfa09e05cfe413acf0503", size = 43276625, upload-time = "2025-07-18T00:56:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/5a/da/e02544d6997037a4b0d22d8e5f66bc9315c3671371a8b18c79ade1cefe14/pyarrow-21.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6299449adf89df38537837487a4f8d3bd91ec94354fdd2a7d30bc11c48ef6e79", size = 44951890, upload-time = "2025-07-18T00:56:52.568Z" }, + { url = "https://files.pythonhosted.org/packages/e5/4e/519c1bc1876625fe6b71e9a28287c43ec2f20f73c658b9ae1d485c0c206e/pyarrow-21.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:222c39e2c70113543982c6b34f3077962b44fca38c0bd9e68bb6781534425c10", size = 26371006, upload-time = "2025-07-18T00:56:56.379Z" }, ] [[package]] @@ -3725,11 +4300,11 @@ wheels = [ [[package]] name = "pybind11" -version = "2.13.6" +version = "3.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d2/c1/72b9622fcb32ff98b054f724e213c7f70d6898baa714f4516288456ceaba/pybind11-2.13.6.tar.gz", hash = "sha256:ba6af10348c12b24e92fa086b39cfba0eff619b61ac77c406167d813b096d39a", size = 218403, upload-time = "2024-09-14T00:35:22.606Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ef/83/698d120e257a116f2472c710932023ad779409adf2734d2e940f34eea2c5/pybind11-3.0.0.tar.gz", hash = "sha256:c3f07bce3ada51c3e4b76badfa85df11688d12c46111f9d242bc5c9415af7862", size = 544819, upload-time = "2025-07-10T16:52:09.335Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/13/2f/0f24b288e2ce56f51c920137620b4434a38fd80583dbbe24fc2a1656c388/pybind11-2.13.6-py3-none-any.whl", hash = "sha256:237c41e29157b962835d356b370ededd57594a26d5894a795960f0047cb5caf5", size = 243282, upload-time = "2024-09-14T00:35:20.361Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/9c/85f50a5476832c3efc67b6d7997808388236ae4754bf53e1749b3bc27577/pybind11-3.0.0-py3-none-any.whl", hash = "sha256:7c5cac504da5a701b5163f0e6a7ba736c713a096a5378383c5b4b064b753f607", size = 292118, upload-time = "2025-07-10T16:52:07.828Z" }, ] [[package]] @@ -3795,6 +4370,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, ] +[package.optional-dependencies] +email = [ + { name = "email-validator" }, +] + [[package]] name = "pydantic-core" version = "2.33.2" @@ -3958,14 +4538,14 @@ wheels = [ [[package]] name = "pytest-asyncio" -version = "1.0.0" +version = "1.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pytest" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/d4/14f53324cb1a6381bef29d698987625d80052bb33932d8e7cbf9b337b17c/pytest_asyncio-1.0.0.tar.gz", hash = "sha256:d15463d13f4456e1ead2594520216b225a16f781e144f8fdf6c5bb4667c48b3f", size = 46960, upload-time = "2025-05-26T04:54:40.484Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/51/f8794af39eeb870e87a8c8068642fc07bce0c854d6865d7dd0f2a9d338c2/pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea", size = 46652, upload-time = "2025-07-16T04:29:26.393Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/30/05/ce271016e351fddc8399e546f6e23761967ee09c8c568bbfbecb0c150171/pytest_asyncio-1.0.0-py3-none-any.whl", hash = "sha256:4f024da9f1ef945e680dc68610b52550e36590a67fd31bb3b4943979a1f90ef3", size = 15976, upload-time = "2025-05-26T04:54:39.035Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157, upload-time = "2025-07-16T04:29:24.929Z" }, ] [[package]] @@ -4059,7 +4639,7 @@ wheels = [ [[package]] name = "pytorch-lightning" -version = "2.5.2" +version = "2.5.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fsspec", extra = ["http"] }, @@ -4071,9 +4651,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/01/3e/728fbdc671d07727ad447f9401d98a43570573965beb3cb2060f9a330b4f/pytorch_lightning-2.5.2.tar.gz", hash = "sha256:f817087d611be8d43b777dd4e543d72703e235510936677a13e6c29f7fd790e3", size = 636859, upload-time = "2025-06-20T15:58:27.062Z" } +sdist = { url = "https://files.pythonhosted.org/packages/32/a8/31fe79bf96dab33cee5537ed6f08230ed6f032834bb4ff529cc487fb40e8/pytorch_lightning-2.5.3.tar.gz", hash = "sha256:65f4eee774ee1adba181aacacffb9f677fe5c5f9fd3d01a95f603403f940be6a", size = 639897, upload-time = "2025-08-13T20:29:39.161Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e2/42/47c186c8f9e956e559c89e6c764d5d5d0d0af517c04ca0ad39bd0a357d3a/pytorch_lightning-2.5.2-py3-none-any.whl", hash = "sha256:17cfdf89bd98074e389101f097cdf34c486a1f5c6d3fdcefbaf4dea7f97ff0bf", size = 825366, upload-time = "2025-06-20T15:58:25.534Z" }, + { url = "https://files.pythonhosted.org/packages/6a/a2/5f2b7b40ec5213db5282e98dd32fd419fe5b73b5b53895dfff56fe12fed0/pytorch_lightning-2.5.3-py3-none-any.whl", hash = "sha256:7476bd36282d9253dda175b9263b07942489d70ad90bbd1bc0a59c46e012f353", size = 828186, upload-time = "2025-08-13T20:29:37.41Z" }, ] [[package]] @@ -4087,15 +4667,18 @@ wheels = [ [[package]] name = "pywin32" -version = "310" +version = "311" source = { registry = "https://pypi.org/simple" } wheels = [ - { 
url = "https://files.pythonhosted.org/packages/6b/ec/4fdbe47932f671d6e348474ea35ed94227fb5df56a7c30cbbb42cd396ed0/pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d", size = 8796239, upload-time = "2025-03-17T00:55:58.807Z" }, - { url = "https://files.pythonhosted.org/packages/e3/e5/b0627f8bb84e06991bea89ad8153a9e50ace40b2e1195d68e9dff6b03d0f/pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060", size = 9503839, upload-time = "2025-03-17T00:56:00.8Z" }, - { url = "https://files.pythonhosted.org/packages/1f/32/9ccf53748df72301a89713936645a664ec001abd35ecc8578beda593d37d/pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966", size = 8459470, upload-time = "2025-03-17T00:56:02.601Z" }, - { url = "https://files.pythonhosted.org/packages/1c/09/9c1b978ffc4ae53999e89c19c77ba882d9fce476729f23ef55211ea1c034/pywin32-310-cp313-cp313-win32.whl", hash = "sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab", size = 8794384, upload-time = "2025-03-17T00:56:04.383Z" }, - { url = "https://files.pythonhosted.org/packages/45/3c/b4640f740ffebadd5d34df35fecba0e1cfef8fde9f3e594df91c28ad9b50/pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e", size = 9503039, upload-time = "2025-03-17T00:56:06.207Z" }, - { url = "https://files.pythonhosted.org/packages/b4/f4/f785020090fb050e7fb6d34b780f2231f302609dc964672f72bfaeb59a28/pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33", size = 8458152, upload-time = "2025-03-17T00:56:07.819Z" }, + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = 
"sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, ] [[package]] @@ -4150,32 +4733,65 @@ wheels = [ [[package]] name = "pyzmq" -version = "27.0.0" +version = "27.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "implementation_name == 'pypy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f1/06/50a4e9648b3e8b992bef8eb632e457307553a89d294103213cfd47b3da69/pyzmq-27.0.0.tar.gz", hash = "sha256:b1f08eeb9ce1510e6939b6e5dcd46a17765e2333daae78ecf4606808442e52cf", size = 280478, upload-time = "2025-06-13T14:09:07.087Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/93/a7/9ad68f55b8834ede477842214feba6a4c786d936c022a67625497aacf61d/pyzmq-27.0.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:cbabc59dcfaac66655c040dfcb8118f133fb5dde185e5fc152628354c1598e52", size = 1305438, upload-time = "2025-06-13T14:07:31.676Z" }, - { url = "https://files.pythonhosted.org/packages/ba/ee/26aa0f98665a22bc90ebe12dced1de5f3eaca05363b717f6fb229b3421b3/pyzmq-27.0.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:cb0ac5179cba4b2f94f1aa208fbb77b62c4c9bf24dd446278b8b602cf85fcda3", size = 895095, upload-time = "2025-06-13T14:07:33.104Z" }, - { url = "https://files.pythonhosted.org/packages/cf/85/c57e7ab216ecd8aa4cc7e3b83b06cc4e9cf45c87b0afc095f10cd5ce87c1/pyzmq-27.0.0-cp312-abi3-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:53a48f0228eab6cbf69fde3aa3c03cbe04e50e623ef92ae395fce47ef8a76152", size = 651826, upload-time = "2025-06-13T14:07:34.831Z" }, - { url = "https://files.pythonhosted.org/packages/69/9a/9ea7e230feda9400fb0ae0d61d7d6ddda635e718d941c44eeab22a179d34/pyzmq-27.0.0-cp312-abi3-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:111db5f395e09f7e775f759d598f43cb815fc58e0147623c4816486e1a39dc22", size = 839750, upload-time = "2025-06-13T14:07:36.553Z" }, - { url = "https://files.pythonhosted.org/packages/08/66/4cebfbe71f3dfbd417011daca267539f62ed0fbc68105357b68bbb1a25b7/pyzmq-27.0.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c8878011653dcdc27cc2c57e04ff96f0471e797f5c19ac3d7813a245bcb24371", size = 1641357, upload-time = "2025-06-13T14:07:38.21Z" }, - { url = "https://files.pythonhosted.org/packages/ac/f6/b0f62578c08d2471c791287149cb8c2aaea414ae98c6e995c7dbe008adfb/pyzmq-27.0.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:c0ed2c1f335ba55b5fdc964622254917d6b782311c50e138863eda409fbb3b6d", size = 2020281, upload-time = "2025-06-13T14:07:39.599Z" }, - { url = "https://files.pythonhosted.org/packages/37/b9/4f670b15c7498495da9159edc374ec09c88a86d9cd5a47d892f69df23450/pyzmq-27.0.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e918d70862d4cfd4b1c187310015646a14e1f5917922ab45b29f28f345eeb6be", size = 1877110, upload-time = "2025-06-13T14:07:41.027Z" }, - { url = "https://files.pythonhosted.org/packages/66/31/9dee25c226295b740609f0d46db2fe972b23b6f5cf786360980524a3ba92/pyzmq-27.0.0-cp312-abi3-win32.whl", hash = "sha256:88b4e43cab04c3c0f0d55df3b1eef62df2b629a1a369b5289a58f6fa8b07c4f4", size = 559297, upload-time = "2025-06-13T14:07:42.533Z" }, - { url = "https://files.pythonhosted.org/packages/9b/12/52da5509800f7ff2d287b2f2b4e636e7ea0f001181cba6964ff6c1537778/pyzmq-27.0.0-cp312-abi3-win_amd64.whl", hash = "sha256:dce4199bf5f648a902ce37e7b3afa286f305cd2ef7a8b6ec907470ccb6c8b371", size = 619203, upload-time = "2025-06-13T14:07:43.843Z" }, - { url 
= "https://files.pythonhosted.org/packages/93/6d/7f2e53b19d1edb1eb4f09ec7c3a1f945ca0aac272099eab757d15699202b/pyzmq-27.0.0-cp312-abi3-win_arm64.whl", hash = "sha256:56e46bbb85d52c1072b3f809cc1ce77251d560bc036d3a312b96db1afe76db2e", size = 551927, upload-time = "2025-06-13T14:07:45.51Z" }, - { url = "https://files.pythonhosted.org/packages/19/62/876b27c4ff777db4ceba1c69ea90d3c825bb4f8d5e7cd987ce5802e33c55/pyzmq-27.0.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c36ad534c0c29b4afa088dc53543c525b23c0797e01b69fef59b1a9c0e38b688", size = 1340826, upload-time = "2025-06-13T14:07:46.881Z" }, - { url = "https://files.pythonhosted.org/packages/43/69/58ef8f4f59d3bcd505260c73bee87b008850f45edca40ddaba54273c35f4/pyzmq-27.0.0-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:67855c14173aec36395d7777aaba3cc527b393821f30143fd20b98e1ff31fd38", size = 897283, upload-time = "2025-06-13T14:07:49.562Z" }, - { url = "https://files.pythonhosted.org/packages/43/15/93a0d0396700a60475ad3c5d42c5f1c308d3570bc94626b86c71ef9953e0/pyzmq-27.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8617c7d43cd8ccdb62aebe984bfed77ca8f036e6c3e46dd3dddda64b10f0ab7a", size = 660567, upload-time = "2025-06-13T14:07:51.364Z" }, - { url = "https://files.pythonhosted.org/packages/0e/b3/fe055513e498ca32f64509abae19b9c9eb4d7c829e02bd8997dd51b029eb/pyzmq-27.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:67bfbcbd0a04c575e8103a6061d03e393d9f80ffdb9beb3189261e9e9bc5d5e9", size = 847681, upload-time = "2025-06-13T14:07:52.77Z" }, - { url = "https://files.pythonhosted.org/packages/b6/4f/ff15300b00b5b602191f3df06bbc8dd4164e805fdd65bb77ffbb9c5facdc/pyzmq-27.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5cd11d46d7b7e5958121b3eaf4cd8638eff3a720ec527692132f05a57f14341d", size = 1650148, upload-time = "2025-06-13T14:07:54.178Z" }, - { url = 
"https://files.pythonhosted.org/packages/c4/6f/84bdfff2a224a6f26a24249a342e5906993c50b0761e311e81b39aef52a7/pyzmq-27.0.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:b801c2e40c5aa6072c2f4876de8dccd100af6d9918d4d0d7aa54a1d982fd4f44", size = 2023768, upload-time = "2025-06-13T14:07:55.714Z" }, - { url = "https://files.pythonhosted.org/packages/64/39/dc2db178c26a42228c5ac94a9cc595030458aa64c8d796a7727947afbf55/pyzmq-27.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:20d5cb29e8c5f76a127c75b6e7a77e846bc4b655c373baa098c26a61b7ecd0ef", size = 1885199, upload-time = "2025-06-13T14:07:57.166Z" }, - { url = "https://files.pythonhosted.org/packages/c7/21/dae7b06a1f8cdee5d8e7a63d99c5d129c401acc40410bef2cbf42025e26f/pyzmq-27.0.0-cp313-cp313t-win32.whl", hash = "sha256:a20528da85c7ac7a19b7384e8c3f8fa707841fd85afc4ed56eda59d93e3d98ad", size = 575439, upload-time = "2025-06-13T14:07:58.959Z" }, - { url = "https://files.pythonhosted.org/packages/eb/bc/1709dc55f0970cf4cb8259e435e6773f9946f41a045c2cb90e870b7072da/pyzmq-27.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d8229f2efece6a660ee211d74d91dbc2a76b95544d46c74c615e491900dc107f", size = 639933, upload-time = "2025-06-13T14:08:00.777Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/30/5f/557d2032a2f471edbcc227da724c24a1c05887b5cda1e3ae53af98b9e0a5/pyzmq-27.0.1.tar.gz", hash = "sha256:45c549204bc20e7484ffd2555f6cf02e572440ecf2f3bdd60d4404b20fddf64b", size = 281158, upload-time = "2025-08-03T05:05:40.352Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/9b/c0957041067c7724b310f22c398be46399297c12ed834c3bc42200a2756f/pyzmq-27.0.1-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:af7ebce2a1e7caf30c0bb64a845f63a69e76a2fadbc1cac47178f7bb6e657bdd", size = 1305432, upload-time = "2025-08-03T05:03:32.177Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/55/bd3a312790858f16b7def3897a0c3eb1804e974711bf7b9dcb5f47e7f82c/pyzmq-27.0.1-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:8f617f60a8b609a13099b313e7e525e67f84ef4524b6acad396d9ff153f6e4cd", size = 895095, upload-time = "2025-08-03T05:03:33.918Z" }, + { url = "https://files.pythonhosted.org/packages/20/50/fc384631d8282809fb1029a4460d2fe90fa0370a0e866a8318ed75c8d3bb/pyzmq-27.0.1-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1d59dad4173dc2a111f03e59315c7bd6e73da1a9d20a84a25cf08325b0582b1a", size = 651826, upload-time = "2025-08-03T05:03:35.818Z" }, + { url = "https://files.pythonhosted.org/packages/7e/0a/2356305c423a975000867de56888b79e44ec2192c690ff93c3109fd78081/pyzmq-27.0.1-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f5b6133c8d313bde8bd0d123c169d22525300ff164c2189f849de495e1344577", size = 839751, upload-time = "2025-08-03T05:03:37.265Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1b/81e95ad256ca7e7ccd47f5294c1c6da6e2b64fbace65b84fe8a41470342e/pyzmq-27.0.1-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:58cca552567423f04d06a075f4b473e78ab5bdb906febe56bf4797633f54aa4e", size = 1641359, upload-time = "2025-08-03T05:03:38.799Z" }, + { url = "https://files.pythonhosted.org/packages/50/63/9f50ec965285f4e92c265c8f18344e46b12803666d8b73b65d254d441435/pyzmq-27.0.1-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:4b9d8e26fb600d0d69cc9933e20af08552e97cc868a183d38a5c0d661e40dfbb", size = 2020281, upload-time = "2025-08-03T05:03:40.338Z" }, + { url = "https://files.pythonhosted.org/packages/02/4a/19e3398d0dc66ad2b463e4afa1fc541d697d7bc090305f9dfb948d3dfa29/pyzmq-27.0.1-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2329f0c87f0466dce45bba32b63f47018dda5ca40a0085cc5c8558fea7d9fc55", size = 1877112, upload-time = "2025-08-03T05:03:42.012Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/42/c562e9151aa90ed1d70aac381ea22a929d6b3a2ce4e1d6e2e135d34fd9c6/pyzmq-27.0.1-cp312-abi3-win32.whl", hash = "sha256:57bb92abdb48467b89c2d21da1ab01a07d0745e536d62afd2e30d5acbd0092eb", size = 558177, upload-time = "2025-08-03T05:03:43.979Z" }, + { url = "https://files.pythonhosted.org/packages/40/96/5c50a7d2d2b05b19994bf7336b97db254299353dd9b49b565bb71b485f03/pyzmq-27.0.1-cp312-abi3-win_amd64.whl", hash = "sha256:ff3f8757570e45da7a5bedaa140489846510014f7a9d5ee9301c61f3f1b8a686", size = 618923, upload-time = "2025-08-03T05:03:45.438Z" }, + { url = "https://files.pythonhosted.org/packages/13/33/1ec89c8f21c89d21a2eaff7def3676e21d8248d2675705e72554fb5a6f3f/pyzmq-27.0.1-cp312-abi3-win_arm64.whl", hash = "sha256:df2c55c958d3766bdb3e9d858b911288acec09a9aab15883f384fc7180df5bed", size = 552358, upload-time = "2025-08-03T05:03:46.887Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a0/f26e276211ec8090a4d11e4ec70eb8a8b15781e591c1d44ce62f372963a0/pyzmq-27.0.1-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:497bd8af534ae55dc4ef67eebd1c149ff2a0b0f1e146db73c8b5a53d83c1a5f5", size = 1122287, upload-time = "2025-08-03T05:03:48.838Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d8/af4b507e4f7eeea478cc8ee873995a6fd55582bfb99140593ed460e1db3c/pyzmq-27.0.1-cp313-cp313-android_24_x86_64.whl", hash = "sha256:a066ea6ad6218b4c233906adf0ae67830f451ed238419c0db609310dd781fbe7", size = 1155756, upload-time = "2025-08-03T05:03:50.907Z" }, + { url = "https://files.pythonhosted.org/packages/ac/55/37fae0013e11f88681da42698e550b08a316d608242551f65095cc99232a/pyzmq-27.0.1-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:72d235d6365ca73d8ce92f7425065d70f5c1e19baa458eb3f0d570e425b73a96", size = 1340826, upload-time = "2025-08-03T05:03:52.568Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/e4/3a87854c64b26fcf63a9d1b6f4382bd727d4797c772ceb334a97b7489be9/pyzmq-27.0.1-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:313a7b374e3dc64848644ca348a51004b41726f768b02e17e689f1322366a4d9", size = 897283, upload-time = "2025-08-03T05:03:54.167Z" }, + { url = "https://files.pythonhosted.org/packages/17/3e/4296c6b0ad2d07be11ae1395dccf9cae48a0a655cf9be1c3733ad2b591d1/pyzmq-27.0.1-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:119ce8590409702394f959c159d048002cbed2f3c0645ec9d6a88087fc70f0f1", size = 660565, upload-time = "2025-08-03T05:03:56.152Z" }, + { url = "https://files.pythonhosted.org/packages/72/41/a33ba3aa48b45b23c4cd4ac49aafde46f3e0f81939f2bfb3b6171a437122/pyzmq-27.0.1-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45c3e00ce16896ace2cd770ab9057a7cf97d4613ea5f2a13f815141d8b6894b9", size = 847680, upload-time = "2025-08-03T05:03:57.696Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8c/bf2350bb25b3b58d2e5b5d2290ffab0e923f0cc6d02288d3fbf4baa6e4d1/pyzmq-27.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:678e50ec112bdc6df5a83ac259a55a4ba97a8b314c325ab26b3b5b071151bc61", size = 1650151, upload-time = "2025-08-03T05:03:59.387Z" }, + { url = "https://files.pythonhosted.org/packages/f7/1a/a5a07c54890891344a8ddc3d5ab320dd3c4e39febb6e4472546e456d5157/pyzmq-27.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d0b96c30be9f9387b18b18b6133c75a7b1b0065da64e150fe1feb5ebf31ece1c", size = 2023766, upload-time = "2025-08-03T05:04:01.883Z" }, + { url = "https://files.pythonhosted.org/packages/62/5e/514dcff08f02c6c8a45a6e23621901139cf853be7ac5ccd0b9407c3aa3de/pyzmq-27.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88dc92d9eb5ea4968123e74db146d770b0c8d48f0e2bfb1dbc6c50a8edb12d64", size = 1885195, upload-time = "2025-08-03T05:04:03.923Z" }, + { url = 
"https://files.pythonhosted.org/packages/c8/91/87f74f98a487fbef0b115f6025e4a295129fd56b2b633a03ba7d5816ecc2/pyzmq-27.0.1-cp313-cp313t-win32.whl", hash = "sha256:6dcbcb34f5c9b0cefdfc71ff745459241b7d3cda5b27c7ad69d45afc0821d1e1", size = 574213, upload-time = "2025-08-03T05:04:05.905Z" }, + { url = "https://files.pythonhosted.org/packages/e6/d7/07f7d0d7f4c81e08be7b60e52ff2591c557377c017f96204d33d5fca1b07/pyzmq-27.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9fd0fda730461f510cfd9a40fafa5355d65f5e3dbdd8d6dfa342b5b3f5d1949", size = 640202, upload-time = "2025-08-03T05:04:07.439Z" }, + { url = "https://files.pythonhosted.org/packages/ab/83/21d66bcef6fb803647a223cbde95111b099e2176277c0cbc8b099c485510/pyzmq-27.0.1-cp313-cp313t-win_arm64.whl", hash = "sha256:56a3b1853f3954ec1f0e91085f1350cc57d18f11205e4ab6e83e4b7c414120e0", size = 561514, upload-time = "2025-08-03T05:04:09.071Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0b/d5ea75cf46b52cdce85a85200c963cb498932953df443892238be49b1a01/pyzmq-27.0.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:f98f6b7787bd2beb1f0dde03f23a0621a0c978edf673b7d8f5e7bc039cbe1b60", size = 1340836, upload-time = "2025-08-03T05:04:10.774Z" }, + { url = "https://files.pythonhosted.org/packages/be/4c/0dbce882550e17db6846b29e9dc242aea7590e7594e1ca5043e8e58fff2d/pyzmq-27.0.1-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:351bf5d8ca0788ca85327fda45843b6927593ff4c807faee368cc5aaf9f809c2", size = 897236, upload-time = "2025-08-03T05:04:13.221Z" }, + { url = "https://files.pythonhosted.org/packages/1b/22/461e131cf16b8814f3c356fa1ea0912697dbc4c64cddf01f7756ec704c1e/pyzmq-27.0.1-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5268a5a9177afff53dc6d70dffe63114ba2a6e7b20d9411cc3adeba09eeda403", size = 660374, upload-time = "2025-08-03T05:04:15.032Z" }, + { url = 
"https://files.pythonhosted.org/packages/3f/0c/bbd65a814395bf4fc3e57c6c13af27601c07e4009bdfb75ebcf500537bbd/pyzmq-27.0.1-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a4aca06ba295aa78bec9b33ec028d1ca08744c36294338c41432b7171060c808", size = 847497, upload-time = "2025-08-03T05:04:16.967Z" }, + { url = "https://files.pythonhosted.org/packages/1e/df/3d1f4a03b561d824cbd491394f67591957e2f1acf6dc85d96f970312a76a/pyzmq-27.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1c363c6dc66352331d5ad64bb838765c6692766334a6a02fdb05e76bd408ae18", size = 1650028, upload-time = "2025-08-03T05:04:19.398Z" }, + { url = "https://files.pythonhosted.org/packages/41/c9/a3987540f59a412bdaae3f362f78e00e6769557a598c63b7e32956aade5a/pyzmq-27.0.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:87aebf4acd7249bdff8d3df03aed4f09e67078e6762cfe0aecf8d0748ff94cde", size = 2023808, upload-time = "2025-08-03T05:04:21.145Z" }, + { url = "https://files.pythonhosted.org/packages/b0/a5/c388f4cd80498a8eaef7535f2a8eaca0a35b82b87a0b47fa1856fc135004/pyzmq-27.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e4f22d67756518d71901edf73b38dc0eb4765cce22c8fe122cc81748d425262b", size = 1884970, upload-time = "2025-08-03T05:04:22.908Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ac/b2a89a1ed90526a1b9a260cdc5cd42f055fd44ee8d2a59902b5ac35ddeb1/pyzmq-27.0.1-cp314-cp314t-win32.whl", hash = "sha256:8c62297bc7aea2147b472ca5ca2b4389377ad82898c87cabab2a94aedd75e337", size = 586905, upload-time = "2025-08-03T05:04:24.492Z" }, + { url = "https://files.pythonhosted.org/packages/68/62/7aa5ea04e836f7a788b2a67405f83011cef59ca76d7bac91d1fc9a0476da/pyzmq-27.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:bee5248d5ec9223545f8cc4f368c2d571477ae828c99409125c3911511d98245", size = 660503, upload-time = "2025-08-03T05:04:26.382Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/32/3836ed85947b06f1d67c07ce16c00b0cf8c053ab0b249d234f9f81ff95ff/pyzmq-27.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:0fc24bf45e4a454e55ef99d7f5c8b8712539200ce98533af25a5bfa954b6b390", size = 575098, upload-time = "2025-08-03T05:04:27.974Z" }, +] + +[[package]] +name = "qwen-vl-utils" +version = "0.0.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "av" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/9f/1229a40ebd49f689a0252144126f3865f31bb4151e942cf781a2936f0c4d/qwen_vl_utils-0.0.11.tar.gz", hash = "sha256:083ba1e5cfa5002165b1e3bddd4d6d26d1d6d34473884033ef12ae3fe8496cd5", size = 7924, upload-time = "2025-04-21T10:38:47.461Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/c2/ad7f93e1eea4ea0aefd1cc6fbe7a7095fd2f03a4d8fe2c3707e612b0866e/qwen_vl_utils-0.0.11-py3-none-any.whl", hash = "sha256:7fd5287ac04d6c1f01b93bf053b0be236a35149e414c9e864e3cc5bf2fe8cb7b", size = 7584, upload-time = "2025-04-21T10:38:45.595Z" }, +] + +[package.optional-dependencies] +decord = [ + { name = "decord" }, ] [[package]] @@ -4238,45 +4854,57 @@ wheels = [ [[package]] name = "regex" -version = "2024.11.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8e/5f/bd69653fbfb76cf8604468d3b4ec4c403197144c7bfe0e6a5fc9e02a07cb/regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519", size = 399494, upload-time = "2024-11-06T20:12:31.635Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ba/30/9a87ce8336b172cc232a0db89a3af97929d06c11ceaa19d97d84fa90a8f8/regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a", size = 483781, upload-time = "2024-11-06T20:10:07.07Z" }, - { url = 
"https://files.pythonhosted.org/packages/01/e8/00008ad4ff4be8b1844786ba6636035f7ef926db5686e4c0f98093612add/regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9", size = 288455, upload-time = "2024-11-06T20:10:09.117Z" }, - { url = "https://files.pythonhosted.org/packages/60/85/cebcc0aff603ea0a201667b203f13ba75d9fc8668fab917ac5b2de3967bc/regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2", size = 284759, upload-time = "2024-11-06T20:10:11.155Z" }, - { url = "https://files.pythonhosted.org/packages/94/2b/701a4b0585cb05472a4da28ee28fdfe155f3638f5e1ec92306d924e5faf0/regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4", size = 794976, upload-time = "2024-11-06T20:10:13.24Z" }, - { url = "https://files.pythonhosted.org/packages/4b/bf/fa87e563bf5fee75db8915f7352e1887b1249126a1be4813837f5dbec965/regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577", size = 833077, upload-time = "2024-11-06T20:10:15.37Z" }, - { url = "https://files.pythonhosted.org/packages/a1/56/7295e6bad94b047f4d0834e4779491b81216583c00c288252ef625c01d23/regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3", size = 823160, upload-time = "2024-11-06T20:10:19.027Z" }, - { url = "https://files.pythonhosted.org/packages/fb/13/e3b075031a738c9598c51cfbc4c7879e26729c53aa9cca59211c44235314/regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e", size = 796896, upload-time = "2024-11-06T20:10:21.85Z" }, - { url = 
"https://files.pythonhosted.org/packages/24/56/0b3f1b66d592be6efec23a795b37732682520b47c53da5a32c33ed7d84e3/regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe", size = 783997, upload-time = "2024-11-06T20:10:24.329Z" }, - { url = "https://files.pythonhosted.org/packages/f9/a1/eb378dada8b91c0e4c5f08ffb56f25fcae47bf52ad18f9b2f33b83e6d498/regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e", size = 781725, upload-time = "2024-11-06T20:10:28.067Z" }, - { url = "https://files.pythonhosted.org/packages/83/f2/033e7dec0cfd6dda93390089864732a3409246ffe8b042e9554afa9bff4e/regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29", size = 789481, upload-time = "2024-11-06T20:10:31.612Z" }, - { url = "https://files.pythonhosted.org/packages/83/23/15d4552ea28990a74e7696780c438aadd73a20318c47e527b47a4a5a596d/regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39", size = 852896, upload-time = "2024-11-06T20:10:34.054Z" }, - { url = "https://files.pythonhosted.org/packages/e3/39/ed4416bc90deedbfdada2568b2cb0bc1fdb98efe11f5378d9892b2a88f8f/regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51", size = 860138, upload-time = "2024-11-06T20:10:36.142Z" }, - { url = "https://files.pythonhosted.org/packages/93/2d/dd56bb76bd8e95bbce684326302f287455b56242a4f9c61f1bc76e28360e/regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad", size = 787692, upload-time = "2024-11-06T20:10:38.394Z" }, - { url = 
"https://files.pythonhosted.org/packages/0b/55/31877a249ab7a5156758246b9c59539abbeba22461b7d8adc9e8475ff73e/regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54", size = 262135, upload-time = "2024-11-06T20:10:40.367Z" }, - { url = "https://files.pythonhosted.org/packages/38/ec/ad2d7de49a600cdb8dd78434a1aeffe28b9d6fc42eb36afab4a27ad23384/regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b", size = 273567, upload-time = "2024-11-06T20:10:43.467Z" }, - { url = "https://files.pythonhosted.org/packages/90/73/bcb0e36614601016552fa9344544a3a2ae1809dc1401b100eab02e772e1f/regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84", size = 483525, upload-time = "2024-11-06T20:10:45.19Z" }, - { url = "https://files.pythonhosted.org/packages/0f/3f/f1a082a46b31e25291d830b369b6b0c5576a6f7fb89d3053a354c24b8a83/regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4", size = 288324, upload-time = "2024-11-06T20:10:47.177Z" }, - { url = "https://files.pythonhosted.org/packages/09/c9/4e68181a4a652fb3ef5099e077faf4fd2a694ea6e0f806a7737aff9e758a/regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0", size = 284617, upload-time = "2024-11-06T20:10:49.312Z" }, - { url = "https://files.pythonhosted.org/packages/fc/fd/37868b75eaf63843165f1d2122ca6cb94bfc0271e4428cf58c0616786dce/regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0", size = 795023, upload-time = "2024-11-06T20:10:51.102Z" }, - { url = 
"https://files.pythonhosted.org/packages/c4/7c/d4cd9c528502a3dedb5c13c146e7a7a539a3853dc20209c8e75d9ba9d1b2/regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7", size = 833072, upload-time = "2024-11-06T20:10:52.926Z" }, - { url = "https://files.pythonhosted.org/packages/4f/db/46f563a08f969159c5a0f0e722260568425363bea43bb7ae370becb66a67/regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7", size = 823130, upload-time = "2024-11-06T20:10:54.828Z" }, - { url = "https://files.pythonhosted.org/packages/db/60/1eeca2074f5b87df394fccaa432ae3fc06c9c9bfa97c5051aed70e6e00c2/regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c", size = 796857, upload-time = "2024-11-06T20:10:56.634Z" }, - { url = "https://files.pythonhosted.org/packages/10/db/ac718a08fcee981554d2f7bb8402f1faa7e868c1345c16ab1ebec54b0d7b/regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3", size = 784006, upload-time = "2024-11-06T20:10:59.369Z" }, - { url = "https://files.pythonhosted.org/packages/c2/41/7da3fe70216cea93144bf12da2b87367590bcf07db97604edeea55dac9ad/regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07", size = 781650, upload-time = "2024-11-06T20:11:02.042Z" }, - { url = "https://files.pythonhosted.org/packages/a7/d5/880921ee4eec393a4752e6ab9f0fe28009435417c3102fc413f3fe81c4e5/regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e", size = 789545, upload-time = "2024-11-06T20:11:03.933Z" }, - { url 
= "https://files.pythonhosted.org/packages/dc/96/53770115e507081122beca8899ab7f5ae28ae790bfcc82b5e38976df6a77/regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6", size = 853045, upload-time = "2024-11-06T20:11:06.497Z" }, - { url = "https://files.pythonhosted.org/packages/31/d3/1372add5251cc2d44b451bd94f43b2ec78e15a6e82bff6a290ef9fd8f00a/regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4", size = 860182, upload-time = "2024-11-06T20:11:09.06Z" }, - { url = "https://files.pythonhosted.org/packages/ed/e3/c446a64984ea9f69982ba1a69d4658d5014bc7a0ea468a07e1a1265db6e2/regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d", size = 787733, upload-time = "2024-11-06T20:11:11.256Z" }, - { url = "https://files.pythonhosted.org/packages/2b/f1/e40c8373e3480e4f29f2692bd21b3e05f296d3afebc7e5dcf21b9756ca1c/regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff", size = 262122, upload-time = "2024-11-06T20:11:13.161Z" }, - { url = "https://files.pythonhosted.org/packages/45/94/bc295babb3062a731f52621cdc992d123111282e291abaf23faa413443ea/regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a", size = 273545, upload-time = "2024-11-06T20:11:15Z" }, +version = "2025.7.34" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/de/e13fa6dc61d78b30ba47481f99933a3b49a57779d625c392d8036770a60d/regex-2025.7.34.tar.gz", hash = "sha256:9ead9765217afd04a86822dfcd4ed2747dfe426e887da413b15ff0ac2457e21a", size = 400714, upload-time = "2025-07-31T00:21:16.262Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ff/f0/31d62596c75a33f979317658e8d261574785c6cd8672c06741ce2e2e2070/regex-2025.7.34-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7f7211a746aced993bef487de69307a38c5ddd79257d7be83f7b202cb59ddb50", size = 485492, upload-time = "2025-07-31T00:19:35.57Z" }, + { url = "https://files.pythonhosted.org/packages/d8/16/b818d223f1c9758c3434be89aa1a01aae798e0e0df36c1f143d1963dd1ee/regex-2025.7.34-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fb31080f2bd0681484b275461b202b5ad182f52c9ec606052020fe13eb13a72f", size = 290000, upload-time = "2025-07-31T00:19:37.175Z" }, + { url = "https://files.pythonhosted.org/packages/cd/70/69506d53397b4bd6954061bae75677ad34deb7f6ca3ba199660d6f728ff5/regex-2025.7.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0200a5150c4cf61e407038f4b4d5cdad13e86345dac29ff9dab3d75d905cf130", size = 286072, upload-time = "2025-07-31T00:19:38.612Z" }, + { url = "https://files.pythonhosted.org/packages/b0/73/536a216d5f66084fb577bb0543b5cb7de3272eb70a157f0c3a542f1c2551/regex-2025.7.34-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:739a74970e736df0773788377969c9fea3876c2fc13d0563f98e5503e5185f46", size = 797341, upload-time = "2025-07-31T00:19:40.119Z" }, + { url = "https://files.pythonhosted.org/packages/26/af/733f8168449e56e8f404bb807ea7189f59507cbea1b67a7bbcd92f8bf844/regex-2025.7.34-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4fef81b2f7ea6a2029161ed6dea9ae13834c28eb5a95b8771828194a026621e4", size = 862556, upload-time = "2025-07-31T00:19:41.556Z" }, + { url = "https://files.pythonhosted.org/packages/19/dd/59c464d58c06c4f7d87de4ab1f590e430821345a40c5d345d449a636d15f/regex-2025.7.34-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ea74cf81fe61a7e9d77989050d0089a927ab758c29dac4e8e1b6c06fccf3ebf0", size = 910762, upload-time = "2025-07-31T00:19:43Z" 
}, + { url = "https://files.pythonhosted.org/packages/37/a8/b05ccf33ceca0815a1e253693b2c86544932ebcc0049c16b0fbdf18b688b/regex-2025.7.34-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e4636a7f3b65a5f340ed9ddf53585c42e3ff37101d383ed321bfe5660481744b", size = 801892, upload-time = "2025-07-31T00:19:44.645Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9a/b993cb2e634cc22810afd1652dba0cae156c40d4864285ff486c73cd1996/regex-2025.7.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cef962d7834437fe8d3da6f9bfc6f93f20f218266dcefec0560ed7765f5fe01", size = 786551, upload-time = "2025-07-31T00:19:46.127Z" }, + { url = "https://files.pythonhosted.org/packages/2d/79/7849d67910a0de4e26834b5bb816e028e35473f3d7ae563552ea04f58ca2/regex-2025.7.34-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:cbe1698e5b80298dbce8df4d8d1182279fbdaf1044e864cbc9d53c20e4a2be77", size = 856457, upload-time = "2025-07-31T00:19:47.562Z" }, + { url = "https://files.pythonhosted.org/packages/91/c6/de516bc082524b27e45cb4f54e28bd800c01efb26d15646a65b87b13a91e/regex-2025.7.34-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:32b9f9bcf0f605eb094b08e8da72e44badabb63dde6b83bd530580b488d1c6da", size = 848902, upload-time = "2025-07-31T00:19:49.312Z" }, + { url = "https://files.pythonhosted.org/packages/7d/22/519ff8ba15f732db099b126f039586bd372da6cd4efb810d5d66a5daeda1/regex-2025.7.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:524c868ba527eab4e8744a9287809579f54ae8c62fbf07d62aacd89f6026b282", size = 788038, upload-time = "2025-07-31T00:19:50.794Z" }, + { url = "https://files.pythonhosted.org/packages/3f/7d/aabb467d8f57d8149895d133c88eb809a1a6a0fe262c1d508eb9dfabb6f9/regex-2025.7.34-cp312-cp312-win32.whl", hash = "sha256:d600e58ee6d036081c89696d2bdd55d507498a7180df2e19945c6642fac59588", size = 264417, upload-time = "2025-07-31T00:19:52.292Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/39/bd922b55a4fc5ad5c13753274e5b536f5b06ec8eb9747675668491c7ab7a/regex-2025.7.34-cp312-cp312-win_amd64.whl", hash = "sha256:9a9ab52a466a9b4b91564437b36417b76033e8778e5af8f36be835d8cb370d62", size = 275387, upload-time = "2025-07-31T00:19:53.593Z" }, + { url = "https://files.pythonhosted.org/packages/f7/3c/c61d2fdcecb754a40475a3d1ef9a000911d3e3fc75c096acf44b0dfb786a/regex-2025.7.34-cp312-cp312-win_arm64.whl", hash = "sha256:c83aec91af9c6fbf7c743274fd952272403ad9a9db05fe9bfc9df8d12b45f176", size = 268482, upload-time = "2025-07-31T00:19:55.183Z" }, + { url = "https://files.pythonhosted.org/packages/15/16/b709b2119975035169a25aa8e4940ca177b1a2e25e14f8d996d09130368e/regex-2025.7.34-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c3c9740a77aeef3f5e3aaab92403946a8d34437db930a0280e7e81ddcada61f5", size = 485334, upload-time = "2025-07-31T00:19:56.58Z" }, + { url = "https://files.pythonhosted.org/packages/94/a6/c09136046be0595f0331bc58a0e5f89c2d324cf734e0b0ec53cf4b12a636/regex-2025.7.34-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:69ed3bc611540f2ea70a4080f853741ec698be556b1df404599f8724690edbcd", size = 289942, upload-time = "2025-07-31T00:19:57.943Z" }, + { url = "https://files.pythonhosted.org/packages/36/91/08fc0fd0f40bdfb0e0df4134ee37cfb16e66a1044ac56d36911fd01c69d2/regex-2025.7.34-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d03c6f9dcd562c56527c42b8530aad93193e0b3254a588be1f2ed378cdfdea1b", size = 285991, upload-time = "2025-07-31T00:19:59.837Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/99dc8f6f756606f0c214d14c7b6c17270b6bbe26d5c1f05cde9dbb1c551f/regex-2025.7.34-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6164b1d99dee1dfad33f301f174d8139d4368a9fb50bf0a3603b2eaf579963ad", size = 797415, upload-time = "2025-07-31T00:20:01.668Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/cf/2fcdca1110495458ba4e95c52ce73b361cf1cafd8a53b5c31542cde9a15b/regex-2025.7.34-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1e4f4f62599b8142362f164ce776f19d79bdd21273e86920a7b604a4275b4f59", size = 862487, upload-time = "2025-07-31T00:20:03.142Z" }, + { url = "https://files.pythonhosted.org/packages/90/38/899105dd27fed394e3fae45607c1983e138273ec167e47882fc401f112b9/regex-2025.7.34-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:72a26dcc6a59c057b292f39d41465d8233a10fd69121fa24f8f43ec6294e5415", size = 910717, upload-time = "2025-07-31T00:20:04.727Z" }, + { url = "https://files.pythonhosted.org/packages/ee/f6/4716198dbd0bcc9c45625ac4c81a435d1c4d8ad662e8576dac06bab35b17/regex-2025.7.34-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5273fddf7a3e602695c92716c420c377599ed3c853ea669c1fe26218867002f", size = 801943, upload-time = "2025-07-31T00:20:07.1Z" }, + { url = "https://files.pythonhosted.org/packages/40/5d/cff8896d27e4e3dd11dd72ac78797c7987eb50fe4debc2c0f2f1682eb06d/regex-2025.7.34-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c1844be23cd40135b3a5a4dd298e1e0c0cb36757364dd6cdc6025770363e06c1", size = 786664, upload-time = "2025-07-31T00:20:08.818Z" }, + { url = "https://files.pythonhosted.org/packages/10/29/758bf83cf7b4c34f07ac3423ea03cee3eb3176941641e4ccc05620f6c0b8/regex-2025.7.34-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dde35e2afbbe2272f8abee3b9fe6772d9b5a07d82607b5788e8508974059925c", size = 856457, upload-time = "2025-07-31T00:20:10.328Z" }, + { url = "https://files.pythonhosted.org/packages/d7/30/c19d212b619963c5b460bfed0ea69a092c6a43cba52a973d46c27b3e2975/regex-2025.7.34-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f6e8e7af516a7549412ce57613e859c3be27d55341a894aacaa11703a4c31a", size = 849008, upload-time = 
"2025-07-31T00:20:11.823Z" }, + { url = "https://files.pythonhosted.org/packages/9e/b8/3c35da3b12c87e3cc00010ef6c3a4ae787cff0bc381aa3d251def219969a/regex-2025.7.34-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:469142fb94a869beb25b5f18ea87646d21def10fbacb0bcb749224f3509476f0", size = 788101, upload-time = "2025-07-31T00:20:13.729Z" }, + { url = "https://files.pythonhosted.org/packages/47/80/2f46677c0b3c2b723b2c358d19f9346e714113865da0f5f736ca1a883bde/regex-2025.7.34-cp313-cp313-win32.whl", hash = "sha256:da7507d083ee33ccea1310447410c27ca11fb9ef18c95899ca57ff60a7e4d8f1", size = 264401, upload-time = "2025-07-31T00:20:15.233Z" }, + { url = "https://files.pythonhosted.org/packages/be/fa/917d64dd074682606a003cba33585c28138c77d848ef72fc77cbb1183849/regex-2025.7.34-cp313-cp313-win_amd64.whl", hash = "sha256:9d644de5520441e5f7e2db63aec2748948cc39ed4d7a87fd5db578ea4043d997", size = 275368, upload-time = "2025-07-31T00:20:16.711Z" }, + { url = "https://files.pythonhosted.org/packages/65/cd/f94383666704170a2154a5df7b16be28f0c27a266bffcd843e58bc84120f/regex-2025.7.34-cp313-cp313-win_arm64.whl", hash = "sha256:7bf1c5503a9f2cbd2f52d7e260acb3131b07b6273c470abb78568174fe6bde3f", size = 268482, upload-time = "2025-07-31T00:20:18.189Z" }, + { url = "https://files.pythonhosted.org/packages/ac/23/6376f3a23cf2f3c00514b1cdd8c990afb4dfbac3cb4a68b633c6b7e2e307/regex-2025.7.34-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:8283afe7042d8270cecf27cca558873168e771183d4d593e3c5fe5f12402212a", size = 485385, upload-time = "2025-07-31T00:20:19.692Z" }, + { url = "https://files.pythonhosted.org/packages/73/5b/6d4d3a0b4d312adbfd6d5694c8dddcf1396708976dd87e4d00af439d962b/regex-2025.7.34-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6c053f9647e3421dd2f5dff8172eb7b4eec129df9d1d2f7133a4386319b47435", size = 289788, upload-time = "2025-07-31T00:20:21.941Z" }, + { url = 
"https://files.pythonhosted.org/packages/92/71/5862ac9913746e5054d01cb9fb8125b3d0802c0706ef547cae1e7f4428fa/regex-2025.7.34-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a16dd56bbcb7d10e62861c3cd000290ddff28ea142ffb5eb3470f183628011ac", size = 286136, upload-time = "2025-07-31T00:20:26.146Z" }, + { url = "https://files.pythonhosted.org/packages/27/df/5b505dc447eb71278eba10d5ec940769ca89c1af70f0468bfbcb98035dc2/regex-2025.7.34-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69c593ff5a24c0d5c1112b0df9b09eae42b33c014bdca7022d6523b210b69f72", size = 797753, upload-time = "2025-07-31T00:20:27.919Z" }, + { url = "https://files.pythonhosted.org/packages/86/38/3e3dc953d13998fa047e9a2414b556201dbd7147034fbac129392363253b/regex-2025.7.34-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:98d0ce170fcde1a03b5df19c5650db22ab58af375aaa6ff07978a85c9f250f0e", size = 863263, upload-time = "2025-07-31T00:20:29.803Z" }, + { url = "https://files.pythonhosted.org/packages/68/e5/3ff66b29dde12f5b874dda2d9dec7245c2051f2528d8c2a797901497f140/regex-2025.7.34-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d72765a4bff8c43711d5b0f5b452991a9947853dfa471972169b3cc0ba1d0751", size = 910103, upload-time = "2025-07-31T00:20:31.313Z" }, + { url = "https://files.pythonhosted.org/packages/9e/fe/14176f2182125977fba3711adea73f472a11f3f9288c1317c59cd16ad5e6/regex-2025.7.34-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4494f8fd95a77eb434039ad8460e64d57baa0434f1395b7da44015bef650d0e4", size = 801709, upload-time = "2025-07-31T00:20:33.323Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0d/80d4e66ed24f1ba876a9e8e31b709f9fd22d5c266bf5f3ab3c1afe683d7d/regex-2025.7.34-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4f42b522259c66e918a0121a12429b2abcf696c6f967fa37bdc7b72e61469f98", size = 
786726, upload-time = "2025-07-31T00:20:35.252Z" }, + { url = "https://files.pythonhosted.org/packages/12/75/c3ebb30e04a56c046f5c85179dc173818551037daae2c0c940c7b19152cb/regex-2025.7.34-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:aaef1f056d96a0a5d53ad47d019d5b4c66fe4be2da87016e0d43b7242599ffc7", size = 857306, upload-time = "2025-07-31T00:20:37.12Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b2/a4dc5d8b14f90924f27f0ac4c4c4f5e195b723be98adecc884f6716614b6/regex-2025.7.34-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:656433e5b7dccc9bc0da6312da8eb897b81f5e560321ec413500e5367fcd5d47", size = 848494, upload-time = "2025-07-31T00:20:38.818Z" }, + { url = "https://files.pythonhosted.org/packages/0d/21/9ac6e07a4c5e8646a90b56b61f7e9dac11ae0747c857f91d3d2bc7c241d9/regex-2025.7.34-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e91eb2c62c39705e17b4d42d4b86c4e86c884c0d15d9c5a47d0835f8387add8e", size = 787850, upload-time = "2025-07-31T00:20:40.478Z" }, + { url = "https://files.pythonhosted.org/packages/be/6c/d51204e28e7bc54f9a03bb799b04730d7e54ff2718862b8d4e09e7110a6a/regex-2025.7.34-cp314-cp314-win32.whl", hash = "sha256:f978ddfb6216028c8f1d6b0f7ef779949498b64117fc35a939022f67f810bdcb", size = 269730, upload-time = "2025-07-31T00:20:42.253Z" }, + { url = "https://files.pythonhosted.org/packages/74/52/a7e92d02fa1fdef59d113098cb9f02c5d03289a0e9f9e5d4d6acccd10677/regex-2025.7.34-cp314-cp314-win_amd64.whl", hash = "sha256:4b7dc33b9b48fb37ead12ffc7bdb846ac72f99a80373c4da48f64b373a7abeae", size = 278640, upload-time = "2025-07-31T00:20:44.42Z" }, + { url = "https://files.pythonhosted.org/packages/d1/78/a815529b559b1771080faa90c3ab401730661f99d495ab0071649f139ebd/regex-2025.7.34-cp314-cp314-win_arm64.whl", hash = "sha256:4b8c4d39f451e64809912c82392933d80fe2e4a87eeef8859fcc5380d0173c64", size = 271757, upload-time = "2025-07-31T00:20:46.355Z" }, ] [[package]] name = "requests" -version = "2.32.4" +version = "2.32.5" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "certifi" }, @@ -4284,36 +4912,82 @@ dependencies = [ { name = "idna" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] [[package]] name = "rich" -version = "14.0.0" +version = "14.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078, upload-time = "2025-03-30T14:15:14.23Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/75/af448d8e52bf1d8fa6a9d089ca6c07ff4453d86c65c145d0a300bb073b9b/rich-14.1.0.tar.gz", hash = 
"sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8", size = 224441, upload-time = "2025-07-25T07:32:58.125Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229, upload-time = "2025-03-30T14:15:12.283Z" }, + { url = "https://files.pythonhosted.org/packages/e3/30/3c4d035596d3cf444529e0b2953ad0466f6049528a879d27534700580395/rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f", size = 243368, upload-time = "2025-07-25T07:32:56.73Z" }, ] [[package]] name = "rich-toolkit" -version = "0.14.7" +version = "0.15.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "rich" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5b/7a/cb48b7024b247631ce39b1f14a0f1abedf311fb27b892b0e0387d809d4b5/rich_toolkit-0.14.7.tar.gz", hash = "sha256:6cca5a68850cc5778915f528eb785662c27ba3b4b2624612cce8340fa9701c5e", size = 104977, upload-time = "2025-05-27T15:48:09.377Z" } +sdist = { url = "https://files.pythonhosted.org/packages/65/36/cdb3d51371ad0cccbf1541506304783bd72d55790709b8eb68c0d401a13a/rich_toolkit-0.15.0.tar.gz", hash = "sha256:3f5730e9f2d36d0bfe01cf723948b7ecf4cc355d2b71e2c00e094f7963128c09", size = 115118, upload-time = "2025-08-11T10:55:37.909Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/2e/95fde5b818dac9a37683ea064096323f593442d0f6358923c5f635974393/rich_toolkit-0.14.7-py3-none-any.whl", hash = "sha256:def05cc6e0f1176d6263b6a26648f16a62c4563b277ca2f8538683acdba1e0da", size = 24870, upload-time = "2025-05-27T15:48:07.942Z" }, + { url = "https://files.pythonhosted.org/packages/75/e4/b0794eefb3cf78566b15e5bf576492c1d4a92ce5f6da55675bc11e9ef5d8/rich_toolkit-0.15.0-py3-none-any.whl", hash = 
"sha256:ddb91008283d4a7989fd8ff0324a48773a7a2276229c6a3070755645538ef1bb", size = 29062, upload-time = "2025-08-11T10:55:37.152Z" }, +] + +[[package]] +name = "rignore" +version = "0.6.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/46/05a94dc55ac03cf931d18e43b86ecee5ee054cb88b7853fffd741e35009c/rignore-0.6.4.tar.gz", hash = "sha256:e893fdd2d7fdcfa9407d0b7600ef2c2e2df97f55e1c45d4a8f54364829ddb0ab", size = 11633, upload-time = "2025-07-19T19:24:46.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/6c/e5af4383cdd7829ef9aa63ac82a6507983e02dbc7c2e7b9aa64b7b8e2c7a/rignore-0.6.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:74720d074b79f32449d5d212ce732e0144a294a184246d1f1e7bcc1fc5c83b69", size = 885885, upload-time = "2025-07-19T19:23:53.236Z" }, + { url = "https://files.pythonhosted.org/packages/89/3e/1b02a868830e464769aa417ee195ac352fe71ff818df8ce50c4b998edb9c/rignore-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0a8184fcf567bd6b6d7b85a0c138d98dd40f63054141c96b175844414c5530d7", size = 819736, upload-time = "2025-07-19T19:23:46.565Z" }, + { url = "https://files.pythonhosted.org/packages/e0/75/b9be0c523d97c09f3c6508a67ce376aba4efe41c333c58903a0d7366439a/rignore-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcb0d7d7ecc3fbccf6477bb187c04a091579ea139f15f139abe0b3b48bdfef69", size = 892779, upload-time = "2025-07-19T19:22:35.167Z" }, + { url = "https://files.pythonhosted.org/packages/91/f4/3064b06233697f2993485d132f06fe95061fef71631485da75aed246c4fd/rignore-0.6.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:feac73377a156fb77b3df626c76f7e5893d9b4e9e886ac8c0f9d44f1206a2a91", size = 872116, upload-time = "2025-07-19T19:22:47.828Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/94/cb8e7af9a3c0a665f10e2366144e0ebc66167cf846aca5f1ac31b3661598/rignore-0.6.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:465179bc30beb1f7a3439e428739a2b5777ed26660712b8c4e351b15a7c04483", size = 1163345, upload-time = "2025-07-19T19:23:00.557Z" }, + { url = "https://files.pythonhosted.org/packages/86/6b/49faa7ad85ceb6ccef265df40091d9992232d7f6055fa664fe0a8b13781c/rignore-0.6.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4a4877b4dca9cf31a4d09845b300c677c86267657540d0b4d3e6d0ce3110e6e9", size = 939967, upload-time = "2025-07-19T19:23:13.494Z" }, + { url = "https://files.pythonhosted.org/packages/80/c8/b91afda10bd5ca1e3a80463340b899c0dc26a7750a9f3c94f668585c7f40/rignore-0.6.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:456456802b1e77d1e2d149320ee32505b8183e309e228129950b807d204ddd17", size = 949717, upload-time = "2025-07-19T19:23:36.404Z" }, + { url = "https://files.pythonhosted.org/packages/3f/f1/88bfdde58ae3fb1c1a92bb801f492eea8eafcdaf05ab9b75130023a4670b/rignore-0.6.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c1ff2fc223f1d9473d36923160af37bf765548578eb9d47a2f52e90da8ae408", size = 975534, upload-time = "2025-07-19T19:23:25.988Z" }, + { url = "https://files.pythonhosted.org/packages/aa/8f/a80b4a2e48ceba56ba19e096d41263d844757e10aa36ede212571b5d8117/rignore-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e445fbc214ae18e0e644a78086ea5d0f579e210229a4fbe86367d11a4cd03c11", size = 1067837, upload-time = "2025-07-19T19:23:59.888Z" }, + { url = "https://files.pythonhosted.org/packages/7d/90/0905597af0e78748909ef58418442a480ddd93e9fc89b0ca9ab170c357c0/rignore-0.6.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e07d9c5270fc869bc431aadcfb6ed0447f89b8aafaa666914c077435dc76a123", size = 1134959, upload-time = "2025-07-19T19:24:12.396Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/7d/0fa29adf9183b61947ce6dc8a1a9779a8ea16573f557be28ec893f6ddbaa/rignore-0.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7a6ccc0ea83d2c0c6df6b166f2acacedcc220a516436490f41e99a5ae73b6019", size = 1109708, upload-time = "2025-07-19T19:24:24.176Z" }, + { url = "https://files.pythonhosted.org/packages/4e/a7/92892ed86b2e36da403dd3a0187829f2d880414cef75bd612bfdf4dedebc/rignore-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:536392c5ec91755db48389546c833c4ab1426fe03e5a8522992b54ef8a244e7e", size = 1120546, upload-time = "2025-07-19T19:24:36.377Z" }, + { url = "https://files.pythonhosted.org/packages/31/1b/d29ae1fe901d523741d6d1d3ffe0d630734dd0ed6b047628a69c1e15ea44/rignore-0.6.4-cp312-cp312-win32.whl", hash = "sha256:f5f9dca46fc41c0a1e236767f68be9d63bdd2726db13a0ae3a30f68414472969", size = 642005, upload-time = "2025-07-19T19:24:56.671Z" }, + { url = "https://files.pythonhosted.org/packages/1a/41/a224944824688995374e4525115ce85fecd82442fc85edd5bcd81f4f256d/rignore-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:e02eecb9e1b9f9bf7c9030ae73308a777bed3b2486204cc74dfcfbe699ab1497", size = 720358, upload-time = "2025-07-19T19:24:49.959Z" }, + { url = "https://files.pythonhosted.org/packages/db/a3/edd7d0d5cc0720de132b6651cef95ee080ce5fca11c77d8a47db848e5f90/rignore-0.6.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:2b3b1e266ce45189240d14dfa1057f8013ea34b9bc8b3b44125ec8d25fdb3985", size = 885304, upload-time = "2025-07-19T19:23:54.268Z" }, + { url = "https://files.pythonhosted.org/packages/93/a1/d8d2fb97a6548307507d049b7e93885d4a0dfa1c907af5983fd9f9362a21/rignore-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45fe803628cc14714df10e8d6cdc23950a47eb9eb37dfea9a4779f4c672d2aa0", size = 818799, upload-time = "2025-07-19T19:23:47.544Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/cd/949981fcc180ad5ba7b31c52e78b74b2dea6b7bf744ad4c0c4b212f6da78/rignore-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e439f034277a947a4126e2da79dbb43e33d73d7c09d3d72a927e02f8a16f59aa", size = 892024, upload-time = "2025-07-19T19:22:36.18Z" }, + { url = "https://files.pythonhosted.org/packages/b0/d3/9042d701a8062d9c88f87760bbc2695ee2c23b3f002d34486b72a85f8efe/rignore-0.6.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84b5121650ae24621154c7bdba8b8970b0739d8146505c9f38e0cda9385d1004", size = 871430, upload-time = "2025-07-19T19:22:49.62Z" }, + { url = "https://files.pythonhosted.org/packages/eb/50/3370249b984212b7355f3d9241aa6d02e706067c6d194a2614dfbc0f5b27/rignore-0.6.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52b0957b585ab48a445cf8ac1dbc33a272ab060835e583b4f95aa8c67c23fb2b", size = 1160559, upload-time = "2025-07-19T19:23:01.629Z" }, + { url = "https://files.pythonhosted.org/packages/6c/6f/2ad7f925838091d065524f30a8abda846d1813eee93328febf262b5cda21/rignore-0.6.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50359e0d5287b5e2743bd2f2fbf05df619c8282fd3af12f6628ff97b9675551d", size = 939947, upload-time = "2025-07-19T19:23:14.608Z" }, + { url = "https://files.pythonhosted.org/packages/1f/01/626ec94d62475ae7ef8b00ef98cea61cbea52a389a666703c97c4673d406/rignore-0.6.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe18096dcb1596757dfe0b412aab6d32564473ae7ee58dea0a8b4be5b1a2e3b", size = 949471, upload-time = "2025-07-19T19:23:37.521Z" }, + { url = "https://files.pythonhosted.org/packages/e8/c3/699c4f03b3c46f4b5c02f17a0a339225da65aad547daa5b03001e7c6a382/rignore-0.6.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b79c212d9990a273ad91e8d9765e1766ef6ecedd3be65375d786a252762ba385", size = 974912, upload-time = "2025-07-19T19:23:27.13Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/35/04626c12f9f92a9fc789afc2be32838a5d9b23b6fa8b2ad4a8625638d15b/rignore-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6ffa7f2a8894c65aa5dc4e8ac8bbdf39a326c0c6589efd27686cfbb48f0197d", size = 1067281, upload-time = "2025-07-19T19:24:01.016Z" }, + { url = "https://files.pythonhosted.org/packages/fe/9c/8f17baf3b984afea151cb9094716f6f1fb8e8737db97fc6eb6d494bd0780/rignore-0.6.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a63f5720dffc8d8fb0a4d02fafb8370a4031ebf3f99a4e79f334a91e905b7349", size = 1134414, upload-time = "2025-07-19T19:24:13.534Z" }, + { url = "https://files.pythonhosted.org/packages/10/88/ef84ffa916a96437c12cefcc39d474122da9626d75e3a2ebe09ec5d32f1b/rignore-0.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ce33982da47ac5dc09d19b04fa8d7c9aa6292fc0bd1ecf33076989faa8886094", size = 1109330, upload-time = "2025-07-19T19:24:25.303Z" }, + { url = "https://files.pythonhosted.org/packages/27/43/2ada5a2ec03b82e903610a1c483f516f78e47700ee6db9823f739e08b3af/rignore-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d899621867aa266824fbd9150e298f19d25b93903ef0133c09f70c65a3416eca", size = 1120381, upload-time = "2025-07-19T19:24:37.798Z" }, + { url = "https://files.pythonhosted.org/packages/3b/99/e7bcc643085131cb14dbea772def72bf1f6fe9037171ebe177c4f228abc8/rignore-0.6.4-cp313-cp313-win32.whl", hash = "sha256:d0615a6bf4890ec5a90b5fb83666822088fbd4e8fcd740c386fcce51e2f6feea", size = 641761, upload-time = "2025-07-19T19:24:58.096Z" }, + { url = "https://files.pythonhosted.org/packages/d9/25/7798908044f27dea1a8abdc75c14523e33770137651e5f775a15143f4218/rignore-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:145177f0e32716dc2f220b07b3cde2385b994b7ea28d5c96fbec32639e9eac6f", size = 719876, upload-time = "2025-07-19T19:24:51.125Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/e3/ae1e30b045bf004ad77bbd1679b9afff2be8edb166520921c6f29420516a/rignore-0.6.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e55bf8f9bbd186f58ab646b4a08718c77131d28a9004e477612b0cbbd5202db2", size = 891776, upload-time = "2025-07-19T19:22:37.78Z" }, + { url = "https://files.pythonhosted.org/packages/45/a9/1193e3bc23ca0e6eb4f17cf4b99971237f97cfa6f241d98366dff90a6d09/rignore-0.6.4-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2521f7bf3ee1f2ab22a100a3a4eed39a97b025804e5afe4323528e9ce8f084a5", size = 871442, upload-time = "2025-07-19T19:22:50.972Z" }, + { url = "https://files.pythonhosted.org/packages/20/83/4c52ae429a0b2e1ce667e35b480e9a6846f9468c443baeaed5d775af9485/rignore-0.6.4-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0cc35773a8a9c119359ef974d0856988d4601d4daa6f532c05f66b4587cf35bc", size = 1159844, upload-time = "2025-07-19T19:23:02.751Z" }, + { url = "https://files.pythonhosted.org/packages/c1/2f/c740f5751f464c937bfe252dc15a024ae081352cfe80d94aa16d6a617482/rignore-0.6.4-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b665b1ea14457d7b49e834baabc635a3b8c10cfb5cca5c21161fabdbfc2b850e", size = 939456, upload-time = "2025-07-19T19:23:15.72Z" }, + { url = "https://files.pythonhosted.org/packages/fc/dd/68dbb08ac0edabf44dd144ff546a3fb0253c5af708e066847df39fc9188f/rignore-0.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c7fd339f344a8548724f289495b835bed7b81174a0bc1c28c6497854bd8855db", size = 1067070, upload-time = "2025-07-19T19:24:02.803Z" }, + { url = "https://files.pythonhosted.org/packages/3b/3a/7e7ea6f0d31d3f5beb0f2cf2c4c362672f5f7f125714458673fc579e2bed/rignore-0.6.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:91dc94b1cc5af8d6d25ce6edd29e7351830f19b0a03b75cb3adf1f76d00f3007", size = 1134598, upload-time = "2025-07-19T19:24:15.039Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/06/1b3307f6437d29bede5a95738aa89e6d910ba68d4054175c9f60d8e2c6b1/rignore-0.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4d1918221a249e5342b60fd5fa513bf3d6bf272a8738e66023799f0c82ecd788", size = 1108862, upload-time = "2025-07-19T19:24:26.765Z" }, + { url = "https://files.pythonhosted.org/packages/b0/d5/b37c82519f335f2c472a63fc6215c6f4c51063ecf3166e3acf508011afbd/rignore-0.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:240777332b859dc89dcba59ab6e3f1e062bc8e862ffa3e5f456e93f7fd5cb415", size = 1120002, upload-time = "2025-07-19T19:24:38.952Z" }, + { url = "https://files.pythonhosted.org/packages/ac/72/2f05559ed5e69bdfdb56ea3982b48e6c0017c59f7241f7e1c5cae992b347/rignore-0.6.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b0e548753e55cc648f1e7b02d9f74285fe48bb49cec93643d31e563773ab3f", size = 949454, upload-time = "2025-07-19T19:23:38.664Z" }, + { url = "https://files.pythonhosted.org/packages/0b/92/186693c8f838d670510ac1dfb35afbe964320fbffb343ba18f3d24441941/rignore-0.6.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6971ac9fdd5a0bd299a181096f091c4f3fd286643adceba98eccc03c688a6637", size = 974663, upload-time = "2025-07-19T19:23:28.24Z" }, ] [[package]] @@ -4327,51 +5001,83 @@ wheels = [ [[package]] name = "rpds-py" -version = "0.25.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8c/a6/60184b7fc00dd3ca80ac635dd5b8577d444c57e8e8742cecabfacb829921/rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3", size = 27304, upload-time = "2025-05-21T12:46:12.502Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/81/28ab0408391b1dc57393653b6a0cf2014cc282cc2909e4615e63e58262be/rpds_py-0.25.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5ffe453cde61f73fea9430223c81d29e2fbf412a6073951102146c84e19e34c", size = 364647, 
upload-time = "2025-05-21T12:43:28.559Z" }, - { url = "https://files.pythonhosted.org/packages/2c/9a/7797f04cad0d5e56310e1238434f71fc6939d0bc517192a18bb99a72a95f/rpds_py-0.25.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:115874ae5e2fdcfc16b2aedc95b5eef4aebe91b28e7e21951eda8a5dc0d3461b", size = 350454, upload-time = "2025-05-21T12:43:30.615Z" }, - { url = "https://files.pythonhosted.org/packages/69/3c/93d2ef941b04898011e5d6eaa56a1acf46a3b4c9f4b3ad1bbcbafa0bee1f/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a714bf6e5e81b0e570d01f56e0c89c6375101b8463999ead3a93a5d2a4af91fa", size = 389665, upload-time = "2025-05-21T12:43:32.629Z" }, - { url = "https://files.pythonhosted.org/packages/c1/57/ad0e31e928751dde8903a11102559628d24173428a0f85e25e187defb2c1/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35634369325906bcd01577da4c19e3b9541a15e99f31e91a02d010816b49bfda", size = 403873, upload-time = "2025-05-21T12:43:34.576Z" }, - { url = "https://files.pythonhosted.org/packages/16/ad/c0c652fa9bba778b4f54980a02962748479dc09632e1fd34e5282cf2556c/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4cb2b3ddc16710548801c6fcc0cfcdeeff9dafbc983f77265877793f2660309", size = 525866, upload-time = "2025-05-21T12:43:36.123Z" }, - { url = "https://files.pythonhosted.org/packages/2a/39/3e1839bc527e6fcf48d5fec4770070f872cdee6c6fbc9b259932f4e88a38/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ceca1cf097ed77e1a51f1dbc8d174d10cb5931c188a4505ff9f3e119dfe519b", size = 416886, upload-time = "2025-05-21T12:43:38.034Z" }, - { url = "https://files.pythonhosted.org/packages/7a/95/dd6b91cd4560da41df9d7030a038298a67d24f8ca38e150562644c829c48/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c2cd1a4b0c2b8c5e31ffff50d09f39906fe351389ba143c195566056c13a7ea", size = 390666, upload-time 
= "2025-05-21T12:43:40.065Z" }, - { url = "https://files.pythonhosted.org/packages/64/48/1be88a820e7494ce0a15c2d390ccb7c52212370badabf128e6a7bb4cb802/rpds_py-0.25.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de336a4b164c9188cb23f3703adb74a7623ab32d20090d0e9bf499a2203ad65", size = 425109, upload-time = "2025-05-21T12:43:42.263Z" }, - { url = "https://files.pythonhosted.org/packages/cf/07/3e2a17927ef6d7720b9949ec1b37d1e963b829ad0387f7af18d923d5cfa5/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9fca84a15333e925dd59ce01da0ffe2ffe0d6e5d29a9eeba2148916d1824948c", size = 567244, upload-time = "2025-05-21T12:43:43.846Z" }, - { url = "https://files.pythonhosted.org/packages/d2/e5/76cf010998deccc4f95305d827847e2eae9c568099c06b405cf96384762b/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88ec04afe0c59fa64e2f6ea0dd9657e04fc83e38de90f6de201954b4d4eb59bd", size = 596023, upload-time = "2025-05-21T12:43:45.932Z" }, - { url = "https://files.pythonhosted.org/packages/52/9a/df55efd84403736ba37a5a6377b70aad0fd1cb469a9109ee8a1e21299a1c/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8bd2f19e312ce3e1d2c635618e8a8d8132892bb746a7cf74780a489f0f6cdcb", size = 561634, upload-time = "2025-05-21T12:43:48.263Z" }, - { url = "https://files.pythonhosted.org/packages/ab/aa/dc3620dd8db84454aaf9374bd318f1aa02578bba5e567f5bf6b79492aca4/rpds_py-0.25.1-cp312-cp312-win32.whl", hash = "sha256:e5e2f7280d8d0d3ef06f3ec1b4fd598d386cc6f0721e54f09109a8132182fbfe", size = 222713, upload-time = "2025-05-21T12:43:49.897Z" }, - { url = "https://files.pythonhosted.org/packages/a3/7f/7cef485269a50ed5b4e9bae145f512d2a111ca638ae70cc101f661b4defd/rpds_py-0.25.1-cp312-cp312-win_amd64.whl", hash = "sha256:db58483f71c5db67d643857404da360dce3573031586034b7d59f245144cc192", size = 235280, upload-time = "2025-05-21T12:43:51.893Z" }, - { url = 
"https://files.pythonhosted.org/packages/99/f2/c2d64f6564f32af913bf5f3f7ae41c7c263c5ae4c4e8f1a17af8af66cd46/rpds_py-0.25.1-cp312-cp312-win_arm64.whl", hash = "sha256:6d50841c425d16faf3206ddbba44c21aa3310a0cebc3c1cdfc3e3f4f9f6f5728", size = 225399, upload-time = "2025-05-21T12:43:53.351Z" }, - { url = "https://files.pythonhosted.org/packages/2b/da/323848a2b62abe6a0fec16ebe199dc6889c5d0a332458da8985b2980dffe/rpds_py-0.25.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:659d87430a8c8c704d52d094f5ba6fa72ef13b4d385b7e542a08fc240cb4a559", size = 364498, upload-time = "2025-05-21T12:43:54.841Z" }, - { url = "https://files.pythonhosted.org/packages/1f/b4/4d3820f731c80fd0cd823b3e95b9963fec681ae45ba35b5281a42382c67d/rpds_py-0.25.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68f6f060f0bbdfb0245267da014d3a6da9be127fe3e8cc4a68c6f833f8a23bb1", size = 350083, upload-time = "2025-05-21T12:43:56.428Z" }, - { url = "https://files.pythonhosted.org/packages/d5/b1/3a8ee1c9d480e8493619a437dec685d005f706b69253286f50f498cbdbcf/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:083a9513a33e0b92cf6e7a6366036c6bb43ea595332c1ab5c8ae329e4bcc0a9c", size = 389023, upload-time = "2025-05-21T12:43:57.995Z" }, - { url = "https://files.pythonhosted.org/packages/3b/31/17293edcfc934dc62c3bf74a0cb449ecd549531f956b72287203e6880b87/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:816568614ecb22b18a010c7a12559c19f6fe993526af88e95a76d5a60b8b75fb", size = 403283, upload-time = "2025-05-21T12:43:59.546Z" }, - { url = "https://files.pythonhosted.org/packages/d1/ca/e0f0bc1a75a8925024f343258c8ecbd8828f8997ea2ac71e02f67b6f5299/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c6564c0947a7f52e4792983f8e6cf9bac140438ebf81f527a21d944f2fd0a40", size = 524634, upload-time = "2025-05-21T12:44:01.087Z" }, - { url = 
"https://files.pythonhosted.org/packages/3e/03/5d0be919037178fff33a6672ffc0afa04ea1cfcb61afd4119d1b5280ff0f/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c4a128527fe415d73cf1f70a9a688d06130d5810be69f3b553bf7b45e8acf79", size = 416233, upload-time = "2025-05-21T12:44:02.604Z" }, - { url = "https://files.pythonhosted.org/packages/05/7c/8abb70f9017a231c6c961a8941403ed6557664c0913e1bf413cbdc039e75/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a49e1d7a4978ed554f095430b89ecc23f42014a50ac385eb0c4d163ce213c325", size = 390375, upload-time = "2025-05-21T12:44:04.162Z" }, - { url = "https://files.pythonhosted.org/packages/7a/ac/a87f339f0e066b9535074a9f403b9313fd3892d4a164d5d5f5875ac9f29f/rpds_py-0.25.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d74ec9bc0e2feb81d3f16946b005748119c0f52a153f6db6a29e8cd68636f295", size = 424537, upload-time = "2025-05-21T12:44:06.175Z" }, - { url = "https://files.pythonhosted.org/packages/1f/8f/8d5c1567eaf8c8afe98a838dd24de5013ce6e8f53a01bd47fe8bb06b5533/rpds_py-0.25.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3af5b4cc10fa41e5bc64e5c198a1b2d2864337f8fcbb9a67e747e34002ce812b", size = 566425, upload-time = "2025-05-21T12:44:08.242Z" }, - { url = "https://files.pythonhosted.org/packages/95/33/03016a6be5663b389c8ab0bbbcca68d9e96af14faeff0a04affcb587e776/rpds_py-0.25.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:79dc317a5f1c51fd9c6a0c4f48209c6b8526d0524a6904fc1076476e79b00f98", size = 595197, upload-time = "2025-05-21T12:44:10.449Z" }, - { url = "https://files.pythonhosted.org/packages/33/8d/da9f4d3e208c82fda311bff0cf0a19579afceb77cf456e46c559a1c075ba/rpds_py-0.25.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1521031351865e0181bc585147624d66b3b00a84109b57fcb7a779c3ec3772cd", size = 561244, upload-time = "2025-05-21T12:44:12.387Z" }, - { url = 
"https://files.pythonhosted.org/packages/e2/b3/39d5dcf7c5f742ecd6dbc88f6f84ae54184b92f5f387a4053be2107b17f1/rpds_py-0.25.1-cp313-cp313-win32.whl", hash = "sha256:5d473be2b13600b93a5675d78f59e63b51b1ba2d0476893415dfbb5477e65b31", size = 222254, upload-time = "2025-05-21T12:44:14.261Z" }, - { url = "https://files.pythonhosted.org/packages/5f/19/2d6772c8eeb8302c5f834e6d0dfd83935a884e7c5ce16340c7eaf89ce925/rpds_py-0.25.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7b74e92a3b212390bdce1d93da9f6488c3878c1d434c5e751cbc202c5e09500", size = 234741, upload-time = "2025-05-21T12:44:16.236Z" }, - { url = "https://files.pythonhosted.org/packages/5b/5a/145ada26cfaf86018d0eb304fe55eafdd4f0b6b84530246bb4a7c4fb5c4b/rpds_py-0.25.1-cp313-cp313-win_arm64.whl", hash = "sha256:dd326a81afe332ede08eb39ab75b301d5676802cdffd3a8f287a5f0b694dc3f5", size = 224830, upload-time = "2025-05-21T12:44:17.749Z" }, - { url = "https://files.pythonhosted.org/packages/4b/ca/d435844829c384fd2c22754ff65889c5c556a675d2ed9eb0e148435c6690/rpds_py-0.25.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:a58d1ed49a94d4183483a3ce0af22f20318d4a1434acee255d683ad90bf78129", size = 359668, upload-time = "2025-05-21T12:44:19.322Z" }, - { url = "https://files.pythonhosted.org/packages/1f/01/b056f21db3a09f89410d493d2f6614d87bb162499f98b649d1dbd2a81988/rpds_py-0.25.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f251bf23deb8332823aef1da169d5d89fa84c89f67bdfb566c49dea1fccfd50d", size = 345649, upload-time = "2025-05-21T12:44:20.962Z" }, - { url = "https://files.pythonhosted.org/packages/e0/0f/e0d00dc991e3d40e03ca36383b44995126c36b3eafa0ccbbd19664709c88/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dbd586bfa270c1103ece2109314dd423df1fa3d9719928b5d09e4840cec0d72", size = 384776, upload-time = "2025-05-21T12:44:22.516Z" }, - { url = 
"https://files.pythonhosted.org/packages/9f/a2/59374837f105f2ca79bde3c3cd1065b2f8c01678900924949f6392eab66d/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d273f136e912aa101a9274c3145dcbddbe4bac560e77e6d5b3c9f6e0ed06d34", size = 395131, upload-time = "2025-05-21T12:44:24.147Z" }, - { url = "https://files.pythonhosted.org/packages/9c/dc/48e8d84887627a0fe0bac53f0b4631e90976fd5d35fff8be66b8e4f3916b/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:666fa7b1bd0a3810a7f18f6d3a25ccd8866291fbbc3c9b912b917a6715874bb9", size = 520942, upload-time = "2025-05-21T12:44:25.915Z" }, - { url = "https://files.pythonhosted.org/packages/7c/f5/ee056966aeae401913d37befeeab57a4a43a4f00099e0a20297f17b8f00c/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:921954d7fbf3fccc7de8f717799304b14b6d9a45bbeec5a8d7408ccbf531faf5", size = 411330, upload-time = "2025-05-21T12:44:27.638Z" }, - { url = "https://files.pythonhosted.org/packages/ab/74/b2cffb46a097cefe5d17f94ede7a174184b9d158a0aeb195f39f2c0361e8/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3d86373ff19ca0441ebeb696ef64cb58b8b5cbacffcda5a0ec2f3911732a194", size = 387339, upload-time = "2025-05-21T12:44:29.292Z" }, - { url = "https://files.pythonhosted.org/packages/7f/9a/0ff0b375dcb5161c2b7054e7d0b7575f1680127505945f5cabaac890bc07/rpds_py-0.25.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c8980cde3bb8575e7c956a530f2c217c1d6aac453474bf3ea0f9c89868b531b6", size = 418077, upload-time = "2025-05-21T12:44:30.877Z" }, - { url = "https://files.pythonhosted.org/packages/0d/a1/fda629bf20d6b698ae84c7c840cfb0e9e4200f664fc96e1f456f00e4ad6e/rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8eb8c84ecea987a2523e057c0d950bcb3f789696c0499290b8d7b3107a719d78", size = 562441, upload-time = "2025-05-21T12:44:32.541Z" }, - { url = 
"https://files.pythonhosted.org/packages/20/15/ce4b5257f654132f326f4acd87268e1006cc071e2c59794c5bdf4bebbb51/rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:e43a005671a9ed5a650f3bc39e4dbccd6d4326b24fb5ea8be5f3a43a6f576c72", size = 590750, upload-time = "2025-05-21T12:44:34.557Z" }, - { url = "https://files.pythonhosted.org/packages/fb/ab/e04bf58a8d375aeedb5268edcc835c6a660ebf79d4384d8e0889439448b0/rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58f77c60956501a4a627749a6dcb78dac522f249dd96b5c9f1c6af29bfacfb66", size = 558891, upload-time = "2025-05-21T12:44:37.358Z" }, - { url = "https://files.pythonhosted.org/packages/90/82/cb8c6028a6ef6cd2b7991e2e4ced01c854b6236ecf51e81b64b569c43d73/rpds_py-0.25.1-cp313-cp313t-win32.whl", hash = "sha256:2cb9e5b5e26fc02c8a4345048cd9998c2aca7c2712bd1b36da0c72ee969a3523", size = 218718, upload-time = "2025-05-21T12:44:38.969Z" }, - { url = "https://files.pythonhosted.org/packages/b6/97/5a4b59697111c89477d20ba8a44df9ca16b41e737fa569d5ae8bff99e650/rpds_py-0.25.1-cp313-cp313t-win_amd64.whl", hash = "sha256:401ca1c4a20cc0510d3435d89c069fe0a9ae2ee6495135ac46bdd49ec0495763", size = 232218, upload-time = "2025-05-21T12:44:40.512Z" }, +version = "0.27.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/d9/991a0dee12d9fc53ed027e26a26a64b151d77252ac477e22666b9688bc16/rpds_py-0.27.0.tar.gz", hash = "sha256:8b23cf252f180cda89220b378d917180f29d313cd6a07b2431c0d3b776aae86f", size = 27420, upload-time = "2025-08-07T08:26:39.624Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/17/e67309ca1ac993fa1888a0d9b2f5ccc1f67196ace32e76c9f8e1dbbbd50c/rpds_py-0.27.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:19c990fdf5acecbf0623e906ae2e09ce1c58947197f9bced6bbd7482662231c4", size = 362611, upload-time = "2025-08-07T08:23:44.773Z" }, + { url = 
"https://files.pythonhosted.org/packages/93/2e/28c2fb84aa7aa5d75933d1862d0f7de6198ea22dfd9a0cca06e8a4e7509e/rpds_py-0.27.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6c27a7054b5224710fcfb1a626ec3ff4f28bcb89b899148c72873b18210e446b", size = 347680, upload-time = "2025-08-07T08:23:46.014Z" }, + { url = "https://files.pythonhosted.org/packages/44/3e/9834b4c8f4f5fe936b479e623832468aa4bd6beb8d014fecaee9eac6cdb1/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09965b314091829b378b60607022048953e25f0b396c2b70e7c4c81bcecf932e", size = 384600, upload-time = "2025-08-07T08:23:48Z" }, + { url = "https://files.pythonhosted.org/packages/19/78/744123c7b38865a965cd9e6f691fde7ef989a00a256fa8bf15b75240d12f/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:14f028eb47f59e9169bfdf9f7ceafd29dd64902141840633683d0bad5b04ff34", size = 400697, upload-time = "2025-08-07T08:23:49.407Z" }, + { url = "https://files.pythonhosted.org/packages/32/97/3c3d32fe7daee0a1f1a678b6d4dfb8c4dcf88197fa2441f9da7cb54a8466/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6168af0be75bba990a39f9431cdfae5f0ad501f4af32ae62e8856307200517b8", size = 517781, upload-time = "2025-08-07T08:23:50.557Z" }, + { url = "https://files.pythonhosted.org/packages/b2/be/28f0e3e733680aa13ecec1212fc0f585928a206292f14f89c0b8a684cad1/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab47fe727c13c09d0e6f508e3a49e545008e23bf762a245b020391b621f5b726", size = 406449, upload-time = "2025-08-07T08:23:51.732Z" }, + { url = "https://files.pythonhosted.org/packages/95/ae/5d15c83e337c082d0367053baeb40bfba683f42459f6ebff63a2fd7e5518/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fa01b3d5e3b7d97efab65bd3d88f164e289ec323a8c033c5c38e53ee25c007e", size = 386150, upload-time = "2025-08-07T08:23:52.822Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/65/944e95f95d5931112829e040912b25a77b2e7ed913ea5fe5746aa5c1ce75/rpds_py-0.27.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:6c135708e987f46053e0a1246a206f53717f9fadfba27174a9769ad4befba5c3", size = 406100, upload-time = "2025-08-07T08:23:54.339Z" }, + { url = "https://files.pythonhosted.org/packages/21/a4/1664b83fae02894533cd11dc0b9f91d673797c2185b7be0f7496107ed6c5/rpds_py-0.27.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc327f4497b7087d06204235199daf208fd01c82d80465dc5efa4ec9df1c5b4e", size = 421345, upload-time = "2025-08-07T08:23:55.832Z" }, + { url = "https://files.pythonhosted.org/packages/7c/26/b7303941c2b0823bfb34c71378249f8beedce57301f400acb04bb345d025/rpds_py-0.27.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7e57906e38583a2cba67046a09c2637e23297618dc1f3caddbc493f2be97c93f", size = 561891, upload-time = "2025-08-07T08:23:56.951Z" }, + { url = "https://files.pythonhosted.org/packages/9b/c8/48623d64d4a5a028fa99576c768a6159db49ab907230edddc0b8468b998b/rpds_py-0.27.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f4f69d7a4300fbf91efb1fb4916421bd57804c01ab938ab50ac9c4aa2212f03", size = 591756, upload-time = "2025-08-07T08:23:58.146Z" }, + { url = "https://files.pythonhosted.org/packages/b3/51/18f62617e8e61cc66334c9fb44b1ad7baae3438662098efbc55fb3fda453/rpds_py-0.27.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b4c4fbbcff474e1e5f38be1bf04511c03d492d42eec0babda5d03af3b5589374", size = 557088, upload-time = "2025-08-07T08:23:59.6Z" }, + { url = "https://files.pythonhosted.org/packages/bd/4c/e84c3a276e2496a93d245516be6b49e20499aa8ca1c94d59fada0d79addc/rpds_py-0.27.0-cp312-cp312-win32.whl", hash = "sha256:27bac29bbbf39601b2aab474daf99dbc8e7176ca3389237a23944b17f8913d97", size = 221926, upload-time = "2025-08-07T08:24:00.695Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/89/9d0fbcef64340db0605eb0a0044f258076f3ae0a3b108983b2c614d96212/rpds_py-0.27.0-cp312-cp312-win_amd64.whl", hash = "sha256:8a06aa1197ec0281eb1d7daf6073e199eb832fe591ffa329b88bae28f25f5fe5", size = 233235, upload-time = "2025-08-07T08:24:01.846Z" }, + { url = "https://files.pythonhosted.org/packages/c9/b0/e177aa9f39cbab060f96de4a09df77d494f0279604dc2f509263e21b05f9/rpds_py-0.27.0-cp312-cp312-win_arm64.whl", hash = "sha256:e14aab02258cb776a108107bd15f5b5e4a1bbaa61ef33b36693dfab6f89d54f9", size = 223315, upload-time = "2025-08-07T08:24:03.337Z" }, + { url = "https://files.pythonhosted.org/packages/81/d2/dfdfd42565a923b9e5a29f93501664f5b984a802967d48d49200ad71be36/rpds_py-0.27.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:443d239d02d9ae55b74015234f2cd8eb09e59fbba30bf60baeb3123ad4c6d5ff", size = 362133, upload-time = "2025-08-07T08:24:04.508Z" }, + { url = "https://files.pythonhosted.org/packages/ac/4a/0a2e2460c4b66021d349ce9f6331df1d6c75d7eea90df9785d333a49df04/rpds_py-0.27.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b8a7acf04fda1f30f1007f3cc96d29d8cf0a53e626e4e1655fdf4eabc082d367", size = 347128, upload-time = "2025-08-07T08:24:05.695Z" }, + { url = "https://files.pythonhosted.org/packages/35/8d/7d1e4390dfe09d4213b3175a3f5a817514355cb3524593380733204f20b9/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d0f92b78cfc3b74a42239fdd8c1266f4715b573204c234d2f9fc3fc7a24f185", size = 384027, upload-time = "2025-08-07T08:24:06.841Z" }, + { url = "https://files.pythonhosted.org/packages/c1/65/78499d1a62172891c8cd45de737b2a4b84a414b6ad8315ab3ac4945a5b61/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ce4ed8e0c7dbc5b19352b9c2c6131dd23b95fa8698b5cdd076307a33626b72dc", size = 399973, upload-time = "2025-08-07T08:24:08.143Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/a1/1c67c1d8cc889107b19570bb01f75cf49852068e95e6aee80d22915406fc/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fde355b02934cc6b07200cc3b27ab0c15870a757d1a72fd401aa92e2ea3c6bfe", size = 515295, upload-time = "2025-08-07T08:24:09.711Z" }, + { url = "https://files.pythonhosted.org/packages/df/27/700ec88e748436b6c7c4a2262d66e80f8c21ab585d5e98c45e02f13f21c0/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13bbc4846ae4c993f07c93feb21a24d8ec637573d567a924b1001e81c8ae80f9", size = 406737, upload-time = "2025-08-07T08:24:11.182Z" }, + { url = "https://files.pythonhosted.org/packages/33/cc/6b0ee8f0ba3f2df2daac1beda17fde5cf10897a7d466f252bd184ef20162/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0744661afbc4099fef7f4e604e7f1ea1be1dd7284f357924af12a705cc7d5c", size = 385898, upload-time = "2025-08-07T08:24:12.798Z" }, + { url = "https://files.pythonhosted.org/packages/e8/7e/c927b37d7d33c0a0ebf249cc268dc2fcec52864c1b6309ecb960497f2285/rpds_py-0.27.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:069e0384a54f427bd65d7fda83b68a90606a3835901aaff42185fcd94f5a9295", size = 405785, upload-time = "2025-08-07T08:24:14.906Z" }, + { url = "https://files.pythonhosted.org/packages/5b/d2/8ed50746d909dcf402af3fa58b83d5a590ed43e07251d6b08fad1a535ba6/rpds_py-0.27.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4bc262ace5a1a7dc3e2eac2fa97b8257ae795389f688b5adf22c5db1e2431c43", size = 419760, upload-time = "2025-08-07T08:24:16.129Z" }, + { url = "https://files.pythonhosted.org/packages/d3/60/2b2071aee781cb3bd49f94d5d35686990b925e9b9f3e3d149235a6f5d5c1/rpds_py-0.27.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2fe6e18e5c8581f0361b35ae575043c7029d0a92cb3429e6e596c2cdde251432", size = 561201, upload-time = "2025-08-07T08:24:17.645Z" }, + { url = 
"https://files.pythonhosted.org/packages/98/1f/27b67304272521aaea02be293fecedce13fa351a4e41cdb9290576fc6d81/rpds_py-0.27.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d93ebdb82363d2e7bec64eecdc3632b59e84bd270d74fe5be1659f7787052f9b", size = 591021, upload-time = "2025-08-07T08:24:18.999Z" }, + { url = "https://files.pythonhosted.org/packages/db/9b/a2fadf823164dd085b1f894be6443b0762a54a7af6f36e98e8fcda69ee50/rpds_py-0.27.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0954e3a92e1d62e83a54ea7b3fdc9efa5d61acef8488a8a3d31fdafbfb00460d", size = 556368, upload-time = "2025-08-07T08:24:20.54Z" }, + { url = "https://files.pythonhosted.org/packages/24/f3/6d135d46a129cda2e3e6d4c5e91e2cc26ea0428c6cf152763f3f10b6dd05/rpds_py-0.27.0-cp313-cp313-win32.whl", hash = "sha256:2cff9bdd6c7b906cc562a505c04a57d92e82d37200027e8d362518df427f96cd", size = 221236, upload-time = "2025-08-07T08:24:22.144Z" }, + { url = "https://files.pythonhosted.org/packages/c5/44/65d7494f5448ecc755b545d78b188440f81da98b50ea0447ab5ebfdf9bd6/rpds_py-0.27.0-cp313-cp313-win_amd64.whl", hash = "sha256:dc79d192fb76fc0c84f2c58672c17bbbc383fd26c3cdc29daae16ce3d927e8b2", size = 232634, upload-time = "2025-08-07T08:24:23.642Z" }, + { url = "https://files.pythonhosted.org/packages/70/d9/23852410fadab2abb611733933401de42a1964ce6600a3badae35fbd573e/rpds_py-0.27.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b3a5c8089eed498a3af23ce87a80805ff98f6ef8f7bdb70bd1b7dae5105f6ac", size = 222783, upload-time = "2025-08-07T08:24:25.098Z" }, + { url = "https://files.pythonhosted.org/packages/15/75/03447917f78512b34463f4ef11066516067099a0c466545655503bed0c77/rpds_py-0.27.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:90fb790138c1a89a2e58c9282fe1089638401f2f3b8dddd758499041bc6e0774", size = 359154, upload-time = "2025-08-07T08:24:26.249Z" }, + { url = "https://files.pythonhosted.org/packages/6b/fc/4dac4fa756451f2122ddaf136e2c6aeb758dc6fdbe9ccc4bc95c98451d50/rpds_py-0.27.0-cp313-cp313t-macosx_11_0_arm64.whl", 
hash = "sha256:010c4843a3b92b54373e3d2291a7447d6c3fc29f591772cc2ea0e9f5c1da434b", size = 343909, upload-time = "2025-08-07T08:24:27.405Z" }, + { url = "https://files.pythonhosted.org/packages/7b/81/723c1ed8e6f57ed9d8c0c07578747a2d3d554aaefc1ab89f4e42cfeefa07/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9ce7a9e967afc0a2af7caa0d15a3e9c1054815f73d6a8cb9225b61921b419bd", size = 379340, upload-time = "2025-08-07T08:24:28.714Z" }, + { url = "https://files.pythonhosted.org/packages/98/16/7e3740413de71818ce1997df82ba5f94bae9fff90c0a578c0e24658e6201/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa0bf113d15e8abdfee92aa4db86761b709a09954083afcb5bf0f952d6065fdb", size = 391655, upload-time = "2025-08-07T08:24:30.223Z" }, + { url = "https://files.pythonhosted.org/packages/e0/63/2a9f510e124d80660f60ecce07953f3f2d5f0b96192c1365443859b9c87f/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb91d252b35004a84670dfeafadb042528b19842a0080d8b53e5ec1128e8f433", size = 513017, upload-time = "2025-08-07T08:24:31.446Z" }, + { url = "https://files.pythonhosted.org/packages/2c/4e/cf6ff311d09776c53ea1b4f2e6700b9d43bb4e99551006817ade4bbd6f78/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db8a6313dbac934193fc17fe7610f70cd8181c542a91382531bef5ed785e5615", size = 402058, upload-time = "2025-08-07T08:24:32.613Z" }, + { url = "https://files.pythonhosted.org/packages/88/11/5e36096d474cb10f2a2d68b22af60a3bc4164fd8db15078769a568d9d3ac/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce96ab0bdfcef1b8c371ada2100767ace6804ea35aacce0aef3aeb4f3f499ca8", size = 383474, upload-time = "2025-08-07T08:24:33.767Z" }, + { url = "https://files.pythonhosted.org/packages/db/a2/3dff02805b06058760b5eaa6d8cb8db3eb3e46c9e452453ad5fc5b5ad9fe/rpds_py-0.27.0-cp313-cp313t-manylinux_2_31_riscv64.whl", 
hash = "sha256:7451ede3560086abe1aa27dcdcf55cd15c96b56f543fb12e5826eee6f721f858", size = 400067, upload-time = "2025-08-07T08:24:35.021Z" }, + { url = "https://files.pythonhosted.org/packages/67/87/eed7369b0b265518e21ea836456a4ed4a6744c8c12422ce05bce760bb3cf/rpds_py-0.27.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:32196b5a99821476537b3f7732432d64d93a58d680a52c5e12a190ee0135d8b5", size = 412085, upload-time = "2025-08-07T08:24:36.267Z" }, + { url = "https://files.pythonhosted.org/packages/8b/48/f50b2ab2fbb422fbb389fe296e70b7a6b5ea31b263ada5c61377e710a924/rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a029be818059870664157194e46ce0e995082ac49926f1423c1f058534d2aaa9", size = 555928, upload-time = "2025-08-07T08:24:37.573Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/b18eb51045d06887666c3560cd4bbb6819127b43d758f5adb82b5f56f7d1/rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3841f66c1ffdc6cebce8aed64e36db71466f1dc23c0d9a5592e2a782a3042c79", size = 585527, upload-time = "2025-08-07T08:24:39.391Z" }, + { url = "https://files.pythonhosted.org/packages/be/03/a3dd6470fc76499959b00ae56295b76b4bdf7c6ffc60d62006b1217567e1/rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:42894616da0fc0dcb2ec08a77896c3f56e9cb2f4b66acd76fc8992c3557ceb1c", size = 554211, upload-time = "2025-08-07T08:24:40.6Z" }, + { url = "https://files.pythonhosted.org/packages/bf/d1/ee5fd1be395a07423ac4ca0bcc05280bf95db2b155d03adefeb47d5ebf7e/rpds_py-0.27.0-cp313-cp313t-win32.whl", hash = "sha256:b1fef1f13c842a39a03409e30ca0bf87b39a1e2a305a9924deadb75a43105d23", size = 216624, upload-time = "2025-08-07T08:24:42.204Z" }, + { url = "https://files.pythonhosted.org/packages/1c/94/4814c4c858833bf46706f87349c37ca45e154da7dbbec9ff09f1abeb08cc/rpds_py-0.27.0-cp313-cp313t-win_amd64.whl", hash = "sha256:183f5e221ba3e283cd36fdfbe311d95cd87699a083330b4f792543987167eff1", size = 230007, upload-time = 
"2025-08-07T08:24:43.329Z" }, + { url = "https://files.pythonhosted.org/packages/0e/a5/8fffe1c7dc7c055aa02df310f9fb71cfc693a4d5ccc5de2d3456ea5fb022/rpds_py-0.27.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:f3cd110e02c5bf17d8fb562f6c9df5c20e73029d587cf8602a2da6c5ef1e32cb", size = 362595, upload-time = "2025-08-07T08:24:44.478Z" }, + { url = "https://files.pythonhosted.org/packages/bc/c7/4e4253fd2d4bb0edbc0b0b10d9f280612ca4f0f990e3c04c599000fe7d71/rpds_py-0.27.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8d0e09cf4863c74106b5265c2c310f36146e2b445ff7b3018a56799f28f39f6f", size = 347252, upload-time = "2025-08-07T08:24:45.678Z" }, + { url = "https://files.pythonhosted.org/packages/f3/c8/3d1a954d30f0174dd6baf18b57c215da03cf7846a9d6e0143304e784cddc/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f689ab822f9b5eb6dfc69893b4b9366db1d2420f7db1f6a2adf2a9ca15ad64", size = 384886, upload-time = "2025-08-07T08:24:46.86Z" }, + { url = "https://files.pythonhosted.org/packages/e0/52/3c5835f2df389832b28f9276dd5395b5a965cea34226e7c88c8fbec2093c/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e36c80c49853b3ffda7aa1831bf175c13356b210c73128c861f3aa93c3cc4015", size = 399716, upload-time = "2025-08-07T08:24:48.174Z" }, + { url = "https://files.pythonhosted.org/packages/40/73/176e46992461a1749686a2a441e24df51ff86b99c2d34bf39f2a5273b987/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6de6a7f622860af0146cb9ee148682ff4d0cea0b8fd3ad51ce4d40efb2f061d0", size = 517030, upload-time = "2025-08-07T08:24:49.52Z" }, + { url = "https://files.pythonhosted.org/packages/79/2a/7266c75840e8c6e70effeb0d38922a45720904f2cd695e68a0150e5407e2/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4045e2fc4b37ec4b48e8907a5819bdd3380708c139d7cc358f03a3653abedb89", size = 408448, upload-time = "2025-08-07T08:24:50.727Z" }, + { url 
= "https://files.pythonhosted.org/packages/e6/5f/a7efc572b8e235093dc6cf39f4dbc8a7f08e65fdbcec7ff4daeb3585eef1/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da162b718b12c4219eeeeb68a5b7552fbc7aadedf2efee440f88b9c0e54b45d", size = 387320, upload-time = "2025-08-07T08:24:52.004Z" }, + { url = "https://files.pythonhosted.org/packages/a2/eb/9ff6bc92efe57cf5a2cb74dee20453ba444b6fdc85275d8c99e0d27239d1/rpds_py-0.27.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:0665be515767dc727ffa5f74bd2ef60b0ff85dad6bb8f50d91eaa6b5fb226f51", size = 407414, upload-time = "2025-08-07T08:24:53.664Z" }, + { url = "https://files.pythonhosted.org/packages/fb/bd/3b9b19b00d5c6e1bd0f418c229ab0f8d3b110ddf7ec5d9d689ef783d0268/rpds_py-0.27.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:203f581accef67300a942e49a37d74c12ceeef4514874c7cede21b012613ca2c", size = 420766, upload-time = "2025-08-07T08:24:55.917Z" }, + { url = "https://files.pythonhosted.org/packages/17/6b/521a7b1079ce16258c70805166e3ac6ec4ee2139d023fe07954dc9b2d568/rpds_py-0.27.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7873b65686a6471c0037139aa000d23fe94628e0daaa27b6e40607c90e3f5ec4", size = 562409, upload-time = "2025-08-07T08:24:57.17Z" }, + { url = "https://files.pythonhosted.org/packages/8b/bf/65db5bfb14ccc55e39de8419a659d05a2a9cd232f0a699a516bb0991da7b/rpds_py-0.27.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:249ab91ceaa6b41abc5f19513cb95b45c6f956f6b89f1fe3d99c81255a849f9e", size = 590793, upload-time = "2025-08-07T08:24:58.388Z" }, + { url = "https://files.pythonhosted.org/packages/db/b8/82d368b378325191ba7aae8f40f009b78057b598d4394d1f2cdabaf67b3f/rpds_py-0.27.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d2f184336bc1d6abfaaa1262ed42739c3789b1e3a65a29916a615307d22ffd2e", size = 558178, upload-time = "2025-08-07T08:24:59.756Z" }, + { url = 
"https://files.pythonhosted.org/packages/f6/ff/f270bddbfbc3812500f8131b1ebbd97afd014cd554b604a3f73f03133a36/rpds_py-0.27.0-cp314-cp314-win32.whl", hash = "sha256:d3c622c39f04d5751408f5b801ecb527e6e0a471b367f420a877f7a660d583f6", size = 222355, upload-time = "2025-08-07T08:25:01.027Z" }, + { url = "https://files.pythonhosted.org/packages/bf/20/fdab055b1460c02ed356a0e0b0a78c1dd32dc64e82a544f7b31c9ac643dc/rpds_py-0.27.0-cp314-cp314-win_amd64.whl", hash = "sha256:cf824aceaeffff029ccfba0da637d432ca71ab21f13e7f6f5179cd88ebc77a8a", size = 234007, upload-time = "2025-08-07T08:25:02.268Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a8/694c060005421797a3be4943dab8347c76c2b429a9bef68fb2c87c9e70c7/rpds_py-0.27.0-cp314-cp314-win_arm64.whl", hash = "sha256:86aca1616922b40d8ac1b3073a1ead4255a2f13405e5700c01f7c8d29a03972d", size = 223527, upload-time = "2025-08-07T08:25:03.45Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f9/77f4c90f79d2c5ca8ce6ec6a76cb4734ee247de6b3a4f337e289e1f00372/rpds_py-0.27.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:341d8acb6724c0c17bdf714319c393bb27f6d23d39bc74f94221b3e59fc31828", size = 359469, upload-time = "2025-08-07T08:25:04.648Z" }, + { url = "https://files.pythonhosted.org/packages/c0/22/b97878d2f1284286fef4172069e84b0b42b546ea7d053e5fb7adb9ac6494/rpds_py-0.27.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6b96b0b784fe5fd03beffff2b1533dc0d85e92bab8d1b2c24ef3a5dc8fac5669", size = 343960, upload-time = "2025-08-07T08:25:05.863Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b0/dfd55b5bb480eda0578ae94ef256d3061d20b19a0f5e18c482f03e65464f/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c431bfb91478d7cbe368d0a699978050d3b112d7f1d440a41e90faa325557fd", size = 380201, upload-time = "2025-08-07T08:25:07.513Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/22/e1fa64e50d58ad2b2053077e3ec81a979147c43428de9e6de68ddf6aff4e/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20e222a44ae9f507d0f2678ee3dd0c45ec1e930f6875d99b8459631c24058aec", size = 392111, upload-time = "2025-08-07T08:25:09.149Z" }, + { url = "https://files.pythonhosted.org/packages/49/f9/43ab7a43e97aedf6cea6af70fdcbe18abbbc41d4ae6cdec1bfc23bbad403/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:184f0d7b342967f6cda94a07d0e1fae177d11d0b8f17d73e06e36ac02889f303", size = 515863, upload-time = "2025-08-07T08:25:10.431Z" }, + { url = "https://files.pythonhosted.org/packages/38/9b/9bd59dcc636cd04d86a2d20ad967770bf348f5eb5922a8f29b547c074243/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a00c91104c173c9043bc46f7b30ee5e6d2f6b1149f11f545580f5d6fdff42c0b", size = 402398, upload-time = "2025-08-07T08:25:11.819Z" }, + { url = "https://files.pythonhosted.org/packages/71/bf/f099328c6c85667aba6b66fa5c35a8882db06dcd462ea214be72813a0dd2/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a37dd208f0d658e0487522078b1ed68cd6bce20ef4b5a915d2809b9094b410", size = 384665, upload-time = "2025-08-07T08:25:13.194Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c5/9c1f03121ece6634818490bd3c8be2c82a70928a19de03467fb25a3ae2a8/rpds_py-0.27.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:92f3b3ec3e6008a1fe00b7c0946a170f161ac00645cde35e3c9a68c2475e8156", size = 400405, upload-time = "2025-08-07T08:25:14.417Z" }, + { url = "https://files.pythonhosted.org/packages/b5/b8/e25d54af3e63ac94f0c16d8fe143779fe71ff209445a0c00d0f6984b6b2c/rpds_py-0.27.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a1b3db5fae5cbce2131b7420a3f83553d4d89514c03d67804ced36161fe8b6b2", size = 413179, upload-time = "2025-08-07T08:25:15.664Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/d1/406b3316433fe49c3021546293a04bc33f1478e3ec7950215a7fce1a1208/rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5355527adaa713ab693cbce7c1e0ec71682f599f61b128cf19d07e5c13c9b1f1", size = 556895, upload-time = "2025-08-07T08:25:17.061Z" }, + { url = "https://files.pythonhosted.org/packages/5f/bc/3697c0c21fcb9a54d46ae3b735eb2365eea0c2be076b8f770f98e07998de/rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fcc01c57ce6e70b728af02b2401c5bc853a9e14eb07deda30624374f0aebfe42", size = 585464, upload-time = "2025-08-07T08:25:18.406Z" }, + { url = "https://files.pythonhosted.org/packages/63/09/ee1bb5536f99f42c839b177d552f6114aa3142d82f49cef49261ed28dbe0/rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3001013dae10f806380ba739d40dee11db1ecb91684febb8406a87c2ded23dae", size = 555090, upload-time = "2025-08-07T08:25:20.461Z" }, + { url = "https://files.pythonhosted.org/packages/7d/2c/363eada9e89f7059199d3724135a86c47082cbf72790d6ba2f336d146ddb/rpds_py-0.27.0-cp314-cp314t-win32.whl", hash = "sha256:0f401c369186a5743694dd9fc08cba66cf70908757552e1f714bfc5219c655b5", size = 218001, upload-time = "2025-08-07T08:25:21.761Z" }, + { url = "https://files.pythonhosted.org/packages/e2/3f/d6c216ed5199c9ef79e2a33955601f454ed1e7420a93b89670133bca5ace/rpds_py-0.27.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8a1dca5507fa1337f75dcd5070218b20bc68cf8844271c923c1b79dfcbc20391", size = 230993, upload-time = "2025-08-07T08:25:23.34Z" }, ] [[package]] @@ -4413,29 +5119,29 @@ wheels = [ [[package]] name = "safetensors" -version = "0.5.3" +version = "0.6.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/71/7e/2d5d6ee7b40c0682315367ec7475693d110f512922d582fef1bd4a63adc3/safetensors-0.5.3.tar.gz", hash = "sha256:b6b0d6ecacec39a4fdd99cc19f4576f5219ce858e6fd8dbe7609df0b8dc56965", size = 67210, upload-time = "2025-02-26T09:15:13.155Z" } +sdist 
= { url = "https://files.pythonhosted.org/packages/ac/cc/738f3011628920e027a11754d9cae9abec1aed00f7ae860abbf843755233/safetensors-0.6.2.tar.gz", hash = "sha256:43ff2aa0e6fa2dc3ea5524ac7ad93a9839256b8703761e76e2d0b2a3fa4f15d9", size = 197968, upload-time = "2025-08-08T13:13:58.654Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/18/ae/88f6c49dbd0cc4da0e08610019a3c78a7d390879a919411a410a1876d03a/safetensors-0.5.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd20eb133db8ed15b40110b7c00c6df51655a2998132193de2f75f72d99c7073", size = 436917, upload-time = "2025-02-26T09:15:03.702Z" }, - { url = "https://files.pythonhosted.org/packages/b8/3b/11f1b4a2f5d2ab7da34ecc062b0bc301f2be024d110a6466726bec8c055c/safetensors-0.5.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:21d01c14ff6c415c485616b8b0bf961c46b3b343ca59110d38d744e577f9cce7", size = 418419, upload-time = "2025-02-26T09:15:01.765Z" }, - { url = "https://files.pythonhosted.org/packages/5d/9a/add3e6fef267658075c5a41573c26d42d80c935cdc992384dfae435feaef/safetensors-0.5.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11bce6164887cd491ca75c2326a113ba934be596e22b28b1742ce27b1d076467", size = 459493, upload-time = "2025-02-26T09:14:51.812Z" }, - { url = "https://files.pythonhosted.org/packages/df/5c/bf2cae92222513cc23b3ff85c4a1bb2811a2c3583ac0f8e8d502751de934/safetensors-0.5.3-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4a243be3590bc3301c821da7a18d87224ef35cbd3e5f5727e4e0728b8172411e", size = 472400, upload-time = "2025-02-26T09:14:53.549Z" }, - { url = "https://files.pythonhosted.org/packages/58/11/7456afb740bd45782d0f4c8e8e1bb9e572f1bf82899fb6ace58af47b4282/safetensors-0.5.3-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8bd84b12b1670a6f8e50f01e28156422a2bc07fb16fc4e98bded13039d688a0d", size = 522891, upload-time = "2025-02-26T09:14:55.717Z" }, - { url = 
"https://files.pythonhosted.org/packages/57/3d/fe73a9d2ace487e7285f6e157afee2383bd1ddb911b7cb44a55cf812eae3/safetensors-0.5.3-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:391ac8cab7c829452175f871fcaf414aa1e292b5448bd02620f675a7f3e7abb9", size = 537694, upload-time = "2025-02-26T09:14:57.036Z" }, - { url = "https://files.pythonhosted.org/packages/a6/f8/dae3421624fcc87a89d42e1898a798bc7ff72c61f38973a65d60df8f124c/safetensors-0.5.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cead1fa41fc54b1e61089fa57452e8834f798cb1dc7a09ba3524f1eb08e0317a", size = 471642, upload-time = "2025-02-26T09:15:00.544Z" }, - { url = "https://files.pythonhosted.org/packages/ce/20/1fbe16f9b815f6c5a672f5b760951e20e17e43f67f231428f871909a37f6/safetensors-0.5.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1077f3e94182d72618357b04b5ced540ceb71c8a813d3319f1aba448e68a770d", size = 502241, upload-time = "2025-02-26T09:14:58.303Z" }, - { url = "https://files.pythonhosted.org/packages/5f/18/8e108846b506487aa4629fe4116b27db65c3dde922de2c8e0cc1133f3f29/safetensors-0.5.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:799021e78287bac619c7b3f3606730a22da4cda27759ddf55d37c8db7511c74b", size = 638001, upload-time = "2025-02-26T09:15:05.79Z" }, - { url = "https://files.pythonhosted.org/packages/82/5a/c116111d8291af6c8c8a8b40628fe833b9db97d8141c2a82359d14d9e078/safetensors-0.5.3-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:df26da01aaac504334644e1b7642fa000bfec820e7cef83aeac4e355e03195ff", size = 734013, upload-time = "2025-02-26T09:15:07.892Z" }, - { url = "https://files.pythonhosted.org/packages/7d/ff/41fcc4d3b7de837963622e8610d998710705bbde9a8a17221d85e5d0baad/safetensors-0.5.3-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:32c3ef2d7af8b9f52ff685ed0bc43913cdcde135089ae322ee576de93eae5135", size = 670687, upload-time = "2025-02-26T09:15:09.979Z" }, - { url = 
"https://files.pythonhosted.org/packages/40/ad/2b113098e69c985a3d8fbda4b902778eae4a35b7d5188859b4a63d30c161/safetensors-0.5.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:37f1521be045e56fc2b54c606d4455573e717b2d887c579ee1dbba5f868ece04", size = 643147, upload-time = "2025-02-26T09:15:11.185Z" }, - { url = "https://files.pythonhosted.org/packages/0a/0c/95aeb51d4246bd9a3242d3d8349c1112b4ee7611a4b40f0c5c93b05f001d/safetensors-0.5.3-cp38-abi3-win32.whl", hash = "sha256:cfc0ec0846dcf6763b0ed3d1846ff36008c6e7290683b61616c4b040f6a54ace", size = 296677, upload-time = "2025-02-26T09:15:16.554Z" }, - { url = "https://files.pythonhosted.org/packages/69/e2/b011c38e5394c4c18fb5500778a55ec43ad6106126e74723ffaee246f56e/safetensors-0.5.3-cp38-abi3-win_amd64.whl", hash = "sha256:836cbbc320b47e80acd40e44c8682db0e8ad7123209f69b093def21ec7cafd11", size = 308878, upload-time = "2025-02-26T09:15:14.99Z" }, + { url = "https://files.pythonhosted.org/packages/4d/b1/3f5fd73c039fc87dba3ff8b5d528bfc5a32b597fea8e7a6a4800343a17c7/safetensors-0.6.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:9c85ede8ec58f120bad982ec47746981e210492a6db876882aa021446af8ffba", size = 454797, upload-time = "2025-08-08T13:13:52.066Z" }, + { url = "https://files.pythonhosted.org/packages/8c/c9/bb114c158540ee17907ec470d01980957fdaf87b4aa07914c24eba87b9c6/safetensors-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d6675cf4b39c98dbd7d940598028f3742e0375a6b4d4277e76beb0c35f4b843b", size = 432206, upload-time = "2025-08-08T13:13:50.931Z" }, + { url = "https://files.pythonhosted.org/packages/d3/8e/f70c34e47df3110e8e0bb268d90db8d4be8958a54ab0336c9be4fe86dac8/safetensors-0.6.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d2d2b3ce1e2509c68932ca03ab8f20570920cd9754b05063d4368ee52833ecd", size = 473261, upload-time = "2025-08-08T13:13:41.259Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/f5/be9c6a7c7ef773e1996dc214e73485286df1836dbd063e8085ee1976f9cb/safetensors-0.6.2-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:93de35a18f46b0f5a6a1f9e26d91b442094f2df02e9fd7acf224cfec4238821a", size = 485117, upload-time = "2025-08-08T13:13:43.506Z" }, + { url = "https://files.pythonhosted.org/packages/c9/55/23f2d0a2c96ed8665bf17a30ab4ce5270413f4d74b6d87dd663258b9af31/safetensors-0.6.2-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89a89b505f335640f9120fac65ddeb83e40f1fd081cb8ed88b505bdccec8d0a1", size = 616154, upload-time = "2025-08-08T13:13:45.096Z" }, + { url = "https://files.pythonhosted.org/packages/98/c6/affb0bd9ce02aa46e7acddbe087912a04d953d7a4d74b708c91b5806ef3f/safetensors-0.6.2-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc4d0d0b937e04bdf2ae6f70cd3ad51328635fe0e6214aa1fc811f3b576b3bda", size = 520713, upload-time = "2025-08-08T13:13:46.25Z" }, + { url = "https://files.pythonhosted.org/packages/fe/5d/5a514d7b88e310c8b146e2404e0dc161282e78634d9358975fd56dfd14be/safetensors-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8045db2c872db8f4cbe3faa0495932d89c38c899c603f21e9b6486951a5ecb8f", size = 485835, upload-time = "2025-08-08T13:13:49.373Z" }, + { url = "https://files.pythonhosted.org/packages/7a/7b/4fc3b2ba62c352b2071bea9cfbad330fadda70579f617506ae1a2f129cab/safetensors-0.6.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:81e67e8bab9878bb568cffbc5f5e655adb38d2418351dc0859ccac158f753e19", size = 521503, upload-time = "2025-08-08T13:13:47.651Z" }, + { url = "https://files.pythonhosted.org/packages/5a/50/0057e11fe1f3cead9254315a6c106a16dd4b1a19cd247f7cc6414f6b7866/safetensors-0.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b0e4d029ab0a0e0e4fdf142b194514695b1d7d3735503ba700cf36d0fc7136ce", size = 652256, upload-time = "2025-08-08T13:13:53.167Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/29/473f789e4ac242593ac1656fbece6e1ecd860bb289e635e963667807afe3/safetensors-0.6.2-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:fa48268185c52bfe8771e46325a1e21d317207bcabcb72e65c6e28e9ffeb29c7", size = 747281, upload-time = "2025-08-08T13:13:54.656Z" }, + { url = "https://files.pythonhosted.org/packages/68/52/f7324aad7f2df99e05525c84d352dc217e0fa637a4f603e9f2eedfbe2c67/safetensors-0.6.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:d83c20c12c2d2f465997c51b7ecb00e407e5f94d7dec3ea0cc11d86f60d3fde5", size = 692286, upload-time = "2025-08-08T13:13:55.884Z" }, + { url = "https://files.pythonhosted.org/packages/ad/fe/cad1d9762868c7c5dc70c8620074df28ebb1a8e4c17d4c0cb031889c457e/safetensors-0.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d944cea65fad0ead848b6ec2c37cc0b197194bec228f8020054742190e9312ac", size = 655957, upload-time = "2025-08-08T13:13:57.029Z" }, + { url = "https://files.pythonhosted.org/packages/59/a7/e2158e17bbe57d104f0abbd95dff60dda916cf277c9f9663b4bf9bad8b6e/safetensors-0.6.2-cp38-abi3-win32.whl", hash = "sha256:cab75ca7c064d3911411461151cb69380c9225798a20e712b102edda2542ddb1", size = 308926, upload-time = "2025-08-08T13:14:01.095Z" }, + { url = "https://files.pythonhosted.org/packages/2c/c3/c0be1135726618dc1e28d181b8c442403d8dbb9e273fd791de2d4384bcdd/safetensors-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:c7b214870df923cbc1593c3faee16bec59ea462758699bd3fee399d00aac072c", size = 320192, upload-time = "2025-08-08T13:13:59.467Z" }, ] [[package]] name = "scikit-learn" -version = "1.7.0" +version = "1.7.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "joblib" }, @@ -4443,133 +5149,140 @@ dependencies = [ { name = "scipy" }, { name = "threadpoolctl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/df/3b/29fa87e76b1d7b3b77cc1fcbe82e6e6b8cd704410705b008822de530277c/scikit_learn-1.7.0.tar.gz", hash = 
"sha256:c01e869b15aec88e2cdb73d27f15bdbe03bce8e2fb43afbe77c45d399e73a5a3", size = 7178217, upload-time = "2025-06-05T22:02:46.703Z" } +sdist = { url = "https://files.pythonhosted.org/packages/41/84/5f4af978fff619706b8961accac84780a6d298d82a8873446f72edb4ead0/scikit_learn-1.7.1.tar.gz", hash = "sha256:24b3f1e976a4665aa74ee0fcaac2b8fccc6ae77c8e07ab25da3ba6d3292b9802", size = 7190445, upload-time = "2025-07-18T08:01:54.5Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/3a/bffab14e974a665a3ee2d79766e7389572ffcaad941a246931c824afcdb2/scikit_learn-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c2c7243d34aaede0efca7a5a96d67fddaebb4ad7e14a70991b9abee9dc5c0379", size = 11646758, upload-time = "2025-06-05T22:02:09.51Z" }, - { url = "https://files.pythonhosted.org/packages/58/d8/f3249232fa79a70cb40595282813e61453c1e76da3e1a44b77a63dd8d0cb/scikit_learn-1.7.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:9f39f6a811bf3f15177b66c82cbe0d7b1ebad9f190737dcdef77cfca1ea3c19c", size = 10673971, upload-time = "2025-06-05T22:02:12.217Z" }, - { url = "https://files.pythonhosted.org/packages/67/93/eb14c50533bea2f77758abe7d60a10057e5f2e2cdcf0a75a14c6bc19c734/scikit_learn-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63017a5f9a74963d24aac7590287149a8d0f1a0799bbe7173c0d8ba1523293c0", size = 11818428, upload-time = "2025-06-05T22:02:14.947Z" }, - { url = "https://files.pythonhosted.org/packages/08/17/804cc13b22a8663564bb0b55fb89e661a577e4e88a61a39740d58b909efe/scikit_learn-1.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b2f8a0b1e73e9a08b7cc498bb2aeab36cdc1f571f8ab2b35c6e5d1c7115d97d", size = 12505887, upload-time = "2025-06-05T22:02:17.824Z" }, - { url = "https://files.pythonhosted.org/packages/68/c7/4e956281a077f4835458c3f9656c666300282d5199039f26d9de1dabd9be/scikit_learn-1.7.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:34cc8d9d010d29fb2b7cbcd5ccc24ffdd80515f65fe9f1e4894ace36b267ce19", size = 10668129, upload-time = "2025-06-05T22:02:20.536Z" }, - { url = "https://files.pythonhosted.org/packages/9a/c3/a85dcccdaf1e807e6f067fa95788a6485b0491d9ea44fd4c812050d04f45/scikit_learn-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5b7974f1f32bc586c90145df51130e02267e4b7e77cab76165c76cf43faca0d9", size = 11559841, upload-time = "2025-06-05T22:02:23.308Z" }, - { url = "https://files.pythonhosted.org/packages/d8/57/eea0de1562cc52d3196eae51a68c5736a31949a465f0b6bb3579b2d80282/scikit_learn-1.7.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:014e07a23fe02e65f9392898143c542a50b6001dbe89cb867e19688e468d049b", size = 10616463, upload-time = "2025-06-05T22:02:26.068Z" }, - { url = "https://files.pythonhosted.org/packages/10/a4/39717ca669296dfc3a62928393168da88ac9d8cbec88b6321ffa62c6776f/scikit_learn-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7e7ced20582d3a5516fb6f405fd1d254e1f5ce712bfef2589f51326af6346e8", size = 11766512, upload-time = "2025-06-05T22:02:28.689Z" }, - { url = "https://files.pythonhosted.org/packages/d5/cd/a19722241d5f7b51e08351e1e82453e0057aeb7621b17805f31fcb57bb6c/scikit_learn-1.7.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1babf2511e6ffd695da7a983b4e4d6de45dce39577b26b721610711081850906", size = 12461075, upload-time = "2025-06-05T22:02:31.233Z" }, - { url = "https://files.pythonhosted.org/packages/f3/bc/282514272815c827a9acacbe5b99f4f1a4bc5961053719d319480aee0812/scikit_learn-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:5abd2acff939d5bd4701283f009b01496832d50ddafa83c90125a4e41c33e314", size = 10652517, upload-time = "2025-06-05T22:02:34.139Z" }, - { url = "https://files.pythonhosted.org/packages/ea/78/7357d12b2e4c6674175f9a09a3ba10498cde8340e622715bcc71e532981d/scikit_learn-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:e39d95a929b112047c25b775035c8c234c5ca67e681ce60d12413afb501129f7", size = 12111822, upload-time = "2025-06-05T22:02:36.904Z" }, - { url = "https://files.pythonhosted.org/packages/d0/0c/9c3715393343f04232f9d81fe540eb3831d0b4ec351135a145855295110f/scikit_learn-1.7.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:0521cb460426c56fee7e07f9365b0f45ec8ca7b2d696534ac98bfb85e7ae4775", size = 11325286, upload-time = "2025-06-05T22:02:39.739Z" }, - { url = "https://files.pythonhosted.org/packages/64/e0/42282ad3dd70b7c1a5f65c412ac3841f6543502a8d6263cae7b466612dc9/scikit_learn-1.7.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:317ca9f83acbde2883bd6bb27116a741bfcb371369706b4f9973cf30e9a03b0d", size = 12380865, upload-time = "2025-06-05T22:02:42.137Z" }, - { url = "https://files.pythonhosted.org/packages/4e/d0/3ef4ab2c6be4aa910445cd09c5ef0b44512e3de2cfb2112a88bb647d2cf7/scikit_learn-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:126c09740a6f016e815ab985b21e3a0656835414521c81fc1a8da78b679bdb75", size = 11549609, upload-time = "2025-06-05T22:02:44.483Z" }, + { url = "https://files.pythonhosted.org/packages/cb/16/57f176585b35ed865f51b04117947fe20f130f78940c6477b6d66279c9c2/scikit_learn-1.7.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3cee419b49b5bbae8796ecd690f97aa412ef1674410c23fc3257c6b8b85b8087", size = 9260431, upload-time = "2025-07-18T08:01:22.77Z" }, + { url = "https://files.pythonhosted.org/packages/67/4e/899317092f5efcab0e9bc929e3391341cec8fb0e816c4789686770024580/scikit_learn-1.7.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2fd8b8d35817b0d9ebf0b576f7d5ffbbabdb55536b0655a8aaae629d7ffd2e1f", size = 8637191, upload-time = "2025-07-18T08:01:24.731Z" }, + { url = "https://files.pythonhosted.org/packages/f3/1b/998312db6d361ded1dd56b457ada371a8d8d77ca2195a7d18fd8a1736f21/scikit_learn-1.7.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:588410fa19a96a69763202f1d6b7b91d5d7a5d73be36e189bc6396bfb355bd87", size = 9486346, upload-time = "2025-07-18T08:01:26.713Z" }, + { url = "https://files.pythonhosted.org/packages/ad/09/a2aa0b4e644e5c4ede7006748f24e72863ba2ae71897fecfd832afea01b4/scikit_learn-1.7.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e3142f0abe1ad1d1c31a2ae987621e41f6b578144a911ff4ac94781a583adad7", size = 9290988, upload-time = "2025-07-18T08:01:28.938Z" }, + { url = "https://files.pythonhosted.org/packages/15/fa/c61a787e35f05f17fc10523f567677ec4eeee5f95aa4798dbbbcd9625617/scikit_learn-1.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:3ddd9092c1bd469acab337d87930067c87eac6bd544f8d5027430983f1e1ae88", size = 8735568, upload-time = "2025-07-18T08:01:30.936Z" }, + { url = "https://files.pythonhosted.org/packages/52/f8/e0533303f318a0f37b88300d21f79b6ac067188d4824f1047a37214ab718/scikit_learn-1.7.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b7839687fa46d02e01035ad775982f2470be2668e13ddd151f0f55a5bf123bae", size = 9213143, upload-time = "2025-07-18T08:01:32.942Z" }, + { url = "https://files.pythonhosted.org/packages/71/f3/f1df377d1bdfc3e3e2adc9c119c238b182293e6740df4cbeac6de2cc3e23/scikit_learn-1.7.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:a10f276639195a96c86aa572ee0698ad64ee939a7b042060b98bd1930c261d10", size = 8591977, upload-time = "2025-07-18T08:01:34.967Z" }, + { url = "https://files.pythonhosted.org/packages/99/72/c86a4cd867816350fe8dee13f30222340b9cd6b96173955819a5561810c5/scikit_learn-1.7.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:13679981fdaebc10cc4c13c43344416a86fcbc61449cb3e6517e1df9d12c8309", size = 9436142, upload-time = "2025-07-18T08:01:37.397Z" }, + { url = "https://files.pythonhosted.org/packages/e8/66/277967b29bd297538dc7a6ecfb1a7dce751beabd0d7f7a2233be7a4f7832/scikit_learn-1.7.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:4f1262883c6a63f067a980a8cdd2d2e7f2513dddcef6a9eaada6416a7a7cbe43", size = 9282996, upload-time = "2025-07-18T08:01:39.721Z" }, + { url = "https://files.pythonhosted.org/packages/e2/47/9291cfa1db1dae9880420d1e07dbc7e8dd4a7cdbc42eaba22512e6bde958/scikit_learn-1.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:ca6d31fb10e04d50bfd2b50d66744729dbb512d4efd0223b864e2fdbfc4cee11", size = 8707418, upload-time = "2025-07-18T08:01:42.124Z" }, + { url = "https://files.pythonhosted.org/packages/61/95/45726819beccdaa34d3362ea9b2ff9f2b5d3b8bf721bd632675870308ceb/scikit_learn-1.7.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:781674d096303cfe3d351ae6963ff7c958db61cde3421cd490e3a5a58f2a94ae", size = 9561466, upload-time = "2025-07-18T08:01:44.195Z" }, + { url = "https://files.pythonhosted.org/packages/ee/1c/6f4b3344805de783d20a51eb24d4c9ad4b11a7f75c1801e6ec6d777361fd/scikit_learn-1.7.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:10679f7f125fe7ecd5fad37dd1aa2daae7e3ad8df7f3eefa08901b8254b3e12c", size = 9040467, upload-time = "2025-07-18T08:01:46.671Z" }, + { url = "https://files.pythonhosted.org/packages/6f/80/abe18fe471af9f1d181904203d62697998b27d9b62124cd281d740ded2f9/scikit_learn-1.7.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1f812729e38c8cb37f760dce71a9b83ccfb04f59b3dca7c6079dcdc60544fa9e", size = 9532052, upload-time = "2025-07-18T08:01:48.676Z" }, + { url = "https://files.pythonhosted.org/packages/14/82/b21aa1e0c4cee7e74864d3a5a721ab8fcae5ca55033cb6263dca297ed35b/scikit_learn-1.7.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:88e1a20131cf741b84b89567e1717f27a2ced228e0f29103426102bc2e3b8ef7", size = 9361575, upload-time = "2025-07-18T08:01:50.639Z" }, + { url = "https://files.pythonhosted.org/packages/f2/20/f4777fcd5627dc6695fa6b92179d0edb7a3ac1b91bcd9a1c7f64fa7ade23/scikit_learn-1.7.1-cp313-cp313t-win_amd64.whl", hash = 
"sha256:b1bd1d919210b6a10b7554b717c9000b5485aa95a1d0f177ae0d7ee8ec750da5", size = 9277310, upload-time = "2025-07-18T08:01:52.547Z" }, ] [[package]] name = "scipy" -version = "1.16.0" +version = "1.16.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/81/18/b06a83f0c5ee8cddbde5e3f3d0bb9b702abfa5136ef6d4620ff67df7eee5/scipy-1.16.0.tar.gz", hash = "sha256:b5ef54021e832869c8cfb03bc3bf20366cbcd426e02a58e8a58d7584dfbb8f62", size = 30581216, upload-time = "2025-06-22T16:27:55.782Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/01/c0/c943bc8d2bbd28123ad0f4f1eef62525fa1723e84d136b32965dcb6bad3a/scipy-1.16.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:7eb6bd33cef4afb9fa5f1fb25df8feeb1e52d94f21a44f1d17805b41b1da3180", size = 36459071, upload-time = "2025-06-22T16:19:06.605Z" }, - { url = "https://files.pythonhosted.org/packages/99/0d/270e2e9f1a4db6ffbf84c9a0b648499842046e4e0d9b2275d150711b3aba/scipy-1.16.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:1dbc8fdba23e4d80394ddfab7a56808e3e6489176d559c6c71935b11a2d59db1", size = 28490500, upload-time = "2025-06-22T16:19:11.775Z" }, - { url = "https://files.pythonhosted.org/packages/1c/22/01d7ddb07cff937d4326198ec8d10831367a708c3da72dfd9b7ceaf13028/scipy-1.16.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:7dcf42c380e1e3737b343dec21095c9a9ad3f9cbe06f9c05830b44b1786c9e90", size = 20762345, upload-time = "2025-06-22T16:19:15.813Z" }, - { url = "https://files.pythonhosted.org/packages/34/7f/87fd69856569ccdd2a5873fe5d7b5bbf2ad9289d7311d6a3605ebde3a94b/scipy-1.16.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:26ec28675f4a9d41587266084c626b02899db373717d9312fa96ab17ca1ae94d", size = 23418563, upload-time = "2025-06-22T16:19:20.746Z" }, - { url = 
"https://files.pythonhosted.org/packages/f6/f1/e4f4324fef7f54160ab749efbab6a4bf43678a9eb2e9817ed71a0a2fd8de/scipy-1.16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:952358b7e58bd3197cfbd2f2f2ba829f258404bdf5db59514b515a8fe7a36c52", size = 33203951, upload-time = "2025-06-22T16:19:25.813Z" }, - { url = "https://files.pythonhosted.org/packages/6d/f0/b6ac354a956384fd8abee2debbb624648125b298f2c4a7b4f0d6248048a5/scipy-1.16.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:03931b4e870c6fef5b5c0970d52c9f6ddd8c8d3e934a98f09308377eba6f3824", size = 35070225, upload-time = "2025-06-22T16:19:31.416Z" }, - { url = "https://files.pythonhosted.org/packages/e5/73/5cbe4a3fd4bc3e2d67ffad02c88b83edc88f381b73ab982f48f3df1a7790/scipy-1.16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:512c4f4f85912767c351a0306824ccca6fd91307a9f4318efe8fdbd9d30562ef", size = 35389070, upload-time = "2025-06-22T16:19:37.387Z" }, - { url = "https://files.pythonhosted.org/packages/86/e8/a60da80ab9ed68b31ea5a9c6dfd3c2f199347429f229bf7f939a90d96383/scipy-1.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e69f798847e9add03d512eaf5081a9a5c9a98757d12e52e6186ed9681247a1ac", size = 37825287, upload-time = "2025-06-22T16:19:43.375Z" }, - { url = "https://files.pythonhosted.org/packages/ea/b5/29fece1a74c6a94247f8a6fb93f5b28b533338e9c34fdcc9cfe7a939a767/scipy-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:adf9b1999323ba335adc5d1dc7add4781cb5a4b0ef1e98b79768c05c796c4e49", size = 38431929, upload-time = "2025-06-22T16:19:49.385Z" }, - { url = "https://files.pythonhosted.org/packages/46/95/0746417bc24be0c2a7b7563946d61f670a3b491b76adede420e9d173841f/scipy-1.16.0-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:e9f414cbe9ca289a73e0cc92e33a6a791469b6619c240aa32ee18abdce8ab451", size = 36418162, upload-time = "2025-06-22T16:19:56.3Z" }, - { url = 
"https://files.pythonhosted.org/packages/19/5a/914355a74481b8e4bbccf67259bbde171348a3f160b67b4945fbc5f5c1e5/scipy-1.16.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:bbba55fb97ba3cdef9b1ee973f06b09d518c0c7c66a009c729c7d1592be1935e", size = 28465985, upload-time = "2025-06-22T16:20:01.238Z" }, - { url = "https://files.pythonhosted.org/packages/58/46/63477fc1246063855969cbefdcee8c648ba4b17f67370bd542ba56368d0b/scipy-1.16.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:58e0d4354eacb6004e7aa1cd350e5514bd0270acaa8d5b36c0627bb3bb486974", size = 20737961, upload-time = "2025-06-22T16:20:05.913Z" }, - { url = "https://files.pythonhosted.org/packages/93/86/0fbb5588b73555e40f9d3d6dde24ee6fac7d8e301a27f6f0cab9d8f66ff2/scipy-1.16.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:75b2094ec975c80efc273567436e16bb794660509c12c6a31eb5c195cbf4b6dc", size = 23377941, upload-time = "2025-06-22T16:20:10.668Z" }, - { url = "https://files.pythonhosted.org/packages/ca/80/a561f2bf4c2da89fa631b3cbf31d120e21ea95db71fd9ec00cb0247c7a93/scipy-1.16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b65d232157a380fdd11a560e7e21cde34fdb69d65c09cb87f6cc024ee376351", size = 33196703, upload-time = "2025-06-22T16:20:16.097Z" }, - { url = "https://files.pythonhosted.org/packages/11/6b/3443abcd0707d52e48eb315e33cc669a95e29fc102229919646f5a501171/scipy-1.16.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d8747f7736accd39289943f7fe53a8333be7f15a82eea08e4afe47d79568c32", size = 35083410, upload-time = "2025-06-22T16:20:21.734Z" }, - { url = "https://files.pythonhosted.org/packages/20/ab/eb0fc00e1e48961f1bd69b7ad7e7266896fe5bad4ead91b5fc6b3561bba4/scipy-1.16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eb9f147a1b8529bb7fec2a85cf4cf42bdfadf9e83535c309a11fdae598c88e8b", size = 35387829, upload-time = "2025-06-22T16:20:27.548Z" }, - { url = 
"https://files.pythonhosted.org/packages/57/9e/d6fc64e41fad5d481c029ee5a49eefc17f0b8071d636a02ceee44d4a0de2/scipy-1.16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d2b83c37edbfa837a8923d19c749c1935ad3d41cf196006a24ed44dba2ec4358", size = 37841356, upload-time = "2025-06-22T16:20:35.112Z" }, - { url = "https://files.pythonhosted.org/packages/7c/a7/4c94bbe91f12126b8bf6709b2471900577b7373a4fd1f431f28ba6f81115/scipy-1.16.0-cp313-cp313-win_amd64.whl", hash = "sha256:79a3c13d43c95aa80b87328a46031cf52508cf5f4df2767602c984ed1d3c6bbe", size = 38403710, upload-time = "2025-06-22T16:21:54.473Z" }, - { url = "https://files.pythonhosted.org/packages/47/20/965da8497f6226e8fa90ad3447b82ed0e28d942532e92dd8b91b43f100d4/scipy-1.16.0-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:f91b87e1689f0370690e8470916fe1b2308e5b2061317ff76977c8f836452a47", size = 36813833, upload-time = "2025-06-22T16:20:43.925Z" }, - { url = "https://files.pythonhosted.org/packages/28/f4/197580c3dac2d234e948806e164601c2df6f0078ed9f5ad4a62685b7c331/scipy-1.16.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:88a6ca658fb94640079e7a50b2ad3b67e33ef0f40e70bdb7dc22017dae73ac08", size = 28974431, upload-time = "2025-06-22T16:20:51.302Z" }, - { url = "https://files.pythonhosted.org/packages/8a/fc/e18b8550048d9224426e76906694c60028dbdb65d28b1372b5503914b89d/scipy-1.16.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:ae902626972f1bd7e4e86f58fd72322d7f4ec7b0cfc17b15d4b7006efc385176", size = 21246454, upload-time = "2025-06-22T16:20:57.276Z" }, - { url = "https://files.pythonhosted.org/packages/8c/48/07b97d167e0d6a324bfd7484cd0c209cc27338b67e5deadae578cf48e809/scipy-1.16.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:8cb824c1fc75ef29893bc32b3ddd7b11cf9ab13c1127fe26413a05953b8c32ed", size = 23772979, upload-time = "2025-06-22T16:21:03.363Z" }, - { url = 
"https://files.pythonhosted.org/packages/4c/4f/9efbd3f70baf9582edf271db3002b7882c875ddd37dc97f0f675ad68679f/scipy-1.16.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:de2db7250ff6514366a9709c2cba35cb6d08498e961cba20d7cff98a7ee88938", size = 33341972, upload-time = "2025-06-22T16:21:11.14Z" }, - { url = "https://files.pythonhosted.org/packages/3f/dc/9e496a3c5dbe24e76ee24525155ab7f659c20180bab058ef2c5fa7d9119c/scipy-1.16.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e85800274edf4db8dd2e4e93034f92d1b05c9421220e7ded9988b16976f849c1", size = 35185476, upload-time = "2025-06-22T16:21:19.156Z" }, - { url = "https://files.pythonhosted.org/packages/ce/b3/21001cff985a122ba434c33f2c9d7d1dc3b669827e94f4fc4e1fe8b9dfd8/scipy-1.16.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4f720300a3024c237ace1cb11f9a84c38beb19616ba7c4cdcd771047a10a1706", size = 35570990, upload-time = "2025-06-22T16:21:27.797Z" }, - { url = "https://files.pythonhosted.org/packages/e5/d3/7ba42647d6709251cdf97043d0c107e0317e152fa2f76873b656b509ff55/scipy-1.16.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:aad603e9339ddb676409b104c48a027e9916ce0d2838830691f39552b38a352e", size = 37950262, upload-time = "2025-06-22T16:21:36.976Z" }, - { url = "https://files.pythonhosted.org/packages/eb/c4/231cac7a8385394ebbbb4f1ca662203e9d8c332825ab4f36ffc3ead09a42/scipy-1.16.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f56296fefca67ba605fd74d12f7bd23636267731a72cb3947963e76b8c0a25db", size = 38515076, upload-time = "2025-06-22T16:21:45.694Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/f5/4a/b927028464795439faec8eaf0b03b011005c487bb2d07409f28bf30879c4/scipy-1.16.1.tar.gz", hash = "sha256:44c76f9e8b6e8e488a586190ab38016e4ed2f8a038af7cd3defa903c0a2238b3", size = 30580861, upload-time = "2025-07-27T16:33:30.834Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f8/d9/ec4864f5896232133f51382b54a08de91a9d1af7a76dfa372894026dfee2/scipy-1.16.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:81b433bbeaf35728dad619afc002db9b189e45eebe2cd676effe1fb93fef2b9c", size = 36575194, upload-time = "2025-07-27T16:27:41.321Z" }, + { url = "https://files.pythonhosted.org/packages/5c/6d/40e81ecfb688e9d25d34a847dca361982a6addf8e31f0957b1a54fbfa994/scipy-1.16.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:886cc81fdb4c6903a3bb0464047c25a6d1016fef77bb97949817d0c0d79f9e04", size = 28594590, upload-time = "2025-07-27T16:27:49.204Z" }, + { url = "https://files.pythonhosted.org/packages/0e/37/9f65178edfcc629377ce9a64fc09baebea18c80a9e57ae09a52edf84880b/scipy-1.16.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:15240c3aac087a522b4eaedb09f0ad061753c5eebf1ea430859e5bf8640d5919", size = 20866458, upload-time = "2025-07-27T16:27:54.98Z" }, + { url = "https://files.pythonhosted.org/packages/2c/7b/749a66766871ea4cb1d1ea10f27004db63023074c22abed51f22f09770e0/scipy-1.16.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:65f81a25805f3659b48126b5053d9e823d3215e4a63730b5e1671852a1705921", size = 23539318, upload-time = "2025-07-27T16:28:01.604Z" }, + { url = "https://files.pythonhosted.org/packages/c4/db/8d4afec60eb833a666434d4541a3151eedbf2494ea6d4d468cbe877f00cd/scipy-1.16.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6c62eea7f607f122069b9bad3f99489ddca1a5173bef8a0c75555d7488b6f725", size = 33292899, upload-time = "2025-07-27T16:28:09.147Z" }, + { url = "https://files.pythonhosted.org/packages/51/1e/79023ca3bbb13a015d7d2757ecca3b81293c663694c35d6541b4dca53e98/scipy-1.16.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f965bbf3235b01c776115ab18f092a95aa74c271a52577bcb0563e85738fd618", size = 35162637, upload-time = "2025-07-27T16:28:17.535Z" }, + { url = 
"https://files.pythonhosted.org/packages/b6/49/0648665f9c29fdaca4c679182eb972935b3b4f5ace41d323c32352f29816/scipy-1.16.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f006e323874ffd0b0b816d8c6a8e7f9a73d55ab3b8c3f72b752b226d0e3ac83d", size = 35490507, upload-time = "2025-07-27T16:28:25.705Z" }, + { url = "https://files.pythonhosted.org/packages/62/8f/66cbb9d6bbb18d8c658f774904f42a92078707a7c71e5347e8bf2f52bb89/scipy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8fd15fc5085ab4cca74cb91fe0a4263b1f32e4420761ddae531ad60934c2119", size = 37923998, upload-time = "2025-07-27T16:28:34.339Z" }, + { url = "https://files.pythonhosted.org/packages/14/c3/61f273ae550fbf1667675701112e380881905e28448c080b23b5a181df7c/scipy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:f7b8013c6c066609577d910d1a2a077021727af07b6fab0ee22c2f901f22352a", size = 38508060, upload-time = "2025-07-27T16:28:43.242Z" }, + { url = "https://files.pythonhosted.org/packages/93/0b/b5c99382b839854a71ca9482c684e3472badc62620287cbbdab499b75ce6/scipy-1.16.1-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:5451606823a5e73dfa621a89948096c6528e2896e40b39248295d3a0138d594f", size = 36533717, upload-time = "2025-07-27T16:28:51.706Z" }, + { url = "https://files.pythonhosted.org/packages/eb/e5/69ab2771062c91e23e07c12e7d5033a6b9b80b0903ee709c3c36b3eb520c/scipy-1.16.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:89728678c5ca5abd610aee148c199ac1afb16e19844401ca97d43dc548a354eb", size = 28570009, upload-time = "2025-07-27T16:28:57.017Z" }, + { url = "https://files.pythonhosted.org/packages/f4/69/bd75dbfdd3cf524f4d753484d723594aed62cfaac510123e91a6686d520b/scipy-1.16.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e756d688cb03fd07de0fffad475649b03cb89bee696c98ce508b17c11a03f95c", size = 20841942, upload-time = "2025-07-27T16:29:01.152Z" }, + { url = 
"https://files.pythonhosted.org/packages/ea/74/add181c87663f178ba7d6144b370243a87af8476664d5435e57d599e6874/scipy-1.16.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5aa2687b9935da3ed89c5dbed5234576589dd28d0bf7cd237501ccfbdf1ad608", size = 23498507, upload-time = "2025-07-27T16:29:05.202Z" }, + { url = "https://files.pythonhosted.org/packages/1d/74/ece2e582a0d9550cee33e2e416cc96737dce423a994d12bbe59716f47ff1/scipy-1.16.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0851f6a1e537fe9399f35986897e395a1aa61c574b178c0d456be5b1a0f5ca1f", size = 33286040, upload-time = "2025-07-27T16:29:10.201Z" }, + { url = "https://files.pythonhosted.org/packages/e4/82/08e4076df538fb56caa1d489588d880ec7c52d8273a606bb54d660528f7c/scipy-1.16.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fedc2cbd1baed37474b1924c331b97bdff611d762c196fac1a9b71e67b813b1b", size = 35176096, upload-time = "2025-07-27T16:29:17.091Z" }, + { url = "https://files.pythonhosted.org/packages/fa/79/cd710aab8c921375711a8321c6be696e705a120e3011a643efbbcdeeabcc/scipy-1.16.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2ef500e72f9623a6735769e4b93e9dcb158d40752cdbb077f305487e3e2d1f45", size = 35490328, upload-time = "2025-07-27T16:29:22.928Z" }, + { url = "https://files.pythonhosted.org/packages/71/73/e9cc3d35ee4526d784520d4494a3e1ca969b071fb5ae5910c036a375ceec/scipy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:978d8311674b05a8f7ff2ea6c6bce5d8b45a0cb09d4c5793e0318f448613ea65", size = 37939921, upload-time = "2025-07-27T16:29:29.108Z" }, + { url = "https://files.pythonhosted.org/packages/21/12/c0efd2941f01940119b5305c375ae5c0fcb7ec193f806bd8f158b73a1782/scipy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:81929ed0fa7a5713fcdd8b2e6f73697d3b4c4816d090dd34ff937c20fa90e8ab", size = 38479462, upload-time = "2025-07-27T16:30:24.078Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/19/c3d08b675260046a991040e1ea5d65f91f40c7df1045fffff412dcfc6765/scipy-1.16.1-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:bcc12db731858abda693cecdb3bdc9e6d4bd200213f49d224fe22df82687bdd6", size = 36938832, upload-time = "2025-07-27T16:29:35.057Z" }, + { url = "https://files.pythonhosted.org/packages/81/f2/ce53db652c033a414a5b34598dba6b95f3d38153a2417c5a3883da429029/scipy-1.16.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:744d977daa4becb9fc59135e75c069f8d301a87d64f88f1e602a9ecf51e77b27", size = 29093084, upload-time = "2025-07-27T16:29:40.201Z" }, + { url = "https://files.pythonhosted.org/packages/a9/ae/7a10ff04a7dc15f9057d05b33737ade244e4bd195caa3f7cc04d77b9e214/scipy-1.16.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:dc54f76ac18073bcecffb98d93f03ed6b81a92ef91b5d3b135dcc81d55a724c7", size = 21365098, upload-time = "2025-07-27T16:29:44.295Z" }, + { url = "https://files.pythonhosted.org/packages/36/ac/029ff710959932ad3c2a98721b20b405f05f752f07344622fd61a47c5197/scipy-1.16.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:367d567ee9fc1e9e2047d31f39d9d6a7a04e0710c86e701e053f237d14a9b4f6", size = 23896858, upload-time = "2025-07-27T16:29:48.784Z" }, + { url = "https://files.pythonhosted.org/packages/71/13/d1ef77b6bd7898720e1f0b6b3743cb945f6c3cafa7718eaac8841035ab60/scipy-1.16.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4cf5785e44e19dcd32a0e4807555e1e9a9b8d475c6afff3d21c3c543a6aa84f4", size = 33438311, upload-time = "2025-07-27T16:29:54.164Z" }, + { url = "https://files.pythonhosted.org/packages/2d/e0/e64a6821ffbb00b4c5b05169f1c1fddb4800e9307efe3db3788995a82a2c/scipy-1.16.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3d0b80fb26d3e13a794c71d4b837e2a589d839fd574a6bbb4ee1288c213ad4a3", size = 35279542, upload-time = "2025-07-27T16:30:00.249Z" }, + { url = 
"https://files.pythonhosted.org/packages/57/59/0dc3c8b43e118f1e4ee2b798dcc96ac21bb20014e5f1f7a8e85cc0653bdb/scipy-1.16.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8503517c44c18d1030d666cb70aaac1cc8913608816e06742498833b128488b7", size = 35667665, upload-time = "2025-07-27T16:30:05.916Z" }, + { url = "https://files.pythonhosted.org/packages/45/5f/844ee26e34e2f3f9f8febb9343748e72daeaec64fe0c70e9bf1ff84ec955/scipy-1.16.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:30cc4bb81c41831ecfd6dc450baf48ffd80ef5aed0f5cf3ea775740e80f16ecc", size = 38045210, upload-time = "2025-07-27T16:30:11.655Z" }, + { url = "https://files.pythonhosted.org/packages/8d/d7/210f2b45290f444f1de64bc7353aa598ece9f0e90c384b4a156f9b1a5063/scipy-1.16.1-cp313-cp313t-win_amd64.whl", hash = "sha256:c24fa02f7ed23ae514460a22c57eca8f530dbfa50b1cfdbf4f37c05b5309cc39", size = 38593661, upload-time = "2025-07-27T16:30:17.825Z" }, + { url = "https://files.pythonhosted.org/packages/81/ea/84d481a5237ed223bd3d32d6e82d7a6a96e34756492666c260cef16011d1/scipy-1.16.1-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:796a5a9ad36fa3a782375db8f4241ab02a091308eb079746bc0f874c9b998318", size = 36525921, upload-time = "2025-07-27T16:30:30.081Z" }, + { url = "https://files.pythonhosted.org/packages/4e/9f/d9edbdeff9f3a664807ae3aea383e10afaa247e8e6255e6d2aa4515e8863/scipy-1.16.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:3ea0733a2ff73fd6fdc5fecca54ee9b459f4d74f00b99aced7d9a3adb43fb1cc", size = 28564152, upload-time = "2025-07-27T16:30:35.336Z" }, + { url = "https://files.pythonhosted.org/packages/3b/95/8125bcb1fe04bc267d103e76516243e8d5e11229e6b306bda1024a5423d1/scipy-1.16.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:85764fb15a2ad994e708258bb4ed8290d1305c62a4e1ef07c414356a24fcfbf8", size = 20836028, upload-time = "2025-07-27T16:30:39.421Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/9c/bf92e215701fc70bbcd3d14d86337cf56a9b912a804b9c776a269524a9e9/scipy-1.16.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:ca66d980469cb623b1759bdd6e9fd97d4e33a9fad5b33771ced24d0cb24df67e", size = 23489666, upload-time = "2025-07-27T16:30:43.663Z" }, + { url = "https://files.pythonhosted.org/packages/5e/00/5e941d397d9adac41b02839011594620d54d99488d1be5be755c00cde9ee/scipy-1.16.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e7cc1ffcc230f568549fc56670bcf3df1884c30bd652c5da8138199c8c76dae0", size = 33358318, upload-time = "2025-07-27T16:30:48.982Z" }, + { url = "https://files.pythonhosted.org/packages/0e/87/8db3aa10dde6e3e8e7eb0133f24baa011377d543f5b19c71469cf2648026/scipy-1.16.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ddfb1e8d0b540cb4ee9c53fc3dea3186f97711248fb94b4142a1b27178d8b4b", size = 35185724, upload-time = "2025-07-27T16:30:54.26Z" }, + { url = "https://files.pythonhosted.org/packages/89/b4/6ab9ae443216807622bcff02690262d8184078ea467efee2f8c93288a3b1/scipy-1.16.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4dc0e7be79e95d8ba3435d193e0d8ce372f47f774cffd882f88ea4e1e1ddc731", size = 35554335, upload-time = "2025-07-27T16:30:59.765Z" }, + { url = "https://files.pythonhosted.org/packages/9c/9a/d0e9dc03c5269a1afb60661118296a32ed5d2c24298af61b676c11e05e56/scipy-1.16.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f23634f9e5adb51b2a77766dac217063e764337fbc816aa8ad9aaebcd4397fd3", size = 37960310, upload-time = "2025-07-27T16:31:06.151Z" }, + { url = "https://files.pythonhosted.org/packages/5e/00/c8f3130a50521a7977874817ca89e0599b1b4ee8e938bad8ae798a0e1f0d/scipy-1.16.1-cp314-cp314-win_amd64.whl", hash = "sha256:57d75524cb1c5a374958a2eae3d84e1929bb971204cc9d52213fb8589183fc19", size = 39319239, upload-time = "2025-07-27T16:31:59.942Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/f2/1ca3eda54c3a7e4c92f6acef7db7b3a057deb135540d23aa6343ef8ad333/scipy-1.16.1-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:d8da7c3dd67bcd93f15618938f43ed0995982eb38973023d46d4646c4283ad65", size = 36939460, upload-time = "2025-07-27T16:31:11.865Z" }, + { url = "https://files.pythonhosted.org/packages/80/30/98c2840b293a132400c0940bb9e140171dcb8189588619048f42b2ce7b4f/scipy-1.16.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:cc1d2f2fd48ba1e0620554fe5bc44d3e8f5d4185c8c109c7fbdf5af2792cfad2", size = 29093322, upload-time = "2025-07-27T16:31:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/c1/e6/1e6e006e850622cf2a039b62d1a6ddc4497d4851e58b68008526f04a9a00/scipy-1.16.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:21a611ced9275cb861bacadbada0b8c0623bc00b05b09eb97f23b370fc2ae56d", size = 21365329, upload-time = "2025-07-27T16:31:21.188Z" }, + { url = "https://files.pythonhosted.org/packages/8e/02/72a5aa5b820589dda9a25e329ca752842bfbbaf635e36bc7065a9b42216e/scipy-1.16.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:8dfbb25dffc4c3dd9371d8ab456ca81beeaf6f9e1c2119f179392f0dc1ab7695", size = 23897544, upload-time = "2025-07-27T16:31:25.408Z" }, + { url = "https://files.pythonhosted.org/packages/2b/dc/7122d806a6f9eb8a33532982234bed91f90272e990f414f2830cfe656e0b/scipy-1.16.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f0ebb7204f063fad87fc0a0e4ff4a2ff40b2a226e4ba1b7e34bf4b79bf97cd86", size = 33442112, upload-time = "2025-07-27T16:31:30.62Z" }, + { url = "https://files.pythonhosted.org/packages/24/39/e383af23564daa1021a5b3afbe0d8d6a68ec639b943661841f44ac92de85/scipy-1.16.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f1b9e5962656f2734c2b285a8745358ecb4e4efbadd00208c80a389227ec61ff", size = 35286594, upload-time = "2025-07-27T16:31:36.112Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/47/1a0b0aff40c3056d955f38b0df5d178350c3d74734ec54f9c68d23910be5/scipy-1.16.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e1a106f8c023d57a2a903e771228bf5c5b27b5d692088f457acacd3b54511e4", size = 35665080, upload-time = "2025-07-27T16:31:42.025Z" }, + { url = "https://files.pythonhosted.org/packages/64/df/ce88803e9ed6e27fe9b9abefa157cf2c80e4fa527cf17ee14be41f790ad4/scipy-1.16.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:709559a1db68a9abc3b2c8672c4badf1614f3b440b3ab326d86a5c0491eafae3", size = 38050306, upload-time = "2025-07-27T16:31:48.109Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6c/a76329897a7cae4937d403e623aa6aaea616a0bb5b36588f0b9d1c9a3739/scipy-1.16.1-cp314-cp314t-win_amd64.whl", hash = "sha256:c0c804d60492a0aad7f5b2bb1862f4548b990049e27e828391ff2bf6f7199998", size = 39427705, upload-time = "2025-07-27T16:31:53.96Z" }, ] [[package]] name = "sentencepiece" -version = "0.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c9/d2/b9c7ca067c26d8ff085d252c89b5f69609ca93fb85a00ede95f4857865d4/sentencepiece-0.2.0.tar.gz", hash = "sha256:a52c19171daaf2e697dc6cbe67684e0fa341b1248966f6aebb541de654d15843", size = 2632106, upload-time = "2024-02-19T17:06:47.428Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/27/5a/141b227ed54293360a9ffbb7bf8252b4e5efc0400cdeac5809340e5d2b21/sentencepiece-0.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea5f536e32ea8ec96086ee00d7a4a131ce583a1b18d130711707c10e69601cb2", size = 2409370, upload-time = "2024-02-19T17:06:29.315Z" }, - { url = "https://files.pythonhosted.org/packages/2e/08/a4c135ad6fc2ce26798d14ab72790d66e813efc9589fd30a5316a88ca8d5/sentencepiece-0.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0cb51f53b6aae3c36bafe41e86167c71af8370a039f542c43b0cce5ef24a68c", size = 1239288, upload-time = "2024-02-19T17:06:31.674Z" }, - { url = 
"https://files.pythonhosted.org/packages/49/0a/2fe387f825ac5aad5a0bfe221904882106cac58e1b693ba7818785a882b6/sentencepiece-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3212121805afc58d8b00ab4e7dd1f8f76c203ddb9dc94aa4079618a31cf5da0f", size = 1181597, upload-time = "2024-02-19T17:06:33.763Z" }, - { url = "https://files.pythonhosted.org/packages/cc/38/e4698ee2293fe4835dc033c49796a39b3eebd8752098f6bd0aa53a14af1f/sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a3149e3066c2a75e0d68a43eb632d7ae728c7925b517f4c05c40f6f7280ce08", size = 1259220, upload-time = "2024-02-19T17:06:35.85Z" }, - { url = "https://files.pythonhosted.org/packages/12/24/fd7ef967c9dad2f6e6e5386d0cadaf65cda8b7be6e3861a9ab3121035139/sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:632f3594d3e7ac8b367bca204cb3fd05a01d5b21455acd097ea4c0e30e2f63d7", size = 1355962, upload-time = "2024-02-19T17:06:38.616Z" }, - { url = "https://files.pythonhosted.org/packages/4f/d2/18246f43ca730bb81918f87b7e886531eda32d835811ad9f4657c54eee35/sentencepiece-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f295105c6bdbb05bd5e1b0cafbd78ff95036f5d3641e7949455a3f4e5e7c3109", size = 1301706, upload-time = "2024-02-19T17:06:40.712Z" }, - { url = "https://files.pythonhosted.org/packages/8a/47/ca237b562f420044ab56ddb4c278672f7e8c866e183730a20e413b38a989/sentencepiece-0.2.0-cp312-cp312-win32.whl", hash = "sha256:fb89f811e5efd18bab141afc3fea3de141c3f69f3fe9e898f710ae7fe3aab251", size = 936941, upload-time = "2024-02-19T17:06:42.802Z" }, - { url = "https://files.pythonhosted.org/packages/c6/97/d159c32642306ee2b70732077632895438867b3b6df282354bd550cf2a67/sentencepiece-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a673a72aab81fef5ebe755c6e0cc60087d1f3a4700835d40537183c1703a45f", size = 991994, upload-time = "2024-02-19T17:06:45.01Z" }, +version = "0.2.1" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/15/2e7a025fc62d764b151ae6d0f2a92f8081755ebe8d4a64099accc6f77ba6/sentencepiece-0.2.1.tar.gz", hash = "sha256:8138cec27c2f2282f4a34d9a016e3374cd40e5c6e9cb335063db66a0a3b71fad", size = 3228515, upload-time = "2025-08-12T07:00:51.718Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/be/32ce495aa1d0e0c323dcb1ba87096037358edee539cac5baf8755a6bd396/sentencepiece-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:57cae326c8727de58c85977b175af132a7138d84c764635d7e71bbee7e774133", size = 1943152, upload-time = "2025-08-12T06:59:40.048Z" }, + { url = "https://files.pythonhosted.org/packages/88/7e/ff23008899a58678e98c6ff592bf4d368eee5a71af96d0df6b38a039dd4f/sentencepiece-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:56dd39a3c4d6493db3cdca7e8cc68c6b633f0d4195495cbadfcf5af8a22d05a6", size = 1325651, upload-time = "2025-08-12T06:59:41.536Z" }, + { url = "https://files.pythonhosted.org/packages/19/84/42eb3ce4796777a1b5d3699dfd4dca85113e68b637f194a6c8d786f16a04/sentencepiece-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9381351182ff9888cc80e41c632e7e274b106f450de33d67a9e8f6043da6f76", size = 1253645, upload-time = "2025-08-12T06:59:42.903Z" }, + { url = "https://files.pythonhosted.org/packages/89/fa/d3d5ebcba3cb9e6d3775a096251860c41a6bc53a1b9461151df83fe93255/sentencepiece-0.2.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99f955df238021bf11f0fc37cdb54fd5e5b5f7fd30ecc3d93fb48b6815437167", size = 1316273, upload-time = "2025-08-12T06:59:44.476Z" }, + { url = "https://files.pythonhosted.org/packages/04/88/14f2f4a2b922d8b39be45bf63d79e6cd3a9b2f248b2fcb98a69b12af12f5/sentencepiece-0.2.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0cdfecef430d985f1c2bcbfff3defd1d95dae876fbd0173376012d2d7d24044b", size = 1387881, upload-time = "2025-08-12T06:59:46.09Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/b8/903e5ccb77b4ef140605d5d71b4f9e0ad95d456d6184688073ed11712809/sentencepiece-0.2.1-cp312-cp312-win32.whl", hash = "sha256:a483fd29a34c3e34c39ac5556b0a90942bec253d260235729e50976f5dba1068", size = 999540, upload-time = "2025-08-12T06:59:48.023Z" }, + { url = "https://files.pythonhosted.org/packages/2d/81/92df5673c067148c2545b1bfe49adfd775bcc3a169a047f5a0e6575ddaca/sentencepiece-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4cdc7c36234fda305e85c32949c5211faaf8dd886096c7cea289ddc12a2d02de", size = 1054671, upload-time = "2025-08-12T06:59:49.895Z" }, + { url = "https://files.pythonhosted.org/packages/fe/02/c5e3bc518655d714622bec87d83db9cdba1cd0619a4a04e2109751c4f47f/sentencepiece-0.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:daeb5e9e9fcad012324807856113708614d534f596d5008638eb9b40112cd9e4", size = 1033923, upload-time = "2025-08-12T06:59:51.952Z" }, + { url = "https://files.pythonhosted.org/packages/ba/4a/85fbe1706d4d04a7e826b53f327c4b80f849cf1c7b7c5e31a20a97d8f28b/sentencepiece-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dcd8161eee7b41aae57ded06272905dbd680a0a04b91edd0f64790c796b2f706", size = 1943150, upload-time = "2025-08-12T06:59:53.588Z" }, + { url = "https://files.pythonhosted.org/packages/c2/83/4cfb393e287509fc2155480b9d184706ef8d9fa8cbf5505d02a5792bf220/sentencepiece-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c6c8f42949f419ff8c7e9960dbadcfbc982d7b5efc2f6748210d3dd53a7de062", size = 1325651, upload-time = "2025-08-12T06:59:55.073Z" }, + { url = "https://files.pythonhosted.org/packages/8d/de/5a007fb53b1ab0aafc69d11a5a3dd72a289d5a3e78dcf2c3a3d9b14ffe93/sentencepiece-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:097f3394e99456e9e4efba1737c3749d7e23563dd1588ce71a3d007f25475fff", size = 1253641, upload-time = "2025-08-12T06:59:56.562Z" }, + { url = 
"https://files.pythonhosted.org/packages/2c/d2/f552be5928105588f4f4d66ee37dd4c61460d8097e62d0e2e0eec41bc61d/sentencepiece-0.2.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7b670879c370d350557edabadbad1f6561a9e6968126e6debca4029e5547820", size = 1316271, upload-time = "2025-08-12T06:59:58.109Z" }, + { url = "https://files.pythonhosted.org/packages/96/df/0cfe748ace5485be740fed9476dee7877f109da32ed0d280312c94ec259f/sentencepiece-0.2.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7f0fd2f2693309e6628aeeb2e2faf6edd221134dfccac3308ca0de01f8dab47", size = 1387882, upload-time = "2025-08-12T07:00:00.701Z" }, + { url = "https://files.pythonhosted.org/packages/ac/dd/f7774d42a881ced8e1739f393ab1e82ece39fc9abd4779e28050c2e975b5/sentencepiece-0.2.1-cp313-cp313-win32.whl", hash = "sha256:92b3816aa2339355fda2c8c4e021a5de92180b00aaccaf5e2808972e77a4b22f", size = 999541, upload-time = "2025-08-12T07:00:02.709Z" }, + { url = "https://files.pythonhosted.org/packages/dd/e9/932b9eae6fd7019548321eee1ab8d5e3b3d1294df9d9a0c9ac517c7b636d/sentencepiece-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:10ed3dab2044c47f7a2e7b4969b0c430420cdd45735d78c8f853191fa0e3148b", size = 1054669, upload-time = "2025-08-12T07:00:04.915Z" }, + { url = "https://files.pythonhosted.org/packages/c9/3a/76488a00ea7d6931689cda28726a1447d66bf1a4837943489314593d5596/sentencepiece-0.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:ac650534e2251083c5f75dde4ff28896ce7c8904133dc8fef42780f4d5588fcd", size = 1033922, upload-time = "2025-08-12T07:00:06.496Z" }, + { url = "https://files.pythonhosted.org/packages/4a/b6/08fe2ce819e02ccb0296f4843e3f195764ce9829cbda61b7513f29b95718/sentencepiece-0.2.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:8dd4b477a7b069648d19363aad0cab9bad2f4e83b2d179be668efa672500dc94", size = 1946052, upload-time = "2025-08-12T07:00:08.136Z" }, + { url = 
"https://files.pythonhosted.org/packages/ab/d9/1ea0e740591ff4c6fc2b6eb1d7510d02f3fb885093f19b2f3abd1363b402/sentencepiece-0.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0c0f672da370cc490e4c59d89e12289778310a0e71d176c541e4834759e1ae07", size = 1327408, upload-time = "2025-08-12T07:00:09.572Z" }, + { url = "https://files.pythonhosted.org/packages/99/7e/1fb26e8a21613f6200e1ab88824d5d203714162cf2883248b517deb500b7/sentencepiece-0.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad8493bea8432dae8d6830365352350f3b4144415a1d09c4c8cb8d30cf3b6c3c", size = 1254857, upload-time = "2025-08-12T07:00:11.021Z" }, + { url = "https://files.pythonhosted.org/packages/bc/85/c72fd1f3c7a6010544d6ae07f8ddb38b5e2a7e33bd4318f87266c0bbafbf/sentencepiece-0.2.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b81a24733726e3678d2db63619acc5a8dccd074f7aa7a54ecd5ca33ca6d2d596", size = 1315722, upload-time = "2025-08-12T07:00:12.989Z" }, + { url = "https://files.pythonhosted.org/packages/4a/e8/661e5bd82a8aa641fd6c1020bd0e890ef73230a2b7215ddf9c8cd8e941c2/sentencepiece-0.2.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0a81799d0a68d618e89063fb423c3001a034c893069135ffe51fee439ae474d6", size = 1387452, upload-time = "2025-08-12T07:00:15.088Z" }, + { url = "https://files.pythonhosted.org/packages/99/5e/ae66c361023a470afcbc1fbb8da722c72ea678a2fcd9a18f1a12598c7501/sentencepiece-0.2.1-cp313-cp313t-win32.whl", hash = "sha256:89a3ea015517c42c0341d0d962f3e6aaf2cf10d71b1932d475c44ba48d00aa2b", size = 1002501, upload-time = "2025-08-12T07:00:16.966Z" }, + { url = "https://files.pythonhosted.org/packages/c1/03/d332828c4ff764e16c1b56c2c8f9a33488bbe796b53fb6b9c4205ddbf167/sentencepiece-0.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:33f068c9382dc2e7c228eedfd8163b52baa86bb92f50d0488bf2b7da7032e484", size = 1057555, upload-time = "2025-08-12T07:00:18.573Z" }, + { url = 
"https://files.pythonhosted.org/packages/88/14/5aee0bf0864df9bd82bd59e7711362908e4935e3f9cdc1f57246b5d5c9b9/sentencepiece-0.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:b3616ad246f360e52c85781e47682d31abfb6554c779e42b65333d4b5f44ecc0", size = 1036042, upload-time = "2025-08-12T07:00:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/24/9c/89eb8b2052f720a612478baf11c8227dcf1dc28cd4ea4c0c19506b5af2a2/sentencepiece-0.2.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:5d0350b686c320068702116276cfb26c066dc7e65cfef173980b11bb4d606719", size = 1943147, upload-time = "2025-08-12T07:00:21.809Z" }, + { url = "https://files.pythonhosted.org/packages/82/0b/a1432bc87f97c2ace36386ca23e8bd3b91fb40581b5e6148d24b24186419/sentencepiece-0.2.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c7f54a31cde6fa5cb030370566f68152a742f433f8d2be458463d06c208aef33", size = 1325624, upload-time = "2025-08-12T07:00:23.289Z" }, + { url = "https://files.pythonhosted.org/packages/ea/99/bbe054ebb5a5039457c590e0a4156ed073fb0fe9ce4f7523404dd5b37463/sentencepiece-0.2.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c83b85ab2d6576607f31df77ff86f28182be4a8de6d175d2c33ca609925f5da1", size = 1253670, upload-time = "2025-08-12T07:00:24.69Z" }, + { url = "https://files.pythonhosted.org/packages/19/ad/d5c7075f701bd97971d7c2ac2904f227566f51ef0838dfbdfdccb58cd212/sentencepiece-0.2.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1855f57db07b51fb51ed6c9c452f570624d2b169b36f0f79ef71a6e6c618cd8b", size = 1316247, upload-time = "2025-08-12T07:00:26.435Z" }, + { url = "https://files.pythonhosted.org/packages/fb/03/35fbe5f3d9a7435eebd0b473e09584bd3cc354ce118b960445b060d33781/sentencepiece-0.2.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01e6912125cb45d3792f530a4d38f8e21bf884d6b4d4ade1b2de5cf7a8d2a52b", size = 1387894, upload-time = "2025-08-12T07:00:28.339Z" }, + { url = 
"https://files.pythonhosted.org/packages/dc/aa/956ef729aafb6c8f9c443104c9636489093bb5c61d6b90fc27aa1a865574/sentencepiece-0.2.1-cp314-cp314-win32.whl", hash = "sha256:c415c9de1447e0a74ae3fdb2e52f967cb544113a3a5ce3a194df185cbc1f962f", size = 1096698, upload-time = "2025-08-12T07:00:29.764Z" }, + { url = "https://files.pythonhosted.org/packages/b8/cb/fe400d8836952cc535c81a0ce47dc6875160e5fedb71d2d9ff0e9894c2a6/sentencepiece-0.2.1-cp314-cp314-win_amd64.whl", hash = "sha256:881b2e44b14fc19feade3cbed314be37de639fc415375cefaa5bc81a4be137fd", size = 1155115, upload-time = "2025-08-12T07:00:32.865Z" }, + { url = "https://files.pythonhosted.org/packages/32/89/047921cf70f36c7b6b6390876b2399b3633ab73b8d0cb857e5a964238941/sentencepiece-0.2.1-cp314-cp314-win_arm64.whl", hash = "sha256:2005242a16d2dc3ac5fe18aa7667549134d37854823df4c4db244752453b78a8", size = 1133890, upload-time = "2025-08-12T07:00:34.763Z" }, + { url = "https://files.pythonhosted.org/packages/a1/11/5b414b9fae6255b5fb1e22e2ed3dc3a72d3a694e5703910e640ac78346bb/sentencepiece-0.2.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:a19adcec27c524cb7069a1c741060add95f942d1cbf7ad0d104dffa0a7d28a2b", size = 1946081, upload-time = "2025-08-12T07:00:36.97Z" }, + { url = "https://files.pythonhosted.org/packages/77/eb/7a5682bb25824db8545f8e5662e7f3e32d72a508fdce086029d89695106b/sentencepiece-0.2.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e37e4b4c4a11662b5db521def4e44d4d30ae69a1743241412a93ae40fdcab4bb", size = 1327406, upload-time = "2025-08-12T07:00:38.669Z" }, + { url = "https://files.pythonhosted.org/packages/03/b0/811dae8fb9f2784e138785d481469788f2e0d0c109c5737372454415f55f/sentencepiece-0.2.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:477c81505db072b3ab627e7eab972ea1025331bd3a92bacbf798df2b75ea86ec", size = 1254846, upload-time = "2025-08-12T07:00:40.611Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/23/195b2e7ec85ebb6a547969f60b723c7aca5a75800ece6cc3f41da872d14e/sentencepiece-0.2.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:010f025a544ef770bb395091d57cb94deb9652d8972e0d09f71d85d5a0816c8c", size = 1315721, upload-time = "2025-08-12T07:00:42.914Z" }, + { url = "https://files.pythonhosted.org/packages/7e/aa/553dbe4178b5f23eb28e59393dddd64186178b56b81d9b8d5c3ff1c28395/sentencepiece-0.2.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:733e59ff1794d26db706cd41fc2d7ca5f6c64a820709cb801dc0ea31780d64ab", size = 1387458, upload-time = "2025-08-12T07:00:44.56Z" }, + { url = "https://files.pythonhosted.org/packages/66/7c/08ff0012507297a4dd74a5420fdc0eb9e3e80f4e88cab1538d7f28db303d/sentencepiece-0.2.1-cp314-cp314t-win32.whl", hash = "sha256:d3233770f78e637dc8b1fda2cd7c3b99ec77e7505041934188a4e7fe751de3b0", size = 1099765, upload-time = "2025-08-12T07:00:46.058Z" }, + { url = "https://files.pythonhosted.org/packages/91/d5/2a69e1ce15881beb9ddfc7e3f998322f5cedcd5e4d244cb74dade9441663/sentencepiece-0.2.1-cp314-cp314t-win_amd64.whl", hash = "sha256:5e4366c97b68218fd30ea72d70c525e6e78a6c0a88650f57ac4c43c63b234a9d", size = 1157807, upload-time = "2025-08-12T07:00:47.673Z" }, + { url = "https://files.pythonhosted.org/packages/f3/16/54f611fcfc2d1c46cbe3ec4169780b2cfa7cf63708ef2b71611136db7513/sentencepiece-0.2.1-cp314-cp314t-win_arm64.whl", hash = "sha256:105e36e75cbac1292642045458e8da677b2342dcd33df503e640f0b457cb6751", size = 1136264, upload-time = "2025-08-12T07:00:49.485Z" }, ] [[package]] name = "sentry-sdk" -version = "2.31.0" +version = "2.35.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/45/c7ef7e12d8434fda8b61cdab432d8af64fb832480c93cdaf4bdcab7f5597/sentry_sdk-2.31.0.tar.gz", hash = 
"sha256:fed6d847f15105849cdf5dfdc64dcec356f936d41abb8c9d66adae45e60959ec", size = 334167, upload-time = "2025-06-24T16:36:26.066Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7d/a2/9b6d8cc59f03251c583b3fec9d2f075dc09c0f6e030e0e0a3b223c6e64b2/sentry_sdk-2.31.0-py2.py3-none-any.whl", hash = "sha256:e953f5ab083e6599bab255b75d6829b33b3ddf9931a27ca00b4ab0081287e84f", size = 355638, upload-time = "2025-06-24T16:36:24.306Z" }, -] - -[[package]] -name = "setproctitle" -version = "1.3.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9e/af/56efe21c53ac81ac87e000b15e60b3d8104224b4313b6eacac3597bd183d/setproctitle-1.3.6.tar.gz", hash = "sha256:c9f32b96c700bb384f33f7cf07954bb609d35dd82752cef57fb2ee0968409169", size = 26889, upload-time = "2025-04-29T13:35:00.184Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/fb/99456fd94d4207c5f6c40746a048a33a52b4239cd7d9c8d4889e2210ec82/setproctitle-1.3.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:af44bb7a1af163806bbb679eb8432fa7b4fb6d83a5d403b541b675dcd3798638", size = 17399, upload-time = "2025-04-29T13:33:13.406Z" }, - { url = "https://files.pythonhosted.org/packages/d5/48/9699191fe6062827683c43bfa9caac33a2c89f8781dd8c7253fa3dba85fd/setproctitle-1.3.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3cca16fd055316a48f0debfcbfb6af7cea715429fc31515ab3fcac05abd527d8", size = 11966, upload-time = "2025-04-29T13:33:14.976Z" }, - { url = "https://files.pythonhosted.org/packages/33/03/b085d192b9ecb9c7ce6ad6ef30ecf4110b7f39430b58a56245569827fcf4/setproctitle-1.3.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea002088d5554fd75e619742cefc78b84a212ba21632e59931b3501f0cfc8f67", size = 32017, upload-time = "2025-04-29T13:33:16.163Z" }, - { url = 
"https://files.pythonhosted.org/packages/ae/68/c53162e645816f97212002111420d1b2f75bf6d02632e37e961dc2cd6d8b/setproctitle-1.3.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb465dd5825356c1191a038a86ee1b8166e3562d6e8add95eec04ab484cfb8a2", size = 33419, upload-time = "2025-04-29T13:33:18.239Z" }, - { url = "https://files.pythonhosted.org/packages/ac/0d/119a45d15a816a6cf5ccc61b19729f82620095b27a47e0a6838216a95fae/setproctitle-1.3.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2c8e20487b3b73c1fa72c56f5c89430617296cd380373e7af3a538a82d4cd6d", size = 30711, upload-time = "2025-04-29T13:33:19.571Z" }, - { url = "https://files.pythonhosted.org/packages/e3/fb/5e9b5068df9e9f31a722a775a5e8322a29a638eaaa3eac5ea7f0b35e6314/setproctitle-1.3.6-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d6252098e98129a1decb59b46920d4eca17b0395f3d71b0d327d086fefe77d", size = 31742, upload-time = "2025-04-29T13:33:21.172Z" }, - { url = "https://files.pythonhosted.org/packages/35/88/54de1e73e8fce87d587889c7eedb48fc4ee2bbe4e4ca6331690d03024f86/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cf355fbf0d4275d86f9f57be705d8e5eaa7f8ddb12b24ced2ea6cbd68fdb14dc", size = 31925, upload-time = "2025-04-29T13:33:22.427Z" }, - { url = "https://files.pythonhosted.org/packages/f3/01/65948d7badd66e63e3db247b923143da142790fa293830fdecf832712c2d/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e288f8a162d663916060beb5e8165a8551312b08efee9cf68302687471a6545d", size = 30981, upload-time = "2025-04-29T13:33:23.739Z" }, - { url = "https://files.pythonhosted.org/packages/22/20/c495e61786f1d38d5dc340b9d9077fee9be3dfc7e89f515afe12e1526dbc/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b2e54f4a2dc6edf0f5ea5b1d0a608d2af3dcb5aa8c8eeab9c8841b23e1b054fe", size = 33209, upload-time = 
"2025-04-29T13:33:24.915Z" }, - { url = "https://files.pythonhosted.org/packages/98/3f/a457b8550fbd34d5b482fe20b8376b529e76bf1fbf9a474a6d9a641ab4ad/setproctitle-1.3.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b6f4abde9a2946f57e8daaf1160b2351bcf64274ef539e6675c1d945dbd75e2a", size = 31587, upload-time = "2025-04-29T13:33:26.123Z" }, - { url = "https://files.pythonhosted.org/packages/44/fe/743517340e5a635e3f1c4310baea20c16c66202f96a6f4cead222ffd6d84/setproctitle-1.3.6-cp312-cp312-win32.whl", hash = "sha256:db608db98ccc21248370d30044a60843b3f0f3d34781ceeea67067c508cd5a28", size = 11487, upload-time = "2025-04-29T13:33:27.403Z" }, - { url = "https://files.pythonhosted.org/packages/60/9a/d88f1c1f0f4efff1bd29d9233583ee341114dda7d9613941453984849674/setproctitle-1.3.6-cp312-cp312-win_amd64.whl", hash = "sha256:082413db8a96b1f021088e8ec23f0a61fec352e649aba20881895815388b66d3", size = 12208, upload-time = "2025-04-29T13:33:28.852Z" }, - { url = "https://files.pythonhosted.org/packages/89/76/f1a2fdbf9b9602945a7489ba5c52e9863de37381ef1a85a2b9ed0ff8bc79/setproctitle-1.3.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e2a9e62647dc040a76d55563580bf3bb8fe1f5b6ead08447c2ed0d7786e5e794", size = 17392, upload-time = "2025-04-29T13:33:30.925Z" }, - { url = "https://files.pythonhosted.org/packages/5c/5b/4e0db8b10b4543afcb3dbc0827793d46e43ec1de6b377e313af3703d08e0/setproctitle-1.3.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:751ba352ed922e0af60458e961167fa7b732ac31c0ddd1476a2dfd30ab5958c5", size = 11951, upload-time = "2025-04-29T13:33:32.296Z" }, - { url = "https://files.pythonhosted.org/packages/dc/fe/d5d00aaa700fe1f6160b6e95c225b29c01f4d9292176d48fd968815163ea/setproctitle-1.3.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7890e291bf4708e3b61db9069ea39b3ab0651e42923a5e1f4d78a7b9e4b18301", size = 32087, upload-time = "2025-04-29T13:33:33.469Z" }, - { url = 
"https://files.pythonhosted.org/packages/9f/b3/894b827b93ef813c082479bebf88185860f01ac243df737823dd705e7fff/setproctitle-1.3.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2b17855ed7f994f3f259cf2dfbfad78814538536fa1a91b50253d84d87fd88d", size = 33502, upload-time = "2025-04-29T13:33:34.831Z" }, - { url = "https://files.pythonhosted.org/packages/b2/cd/5330734cca1a4cfcb721432c22cb7899ff15a4101ba868b2ef452ffafea1/setproctitle-1.3.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e51ec673513465663008ce402171192a053564865c2fc6dc840620871a9bd7c", size = 30713, upload-time = "2025-04-29T13:33:36.739Z" }, - { url = "https://files.pythonhosted.org/packages/fa/d3/c2590c5daa2e9a008d3f2b16c0f4a351826193be55f147cb32af49c6d814/setproctitle-1.3.6-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63cc10352dc6cf35a33951656aa660d99f25f574eb78132ce41a85001a638aa7", size = 31792, upload-time = "2025-04-29T13:33:37.974Z" }, - { url = "https://files.pythonhosted.org/packages/e6/b1/c553ed5af8cfcecd5ae7737e63af58a17a03d26f3d61868c7eb20bf7e3cf/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0dba8faee2e4a96e934797c9f0f2d093f8239bf210406a99060b3eabe549628e", size = 31927, upload-time = "2025-04-29T13:33:39.203Z" }, - { url = "https://files.pythonhosted.org/packages/70/78/2d5385206540127a3dca0ff83225b1ac66873f5cc89d4a6d3806c92f5ae2/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e3e44d08b61de0dd6f205528498f834a51a5c06689f8fb182fe26f3a3ce7dca9", size = 30981, upload-time = "2025-04-29T13:33:40.431Z" }, - { url = "https://files.pythonhosted.org/packages/31/62/e3e4a4e006d0e549748e53cded4ff3b667be0602860fc61b7de8b412b667/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:de004939fc3fd0c1200d26ea9264350bfe501ffbf46c8cf5dc7f345f2d87a7f1", size = 33244, upload-time = 
"2025-04-29T13:33:41.817Z" }, - { url = "https://files.pythonhosted.org/packages/aa/05/4b223fd4ef94e105dc7aff27fa502fb7200cf52be2bb0c064bd2406b5611/setproctitle-1.3.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3f8194b4d631b003a1176a75d1acd545e04b1f54b821638e098a93e6e62830ef", size = 31630, upload-time = "2025-04-29T13:33:43.093Z" }, - { url = "https://files.pythonhosted.org/packages/1b/ba/5f68eb969f7336f54b54a599fd3ffbd7662f9733b080bc8598705971b3dd/setproctitle-1.3.6-cp313-cp313-win32.whl", hash = "sha256:d714e002dd3638170fe7376dc1b686dbac9cb712cde3f7224440af722cc9866a", size = 11480, upload-time = "2025-04-29T13:34:01.257Z" }, - { url = "https://files.pythonhosted.org/packages/ba/f5/7f47f0ca35c9c357f16187cee9229f3eda0237bc6fdd3061441336f361c0/setproctitle-1.3.6-cp313-cp313-win_amd64.whl", hash = "sha256:b70c07409d465f3a8b34d52f863871fb8a00755370791d2bd1d4f82b3cdaf3d5", size = 12198, upload-time = "2025-04-29T13:34:02.293Z" }, - { url = "https://files.pythonhosted.org/packages/39/ad/c3941b8fc6b32a976c9e2d9615a90ae793b69cd010ca8c3575dbc822104f/setproctitle-1.3.6-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:23a57d3b8f1549515c2dbe4a2880ebc1f27780dc126c5e064167563e015817f5", size = 17401, upload-time = "2025-04-29T13:33:44.186Z" }, - { url = "https://files.pythonhosted.org/packages/04/38/a184f857b988d3a9c401e470a4e38182a5c99ee77bf90432d7665e9d35a3/setproctitle-1.3.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:81c443310831e29fabbd07b75ebbfa29d0740b56f5907c6af218482d51260431", size = 11959, upload-time = "2025-04-29T13:33:45.71Z" }, - { url = "https://files.pythonhosted.org/packages/b7/b9/4878ef9d8483adfd1edf6bf95151362aaec0d05aac306a97ff0383f491b5/setproctitle-1.3.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d88c63bd395c787b0aa81d8bbc22c1809f311032ce3e823a6517b711129818e4", size = 33463, upload-time = "2025-04-29T13:33:46.913Z" }, - { url = 
"https://files.pythonhosted.org/packages/cc/60/3ef49d1931aff2a36a7324a49cca10d77ef03e0278452fd468c33a52d7e3/setproctitle-1.3.6-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73f14b86d0e2858ece6bf5807c9889670e392c001d414b4293d0d9b291942c3", size = 34959, upload-time = "2025-04-29T13:33:48.216Z" }, - { url = "https://files.pythonhosted.org/packages/81/c6/dee0a973acecefb0db6c9c2e0ea7f18b7e4db773a72e534741ebdee8bbb8/setproctitle-1.3.6-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3393859eb8f19f5804049a685bf286cb08d447e28ba5c6d8543c7bf5500d5970", size = 32055, upload-time = "2025-04-29T13:33:49.443Z" }, - { url = "https://files.pythonhosted.org/packages/ea/a5/5dd5c4192cf18d16349a32a07f728a9a48a2a05178e16966cabd6645903e/setproctitle-1.3.6-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785cd210c0311d9be28a70e281a914486d62bfd44ac926fcd70cf0b4d65dff1c", size = 32986, upload-time = "2025-04-29T13:33:51.519Z" }, - { url = "https://files.pythonhosted.org/packages/df/a6/1508d37eb8008670d33f13fcdb91cbd8ef54697276469abbfdd3d4428c59/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c051f46ed1e13ba8214b334cbf21902102807582fbfaf0fef341b9e52f0fafbf", size = 32736, upload-time = "2025-04-29T13:33:52.852Z" }, - { url = "https://files.pythonhosted.org/packages/1a/73/c84ec8880d543766a12fcd6b65dbd013770974a40577889f357409b0441e/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:49498ebf68ca3e75321ffe634fcea5cc720502bfaa79bd6b03ded92ce0dc3c24", size = 31945, upload-time = "2025-04-29T13:33:54.665Z" }, - { url = "https://files.pythonhosted.org/packages/95/0a/126b9ff7a406a69a62825fe5bd6d1ba8671919a7018c4f9e2c63f49bfcb6/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4431629c178193f23c538cb1de3da285a99ccc86b20ee91d81eb5f1a80e0d2ba", size = 34333, upload-time = 
"2025-04-29T13:33:56.101Z" }, - { url = "https://files.pythonhosted.org/packages/9a/fd/5474b04f1c013ff460129d2bc774557dd6e186da4667865efef9a83bf378/setproctitle-1.3.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d136fbf8ad4321716e44d6d6b3d8dffb4872626010884e07a1db54b7450836cf", size = 32508, upload-time = "2025-04-29T13:33:57.43Z" }, - { url = "https://files.pythonhosted.org/packages/32/21/2503e38520cb076a7ecaef6a35d6a6fa89cf02af3541c84c811fd7500d20/setproctitle-1.3.6-cp313-cp313t-win32.whl", hash = "sha256:d483cc23cc56ab32911ea0baa0d2d9ea7aa065987f47de847a0a93a58bf57905", size = 11482, upload-time = "2025-04-29T13:33:58.602Z" }, - { url = "https://files.pythonhosted.org/packages/65/23/7833d75a27fba25ddc5cd3b54cd03c4bf8e18b8e2dbec622eb6326278ce8/setproctitle-1.3.6-cp313-cp313t-win_amd64.whl", hash = "sha256:74973aebea3543ad033b9103db30579ec2b950a466e09f9c2180089e8346e0ec", size = 12209, upload-time = "2025-04-29T13:33:59.727Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/31/83/055dc157b719651ef13db569bb8cf2103df11174478649735c1b2bf3f6bc/sentry_sdk-2.35.0.tar.gz", hash = "sha256:5ea58d352779ce45d17bc2fa71ec7185205295b83a9dbb5707273deb64720092", size = 343014, upload-time = "2025-08-14T17:11:20.223Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/3d/742617a7c644deb0c1628dcf6bb2d2165ab7c6aab56fe5222758994007f8/sentry_sdk-2.35.0-py2.py3-none-any.whl", hash = "sha256:6e0c29b9a5d34de8575ffb04d289a987ff3053cf2c98ede445bea995e3830263", size = 363806, upload-time = "2025-08-14T17:11:18.29Z" }, ] [[package]] @@ -4601,14 +5314,14 @@ wheels = [ [[package]] name = "smart-open" -version = "7.1.0" +version = "7.3.0.post1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/21/30/1f41c3d3b8cec82024b4b277bfd4e5b18b765ae7279eb9871fa25c503778/smart_open-7.1.0.tar.gz", hash = 
"sha256:a4f09f84f0f6d3637c6543aca7b5487438877a21360e7368ccf1f704789752ba", size = 72044, upload-time = "2024-12-17T13:19:17.71Z" } +sdist = { url = "https://files.pythonhosted.org/packages/18/2b/5e7234c68ed5bc872ad6ae77b8a421c2ed70dcb1190b44dc1abdeed5e347/smart_open-7.3.0.post1.tar.gz", hash = "sha256:ce6a3d9bc1afbf6234ad13c010b77f8cd36d24636811e3c52c3b5160f5214d1e", size = 51557, upload-time = "2025-07-03T10:06:31.271Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/18/9a8d9f01957aa1f8bbc5676d54c2e33102d247e146c1a3679d3bd5cc2e3a/smart_open-7.1.0-py3-none-any.whl", hash = "sha256:4b8489bb6058196258bafe901730c7db0dcf4f083f316e97269c66f45502055b", size = 61746, upload-time = "2024-12-17T13:19:21.076Z" }, + { url = "https://files.pythonhosted.org/packages/08/5b/a2a3d4514c64818925f4e886d39981f1926eeb5288a4549c6b3c17ed66bb/smart_open-7.3.0.post1-py3-none-any.whl", hash = "sha256:c73661a2c24bf045c1e04e08fffc585b59af023fe783d57896f590489db66fb4", size = 61946, upload-time = "2025-07-03T10:06:29.599Z" }, ] [[package]] @@ -4808,31 +5521,31 @@ wheels = [ [[package]] name = "sqlalchemy" -version = "2.0.41" +version = "2.0.43" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "greenlet", marker = "(python_full_version < '3.14' and platform_machine == 'AMD64') or (python_full_version < '3.14' and platform_machine == 'WIN32') or (python_full_version < '3.14' and platform_machine == 'aarch64') or (python_full_version < '3.14' and platform_machine == 'amd64') or (python_full_version < '3.14' and platform_machine == 'ppc64le') or (python_full_version < '3.14' and platform_machine == 'win32') or (python_full_version < '3.14' and platform_machine == 'x86_64')" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/63/66/45b165c595ec89aa7dcc2c1cd222ab269bc753f1fc7a1e68f8481bd957bf/sqlalchemy-2.0.41.tar.gz", hash = "sha256:edba70118c4be3c2b1f90754d308d0b79c6fe2c0fdc52d8ddf603916f83f4db9", size = 
9689424, upload-time = "2025-05-14T17:10:32.339Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/bc/d59b5d97d27229b0e009bd9098cd81af71c2fa5549c580a0a67b9bed0496/sqlalchemy-2.0.43.tar.gz", hash = "sha256:788bfcef6787a7764169cfe9859fe425bf44559619e1d9f56f5bddf2ebf6f417", size = 9762949, upload-time = "2025-08-11T14:24:58.438Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3e/2a/f1f4e068b371154740dd10fb81afb5240d5af4aa0087b88d8b308b5429c2/sqlalchemy-2.0.41-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:81f413674d85cfd0dfcd6512e10e0f33c19c21860342a4890c3a2b59479929f9", size = 2119645, upload-time = "2025-05-14T17:55:24.854Z" }, - { url = "https://files.pythonhosted.org/packages/9b/e8/c664a7e73d36fbfc4730f8cf2bf930444ea87270f2825efbe17bf808b998/sqlalchemy-2.0.41-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:598d9ebc1e796431bbd068e41e4de4dc34312b7aa3292571bb3674a0cb415dd1", size = 2107399, upload-time = "2025-05-14T17:55:28.097Z" }, - { url = "https://files.pythonhosted.org/packages/5c/78/8a9cf6c5e7135540cb682128d091d6afa1b9e48bd049b0d691bf54114f70/sqlalchemy-2.0.41-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a104c5694dfd2d864a6f91b0956eb5d5883234119cb40010115fd45a16da5e70", size = 3293269, upload-time = "2025-05-14T17:50:38.227Z" }, - { url = "https://files.pythonhosted.org/packages/3c/35/f74add3978c20de6323fb11cb5162702670cc7a9420033befb43d8d5b7a4/sqlalchemy-2.0.41-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6145afea51ff0af7f2564a05fa95eb46f542919e6523729663a5d285ecb3cf5e", size = 3303364, upload-time = "2025-05-14T17:51:49.829Z" }, - { url = "https://files.pythonhosted.org/packages/6a/d4/c990f37f52c3f7748ebe98883e2a0f7d038108c2c5a82468d1ff3eec50b7/sqlalchemy-2.0.41-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b46fa6eae1cd1c20e6e6f44e19984d438b6b2d8616d21d783d150df714f44078", size = 3229072, upload-time = "2025-05-14T17:50:39.774Z" }, - { url = 
"https://files.pythonhosted.org/packages/15/69/cab11fecc7eb64bc561011be2bd03d065b762d87add52a4ca0aca2e12904/sqlalchemy-2.0.41-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41836fe661cc98abfae476e14ba1906220f92c4e528771a8a3ae6a151242d2ae", size = 3268074, upload-time = "2025-05-14T17:51:51.736Z" }, - { url = "https://files.pythonhosted.org/packages/5c/ca/0c19ec16858585d37767b167fc9602593f98998a68a798450558239fb04a/sqlalchemy-2.0.41-cp312-cp312-win32.whl", hash = "sha256:a8808d5cf866c781150d36a3c8eb3adccfa41a8105d031bf27e92c251e3969d6", size = 2084514, upload-time = "2025-05-14T17:55:49.915Z" }, - { url = "https://files.pythonhosted.org/packages/7f/23/4c2833d78ff3010a4e17f984c734f52b531a8c9060a50429c9d4b0211be6/sqlalchemy-2.0.41-cp312-cp312-win_amd64.whl", hash = "sha256:5b14e97886199c1f52c14629c11d90c11fbb09e9334fa7bb5f6d068d9ced0ce0", size = 2111557, upload-time = "2025-05-14T17:55:51.349Z" }, - { url = "https://files.pythonhosted.org/packages/d3/ad/2e1c6d4f235a97eeef52d0200d8ddda16f6c4dd70ae5ad88c46963440480/sqlalchemy-2.0.41-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4eeb195cdedaf17aab6b247894ff2734dcead6c08f748e617bfe05bd5a218443", size = 2115491, upload-time = "2025-05-14T17:55:31.177Z" }, - { url = "https://files.pythonhosted.org/packages/cf/8d/be490e5db8400dacc89056f78a52d44b04fbf75e8439569d5b879623a53b/sqlalchemy-2.0.41-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d4ae769b9c1c7757e4ccce94b0641bc203bbdf43ba7a2413ab2523d8d047d8dc", size = 2102827, upload-time = "2025-05-14T17:55:34.921Z" }, - { url = "https://files.pythonhosted.org/packages/a0/72/c97ad430f0b0e78efaf2791342e13ffeafcbb3c06242f01a3bb8fe44f65d/sqlalchemy-2.0.41-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a62448526dd9ed3e3beedc93df9bb6b55a436ed1474db31a2af13b313a70a7e1", size = 3225224, upload-time = "2025-05-14T17:50:41.418Z" }, - { url = 
"https://files.pythonhosted.org/packages/5e/51/5ba9ea3246ea068630acf35a6ba0d181e99f1af1afd17e159eac7e8bc2b8/sqlalchemy-2.0.41-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc56c9788617b8964ad02e8fcfeed4001c1f8ba91a9e1f31483c0dffb207002a", size = 3230045, upload-time = "2025-05-14T17:51:54.722Z" }, - { url = "https://files.pythonhosted.org/packages/78/2f/8c14443b2acea700c62f9b4a8bad9e49fc1b65cfb260edead71fd38e9f19/sqlalchemy-2.0.41-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c153265408d18de4cc5ded1941dcd8315894572cddd3c58df5d5b5705b3fa28d", size = 3159357, upload-time = "2025-05-14T17:50:43.483Z" }, - { url = "https://files.pythonhosted.org/packages/fc/b2/43eacbf6ccc5276d76cea18cb7c3d73e294d6fb21f9ff8b4eef9b42bbfd5/sqlalchemy-2.0.41-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f67766965996e63bb46cfbf2ce5355fc32d9dd3b8ad7e536a920ff9ee422e23", size = 3197511, upload-time = "2025-05-14T17:51:57.308Z" }, - { url = "https://files.pythonhosted.org/packages/fa/2e/677c17c5d6a004c3c45334ab1dbe7b7deb834430b282b8a0f75ae220c8eb/sqlalchemy-2.0.41-cp313-cp313-win32.whl", hash = "sha256:bfc9064f6658a3d1cadeaa0ba07570b83ce6801a1314985bf98ec9b95d74e15f", size = 2082420, upload-time = "2025-05-14T17:55:52.69Z" }, - { url = "https://files.pythonhosted.org/packages/e9/61/e8c1b9b6307c57157d328dd8b8348ddc4c47ffdf1279365a13b2b98b8049/sqlalchemy-2.0.41-cp313-cp313-win_amd64.whl", hash = "sha256:82ca366a844eb551daff9d2e6e7a9e5e76d2612c8564f58db6c19a726869c1df", size = 2108329, upload-time = "2025-05-14T17:55:54.495Z" }, - { url = "https://files.pythonhosted.org/packages/1c/fc/9ba22f01b5cdacc8f5ed0d22304718d2c758fce3fd49a5372b886a86f37c/sqlalchemy-2.0.41-py3-none-any.whl", hash = "sha256:57df5dc6fdb5ed1a88a1ed2195fd31927e705cad62dedd86b46972752a80f576", size = 1911224, upload-time = "2025-05-14T17:39:42.154Z" }, + { url = 
"https://files.pythonhosted.org/packages/61/db/20c78f1081446095450bdc6ee6cc10045fce67a8e003a5876b6eaafc5cc4/sqlalchemy-2.0.43-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:20d81fc2736509d7a2bd33292e489b056cbae543661bb7de7ce9f1c0cd6e7f24", size = 2134891, upload-time = "2025-08-11T15:51:13.019Z" }, + { url = "https://files.pythonhosted.org/packages/45/0a/3d89034ae62b200b4396f0f95319f7d86e9945ee64d2343dcad857150fa2/sqlalchemy-2.0.43-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b9fc27650ff5a2c9d490c13c14906b918b0de1f8fcbb4c992712d8caf40e83", size = 2123061, upload-time = "2025-08-11T15:51:14.319Z" }, + { url = "https://files.pythonhosted.org/packages/cb/10/2711f7ff1805919221ad5bee205971254845c069ee2e7036847103ca1e4c/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6772e3ca8a43a65a37c88e2f3e2adfd511b0b1da37ef11ed78dea16aeae85bd9", size = 3320384, upload-time = "2025-08-11T15:52:35.088Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0e/3d155e264d2ed2778484006ef04647bc63f55b3e2d12e6a4f787747b5900/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a113da919c25f7f641ffbd07fbc9077abd4b3b75097c888ab818f962707eb48", size = 3329648, upload-time = "2025-08-11T15:56:34.153Z" }, + { url = "https://files.pythonhosted.org/packages/5b/81/635100fb19725c931622c673900da5efb1595c96ff5b441e07e3dd61f2be/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4286a1139f14b7d70141c67a8ae1582fc2b69105f1b09d9573494eb4bb4b2687", size = 3258030, upload-time = "2025-08-11T15:52:36.933Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ed/a99302716d62b4965fded12520c1cbb189f99b17a6d8cf77611d21442e47/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:529064085be2f4d8a6e5fab12d36ad44f1909a18848fcfbdb59cc6d4bbe48efe", size = 3294469, upload-time = "2025-08-11T15:56:35.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/a2/3a11b06715149bf3310b55a98b5c1e84a42cfb949a7b800bc75cb4e33abc/sqlalchemy-2.0.43-cp312-cp312-win32.whl", hash = "sha256:b535d35dea8bbb8195e7e2b40059e2253acb2b7579b73c1b432a35363694641d", size = 2098906, upload-time = "2025-08-11T15:55:00.645Z" }, + { url = "https://files.pythonhosted.org/packages/bc/09/405c915a974814b90aa591280623adc6ad6b322f61fd5cff80aeaef216c9/sqlalchemy-2.0.43-cp312-cp312-win_amd64.whl", hash = "sha256:1c6d85327ca688dbae7e2b06d7d84cfe4f3fffa5b5f9e21bb6ce9d0e1a0e0e0a", size = 2126260, upload-time = "2025-08-11T15:55:02.965Z" }, + { url = "https://files.pythonhosted.org/packages/41/1c/a7260bd47a6fae7e03768bf66451437b36451143f36b285522b865987ced/sqlalchemy-2.0.43-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e7c08f57f75a2bb62d7ee80a89686a5e5669f199235c6d1dac75cd59374091c3", size = 2130598, upload-time = "2025-08-11T15:51:15.903Z" }, + { url = "https://files.pythonhosted.org/packages/8e/84/8a337454e82388283830b3586ad7847aa9c76fdd4f1df09cdd1f94591873/sqlalchemy-2.0.43-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:14111d22c29efad445cd5021a70a8b42f7d9152d8ba7f73304c4d82460946aaa", size = 2118415, upload-time = "2025-08-11T15:51:17.256Z" }, + { url = "https://files.pythonhosted.org/packages/cf/ff/22ab2328148492c4d71899d62a0e65370ea66c877aea017a244a35733685/sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21b27b56eb2f82653168cefe6cb8e970cdaf4f3a6cb2c5e3c3c1cf3158968ff9", size = 3248707, upload-time = "2025-08-11T15:52:38.444Z" }, + { url = "https://files.pythonhosted.org/packages/dc/29/11ae2c2b981de60187f7cbc84277d9d21f101093d1b2e945c63774477aba/sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c5a9da957c56e43d72126a3f5845603da00e0293720b03bde0aacffcf2dc04f", size = 3253602, upload-time = "2025-08-11T15:56:37.348Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/61/987b6c23b12c56d2be451bc70900f67dd7d989d52b1ee64f239cf19aec69/sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d79f9fdc9584ec83d1b3c75e9f4595c49017f5594fee1a2217117647225d738", size = 3183248, upload-time = "2025-08-11T15:52:39.865Z" }, + { url = "https://files.pythonhosted.org/packages/86/85/29d216002d4593c2ce1c0ec2cec46dda77bfbcd221e24caa6e85eff53d89/sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9df7126fd9db49e3a5a3999442cc67e9ee8971f3cb9644250107d7296cb2a164", size = 3219363, upload-time = "2025-08-11T15:56:39.11Z" }, + { url = "https://files.pythonhosted.org/packages/b6/e4/bd78b01919c524f190b4905d47e7630bf4130b9f48fd971ae1c6225b6f6a/sqlalchemy-2.0.43-cp313-cp313-win32.whl", hash = "sha256:7f1ac7828857fcedb0361b48b9ac4821469f7694089d15550bbcf9ab22564a1d", size = 2096718, upload-time = "2025-08-11T15:55:05.349Z" }, + { url = "https://files.pythonhosted.org/packages/ac/a5/ca2f07a2a201f9497de1928f787926613db6307992fe5cda97624eb07c2f/sqlalchemy-2.0.43-cp313-cp313-win_amd64.whl", hash = "sha256:971ba928fcde01869361f504fcff3b7143b47d30de188b11c6357c0505824197", size = 2123200, upload-time = "2025-08-11T15:55:07.932Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d9/13bdde6521f322861fab67473cec4b1cc8999f3871953531cf61945fad92/sqlalchemy-2.0.43-py3-none-any.whl", hash = "sha256:1681c21dd2ccee222c2fe0bef671d1aef7c504087c9c4e800371cfcc8ac966fc", size = 1924759, upload-time = "2025-08-11T15:39:53.024Z" }, ] [[package]] @@ -4846,14 +5559,15 @@ wheels = [ [[package]] name = "starlette" -version = "0.46.2" +version = "0.47.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ce/20/08dfcd9c983f6a6f4a1000d934b9e6d626cff8d2eeb77a89a68eef20a2b7/starlette-0.46.2.tar.gz", hash = 
"sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5", size = 2580846, upload-time = "2025-04-13T13:56:17.942Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/57/d062573f391d062710d4088fa1369428c38d51460ab6fedff920efef932e/starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8", size = 2583948, upload-time = "2025-07-20T17:31:58.522Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/0c/9d30a4ebeb6db2b25a841afbb80f6ef9a854fc3b41be131d249a977b4959/starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35", size = 72037, upload-time = "2025-04-13T13:56:16.21Z" }, + { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" }, ] [[package]] @@ -4879,7 +5593,7 @@ wheels = [ [[package]] name = "tensorboard" -version = "2.19.0" +version = "2.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "absl-py" }, @@ -4887,14 +5601,14 @@ dependencies = [ { name = "markdown" }, { name = "numpy" }, { name = "packaging" }, + { name = "pillow" }, { name = "protobuf" }, { name = "setuptools" }, - { name = "six" }, { name = "tensorboard-data-server" }, { name = "werkzeug" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/12/4f70e8e2ba0dbe72ea978429d8530b0333f0ed2140cc571a48802878ef99/tensorboard-2.19.0-py3-none-any.whl", hash = "sha256:5e71b98663a641a7ce8a6e70b0be8e1a4c0c45d48760b076383ac4755c35b9a0", size = 5503412, upload-time = "2025-02-12T08:17:27.21Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d9/a5db55f88f258ac669a92858b70a714bbbd5acd993820b41ec4a96a4d77f/tensorboard-2.20.0-py3-none-any.whl", hash = 
"sha256:9dc9f978cb84c0723acf9a345d96c184f0293d18f166bb8d59ee098e6cfaaba6", size = 5525680, upload-time = "2025-07-17T19:20:49.638Z" }, ] [[package]] @@ -4909,24 +5623,24 @@ wheels = [ [[package]] name = "tensorstore" -version = "0.1.75" +version = "0.1.76" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ml-dtypes" }, { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/00/4e/5be077c63d01af420ca8a009cad3b30fef137ef37f6530c266f4f2628382/tensorstore-0.1.75.tar.gz", hash = "sha256:515cc90f5b6c316443f44794168083326fb29a0e50b0cd8fbd4cb3e0f32a3922", size = 6831417, upload-time = "2025-05-14T00:38:05.037Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ff/ae/947a9f232de7319b664ed8d278e9e0363e9294da73fd422c687ac4eb070e/tensorstore-0.1.76.tar.gz", hash = "sha256:ed0d565e7a038a84b1b5b5d9f7397caec200b53941d8889f44b7f63dd6abffe7", size = 6869230, upload-time = "2025-07-02T21:34:03.773Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ac/fb/28a5f8035cadbae34bdcaf03a8e0d731fd8bc8c9804ed8f54413cbfddeda/tensorstore-0.1.75-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:bc092152673a993df1867bac16622f5f382816184f2244df9ff78ba7f781e642", size = 15644019, upload-time = "2025-05-14T00:37:41.892Z" }, - { url = "https://files.pythonhosted.org/packages/16/52/b289ac969d7cee8c253b2f90e5cd6b37789f704147ff7fffa8a50e7b97c4/tensorstore-0.1.75-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d5c6c8ef6c6758f7e11a4cbc7fc4e23af5170128901df729185b7870f6dbc071", size = 13557511, upload-time = "2025-05-14T00:37:44.508Z" }, - { url = "https://files.pythonhosted.org/packages/35/50/a2c4271e2512ace24290d2d7cf166aaf6e251ef14d20255d98a96c6a9514/tensorstore-0.1.75-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8bbb30f24aef98d43657d132833f5577bfa91497769ef6b5238c5faccf7afe35", size = 17454887, upload-time = "2025-05-14T00:37:46.918Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/1d/9b2610a0770a2115e4a20c1a9377e2e14efabeb55852d150832ff82346f4/tensorstore-0.1.75-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08d9c2b8b84c892c3c81f6025ec189f58bd7860bf624c32646e5bee81870f95", size = 18820501, upload-time = "2025-05-14T00:37:49.022Z" }, - { url = "https://files.pythonhosted.org/packages/b9/9c/06f7318bd56fe62ccd7743159cd9e133b5e0ead5b8b229a6f1f392e65666/tensorstore-0.1.75-cp312-cp312-win_amd64.whl", hash = "sha256:39d4173bdbbc1cf41e168fe730fd457a6b0c4100ba707254260f63cb9ad3ef0b", size = 12607424, upload-time = "2025-05-14T00:37:51.368Z" }, - { url = "https://files.pythonhosted.org/packages/c1/97/656252b262099fdc8b3f247c58ec147ba644f4fc4dec8f7af3ffb352704e/tensorstore-0.1.75-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:0d2f87ca268faf903d5ffba6157fd9aeb42e9f961cea01b98baa690f71f51a1e", size = 15644856, upload-time = "2025-05-14T00:37:53.28Z" }, - { url = "https://files.pythonhosted.org/packages/94/e1/66067a2aa5c2890c02397df65d748978de1dbbe91ce394f285f86390c149/tensorstore-0.1.75-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:17ee80f9869b5a4b0303cb37edca9c9665af7a9510fac85f59fb8de19f12efd1", size = 13557924, upload-time = "2025-05-14T00:37:55.249Z" }, - { url = "https://files.pythonhosted.org/packages/46/56/c1245f7bb674072bb0f9e8516bd60f7608ffe114e911c08ebcaefca58f46/tensorstore-0.1.75-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f00144d23eaf511104651b4479fcb111b9befc13db3018277d358144be503ef4", size = 17454695, upload-time = "2025-05-14T00:37:58.521Z" }, - { url = "https://files.pythonhosted.org/packages/db/78/8a103a9012662fb8d85c3d6daa9c9678d49f260a21b5426e0a1616e63c42/tensorstore-0.1.75-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c8697cab7b24440a13df8d9e6d000c1067ed3f97204a3dae5388e9e60606834", size = 18820794, upload-time = "2025-05-14T00:38:01.253Z" }, - { url = 
"https://files.pythonhosted.org/packages/7d/3d/69d7997fe67fd9cb8fce07ea0f3f3e754a6ea0d2c16f1c46e178abe7da0e/tensorstore-0.1.75-cp313-cp313-win_amd64.whl", hash = "sha256:df410ca28e679c1c8a5b361267ce02fe60a9c4d78964cb984d884d15c538f2f2", size = 12607428, upload-time = "2025-05-14T00:38:03.32Z" }, + { url = "https://files.pythonhosted.org/packages/09/37/f2254b4ae1dabd95e258fa3eb4783ac4db4261bb8c90ff9bfe15549d1238/tensorstore-0.1.76-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:b68450983ccad9e7774e81b2fa37daef1b72c774fd939d9eb4065d6aa70e666a", size = 15712650, upload-time = "2025-07-02T21:33:39.716Z" }, + { url = "https://files.pythonhosted.org/packages/93/3c/1cae56cbbe9610ff48cb2d7c0921a4d4c333a0540918e3b2db08b521c5f6/tensorstore-0.1.76-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b7a3856f884279e40f90bad87d0da70869879e124835e650c6b16c80f64fbc4", size = 13624138, upload-time = "2025-07-02T21:33:41.758Z" }, + { url = "https://files.pythonhosted.org/packages/ba/d2/b92d34a896f608a59dc76c290d4ec9f7d0264a02e4d74864987a6adbd3c9/tensorstore-0.1.76-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8709a98ae0b453eb23525c07372c2be1f6bbd978bba53319f26a1f2a83a77c2a", size = 17538270, upload-time = "2025-07-02T21:33:44.911Z" }, + { url = "https://files.pythonhosted.org/packages/21/66/142b803541552b02a2fa033b1f48bcb50e1d2df6ac10131aab1857c5141d/tensorstore-0.1.76-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:267edea8f1596f2bd67017ff97b7b350bf3f95ff84947a8babadc5e17ca53663", size = 18910782, upload-time = "2025-07-02T21:33:47.401Z" }, + { url = "https://files.pythonhosted.org/packages/5a/3e/c264cf1435c04fb998a1f30dd1f066deb370b841412f89e1cb36d37ee4fc/tensorstore-0.1.76-cp312-cp312-win_amd64.whl", hash = "sha256:f66ac63d0c63c3336ac4dc61f1f97b6afe8b512e586ddfdbc91f19175787f321", size = 12611059, upload-time = "2025-07-02T21:33:49.596Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/66/1e3b819e1de98b048dad7843f3a814c5e739ead57f511dafb6aa0748f04a/tensorstore-0.1.76-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:a471994b156daa3cadb0e4968e29202fa2e8c7ddcd28d825499bb5637caa0983", size = 15713110, upload-time = "2025-07-02T21:33:51.973Z" }, + { url = "https://files.pythonhosted.org/packages/58/d3/226344e8822c5e02af929c89bd61964e08980253cda15286a201850eb3b1/tensorstore-0.1.76-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:98175dc64935b49467cb7664a431b9a06e9df9b5cab94f9a1fdb24a30b2d69d3", size = 13624514, upload-time = "2025-07-02T21:33:54.109Z" }, + { url = "https://files.pythonhosted.org/packages/94/9f/2b267c520dbbcf0a5ebc7a3c0a6cf852a445e22c8ea8b0f7450bf6b98783/tensorstore-0.1.76-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9e30577f1197ea3573102912482dced95e4c6ff72087ffeb99b5d8b496bf81a", size = 17539304, upload-time = "2025-07-02T21:33:56.172Z" }, + { url = "https://files.pythonhosted.org/packages/1d/9a/9dcc01c8f87047b09602ea16379233b8a308d1d83d5432bf8bc89163ca3e/tensorstore-0.1.76-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20782f833bfa3c59dd3787f657388054c54ee0ab48dad181b360e3e5e81e4c4b", size = 18911982, upload-time = "2025-07-02T21:33:58.928Z" }, + { url = "https://files.pythonhosted.org/packages/10/45/43d387027b3eac9f09de8bb736b1b432de287fbd807716877fe5fbaeee56/tensorstore-0.1.76-cp313-cp313-win_amd64.whl", hash = "sha256:e84fc11b36fcd55cfd1c5dfc60de9d54d7d95c3de074f4d854914067e82a6740", size = 12610851, upload-time = "2025-07-02T21:34:01.505Z" }, ] [[package]] @@ -4940,51 +5654,68 @@ wheels = [ [[package]] name = "tiktoken" -version = "0.9.0" +version = "0.11.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "regex" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ea/cf/756fedf6981e82897f2d570dd25fa597eb3f4459068ae0572d7e888cfd6f/tiktoken-0.9.0.tar.gz", hash 
= "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d", size = 35991, upload-time = "2025-02-14T06:03:01.003Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/86/ad0155a37c4f310935d5ac0b1ccf9bdb635dcb906e0a9a26b616dd55825a/tiktoken-0.11.0.tar.gz", hash = "sha256:3c518641aee1c52247c2b97e74d8d07d780092af79d5911a6ab5e79359d9b06a", size = 37648, upload-time = "2025-08-08T23:58:08.495Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/e5/21ff33ecfa2101c1bb0f9b6df750553bd873b7fb532ce2cb276ff40b197f/tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03", size = 1065073, upload-time = "2025-02-14T06:02:24.768Z" }, - { url = "https://files.pythonhosted.org/packages/8e/03/a95e7b4863ee9ceec1c55983e4cc9558bcfd8f4f80e19c4f8a99642f697d/tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210", size = 1008075, upload-time = "2025-02-14T06:02:26.92Z" }, - { url = "https://files.pythonhosted.org/packages/40/10/1305bb02a561595088235a513ec73e50b32e74364fef4de519da69bc8010/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794", size = 1140754, upload-time = "2025-02-14T06:02:28.124Z" }, - { url = "https://files.pythonhosted.org/packages/1b/40/da42522018ca496432ffd02793c3a72a739ac04c3794a4914570c9bb2925/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22", size = 1196678, upload-time = "2025-02-14T06:02:29.845Z" }, - { url = "https://files.pythonhosted.org/packages/5c/41/1e59dddaae270ba20187ceb8aa52c75b24ffc09f547233991d5fd822838b/tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2", size = 
1259283, upload-time = "2025-02-14T06:02:33.838Z" }, - { url = "https://files.pythonhosted.org/packages/5b/64/b16003419a1d7728d0d8c0d56a4c24325e7b10a21a9dd1fc0f7115c02f0a/tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16", size = 894897, upload-time = "2025-02-14T06:02:36.265Z" }, - { url = "https://files.pythonhosted.org/packages/7a/11/09d936d37f49f4f494ffe660af44acd2d99eb2429d60a57c71318af214e0/tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb", size = 1064919, upload-time = "2025-02-14T06:02:37.494Z" }, - { url = "https://files.pythonhosted.org/packages/80/0e/f38ba35713edb8d4197ae602e80837d574244ced7fb1b6070b31c29816e0/tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63", size = 1007877, upload-time = "2025-02-14T06:02:39.516Z" }, - { url = "https://files.pythonhosted.org/packages/fe/82/9197f77421e2a01373e27a79dd36efdd99e6b4115746ecc553318ecafbf0/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01", size = 1140095, upload-time = "2025-02-14T06:02:41.791Z" }, - { url = "https://files.pythonhosted.org/packages/f2/bb/4513da71cac187383541facd0291c4572b03ec23c561de5811781bbd988f/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139", size = 1195649, upload-time = "2025-02-14T06:02:43Z" }, - { url = "https://files.pythonhosted.org/packages/fa/5c/74e4c137530dd8504e97e3a41729b1103a4ac29036cbfd3250b11fd29451/tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a", size = 1258465, upload-time = "2025-02-14T06:02:45.046Z" }, - { url = 
"https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669, upload-time = "2025-02-14T06:02:47.341Z" }, + { url = "https://files.pythonhosted.org/packages/e7/9e/eceddeffc169fc75fe0fd4f38471309f11cb1906f9b8aa39be4f5817df65/tiktoken-0.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fd9e6b23e860973cf9526544e220b223c60badf5b62e80a33509d6d40e6c8f5d", size = 1055199, upload-time = "2025-08-08T23:57:45.076Z" }, + { url = "https://files.pythonhosted.org/packages/4f/cf/5f02bfefffdc6b54e5094d2897bc80efd43050e5b09b576fd85936ee54bf/tiktoken-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a76d53cee2da71ee2731c9caa747398762bda19d7f92665e882fef229cb0b5b", size = 996655, upload-time = "2025-08-08T23:57:46.304Z" }, + { url = "https://files.pythonhosted.org/packages/65/8e/c769b45ef379bc360c9978c4f6914c79fd432400a6733a8afc7ed7b0726a/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef72aab3ea240646e642413cb363b73869fed4e604dcfd69eec63dc54d603e8", size = 1128867, upload-time = "2025-08-08T23:57:47.438Z" }, + { url = "https://files.pythonhosted.org/packages/d5/2d/4d77f6feb9292bfdd23d5813e442b3bba883f42d0ac78ef5fdc56873f756/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f929255c705efec7a28bf515e29dc74220b2f07544a8c81b8d69e8efc4578bd", size = 1183308, upload-time = "2025-08-08T23:57:48.566Z" }, + { url = "https://files.pythonhosted.org/packages/7a/65/7ff0a65d3bb0fc5a1fb6cc71b03e0f6e71a68c5eea230d1ff1ba3fd6df49/tiktoken-0.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61f1d15822e4404953d499fd1dcc62817a12ae9fb1e4898033ec8fe3915fdf8e", size = 1244301, upload-time = "2025-08-08T23:57:49.642Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/6e/5b71578799b72e5bdcef206a214c3ce860d999d579a3b56e74a6c8989ee2/tiktoken-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:45927a71ab6643dfd3ef57d515a5db3d199137adf551f66453be098502838b0f", size = 884282, upload-time = "2025-08-08T23:57:50.759Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/a9034bcee638716d9310443818d73c6387a6a96db93cbcb0819b77f5b206/tiktoken-0.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a5f3f25ffb152ee7fec78e90a5e5ea5b03b4ea240beed03305615847f7a6ace2", size = 1055339, upload-time = "2025-08-08T23:57:51.802Z" }, + { url = "https://files.pythonhosted.org/packages/f1/91/9922b345f611b4e92581f234e64e9661e1c524875c8eadd513c4b2088472/tiktoken-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7dc6e9ad16a2a75b4c4be7208055a1f707c9510541d94d9cc31f7fbdc8db41d8", size = 997080, upload-time = "2025-08-08T23:57:53.442Z" }, + { url = "https://files.pythonhosted.org/packages/d0/9d/49cd047c71336bc4b4af460ac213ec1c457da67712bde59b892e84f1859f/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a0517634d67a8a48fd4a4ad73930c3022629a85a217d256a6e9b8b47439d1e4", size = 1128501, upload-time = "2025-08-08T23:57:54.808Z" }, + { url = "https://files.pythonhosted.org/packages/52/d5/a0dcdb40dd2ea357e83cb36258967f0ae96f5dd40c722d6e382ceee6bba9/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fb4effe60574675118b73c6fbfd3b5868e5d7a1f570d6cc0d18724b09ecf318", size = 1182743, upload-time = "2025-08-08T23:57:56.307Z" }, + { url = "https://files.pythonhosted.org/packages/3b/17/a0fc51aefb66b7b5261ca1314afa83df0106b033f783f9a7bcbe8e741494/tiktoken-0.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94f984c9831fd32688aef4348803b0905d4ae9c432303087bae370dc1381a2b8", size = 1244057, upload-time = "2025-08-08T23:57:57.628Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/79/bcf350609f3a10f09fe4fc207f132085e497fdd3612f3925ab24d86a0ca0/tiktoken-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:2177ffda31dec4023356a441793fed82f7af5291120751dee4d696414f54db0c", size = 883901, upload-time = "2025-08-08T23:57:59.359Z" }, +] + +[[package]] +name = "timm" +version = "1.0.16" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "huggingface-hub" }, + { name = "pyyaml" }, + { name = "safetensors" }, + { name = "torch" }, + { name = "torchvision", version = "0.22.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "torchvision", version = "0.22.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/f6/4d7a8c261341fa6ad281920618739f2a650f41043afcedb570f24e99a776/timm-1.0.16.tar.gz", hash = "sha256:a3b8130dd2cb8dc3b9f5e3d09ab6d677a6315a8695fd5264eb6d52a4a46c1044", size = 2339999, upload-time = "2025-06-26T17:09:44.208Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/14/10d0ea58a7580b8bd7c8d69420b3dc3a1deb890d4ff297deca9717689598/timm-1.0.16-py3-none-any.whl", hash = "sha256:a640e58f4ae41e0445517d1133b34be75bb2bd49cdb830d739925ce1fb7d2526", size = 2485733, upload-time = "2025-06-26T17:09:42.652Z" }, ] [[package]] name = "tokenizers" -version = "0.21.2" +version = "0.21.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ab/2d/b0fce2b8201635f60e8c95990080f58461cc9ca3d5026de2e900f38a7f21/tokenizers-0.21.2.tar.gz", hash = "sha256:fdc7cffde3e2113ba0e6cc7318c40e3438a4d74bbc62bf04bcc63bdfb082ac77", size = 351545, upload-time = "2025-06-24T10:24:52.449Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/c2/2f/402986d0823f8d7ca139d969af2917fefaa9b947d1fb32f6168c509f2492/tokenizers-0.21.4.tar.gz", hash = "sha256:fa23f85fbc9a02ec5c6978da172cdcbac23498c3ca9f3645c5c68740ac007880", size = 351253, upload-time = "2025-07-28T15:48:54.325Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/cc/2936e2d45ceb130a21d929743f1e9897514691bec123203e10837972296f/tokenizers-0.21.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:342b5dfb75009f2255ab8dec0041287260fed5ce00c323eb6bab639066fef8ec", size = 2875206, upload-time = "2025-06-24T10:24:42.755Z" }, - { url = "https://files.pythonhosted.org/packages/6c/e6/33f41f2cc7861faeba8988e7a77601407bf1d9d28fc79c5903f8f77df587/tokenizers-0.21.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:126df3205d6f3a93fea80c7a8a266a78c1bd8dd2fe043386bafdd7736a23e45f", size = 2732655, upload-time = "2025-06-24T10:24:41.56Z" }, - { url = "https://files.pythonhosted.org/packages/33/2b/1791eb329c07122a75b01035b1a3aa22ad139f3ce0ece1b059b506d9d9de/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a32cd81be21168bd0d6a0f0962d60177c447a1aa1b1e48fa6ec9fc728ee0b12", size = 3019202, upload-time = "2025-06-24T10:24:31.791Z" }, - { url = "https://files.pythonhosted.org/packages/05/15/fd2d8104faa9f86ac68748e6f7ece0b5eb7983c7efc3a2c197cb98c99030/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8bd8999538c405133c2ab999b83b17c08b7fc1b48c1ada2469964605a709ef91", size = 2934539, upload-time = "2025-06-24T10:24:34.567Z" }, - { url = "https://files.pythonhosted.org/packages/a5/2e/53e8fd053e1f3ffbe579ca5f9546f35ac67cf0039ed357ad7ec57f5f5af0/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e9944e61239b083a41cf8fc42802f855e1dca0f499196df37a8ce219abac6eb", size = 3248665, upload-time = "2025-06-24T10:24:39.024Z" }, - { url = 
"https://files.pythonhosted.org/packages/00/15/79713359f4037aa8f4d1f06ffca35312ac83629da062670e8830917e2153/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:514cd43045c5d546f01142ff9c79a96ea69e4b5cda09e3027708cb2e6d5762ab", size = 3451305, upload-time = "2025-06-24T10:24:36.133Z" }, - { url = "https://files.pythonhosted.org/packages/38/5f/959f3a8756fc9396aeb704292777b84f02a5c6f25c3fc3ba7530db5feb2c/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1b9405822527ec1e0f7d8d2fdb287a5730c3a6518189c968254a8441b21faae", size = 3214757, upload-time = "2025-06-24T10:24:37.784Z" }, - { url = "https://files.pythonhosted.org/packages/c5/74/f41a432a0733f61f3d21b288de6dfa78f7acff309c6f0f323b2833e9189f/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fed9a4d51c395103ad24f8e7eb976811c57fbec2af9f133df471afcd922e5020", size = 3121887, upload-time = "2025-06-24T10:24:40.293Z" }, - { url = "https://files.pythonhosted.org/packages/3c/6a/bc220a11a17e5d07b0dfb3b5c628621d4dcc084bccd27cfaead659963016/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2c41862df3d873665ec78b6be36fcc30a26e3d4902e9dd8608ed61d49a48bc19", size = 9091965, upload-time = "2025-06-24T10:24:44.431Z" }, - { url = "https://files.pythonhosted.org/packages/6c/bd/ac386d79c4ef20dc6f39c4706640c24823dca7ebb6f703bfe6b5f0292d88/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:ed21dc7e624e4220e21758b2e62893be7101453525e3d23264081c9ef9a6d00d", size = 9053372, upload-time = "2025-06-24T10:24:46.455Z" }, - { url = "https://files.pythonhosted.org/packages/63/7b/5440bf203b2a5358f074408f7f9c42884849cd9972879e10ee6b7a8c3b3d/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:0e73770507e65a0e0e2a1affd6b03c36e3bc4377bd10c9ccf51a82c77c0fe365", size = 9298632, upload-time = "2025-06-24T10:24:48.446Z" }, - { url = 
"https://files.pythonhosted.org/packages/a4/d2/faa1acac3f96a7427866e94ed4289949b2524f0c1878512516567d80563c/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:106746e8aa9014a12109e58d540ad5465b4c183768ea96c03cbc24c44d329958", size = 9470074, upload-time = "2025-06-24T10:24:50.378Z" }, - { url = "https://files.pythonhosted.org/packages/d8/a5/896e1ef0707212745ae9f37e84c7d50269411aef2e9ccd0de63623feecdf/tokenizers-0.21.2-cp39-abi3-win32.whl", hash = "sha256:cabda5a6d15d620b6dfe711e1af52205266d05b379ea85a8a301b3593c60e962", size = 2330115, upload-time = "2025-06-24T10:24:55.069Z" }, - { url = "https://files.pythonhosted.org/packages/13/c3/cc2755ee10be859c4338c962a35b9a663788c0c0b50c0bdd8078fb6870cf/tokenizers-0.21.2-cp39-abi3-win_amd64.whl", hash = "sha256:58747bb898acdb1007f37a7bbe614346e98dc28708ffb66a3fd50ce169ac6c98", size = 2509918, upload-time = "2025-06-24T10:24:53.71Z" }, + { url = "https://files.pythonhosted.org/packages/98/c6/fdb6f72bf6454f52eb4a2510be7fb0f614e541a2554d6210e370d85efff4/tokenizers-0.21.4-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:2ccc10a7c3bcefe0f242867dc914fc1226ee44321eb618cfe3019b5df3400133", size = 2863987, upload-time = "2025-07-28T15:48:44.877Z" }, + { url = "https://files.pythonhosted.org/packages/8d/a6/28975479e35ddc751dc1ddc97b9b69bf7fcf074db31548aab37f8116674c/tokenizers-0.21.4-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:5e2f601a8e0cd5be5cc7506b20a79112370b9b3e9cb5f13f68ab11acd6ca7d60", size = 2732457, upload-time = "2025-07-28T15:48:43.265Z" }, + { url = "https://files.pythonhosted.org/packages/aa/8f/24f39d7b5c726b7b0be95dca04f344df278a3fe3a4deb15a975d194cbb32/tokenizers-0.21.4-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b376f5a1aee67b4d29032ee85511bbd1b99007ec735f7f35c8a2eb104eade5", size = 3012624, upload-time = "2025-07-28T13:22:43.895Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/47/26358925717687a58cb74d7a508de96649544fad5778f0cd9827398dc499/tokenizers-0.21.4-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2107ad649e2cda4488d41dfd031469e9da3fcbfd6183e74e4958fa729ffbf9c6", size = 2939681, upload-time = "2025-07-28T13:22:47.499Z" }, + { url = "https://files.pythonhosted.org/packages/99/6f/cc300fea5db2ab5ddc2c8aea5757a27b89c84469899710c3aeddc1d39801/tokenizers-0.21.4-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c73012da95afafdf235ba80047699df4384fdc481527448a078ffd00e45a7d9", size = 3247445, upload-time = "2025-07-28T15:48:39.711Z" }, + { url = "https://files.pythonhosted.org/packages/be/bf/98cb4b9c3c4afd8be89cfa6423704337dc20b73eb4180397a6e0d456c334/tokenizers-0.21.4-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f23186c40395fc390d27f519679a58023f368a0aad234af145e0f39ad1212732", size = 3428014, upload-time = "2025-07-28T13:22:49.569Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/96c1cc780e6ca7f01a57c13235dd05b7bc1c0f3588512ebe9d1331b5f5ae/tokenizers-0.21.4-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc88bb34e23a54cc42713d6d98af5f1bf79c07653d24fe984d2d695ba2c922a2", size = 3193197, upload-time = "2025-07-28T13:22:51.471Z" }, + { url = "https://files.pythonhosted.org/packages/f2/90/273b6c7ec78af547694eddeea9e05de771278bd20476525ab930cecaf7d8/tokenizers-0.21.4-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51b7eabb104f46c1c50b486520555715457ae833d5aee9ff6ae853d1130506ff", size = 3115426, upload-time = "2025-07-28T15:48:41.439Z" }, + { url = "https://files.pythonhosted.org/packages/91/43/c640d5a07e95f1cf9d2c92501f20a25f179ac53a4f71e1489a3dcfcc67ee/tokenizers-0.21.4-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:714b05b2e1af1288bd1bc56ce496c4cebb64a20d158ee802887757791191e6e2", size = 9089127, upload-time = "2025-07-28T15:48:46.472Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/a1/dd23edd6271d4dca788e5200a807b49ec3e6987815cd9d0a07ad9c96c7c2/tokenizers-0.21.4-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:1340ff877ceedfa937544b7d79f5b7becf33a4cfb58f89b3b49927004ef66f78", size = 9055243, upload-time = "2025-07-28T15:48:48.539Z" }, + { url = "https://files.pythonhosted.org/packages/21/2b/b410d6e9021c4b7ddb57248304dc817c4d4970b73b6ee343674914701197/tokenizers-0.21.4-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:3c1f4317576e465ac9ef0d165b247825a2a4078bcd01cba6b54b867bdf9fdd8b", size = 9298237, upload-time = "2025-07-28T15:48:50.443Z" }, + { url = "https://files.pythonhosted.org/packages/b7/0a/42348c995c67e2e6e5c89ffb9cfd68507cbaeb84ff39c49ee6e0a6dd0fd2/tokenizers-0.21.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:c212aa4e45ec0bb5274b16b6f31dd3f1c41944025c2358faaa5782c754e84c24", size = 9461980, upload-time = "2025-07-28T15:48:52.325Z" }, + { url = "https://files.pythonhosted.org/packages/3d/d3/dacccd834404cd71b5c334882f3ba40331ad2120e69ded32cf5fda9a7436/tokenizers-0.21.4-cp39-abi3-win32.whl", hash = "sha256:6c42a930bc5f4c47f4ea775c91de47d27910881902b0f20e4990ebe045a415d0", size = 2329871, upload-time = "2025-07-28T15:48:56.841Z" }, + { url = "https://files.pythonhosted.org/packages/41/f2/fd673d979185f5dcbac4be7d09461cbb99751554ffb6718d0013af8604cb/tokenizers-0.21.4-cp39-abi3-win_amd64.whl", hash = "sha256:475d807a5c3eb72c59ad9b5fcdb254f6e17f53dfcbb9903233b0dfa9c943b597", size = 2507568, upload-time = "2025-07-28T15:48:55.456Z" }, ] [[package]] @@ -5021,7 +5752,7 @@ dependencies = [ { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, { name = "setuptools" }, { name = "sympy" }, - { name = "triton", version = "3.3.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "sys_platform == 'linux'" }, + { name = "triton", marker = "sys_platform == 'linux'" }, { name = "typing-extensions" }, ] wheels = [ @@ -5036,6 
+5767,15 @@ wheels = [ { url = "https://download.pytorch.org/whl/cu128/torch-2.7.1%2Bcu128-cp313-cp313t-win_amd64.whl", hash = "sha256:e27e5f7e74179fb5d814a0412e5026e4b50c9e0081e9050bc4c28c992a276eb1" }, ] +[[package]] +name = "torchao" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/72/5e/f5df5e0bbc7d84e5da3b4599b5bad58f4a6657b22bcae64dd741faee80ab/torchao-0.12.0-cp39-abi3-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:326ea2800cc7d9d50f0d17742ad923e5c6d4c4dd5942558f4ed13db00bdebc7c", size = 6777967, upload-time = "2025-07-17T17:50:13.567Z" }, + { url = "https://files.pythonhosted.org/packages/6c/5f/6bf9b5bed6d31e286516d23e1db7320d2ccfbf1b2234749833ad1e3d25a5/torchao-0.12.0-py3-none-any.whl", hash = "sha256:103f2a9164d2e4f705332af1aafbb8473eadd14d9164e45857ca187cde1f13d2", size = 962232, upload-time = "2025-07-17T17:50:15.119Z" }, +] + [[package]] name = "torchaudio" version = "2.7.1" @@ -5058,6 +5798,17 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/91/5e/9262a7e41e47bc87eb245c4fc485eb26ff41a05886b241c003440c9e0107/torchaudio-2.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:c802e0dcbf38669007327bb52f065573cc5cac106eaca987f6e1a32e6282263a", size = 2534956, upload-time = "2025-06-04T17:43:42.324Z" }, ] +[[package]] +name = "torchcodec" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/b3/11326a0e7a3c803a95975cfce4ac88fa4ea1a0d432bb876081046c5a5554/torchcodec-0.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fba260145a239b5afe13336e3a5bc1b089c9c31a073e9a7c2026d4cbd853fdd9", size = 3482584, upload-time = "2025-08-07T08:51:32.535Z" }, + { url = "https://files.pythonhosted.org/packages/a7/d1/3f90561df013f6a015ef19de22726b64073fee405f53d3c4b8255ab05a67/torchcodec-0.6.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:fdef91a17fb1f1a159ce23710324a9a4e6d6a885275de73700f94a9ad562c6b2", size = 1370954, upload-time = "2025-08-07T08:51:15.021Z" }, + { url = "https://files.pythonhosted.org/packages/87/d0/0b5dd42652e4527d578e1d6239dbb907bf83e502115e517b83a55d8b7f8b/torchcodec-0.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:de20cab5df7fa7cdd74ec1dc0d508324685573f86de6789f0ebb860b7ea20b33", size = 3446017, upload-time = "2025-08-07T08:51:34.484Z" }, + { url = "https://files.pythonhosted.org/packages/97/62/a938334e39101d4304619b90847d8aef7d1c607c6bcf33638f72931ae990/torchcodec-0.6.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:46dab701a2d809e975a8b07d7ee47ed34f1d903511e374c74cfc1de6a5ab0e3f", size = 1374794, upload-time = "2025-08-07T08:51:17.355Z" }, +] + [[package]] name = "torchdata" version = "0.11.0" @@ -5073,7 +5824,7 @@ wheels = [ [[package]] name = "torchmetrics" -version = "1.7.3" +version = "1.8.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "lightning-utilities" }, @@ -5081,9 +5832,9 @@ dependencies = [ { name = "packaging" }, { name = "torch" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/48/22/8b16c4ec34d93ee15024924cbbe84fbd235bb3e1df2cc8f48c865c1528e7/torchmetrics-1.7.3.tar.gz", hash = "sha256:08450a19cdb67ba1608aac0b213e5dc73033e11b60ad4719696ebcede591621e", size = 566545, upload-time = "2025-06-13T15:39:37.498Z" } +sdist = { url = "https://files.pythonhosted.org/packages/78/1f/2cd9eb8f3390c3ec4693ac0871913d4b468964b3833638e4091a70817e0a/torchmetrics-1.8.1.tar.gz", hash = "sha256:04ca021105871637c5d34d0a286b3ab665a1e3d2b395e561f14188a96e862fdb", size = 580373, upload-time = "2025-08-07T20:44:44.631Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6f/f2/bed7da46003c26ed44fc7fa3ecc98a84216f0d4758e5e6a3693754d490d9/torchmetrics-1.7.3-py3-none-any.whl", hash = "sha256:7b6fd43e92f0a1071c8bcb029637f252b0630699140a93ed8817ce7afe9db34e", size = 962639, upload-time = 
"2025-06-13T15:39:35.69Z" }, + { url = "https://files.pythonhosted.org/packages/8f/59/5c1c1cb08c494621901cf549a543f87143019fac1e6dd191eb4630bbc8fb/torchmetrics-1.8.1-py3-none-any.whl", hash = "sha256:2437501351e0da3d294c71210ce8139b9c762b5e20604f7a051a725443db8f4b", size = 982961, upload-time = "2025-08-07T20:44:42.608Z" }, ] [[package]] @@ -5091,9 +5842,10 @@ name = "torchprofile" version = "0.0.4" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy", marker = "sys_platform != 'darwin'" }, - { name = "torch", marker = "sys_platform != 'darwin'" }, - { name = "torchvision", marker = "sys_platform != 'darwin'" }, + { name = "numpy" }, + { name = "torch" }, + { name = "torchvision", version = "0.22.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "torchvision", version = "0.22.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/6f/36/574c0c46e818533b78b3c09505211162918188325ab4165ef11a3f295755/torchprofile-0.0.4.tar.gz", hash = "sha256:96b6da17d752a06b02977e078aea95614893b31d4117dd5dcd081f30ce65611b", size = 4557, upload-time = "2021-06-22T04:58:03.592Z" } wheels = [ @@ -5103,25 +5855,44 @@ wheels = [ [[package]] name = "torchvision" version = "0.22.1" -source = { registry = "https://pypi.org/simple" } +source = { registry = "https://download.pytorch.org/whl/cu128" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", +] dependencies = [ - { name = "numpy" }, - { name = "pillow" }, - { name = "torch" }, + { name = "numpy", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "pillow", 
marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "torch", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2ad7fe412b821333fc05b4046263d77c14ba86f3965366adbada8dc397ea45b4" }, + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:75f519ebe412ced95d727c71c30c68084cc6fd36347b88f338e88ff9d07a3ac8" }, + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:f6565fd22e04e51f9600f34a3a20b120ee9f5a73161bfcb79c826225054aa44e" }, +] + +[[package]] +name = "torchvision" +version = "0.22.1+cu128" +source = { registry = "https://download.pytorch.org/whl/cu128" } +resolution-markers = [ + "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version < '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", + "python_full_version >= '3.13' and sys_platform != 'linux'", + "python_full_version < '3.13' and sys_platform != 'linux'", +] +dependencies = [ + { name = "numpy", marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, + { name = "pillow", marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, + { name = "torch", marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/02/90/f4e99a5112dc221cf68a485e853cc3d9f3f1787cb950b895f3ea26d1ea98/torchvision-0.22.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:153f1790e505bd6da123e21eee6e83e2e155df05c0fe7d56347303067d8543c5", size = 1947827, upload-time = "2025-06-04T17:43:11.945Z" }, - { url = 
"https://files.pythonhosted.org/packages/25/f6/53e65384cdbbe732cc2106bb04f7fb908487e4fb02ae4a1613ce6904a122/torchvision-0.22.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:964414eef19459d55a10e886e2fca50677550e243586d1678f65e3f6f6bac47a", size = 2514576, upload-time = "2025-06-04T17:43:02.707Z" }, - { url = "https://files.pythonhosted.org/packages/17/8b/155f99042f9319bd7759536779b2a5b67cbd4f89c380854670850f89a2f4/torchvision-0.22.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:699c2d70d33951187f6ed910ea05720b9b4aaac1dcc1135f53162ce7d42481d3", size = 7485962, upload-time = "2025-06-04T17:42:43.606Z" }, - { url = "https://files.pythonhosted.org/packages/05/17/e45d5cd3627efdb47587a0634179a3533593436219de3f20c743672d2a79/torchvision-0.22.1-cp312-cp312-win_amd64.whl", hash = "sha256:75e0897da7a8e43d78632f66f2bdc4f6e26da8d3f021a7c0fa83746073c2597b", size = 1707992, upload-time = "2025-06-04T17:42:53.207Z" }, - { url = "https://files.pythonhosted.org/packages/7a/30/fecdd09fb973e963da68207fe9f3d03ec6f39a935516dc2a98397bf495c6/torchvision-0.22.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c3ae3319624c43cc8127020f46c14aa878406781f0899bb6283ae474afeafbf", size = 1947818, upload-time = "2025-06-04T17:42:51.954Z" }, - { url = "https://files.pythonhosted.org/packages/55/f4/b45f6cd92fa0acfac5e31b8e9258232f25bcdb0709a604e8b8a39d76e411/torchvision-0.22.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:4a614a6a408d2ed74208d0ea6c28a2fbb68290e9a7df206c5fef3f0b6865d307", size = 2471597, upload-time = "2025-06-04T17:42:48.838Z" }, - { url = "https://files.pythonhosted.org/packages/8d/b0/3cffd6a285b5ffee3fe4a31caff49e350c98c5963854474d1c4f7a51dea5/torchvision-0.22.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:7ee682be589bb1a002b7704f06b8ec0b89e4b9068f48e79307d2c6e937a9fdf4", size = 7485894, upload-time = "2025-06-04T17:43:01.371Z" }, - { url = 
"https://files.pythonhosted.org/packages/fd/1d/0ede596fedc2080d18108149921278b59f220fbb398f29619495337b0f86/torchvision-0.22.1-cp313-cp313-win_amd64.whl", hash = "sha256:2566cafcfa47ecfdbeed04bab8cef1307c8d4ef75046f7624b9e55f384880dfe", size = 1708020, upload-time = "2025-06-04T17:43:06.085Z" }, - { url = "https://files.pythonhosted.org/packages/0f/ca/e9a06bd61ee8e04fb4962a3fb524fe6ee4051662db07840b702a9f339b24/torchvision-0.22.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:043d9e35ed69c2e586aff6eb9e2887382e7863707115668ac9d140da58f42cba", size = 2137623, upload-time = "2025-06-04T17:43:05.028Z" }, - { url = "https://files.pythonhosted.org/packages/ab/c8/2ebe90f18e7ffa2120f5c3eab62aa86923185f78d2d051a455ea91461608/torchvision-0.22.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:27142bcc8a984227a6dcf560985e83f52b82a7d3f5fe9051af586a2ccc46ef26", size = 2476561, upload-time = "2025-06-04T17:42:59.691Z" }, - { url = "https://files.pythonhosted.org/packages/94/8b/04c6b15f8c29b39f0679589753091cec8b192ab296d4fdaf9055544c4ec9/torchvision-0.22.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ef46e065502f7300ad6abc98554131c35dc4c837b978d91306658f1a65c00baa", size = 7658543, upload-time = "2025-06-04T17:42:46.064Z" }, - { url = "https://files.pythonhosted.org/packages/ab/c0/131628e6d42682b0502c63fd7f647b8b5ca4bd94088f6c85ca7225db8ac4/torchvision-0.22.1-cp313-cp313t-win_amd64.whl", hash = "sha256:7414eeacfb941fa21acddcd725f1617da5630ec822e498660a4b864d7d998075", size = 1629892, upload-time = "2025-06-04T17:42:57.156Z" }, + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f64ef9bb91d71ab35d8384912a19f7419e35928685bc67544d58f45148334373" }, + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp312-cp312-win_amd64.whl", hash = "sha256:650561ba326d21021243f5e064133dc62dc64d52f79623db5cd76637a9665f96" }, + { url = 
"https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bc4fef193917b51db6b409acd3ffdec9286d877baac0aee5dcfbb72592d00bfc" }, + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp313-cp313-win_amd64.whl", hash = "sha256:eb784cc75a66f3336a04ff3a992bf74160842132db69e8bdbb58b5ab9422c345" }, + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:02faf51fbf5070592768fa935327d13a484b745faef38b0fee01d85cfb35f5bc" }, + { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp313-cp313t-win_amd64.whl", hash = "sha256:e5320bb2c9f69636f3dc18abc3291fe8c8e448cb9ef0112510a5413a5af3f8f2" }, ] [[package]] @@ -5216,33 +5987,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/41/b1/d7520cc5cb69c825599042eb3a7c986fa9baa8a8d2dea9acd78e152c81e2/transformers-4.53.3-py3-none-any.whl", hash = "sha256:5aba81c92095806b6baf12df35d756cf23b66c356975fb2a7fa9e536138d7c75", size = 10826382, upload-time = "2025-07-22T07:30:48.458Z" }, ] -[[package]] -name = "triton" -version = "3.3.0" -source = { registry = "https://download.pytorch.org/whl/cu128" } -resolution-markers = [ - "python_full_version >= '3.13' and sys_platform == 'darwin'", - "python_full_version < '3.13' and sys_platform == 'darwin'", - "python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux'", - "python_full_version < '3.13' and sys_platform != 'darwin' and sys_platform != 'linux'", -] -dependencies = [ - { name = "setuptools", marker = "sys_platform != 'linux'" }, -] - [[package]] name = "triton" version = "3.3.1" source = { registry = "https://download.pytorch.org/whl/cu128" } -resolution-markers = [ - "python_full_version < '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine != 'aarch64' and platform_machine != 'arm64' and 
sys_platform == 'linux'", - "python_full_version < '3.13' and platform_machine == 'arm64' and sys_platform == 'linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux'", - "python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux'", -] dependencies = [ - { name = "setuptools", marker = "sys_platform == 'linux'" }, + { name = "setuptools" }, ] wheels = [ { url = "https://download.pytorch.org/whl/triton-3.3.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl" }, @@ -5255,16 +6005,16 @@ wheels = [ [[package]] name = "trove-classifiers" -version = "2025.5.9.12" +version = "2025.8.6.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/38/04/1cd43f72c241fedcf0d9a18d0783953ee301eac9e5d9db1df0f0f089d9af/trove_classifiers-2025.5.9.12.tar.gz", hash = "sha256:7ca7c8a7a76e2cd314468c677c69d12cc2357711fcab4a60f87994c1589e5cb5", size = 16940, upload-time = "2025-05-09T12:04:48.829Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/21/707af14daa638b0df15b5d5700349e0abdd3e5140069f9ab6e0ccb922806/trove_classifiers-2025.8.6.13.tar.gz", hash = "sha256:5a0abad839d2ed810f213ab133d555d267124ddea29f1d8a50d6eca12a50ae6e", size = 16932, upload-time = "2025-08-06T13:26:26.479Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/ef/c6deb083748be3bcad6f471b6ae983950c161890bf5ae1b2af80cc56c530/trove_classifiers-2025.5.9.12-py3-none-any.whl", hash = "sha256:e381c05537adac78881c8fa345fd0e9970159f4e4a04fcc42cfd3129cca640ce", size = 14119, upload-time = "2025-05-09T12:04:46.38Z" }, + { url = "https://files.pythonhosted.org/packages/d5/44/323a87d78f04d5329092aada803af3612dd004a64b69ba8b13046601a8c9/trove_classifiers-2025.8.6.13-py3-none-any.whl", hash = "sha256:c4e7fc83012770d80b3ae95816111c32b085716374dccee0d3fbf5c235495f9f", size = 14121, upload-time = "2025-08-06T13:26:25.063Z" }, ] [[package]] name = "typer" -version 
= "0.16.0" +version = "0.16.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, @@ -5272,18 +6022,18 @@ dependencies = [ { name = "shellingham" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c5/8c/7d682431efca5fd290017663ea4588bf6f2c6aad085c7f108c5dbc316e70/typer-0.16.0.tar.gz", hash = "sha256:af377ffaee1dbe37ae9440cb4e8f11686ea5ce4e9bae01b84ae7c63b87f1dd3b", size = 102625, upload-time = "2025-05-26T14:30:31.824Z" } +sdist = { url = "https://files.pythonhosted.org/packages/43/78/d90f616bf5f88f8710ad067c1f8705bf7618059836ca084e5bb2a0855d75/typer-0.16.1.tar.gz", hash = "sha256:d358c65a464a7a90f338e3bb7ff0c74ac081449e53884b12ba658cbd72990614", size = 102836, upload-time = "2025-08-18T19:18:22.898Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/42/3efaf858001d2c2913de7f354563e3a3a2f0decae3efe98427125a8f441e/typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855", size = 46317, upload-time = "2025-05-26T14:30:30.523Z" }, + { url = "https://files.pythonhosted.org/packages/2d/76/06dbe78f39b2203d2a47d5facc5df5102d0561e2807396471b5f7c5a30a1/typer-0.16.1-py3-none-any.whl", hash = "sha256:90ee01cb02d9b8395ae21ee3368421faf21fa138cb2a541ed369c08cec5237c9", size = 46397, upload-time = "2025-08-18T19:18:21.663Z" }, ] [[package]] name = "types-pyyaml" -version = "6.0.12.20250516" +version = "6.0.12.20250809" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4e/22/59e2aeb48ceeee1f7cd4537db9568df80d62bdb44a7f9e743502ea8aab9c/types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba", size = 17378, upload-time = "2025-05-16T03:08:04.897Z" } +sdist = { url = "https://files.pythonhosted.org/packages/36/21/52ffdbddea3c826bc2758d811ccd7f766912de009c5cf096bd5ebba44680/types_pyyaml-6.0.12.20250809.tar.gz", hash = 
"sha256:af4a1aca028f18e75297da2ee0da465f799627370d74073e96fee876524f61b5", size = 17385, upload-time = "2025-08-09T03:14:34.867Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/99/5f/e0af6f7f6a260d9af67e1db4f54d732abad514252a7a378a6c4d17dd1036/types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530", size = 20312, upload-time = "2025-05-16T03:08:04.019Z" }, + { url = "https://files.pythonhosted.org/packages/35/3e/0346d09d6e338401ebf406f12eaf9d0b54b315b86f1ec29e34f1a0aedae9/types_pyyaml-6.0.12.20250809-py3-none-any.whl", hash = "sha256:032b6003b798e7de1a1ddfeefee32fac6486bdfe4845e0ae0e7fb3ee4512b52f", size = 20277, upload-time = "2025-08-09T03:14:34.055Z" }, ] [[package]] @@ -5309,11 +6059,11 @@ wheels = [ [[package]] name = "typing-extensions" -version = "4.14.0" +version = "4.14.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, upload-time = "2025-06-02T14:52:11.399Z" } +sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload-time = "2025-07-04T13:28:34.16Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" }, + { url = 
"https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload-time = "2025-07-04T13:28:32.743Z" }, ] [[package]] @@ -5361,15 +6111,15 @@ wheels = [ [[package]] name = "uvicorn" -version = "0.34.3" +version = "0.35.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/de/ad/713be230bcda622eaa35c28f0d328c3675c371238470abdea52417f17a8e/uvicorn-0.34.3.tar.gz", hash = "sha256:35919a9a979d7a59334b6b10e05d77c1d0d574c50e0fc98b8b1a0f165708b55a", size = 76631, upload-time = "2025-06-01T07:48:17.531Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6d/0d/8adfeaa62945f90d19ddc461c55f4a50c258af7662d34b6a3d5d1f8646f6/uvicorn-0.34.3-py3-none-any.whl", hash = "sha256:16246631db62bdfbf069b0645177d6e8a77ba950cfedbfd093acef9444e4d885", size = 62431, upload-time = "2025-06-01T07:48:15.664Z" }, + { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, ] [package.optional-dependencies] @@ -5405,16 +6155,16 @@ wheels = [ [[package]] name = "virtualenv" -version = "20.31.2" +version = "20.34.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib" }, { name = "filelock" }, { name = "platformdirs" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/56/2c/444f465fb2c65f40c3a104fd0c495184c4f2336d65baf398e3c75d72ea94/virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af", size = 6076316, upload-time = "2025-05-08T17:58:23.811Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/14/37fcdba2808a6c615681cd216fecae00413c9dab44fb2e57805ecf3eaee3/virtualenv-20.34.0.tar.gz", hash = "sha256:44815b2c9dee7ed86e387b842a84f20b93f7f417f95886ca1996a72a4138eb1a", size = 6003808, upload-time = "2025-08-13T14:24:07.464Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f3/40/b1c265d4b2b62b58576588510fc4d1fe60a86319c8de99fd8e9fec617d2c/virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11", size = 6057982, upload-time = "2025-05-08T17:58:21.15Z" }, + { url = "https://files.pythonhosted.org/packages/76/06/04c8e804f813cf972e3262f3f8584c232de64f0cde9f703b46cf53a45090/virtualenv-20.34.0-py3-none-any.whl", hash = "sha256:341f5afa7eee943e4984a9207c025feedd768baff6753cd660c857ceb3e36026", size = 5983279, upload-time = "2025-08-13T14:24:05.111Z" }, ] [[package]] @@ -5469,7 +6219,8 @@ dependencies = [ { name = "tokenizers" }, { name = "torch" }, { name = "torchaudio" }, - { name = "torchvision" }, + { name = "torchvision", version = "0.22.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "torchvision", version = "0.22.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine != 'aarch64' or sys_platform != 'linux'" }, { name = "tqdm" }, { name = "transformers" }, { name = "typing-extensions" }, @@ -5493,7 +6244,7 @@ wheels = [ [[package]] name = "wandb" -version = "0.20.1" +version = "0.21.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, @@ -5501,26 +6252,23 @@ 
dependencies = [ { name = "packaging" }, { name = "platformdirs" }, { name = "protobuf" }, - { name = "psutil" }, { name = "pydantic" }, { name = "pyyaml" }, { name = "requests" }, { name = "sentry-sdk" }, - { name = "setproctitle" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/62/1f/92be0ca87fb49eb48c16dcf0845a3579a57c4734fec2b95862cf5a0494a0/wandb-0.20.1.tar.gz", hash = "sha256:dbd3fc60dfe7bf83c4de24b206b99b44949fef323f817a783883db72fc5f3bfe", size = 40320062, upload-time = "2025-06-05T00:00:24.483Z" } +sdist = { url = "https://files.pythonhosted.org/packages/26/69/217598886af89350e36bc05c092a67c9c469cff1fd6446edd4c879027e36/wandb-0.21.1.tar.gz", hash = "sha256:753bbdaa3a7703344056e019425b39c17a3d31d8ca0c4d13c4efc046935b08b9", size = 40131395, upload-time = "2025-08-07T18:52:48.85Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/18/afcc37d0b93dd6f6d0f0c5683b9cfff9416ae1539931f58932a2938c0070/wandb-0.20.1-py3-none-any.whl", hash = "sha256:e6395cabf074247042be1cf0dc6ab0b06aa4c9538c2e1fdc5b507a690ce0cf17", size = 6458872, upload-time = "2025-06-04T23:59:55.441Z" }, - { url = "https://files.pythonhosted.org/packages/e6/b5/70f9e2a3d1380b729ae5853763d938edc50072df357f79bbd19b9aae8e3f/wandb-0.20.1-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:2475a48c693adf677d40da9e1c8ceeaf86d745ffc3b7e3535731279d02f9e845", size = 22517483, upload-time = "2025-06-04T23:59:58.687Z" }, - { url = "https://files.pythonhosted.org/packages/cc/7e/4eb9aeb2fd974d410a8f2eb11b0219536503913a050d46a03206151705c8/wandb-0.20.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:99cce804c31ec1e0d1e691650a7d51773ed7329c41745d56384fa3655a0e9b2c", size = 22034511, upload-time = "2025-06-05T00:00:01.301Z" }, - { url = "https://files.pythonhosted.org/packages/34/38/1df22c2273e6f7ab0aae4fd032085d6d92ab112f5b261646e7dc5e675cfe/wandb-0.20.1-py3-none-macosx_11_0_x86_64.whl", hash = 
"sha256:ce3ee412677a1679e04b21e03a91e1e02eb90faf658d682bee86c33cf5f32e09", size = 22720771, upload-time = "2025-06-05T00:00:04.122Z" }, - { url = "https://files.pythonhosted.org/packages/38/96/78fc7a7ea7158d136c84f481423f8736c9346a2387287ec8a6d92019975c/wandb-0.20.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e58ca32c7147161158f09b0fb5f5896876f8569d0d10ae7b64d0510c868ce33", size = 21537453, upload-time = "2025-06-05T00:00:09.474Z" }, - { url = "https://files.pythonhosted.org/packages/88/c9/41b8bdb493e5eda32b502bc1cc49d539335a92cacaf0ef304d7dae0240aa/wandb-0.20.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:591506ecbdd396648cc323ba270f3ab4aed3158e1dbfa7636c09f9f7f0253e1c", size = 23161349, upload-time = "2025-06-05T00:00:11.903Z" }, - { url = "https://files.pythonhosted.org/packages/7d/f2/79e783cc50a47d373dfbda862eb5396de8139167e8c6443a16ef0166106f/wandb-0.20.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:382508532db09893f81cc926b1d333caa4c8a7db057878899fadf929bbdb3b56", size = 21550624, upload-time = "2025-06-05T00:00:14.28Z" }, - { url = "https://files.pythonhosted.org/packages/26/32/23890a726302e7be28bda9fff47ce9b491af64e339aba4d32b3b8d1a7aaf/wandb-0.20.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:29ea495e49393db860f17437fe37e48018da90436ce10949b471780f09293bd7", size = 23237996, upload-time = "2025-06-05T00:00:16.647Z" }, - { url = "https://files.pythonhosted.org/packages/af/94/296e520b086b2a4f10e99bcea3cd5856421b9c004824663501e3789a713b/wandb-0.20.1-py3-none-win32.whl", hash = "sha256:455ee0a652e59ab1e4b546fa1dc833dd3063aa7e64eb8abf95d22f0e9f08c574", size = 22518456, upload-time = "2025-06-05T00:00:19.006Z" }, - { url = "https://files.pythonhosted.org/packages/52/5f/c44ad7b2a062ca5f4da99ae475cea274c38f6ec37bdaca1b1c653ee87274/wandb-0.20.1-py3-none-win_amd64.whl", hash = "sha256:6d2431652f096b7e394c29a99135a6441c02ed3198b963f0b351a5b5e56aeca0", size = 22518459, upload-time = 
"2025-06-05T00:00:21.374Z" }, + { url = "https://files.pythonhosted.org/packages/65/d0/589f970741f3ead9ad28d4cbb668d1e6a39848df767f004ac9c7bed8f4b5/wandb-0.21.1-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:96f9eedeae428de0d88f9751fb81f1b730ae7902f35c2f5a7a904d7733f124f3", size = 21701698, upload-time = "2025-08-07T18:52:22.399Z" }, + { url = "https://files.pythonhosted.org/packages/41/6c/a6140a0f395a99902aafdfe63088b7aff509e4f14cd7dd084d47eab36f27/wandb-0.21.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:41a1ec1b98d9d7e1bcafc483bce82e184b6cbae7531328a0fe8dd0f56d96a92e", size = 21221046, upload-time = "2025-08-07T18:52:26.134Z" }, + { url = "https://files.pythonhosted.org/packages/e9/d8/dacbb30ed35141d48a387d84f2e792d4b61b5bcdbf5ffdbd3f0b57beb346/wandb-0.21.1-py3-none-macosx_11_0_x86_64.whl", hash = "sha256:f74d4691c38318ed8611e00ca3246b4152a03ff390fdce41816bea5705452a73", size = 21885803, upload-time = "2025-08-07T18:52:28.489Z" }, + { url = "https://files.pythonhosted.org/packages/b0/48/3a7290a33b1f64e29ac8779dab4d4cdef31a9ed3c3d9ea656a4507d64332/wandb-0.21.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8fbd60b9abf4b9bec201f311602f61394d41a3503c801750b03975a5e36d1b", size = 20825318, upload-time = "2025-08-07T18:52:31.282Z" }, + { url = "https://files.pythonhosted.org/packages/a9/54/c0a087114ff1bb6c32e64aaa58aea4342cebc0ad58b1378c0a5a831d2508/wandb-0.21.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ded9313672630c0630f5b13c598ce9aa0e932e811ebc18823fcc4d73acfb6bb", size = 22362500, upload-time = "2025-08-07T18:52:33.889Z" }, + { url = "https://files.pythonhosted.org/packages/65/68/3aae277ea9fb5d91eec066cf256755bed3a740d92b539888a7ce36cf3f6c/wandb-0.21.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:44f3194d697b409f91708c50c5f9d56e282434a0d60ac380b64f0fb6991cd630", size = 20830372, upload-time = "2025-08-07T18:52:36.76Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/bb/58d206e79be1f279ef06cb934ae1e208bcacd2cd73b7a7652236575010d6/wandb-0.21.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e0b68bb6dbe94f1910c665c755f438292df40c272feb1a8b42208c1df52cce26", size = 22438521, upload-time = "2025-08-07T18:52:39.672Z" }, + { url = "https://files.pythonhosted.org/packages/e7/b8/dfe01f8e4c40d5dda820fd839c39431608a3453670f79404fa28915972d2/wandb-0.21.1-py3-none-win32.whl", hash = "sha256:98306c3fb369dfafb7194270b938b000ea2bb08dbddff10c19b5a805fd5cab80", size = 21569814, upload-time = "2025-08-07T18:52:42.58Z" }, + { url = "https://files.pythonhosted.org/packages/51/ba/81c77d5d831fcddb89661c85175fcbb91d2ffecf6b0591972829da3eb42f/wandb-0.21.1-py3-none-win_amd64.whl", hash = "sha256:8be92a7e92b5cb5ce00ec0961f9dbaad7757ffdbc5b5a8f2cc7188e23f653f0a", size = 21569817, upload-time = "2025-08-07T18:52:45.559Z" }, ] [[package]] @@ -5664,44 +6412,51 @@ sdist = { url = "https://files.pythonhosted.org/packages/47/6a/62e288da7bcda82b9 [[package]] name = "wrapt" -version = "1.17.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531, upload-time = "2025-01-14T10:35:45.465Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799, upload-time = "2025-01-14T10:33:57.4Z" }, - { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821, upload-time = 
"2025-01-14T10:33:59.334Z" }, - { url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919, upload-time = "2025-01-14T10:34:04.093Z" }, - { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721, upload-time = "2025-01-14T10:34:07.163Z" }, - { url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899, upload-time = "2025-01-14T10:34:09.82Z" }, - { url = "https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222, upload-time = "2025-01-14T10:34:11.258Z" }, - { url = "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707, upload-time = "2025-01-14T10:34:12.49Z" }, - { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685, upload-time = 
"2025-01-14T10:34:15.043Z" }, - { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567, upload-time = "2025-01-14T10:34:16.563Z" }, - { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672, upload-time = "2025-01-14T10:34:17.727Z" }, - { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865, upload-time = "2025-01-14T10:34:19.577Z" }, - { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800, upload-time = "2025-01-14T10:34:21.571Z" }, - { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824, upload-time = "2025-01-14T10:34:22.999Z" }, - { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920, upload-time = "2025-01-14T10:34:25.386Z" }, - { url = 
"https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690, upload-time = "2025-01-14T10:34:28.058Z" }, - { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861, upload-time = "2025-01-14T10:34:29.167Z" }, - { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174, upload-time = "2025-01-14T10:34:31.702Z" }, - { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721, upload-time = "2025-01-14T10:34:32.91Z" }, - { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763, upload-time = "2025-01-14T10:34:34.903Z" }, - { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585, upload-time = "2025-01-14T10:34:36.13Z" }, - { url = 
"https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676, upload-time = "2025-01-14T10:34:37.962Z" }, - { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871, upload-time = "2025-01-14T10:34:39.13Z" }, - { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312, upload-time = "2025-01-14T10:34:40.604Z" }, - { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062, upload-time = "2025-01-14T10:34:45.011Z" }, - { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155, upload-time = "2025-01-14T10:34:47.25Z" }, - { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471, upload-time = "2025-01-14T10:34:50.934Z" }, - { url = 
"https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208, upload-time = "2025-01-14T10:34:52.297Z" }, - { url = "https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339, upload-time = "2025-01-14T10:34:53.489Z" }, - { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232, upload-time = "2025-01-14T10:34:55.327Z" }, - { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476, upload-time = "2025-01-14T10:34:58.055Z" }, - { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377, upload-time = "2025-01-14T10:34:59.3Z" }, - { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986, upload-time = "2025-01-14T10:35:00.498Z" }, - { url = 
"https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750, upload-time = "2025-01-14T10:35:03.378Z" }, - { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594, upload-time = "2025-01-14T10:35:44.018Z" }, +version = "1.17.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/41/cad1aba93e752f1f9268c77270da3c469883d56e2798e7df6240dcb2287b/wrapt-1.17.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0", size = 53998, upload-time = "2025-08-12T05:51:47.138Z" }, + { url = "https://files.pythonhosted.org/packages/60/f8/096a7cc13097a1869fe44efe68dace40d2a16ecb853141394047f0780b96/wrapt-1.17.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba", size = 39020, upload-time = "2025-08-12T05:51:35.906Z" }, + { url = "https://files.pythonhosted.org/packages/33/df/bdf864b8997aab4febb96a9ae5c124f700a5abd9b5e13d2a3214ec4be705/wrapt-1.17.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd", size = 39098, upload-time = "2025-08-12T05:51:57.474Z" }, + { url = 
"https://files.pythonhosted.org/packages/9f/81/5d931d78d0eb732b95dc3ddaeeb71c8bb572fb01356e9133916cd729ecdd/wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828", size = 88036, upload-time = "2025-08-12T05:52:34.784Z" }, + { url = "https://files.pythonhosted.org/packages/ca/38/2e1785df03b3d72d34fc6252d91d9d12dc27a5c89caef3335a1bbb8908ca/wrapt-1.17.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9", size = 88156, upload-time = "2025-08-12T05:52:13.599Z" }, + { url = "https://files.pythonhosted.org/packages/b3/8b/48cdb60fe0603e34e05cffda0b2a4adab81fd43718e11111a4b0100fd7c1/wrapt-1.17.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396", size = 87102, upload-time = "2025-08-12T05:52:14.56Z" }, + { url = "https://files.pythonhosted.org/packages/3c/51/d81abca783b58f40a154f1b2c56db1d2d9e0d04fa2d4224e357529f57a57/wrapt-1.17.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc", size = 87732, upload-time = "2025-08-12T05:52:36.165Z" }, + { url = "https://files.pythonhosted.org/packages/9e/b1/43b286ca1392a006d5336412d41663eeef1ad57485f3e52c767376ba7e5a/wrapt-1.17.3-cp312-cp312-win32.whl", hash = "sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe", size = 36705, upload-time = "2025-08-12T05:53:07.123Z" }, + { url = "https://files.pythonhosted.org/packages/28/de/49493f962bd3c586ab4b88066e967aa2e0703d6ef2c43aa28cb83bf7b507/wrapt-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c", size = 38877, upload-time = "2025-08-12T05:53:05.436Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/48/0f7102fe9cb1e8a5a77f80d4f0956d62d97034bbe88d33e94699f99d181d/wrapt-1.17.3-cp312-cp312-win_arm64.whl", hash = "sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6", size = 36885, upload-time = "2025-08-12T05:52:54.367Z" }, + { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" }, + { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" }, + { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" }, + { url = "https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" }, + { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = "2025-08-12T05:53:10.074Z" }, + { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" }, + { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" }, + { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" }, + { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" }, + { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" }, + { url = "https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" }, + { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" }, + { url = "https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = 
"sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" }, + { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" }, + { url = "https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" }, + { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" }, + { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" }, + { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = 
"2025-08-12T05:52:44.521Z" }, + { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" }, + { url = "https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" }, + { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" }, + { url = "https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" }, + { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" }, + { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, ] [[package]] @@ -5709,8 +6464,8 @@ name = "xformers" version = "0.0.31" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, - { name = "torch", marker = "(python_full_version >= '3.13' and platform_machine == 'arm64' and sys_platform == 'linux') or (platform_machine != 'aarch64' and platform_machine != 'arm64' and sys_platform == 'linux')" }, + { name = "numpy", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, + { name = "torch", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/33/35/91c172a57681e1c03de5ad1ca654dc87c282279b941052ed04e616ae5bcd/xformers-0.0.31.tar.gz", hash = "sha256:3fccb159c6327c13fc1b08f8b963c2779ca526e2e50755dee9bcc1bac67d20c6", size = 12102740, upload-time = "2025-06-25T15:12:10.241Z" } wheels = [ @@ -5727,7 +6482,7 @@ dependencies = [ { name = "pydantic" }, { name = "torch" }, { name = "transformers" }, - { name = "triton", version = "3.3.1", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e3/52/ea664a56674f21c401b45f124c207a16ca4b2318364687172edbcf255375/xgrammar-0.1.21.tar.gz", hash = "sha256:2ce1e81417ff46aa7ef26d8c0627275cb20dd1f2e8ead5bb261aecde1cc8ba57", size = 2242013, upload-time = 
"2025-07-10T19:34:14.336Z" } wheels = [ @@ -5846,7 +6601,7 @@ wheels = [ [[package]] name = "zarr" -version = "3.0.8" +version = "3.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "donfig" }, @@ -5855,9 +6610,9 @@ dependencies = [ { name = "packaging" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/52/60/9652fd0536fbaca8d08cbc1a5572c52e0ce01773297df75da8bb47e45907/zarr-3.0.8.tar.gz", hash = "sha256:88505d095af899a88ae8ac4db02f4650ef0801d2ff6f65b6d1f0a45dcf760a6d", size = 256825, upload-time = "2025-05-19T14:19:00.123Z" } +sdist = { url = "https://files.pythonhosted.org/packages/15/a9/29fe1800380092ae03ac6207d757f3e5affaf1fcd2e5ef074cf4fc68f0fa/zarr-3.1.1.tar.gz", hash = "sha256:17db72f37f2489452d2137ac891c4133b8f976f9189d8efd3e75f3b3add84e8c", size = 314075, upload-time = "2025-07-30T11:51:36.81Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/00/3b/e20bdf84088c11f2c396d034506cbffadd53e024111c1aa4585c2aba1523/zarr-3.0.8-py3-none-any.whl", hash = "sha256:7f81e7aec086437d98882aa432209107114bd7f3a9f4958b2af9c6b5928a70a7", size = 205364, upload-time = "2025-05-19T14:18:58.789Z" }, + { url = "https://files.pythonhosted.org/packages/c8/48/bde2f58cfbc9fd6ab844e2f2fd79d5e54195c12a17aa9b47c0b0e701a421/zarr-3.1.1-py3-none-any.whl", hash = "sha256:9a0b7e7c27bf62965b8eef6b8b8fdb9b47381f0738be35e40f37be6479b546be", size = 255373, upload-time = "2025-07-30T11:51:34.623Z" }, ] [[package]] From 2449bba4495428b532240704ea5426f3fa68f0a4 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 12:25:39 -0700 Subject: [PATCH 33/47] squash unsigned commits resolving previous feedback Signed-off-by: Julien Veron Vialard --- docs/guides/dpo.md | 12 ++- docs/guides/rm.md | 10 +- examples/configs/dpo.yaml | 15 ++- examples/configs/rm.yaml | 11 ++ examples/run_dpo.py | 48 +++++---- examples/run_rm.py | 51 +++++---- nemo_rl/algorithms/dpo.py | 56 +++++++--- 
nemo_rl/algorithms/rm.py | 101 +++++++++++++----- nemo_rl/data/__init__.py | 3 + nemo_rl/data/datasets.py | 17 +-- nemo_rl/data/hf_datasets/dpo.py | 40 +++++-- nemo_rl/data/hf_datasets/helpsteer3.py | 10 +- .../data/hf_datasets/preference_dataset.py | 13 ++- pyrefly.toml | 1 + .../unit/data/hf_datasets/test_dpo_dataset.py | 11 +- tests/unit/data/hf_datasets/test_helpsteer.py | 50 ++++++--- .../hf_datasets/test_preference_dataset.py | 38 +++---- tests/unit/data/test_datasets.py | 71 ++++++++---- 18 files changed, 376 insertions(+), 182 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index 2906d944b8..3f8554bb3e 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -46,7 +46,7 @@ Each DPO dataset class is expected to have the following attributes: ``` 2. `task_spec`: The `TaskDataSpec` for this dataset. This should specify the name you choose for this dataset. -Currently, DPO training supports only two completions (where the lowest rank is preferred and the highest one is rejected), with each completion being a single response. For example: +DPO training supports only two completions (where the lowest rank is preferred and the highest one is rejected), with each completion being a single response. For example: ``` { "context": [ @@ -86,7 +86,7 @@ Currently, DPO training supports only two completions (where the lowest rank is } ``` -NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. +NeMo RL provides a DPO-compatible implementation of the [HelpSteer3](https://github.com/NVIDIA-NeMo/RL/blob/main/nemo_rl/data/hf_datasets/helpsteer3.py) dataset as an example. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. 
We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows to use such a custom preference dataset: ``` @@ -101,10 +101,12 @@ data: dataset_name: PreferenceDataset train_data_path: val_data_paths: - : - : + : + : ``` -If you are using a logger, the prefix used for each validation set will be `validation-`. +If using multiple validation sets, please note: +- If you are using a logger, the prefix used for each validation set will be `val-`. +- If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `val-_loss`. The older [DPODataset](../../nemo_rl/data/hf_datasets/dpo.py) class is deprecated. This class is also compatible with JSONL-formatted preference datsets. It assumes train and validation datasets have been split and processed into the expected format offline. The JSONL files should consist of examples with `prompt`, `chosen_response`, and `rejected_response` keys. diff --git a/docs/guides/rm.md b/docs/guides/rm.md index c256b891b7..d001dabaf7 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -75,7 +75,7 @@ Currently, RM training supports only two completions (where the lowest rank is p } ``` -NeMo RL supports the `HelpSteer3` dataset. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. +NeMo RL provides a RM-compatible implementation of the [HelpSteer3](https://github.com/NVIDIA-NeMo/RL/blob/main/nemo_rl/data/hf_datasets/helpsteer3.py) dataset as an example. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. 
We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows to use such a custom preference dataset: ``` @@ -90,7 +90,9 @@ data: dataset_name: PreferenceDataset train_data_path: val_data_paths: - : - : + : + : ``` -If you are using a logger, the prefix used for each validation set will be `validation-`. \ No newline at end of file +If using multiple validation sets, please note: +- If you are using a logger, the prefix used for each validation set will be `val-`. +- If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `val-_loss`. \ No newline at end of file diff --git a/examples/configs/dpo.yaml b/examples/configs/dpo.yaml index 11a2ed1acf..44f11eae8d 100755 --- a/examples/configs/dpo.yaml +++ b/examples/configs/dpo.yaml @@ -22,6 +22,17 @@ dpo: checkpointing: enabled: true checkpoint_dir: "results/dpo" + + # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. For example: + # checkpointing: + # metric_name: "val-_loss" + # ... + # data: + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_paths: + # : + # ... 
metric_name: "val_loss" higher_is_better: false keep_top_k: 3 @@ -45,8 +56,6 @@ policy: precision: "bfloat16" dtensor_cfg: - env_vars: - PYTORCH_CUDA_ALLOC_CONF: "" # Refers to https://docs.pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf enabled: true cpu_offload: False sequence_parallel: false @@ -162,11 +171,9 @@ data: logger: log_dir: "logs" # Base directory for all logs wandb_enabled: false # Make sure you do a ``wandb login [Your API key]'' before running - tensorboard_enabled: false mlflow_enabled: false # Disable MLflow logging monitor_gpus: true # If true, will monitor GPU usage and log to wandb and/or tensorboard - num_val_samples_to_print: 0 # Number of validation samples to pretty print on terminal wandb: project: "dpo-dev" name: "dpo" diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index 6ff1f4b0ac..587ec6da88 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -15,6 +15,17 @@ rm: checkpointing: enabled: true checkpoint_dir: "results/rm" + + # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. For example: + # checkpointing: + # metric_name: "val-_loss" + # ... + # data: + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_paths: + # : + # ... 
metric_name: "val_loss" higher_is_better: false keep_top_k: 3 diff --git a/examples/run_dpo.py b/examples/run_dpo.py index 69fa38a3a9..6a69e1a0a6 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -207,38 +207,43 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): max_seq_length=data_config["max_input_seq_length"], ) - val_dataset = { - "validation": AllTaskProcessedDataset( - val_dataset, - tokenizer, - dpo_task_spec, - dpo_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) - } if val_dataset else {} + val_dataset = ( + { + "validation": AllTaskProcessedDataset( + val_dataset, + tokenizer, + dpo_task_spec, + dpo_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + } + if val_dataset + else {} + ) if data_cls == "PreferenceDataset": if data_config.get("val_data_path"): - assert data_config.get("val_data_paths") is None, "val_data_path and val_data_paths cannot be used together" - val_data_paths = [{"validation": data_config.get("val_data_path")}] + assert data_config.get("val_data_paths") is None, ( + "val_data_path and val_data_paths cannot be used together" + ) + val_data_paths = {"validation": data_config.get("val_data_path")} elif data_config.get("val_data_paths"): - assert isinstance(data_config["val_data_paths"], list), f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + assert isinstance(data_config["val_data_paths"], dict), ( + f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + ) val_data_paths = data_config.get("val_data_paths") else: raise ValueError("Either val_data_path or val_data_paths must be provided") - for d in val_data_paths: - assert len(d) == 1, "val_data_paths must be a list of pairs." 
- val_dataset_name = list(d.keys())[0] - val_dataset_path = list(d.values())[0] - assert val_dataset_name not in val_dataset or val_dataset_name == "validation" # Users can override the default "validation" set - if val_dataset_name == "validation" and "validation" in val_dataset: - print(f" āœ“ Overriding the default validation dataset") - val_data = hf_datasets.PreferenceDataset(val_dataset_path, split="validation") + for val_dataset_name, val_dataset_path in val_data_paths.items(): + assert val_dataset_name not in val_dataset + val_data = hf_datasets.PreferenceDataset( + val_dataset_path, split="validation" + ) print( - f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["validation"])} samples." + f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds['validation'])} samples." ) val_dataset[val_dataset_name] = AllTaskProcessedDataset( val_data.formatted_ds["validation"], @@ -250,6 +255,7 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): return train_dataset, val_dataset, tokenizer, dpo_task_spec + def main(): """Main entry point.""" args, overrides = parse_args() diff --git a/examples/run_rm.py b/examples/run_rm.py index c872579a15..1f0c7d00c5 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -56,7 +56,9 @@ def rm_preprocessor( idx: int, ) -> DatumSpec: """Process a datum dictionary for RM training.""" - assert len(datum_dict["completions"]) == 2 # Currently only supporting 2 completions + assert ( + len(datum_dict["completions"]) == 2 + ) # Currently only supporting 2 completions # Lower rank is preferred if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: chosen_completion = datum_dict["completions"][0] @@ -145,38 +147,43 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): max_seq_length=data_config["max_input_seq_length"], ) - val_dataset = { - "validation": AllTaskProcessedDataset( - val_dataset, - tokenizer, - 
rm_task_spec, - rm_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) - } if val_dataset else {} + val_dataset = ( + { + "validation": AllTaskProcessedDataset( + val_dataset, + tokenizer, + rm_task_spec, + rm_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + } + if val_dataset + else {} + ) if data_cls == "PreferenceDataset": if data_config.get("val_data_path"): - assert data_config.get("val_data_paths") is None, "val_data_path and val_data_paths cannot be used together" - val_data_paths = [{"validation": data_config.get("val_data_path")}] + assert data_config.get("val_data_paths") is None, ( + "val_data_path and val_data_paths cannot be used together" + ) + val_data_paths = {"validation": data_config.get("val_data_path")} elif data_config.get("val_data_paths"): - assert isinstance(data_config["val_data_paths"], list), f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + assert isinstance(data_config["val_data_paths"], dict), ( + f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + ) val_data_paths = data_config.get("val_data_paths") else: raise ValueError("Either val_data_path or val_data_paths must be provided") - for d in val_data_paths: - assert len(d) == 1, "val_data_paths must be a list of pairs." 
- val_dataset_name = list(d.keys())[0] - val_dataset_path = list(d.values())[0] - assert val_dataset_name not in val_dataset or val_dataset_name == "validation" # Users can override the default "validation" set - if val_dataset_name == "validation" and "validation" in val_dataset: - print(f" āœ“ Overriding the default validation dataset") - val_data = hf_datasets.PreferenceDataset(val_dataset_path, split="validation") + for val_dataset_name, val_dataset_path in val_data_paths.items(): + assert val_dataset_name not in val_dataset + val_data = hf_datasets.PreferenceDataset( + val_dataset_path, split="validation" + ) print( - f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds["validation"])} samples." + f" āœ“ Validation dataset '{val_dataset_name}' loaded with {len(val_data.formatted_ds['validation'])} samples." ) val_dataset[val_dataset_name] = AllTaskProcessedDataset( val_data.formatted_ds["validation"], diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index c00185578f..9028c3cd52 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -93,12 +93,12 @@ def setup( master_config: MasterConfig, tokenizer: AutoTokenizer, train_dataset: AllTaskProcessedDataset, - val_dataset: AllTaskProcessedDataset, + val_dataset: dict[str, AllTaskProcessedDataset], ) -> tuple[ Policy, RayVirtualCluster, StatefulDataLoader, - StatefulDataLoader, + dict[str, StatefulDataLoader], DPOLossFn, Logger, CheckpointManager, @@ -159,6 +159,7 @@ def setup( make_sequence_length_divisible_by=policy_config[ "make_sequence_length_divisible_by" ], + add_loss_mask=True, ), drop_last=True, ) @@ -183,9 +184,11 @@ def setup( make_sequence_length_divisible_by=policy_config[ "make_sequence_length_divisible_by" ], + add_loss_mask=True, ), drop_last=True, - ) for k, v in val_dataset.items() + ) + for k, v in val_dataset.items() } # ========================== @@ -239,7 +242,7 @@ def setup( ) -def add_ref_logprobs_to_data(dataloader, policy, 
master_config, tokenizer, is_val=False): +def add_ref_logprobs_to_data(dataloader, policy, master_config, is_val=False): dataloader_iter = iter(dataloader) while True: try: @@ -271,7 +274,7 @@ def add_ref_logprobs_to_data(dataloader, policy, master_config, tokenizer, is_va # ======================================================= def validate( policy: PolicyInterface, - val_dataloader: StatefulDataLoader | dict[str, StatefulDataLoader], + val_dataloader: dict[str, StatefulDataLoader], tokenizer, loss_fn, step: int, @@ -283,18 +286,31 @@ def validate( ): val_metrics, validation_timings = {}, {} for k, v in val_dataloader.items(): - k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs, k) + k_val_metrics, k_validation_timings = validate_one_dataset( + policy, + v, + tokenizer, + loss_fn, + step, + master_config, + val_batches, + val_batch_size, + val_mbs, + k, + ) if k == "validation": prefix = "val" else: - prefix = f"{k}-val" + prefix = f"val-{k}" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") - val_metrics[prefix+"_loss"] = k_val_metrics["val_loss"] - val_metrics[prefix+"_accuracy"] = k_val_metrics["accuracy"] - validation_timings[prefix+"_total_validation_time"] = k_validation_timings["total_validation_time"] + val_metrics[prefix + "_loss"] = k_val_metrics["val_loss"] + val_metrics[prefix + "_accuracy"] = k_val_metrics["accuracy"] + validation_timings[prefix + "_total_val_time"] = k_validation_timings[ + "total_val_time" + ] return val_metrics, validation_timings @@ -318,13 +334,13 @@ def validate_one_dataset( timer = Timer() - with timer.time("total_validation_time"): + with timer.time("total_val_time"): print(f"ā–¶ Starting validation at step {step}...") val_metrics = defaultdict(lambda: 0.0) num_valid_batches = 0 for batch_idx, val_batch in enumerate( - 
add_ref_logprobs_to_data(val_dataloader, policy, master_config, tokenizer, is_val=True) + add_ref_logprobs_to_data(val_dataloader, policy, master_config, is_val=True) ): ## just run model fwd val_results = policy.train( @@ -362,7 +378,7 @@ def validate_one_dataset( # Get timing metrics timing_metrics = timer.get_timing_metrics(reduction_op="sum") - validation_time = timing_metrics.get("total_validation_time", 0) + validation_time = timing_metrics.get("total_val_time", 0) if len(val_metrics) == 0: warnings.warn( @@ -378,7 +394,7 @@ def validate_one_dataset( # Print timing information print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") - validation_time = timing_metrics.get("total_validation_time", 0) + validation_time = timing_metrics.get("total_val_time", 0) print(f" • Total validation time: {validation_time:.2f}s") # Make sure to reset the timer after validation @@ -450,7 +466,7 @@ def dpo_train( ): print(f"\n{'=' * 25} Epoch {current_epoch + 1}/{max_num_epochs} {'=' * 25}") - for batch in add_ref_logprobs_to_data(train_dataloader, policy, master_config, tokenizer): + for batch in add_ref_logprobs_to_data(train_dataloader, policy, master_config): print( f"\n{'=' * 25} Step {current_step + 1}/{min(len(train_dataloader), master_config['dpo']['max_num_steps'])} {'=' * 25}" ) @@ -517,10 +533,16 @@ def dpo_train( dpo_save_state["step"] = (current_step + 1) % len(train_dataloader) dpo_save_state["total_steps"] = total_steps + 1 dpo_save_state["epoch"] = current_epoch + # Remove outdated validation metrics + for key in list(dpo_save_state): + if ( + key.startswith("val") + and (key.endswith("_loss") or key.endswith("_accuracy")) + and (val_metrics is None or key not in val_metrics) + ): + del dpo_save_state[key] if val_metrics is not None: dpo_save_state.update(val_metrics) - elif "val_loss" in dpo_save_state: - del dpo_save_state["val_loss"] if master_config["checkpointing"]["metric_name"] is not None: if ( diff --git a/nemo_rl/algorithms/rm.py 
b/nemo_rl/algorithms/rm.py index 5a7610873b..33c874c10e 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -11,12 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from collections import defaultdict import os import warnings +from collections import defaultdict +from functools import partial from pathlib import Path from typing import Optional, TypedDict -from functools import partial import numpy as np import torch @@ -95,12 +95,12 @@ def setup( master_config: MasterConfig, tokenizer: AutoTokenizer, train_dataset: AllTaskProcessedDataset, - val_dataset: AllTaskProcessedDataset | dict[str, AllTaskProcessedDataset], + val_dataset: dict[str, AllTaskProcessedDataset], ) -> tuple[ Policy, RayVirtualCluster, StatefulDataLoader, - StatefulDataLoader | dict[str, StatefulDataLoader], + dict[str, StatefulDataLoader], PreferenceLoss, MasterConfig, Logger, @@ -149,6 +149,7 @@ def setup( make_sequence_length_divisible_by=policy_config[ "make_sequence_length_divisible_by" ], + add_loss_mask=False, ), drop_last=True, ) @@ -173,9 +174,11 @@ def setup( make_sequence_length_divisible_by=policy_config[ "make_sequence_length_divisible_by" ], + add_loss_mask=False, ), drop_last=True, - ) for k, v in val_dataset.items() + ) + for k, v in val_dataset.items() } # ========================== @@ -234,7 +237,7 @@ def setup( # ======================================================= def validate( policy: PolicyInterface, - val_dataloader: StatefulDataLoader | dict[str, StatefulDataLoader], + val_dataloader: dict[str, StatefulDataLoader], tokenizer, loss_fn, step: int, @@ -246,18 +249,31 @@ def validate( ): val_metrics, validation_timings = {}, {} for k, v in val_dataloader.items(): - k_val_metrics, k_validation_timings = validate_one_dataset(policy, v, tokenizer, loss_fn, step, master_config, val_batches, val_batch_size, val_mbs, k) + 
k_val_metrics, k_validation_timings = validate_one_dataset( + policy, + v, + tokenizer, + loss_fn, + step, + master_config, + val_batches, + val_batch_size, + val_mbs, + k, + ) if k == "validation": prefix = "val" else: - prefix = f"{k}-val" + prefix = f"val-{k}" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") - val_metrics[prefix+"_loss"] = k_val_metrics["val_loss"] - val_metrics[prefix+"_accuracy"] = k_val_metrics["accuracy"] - validation_timings[prefix+"_total_validation_time"] = k_validation_timings["total_validation_time"] + val_metrics[prefix + "_loss"] = k_val_metrics["val_loss"] + val_metrics[prefix + "_accuracy"] = k_val_metrics["accuracy"] + validation_timings[prefix + "_total_val_time"] = k_validation_timings[ + "total_val_time" + ] return val_metrics, validation_timings @@ -281,7 +297,7 @@ def validate_one_dataset( timer = Timer() - with timer.time("total_validation_time"): + with timer.time("total_val_time"): print(f"ā–¶ Starting validation at step {step}...") # Show a progress indicator for validation @@ -310,12 +326,22 @@ def validate_one_dataset( " This is likely because there were no valid samples." 
) else: - sum_num_valid_samples = sum(val_results["all_mb_metrics"]["num_valid_samples"]) - for k in ["loss", "accuracy", "rewards_chosen_mean", "rewards_rejected_mean"]: + sum_num_valid_samples = sum( + val_results["all_mb_metrics"]["num_valid_samples"] + ) + for k in [ + "loss", + "accuracy", + "rewards_chosen_mean", + "rewards_rejected_mean", + ]: dict_val_metrics[k if k != "loss" else "val_loss"] += [ - value * sum_num_valid_samples for value in val_results["all_mb_metrics"][k] + value * sum_num_valid_samples + for value in val_results["all_mb_metrics"][k] ] - dict_val_metrics["num_valid_samples"] += val_results["all_mb_metrics"]["num_valid_samples"] + dict_val_metrics["num_valid_samples"] += val_results["all_mb_metrics"][ + "num_valid_samples" + ] num_valid_batches += 1 @@ -323,18 +349,35 @@ def validate_one_dataset( break if num_valid_batches > 0: - assert len(dict_val_metrics["val_loss"]) == len(dict_val_metrics["accuracy"]) \ - == len(dict_val_metrics["rewards_chosen_mean"]) == len(dict_val_metrics["rewards_rejected_mean"]) \ - == len(dict_val_metrics["num_valid_samples"]) + assert ( + len(dict_val_metrics["val_loss"]) + == len(dict_val_metrics["accuracy"]) + == len(dict_val_metrics["rewards_chosen_mean"]) + == len(dict_val_metrics["rewards_rejected_mean"]) + == len(dict_val_metrics["num_valid_samples"]) + ) sum_num_valid_samples = sum(dict_val_metrics["num_valid_samples"]) val_metrics = RMValMetrics( num_valid_samples=sum_num_valid_samples, **{ - k: sum([value * weight for value, weight in zip(dict_val_metrics[k], dict_val_metrics["num_valid_samples"])]) + k: sum( + [ + value * weight + for value, weight in zip( + dict_val_metrics[k], + dict_val_metrics["num_valid_samples"], + ) + ] + ) / sum_num_valid_samples - for k in ["val_loss", "accuracy", "rewards_chosen_mean", "rewards_rejected_mean"] - } + for k in [ + "val_loss", + "accuracy", + "rewards_chosen_mean", + "rewards_rejected_mean", + ] + }, ) else: warnings.warn( @@ -354,7 +397,7 @@ def 
validate_one_dataset( # Get timing metrics timing_metrics = timer.get_timing_metrics(reduction_op="sum") - validation_time = timing_metrics.get("total_validation_time", 0) + validation_time = timing_metrics.get("total_val_time", 0) if num_valid_batches > 0: # Print summary of validation results @@ -373,7 +416,7 @@ def validate_one_dataset( # Print timing information print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") - validation_time = timing_metrics.get("total_validation_time", 0) + validation_time = timing_metrics.get("total_val_time", 0) print(f" • Total validation time: {validation_time:.2f}s") # Make sure to reset the timer after validation @@ -494,10 +537,16 @@ def rm_train( rm_save_state["step"] = (current_step + 1) % len(train_dataloader) rm_save_state["total_steps"] = total_steps + 1 rm_save_state["epoch"] = current_epoch + # Remove outdated validation metrics + for key in list(rm_save_state): + if ( + key.startswith("val") + and (key.endswith("_loss") or key.endswith("_accuracy")) + and (val_metrics is None or key not in val_metrics) + ): + del rm_save_state[key] if val_metrics is not None: rm_save_state.update(val_metrics) - elif "val_loss" in rm_save_state: - del rm_save_state["val_loss"] if master_config["checkpointing"]["metric_name"] is not None: if ( diff --git a/nemo_rl/data/__init__.py b/nemo_rl/data/__init__.py index ee0600bf47..13cbce6f9c 100644 --- a/nemo_rl/data/__init__.py +++ b/nemo_rl/data/__init__.py @@ -31,6 +31,9 @@ class DataConfig(TypedDict): shuffle: NotRequired[bool] seed: NotRequired[int] download_dir: NotRequired[str] + train_data_path: NotRequired[str] + val_data_path: NotRequired[str] + val_data_paths: NotRequired[dict[str, str]] class MathDataConfig(DataConfig): diff --git a/nemo_rl/data/datasets.py b/nemo_rl/data/datasets.py index 172b73ecd0..60dd0e091c 100644 --- a/nemo_rl/data/datasets.py +++ b/nemo_rl/data/datasets.py @@ -233,6 +233,7 @@ def preference_collate_fn( data_batch: list[DPODatumSpec], tokenizer: 
TokenizerType, make_sequence_length_divisible_by: int, + add_loss_mask: bool, ) -> BatchedDataDict[Any]: """Collate function for preference data training. @@ -244,8 +245,9 @@ def preference_collate_fn( data_batch: List of data samples with message_log_chosen, message_log_rejected, length_chosen, length_rejected, loss_multiplier, idx, and task_name fields. tokenizer: Tokenizer for text processing make_sequence_length_divisible_by: Make the sequence length divisible by this value + add_loss_mask: Whether to add a token_mask to the returned data Returns: - BatchedDataDict with input_ids, input_lengths, token_mask, and sample_mask fields. + BatchedDataDict with input_ids, input_lengths, token_mask (optional), and sample_mask fields. """ message_log = [] length = [] @@ -275,11 +277,11 @@ def preference_collate_fn( batch_max_length=batch_max_length, ) - add_loss_mask_to_message_log( - batch["message_log"], - only_unmask_final=True, - roles_to_train_on=["assistant"], - ) + if add_loss_mask: + add_loss_mask_to_message_log( + batch["message_log"], + only_unmask_final=True, + ) cat_and_padded, input_lengths = batched_message_log_to_flat_message( batch["message_log"], @@ -291,10 +293,11 @@ def preference_collate_fn( { "input_ids": cat_and_padded["token_ids"], "input_lengths": input_lengths, - "token_mask": cat_and_padded["token_loss_mask"], "sample_mask": batch["loss_multiplier"], } ) + if add_loss_mask: + data["token_mask"] = cat_and_padded["token_loss_mask"] return data diff --git a/nemo_rl/data/hf_datasets/dpo.py b/nemo_rl/data/hf_datasets/dpo.py index 26154cd779..d0c96a7e21 100644 --- a/nemo_rl/data/hf_datasets/dpo.py +++ b/nemo_rl/data/hf_datasets/dpo.py @@ -11,27 +11,41 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from datasets import load_dataset +import warnings from typing import Any -from nemo_rl.data.interfaces import TaskDataSpec +from datasets import load_dataset -import warnings +from nemo_rl.data.interfaces import TaskDataSpec def to_preference_data_format(data: dict[str, Any]) -> dict[str, list[dict[str, Any]]]: return { - "context": data["prompt"] if isinstance(data["prompt"], list) else [{"role": "user", "content": data["prompt"]}], + "context": data["prompt"] + if isinstance(data["prompt"], list) + else [{"role": "user", "content": data["prompt"]}], "completions": [ - {"rank": 0, "completion": [{"role": "assistant", "content": data["chosen_response"]}]}, - {"rank": 1, "completion": [{"role": "assistant", "content": data["rejected_response"]}]} - ] + { + "rank": 0, + "completion": [ + {"role": "assistant", "content": data["chosen_response"]} + ], + }, + { + "rank": 1, + "completion": [ + {"role": "assistant", "content": data["rejected_response"]} + ], + }, + ], } class DPODataset: """Dataset class for Direct Preference Optimization (DPO) training. + This class is deprecated and will be removed in a future version. Use PreferenceDataset instead. + This class handles loading of preference data for DPO training. The input JSON files should contain examples with the following structure: { @@ -48,14 +62,18 @@ class DPODataset: def __init__(self, train_data_path: str, val_data_path: str): warnings.warn( - "DPODataset is deprecated and will be removed in a future version. Use PreferenceDataset instead (see function `to_preference_data_format()` on how to convert your data to this new format).", + "DPODataset is deprecated and will be removed in a future version. 
Use PreferenceDataset instead (see function `to_preference_data_format()` on how to convert your data to this new format).", category=DeprecationWarning, - stacklevel=2 + stacklevel=2, ) self.formatted_ds = { - "train": load_dataset("json", data_files=train_data_path, split="train").map(to_preference_data_format), - "validation": load_dataset("json", data_files=val_data_path, split="train").map(to_preference_data_format), + "train": load_dataset( + "json", data_files=train_data_path, split="train" + ).map(to_preference_data_format), + "validation": load_dataset( + "json", data_files=val_data_path, split="train" + ).map(to_preference_data_format), } self.task_spec = TaskDataSpec( diff --git a/nemo_rl/data/hf_datasets/helpsteer3.py b/nemo_rl/data/hf_datasets/helpsteer3.py index 05c66cad64..e80fbff302 100644 --- a/nemo_rl/data/hf_datasets/helpsteer3.py +++ b/nemo_rl/data/hf_datasets/helpsteer3.py @@ -19,7 +19,11 @@ from nemo_rl.data.interfaces import TaskDataSpec -def to_preference_data_format(data: dict[str, Any]) -> dict[str, str | dict[str, str]]: +def to_preference_data_format( + data: dict[str, Any], +) -> dict[ + str, list[dict[str, int | list[dict[str, str | Any]]]] | list[dict[str, str]] +]: response_1 = data["response1"] response_2 = data["response2"] overall_preference = data["overall_preference"] @@ -40,7 +44,9 @@ def to_preference_data_format(data: dict[str, Any]) -> dict[str, str | dict[str, rejected = response_1 return { - "context": [{"role": "user", "content": data["context"]}] if isinstance(data["context"], str) else data["context"], + "context": [{"role": "user", "content": data["context"]}] + if isinstance(data["context"], str) + else data["context"], "completions": [ {"rank": 0, "completion": [{"role": "assistant", "content": chosen}]}, {"rank": 1, "completion": [{"role": "assistant", "content": rejected}]}, diff --git a/nemo_rl/data/hf_datasets/preference_dataset.py b/nemo_rl/data/hf_datasets/preference_dataset.py index 5d03125105..dea4d7213d 
100644 --- a/nemo_rl/data/hf_datasets/preference_dataset.py +++ b/nemo_rl/data/hf_datasets/preference_dataset.py @@ -11,17 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any -from absl import logging -from collections import defaultdict -from datasets import Dataset, DatasetDict, load_dataset +from datasets import DatasetDict, load_dataset from nemo_rl.data.interfaces import TaskDataSpec class PreferenceDataset: - """Preference dataset. + """Preference dataset. This class handles loading of custom preference data. The input JSONL file should contain valid JSON objects formatted like this: @@ -37,8 +34,10 @@ class PreferenceDataset: def __init__(self, dataset_path: str, split: str) -> None: # Specifying split="train" returns Dataset instead of DatasetDict({"train": Dataset}) - self.formatted_ds = DatasetDict({split: load_dataset("json", data_files=dataset_path, split="train")}) + self.formatted_ds = DatasetDict( + {split: load_dataset("json", data_files=dataset_path, split="train")} + ) self.task_spec = TaskDataSpec( task_name="PreferenceDataset", - ) \ No newline at end of file + ) diff --git a/pyrefly.toml b/pyrefly.toml index e9717a1ed0..f4b3c426fb 100644 --- a/pyrefly.toml +++ b/pyrefly.toml @@ -60,6 +60,7 @@ project-includes = [ "nemo_rl/data/hf_datasets/oai_format_dataset.py", "nemo_rl/data/hf_datasets/oasst.py", "nemo_rl/data/hf_datasets/openmathinstruct2.py", + "nemo_rl/data/hf_datasets/preference_dataset.py", "nemo_rl/data/hf_datasets/prompt_response_dataset.py", "nemo_rl/data/hf_datasets/squad.py", "nemo_rl/data/hf_datasets/tulu3.py", diff --git a/tests/unit/data/hf_datasets/test_dpo_dataset.py b/tests/unit/data/hf_datasets/test_dpo_dataset.py index 2e1fa3e2d2..85261ff958 100644 --- a/tests/unit/data/hf_datasets/test_dpo_dataset.py +++ b/tests/unit/data/hf_datasets/test_dpo_dataset.py @@ -99,8 
+99,11 @@ def test_dpo_dataset_data_format(mock_dpo_data): # Verify data content print(train_sample["completions"]) - assert train_sample["context"] == [{'content': 'What is 2+2?', 'role': 'user'}] + assert train_sample["context"] == [{"content": "What is 2+2?", "role": "user"}] assert train_sample["completions"] == [ - {'completion': [{'content': 'The answer is 4.', 'role': 'assistant'}], 'rank': 0}, - {'completion': [{'content': "I don't know.", 'role': 'assistant'}], 'rank': 1} - ] \ No newline at end of file + { + "completion": [{"content": "The answer is 4.", "role": "assistant"}], + "rank": 0, + }, + {"completion": [{"content": "I don't know.", "role": "assistant"}], "rank": 1}, + ] diff --git a/tests/unit/data/hf_datasets/test_helpsteer.py b/tests/unit/data/hf_datasets/test_helpsteer.py index 9e6afdc67d..4015a83b72 100644 --- a/tests/unit/data/hf_datasets/test_helpsteer.py +++ b/tests/unit/data/hf_datasets/test_helpsteer.py @@ -43,9 +43,12 @@ def test_to_preference_data_format(): result1 = to_preference_data_format(data1) assert result1["context"] == [{"content": "What is 2+2?", "role": "user"}] assert result1["completions"] == [ - {"rank": 0, "completion": [{"role": "assistant", "content": "The answer is 4."}]}, - {"rank": 1, "completion": [{"role": "assistant", "content": "I don't know."}] - }] + { + "rank": 0, + "completion": [{"role": "assistant", "content": "The answer is 4."}], + }, + {"rank": 1, "completion": [{"role": "assistant", "content": "I don't know."}]}, + ] # Test case 2: response2 is preferred (overall_preference > 0) data2 = { @@ -55,10 +58,22 @@ def test_to_preference_data_format(): "overall_preference": 1, } result2 = to_preference_data_format(data2) - assert result2["context"] == [{"content": "What is the capital of France?", "role": "user"}] + assert result2["context"] == [ + {"content": "What is the capital of France?", "role": "user"} + ] assert result2["completions"] == [ - {"rank": 0, "completion": [{"role": "assistant", "content": 
"The capital of France is Paris."}]}, - {"rank": 1, "completion": [{"role": "assistant", "content": "The capital of France is London."}]} + { + "rank": 0, + "completion": [ + {"role": "assistant", "content": "The capital of France is Paris."} + ], + }, + { + "rank": 1, + "completion": [ + {"role": "assistant", "content": "The capital of France is London."} + ], + }, ] # Test case 3: no preference (overall_preference = 0) @@ -69,12 +84,20 @@ def test_to_preference_data_format(): "overall_preference": 0, } result3 = to_preference_data_format(data3) - assert result3["context"] == [{"content": "What is the weather like?", "role": "user"}] + assert result3["context"] == [ + {"content": "What is the weather like?", "role": "user"} + ] # When preference is 0, neither response is preferred, so # response 1 is used for both chosen and rejected assert result3["completions"] == [ - {"rank": 0, "completion": [{"role": "assistant", "content": "It's sunny today."}]}, - {"rank": 1, "completion": [{"role": "assistant", "content": "It's sunny today."}]} + { + "rank": 0, + "completion": [{"role": "assistant", "content": "It's sunny today."}], + }, + { + "rank": 1, + "completion": [{"role": "assistant", "content": "It's sunny today."}], + }, ] # Test case 4: context is a list of dicts @@ -82,7 +105,7 @@ def test_to_preference_data_format(): "context": [ {"role": "user", "content": "Can I ask you a question?"}, {"role": "assistant", "content": "Sure, what do you want to know?"}, - {"role": "user", "content": "What is 2+2?"} + {"role": "user", "content": "What is 2+2?"}, ], "response1": "4.", "response2": "I don't know.", @@ -92,12 +115,13 @@ def test_to_preference_data_format(): assert result1["context"] == [ {"role": "user", "content": "Can I ask you a question?"}, {"role": "assistant", "content": "Sure, what do you want to know?"}, - {"role": "user", "content": "What is 2+2?"} + {"role": "user", "content": "What is 2+2?"}, ] assert result1["completions"] == [ {"rank": 0, 
"completion": [{"role": "assistant", "content": "4."}]}, - {"rank": 1, "completion": [{"role": "assistant", "content": "I don't know."}] - }] + {"rank": 1, "completion": [{"role": "assistant", "content": "I don't know."}]}, + ] + def test_helpsteer3_dataset_initialization(helpsteer3_dataset): """Test that HelpSteer3Dataset initializes correctly.""" diff --git a/tests/unit/data/hf_datasets/test_preference_dataset.py b/tests/unit/data/hf_datasets/test_preference_dataset.py index 8ca97bcde4..955a91809f 100644 --- a/tests/unit/data/hf_datasets/test_preference_dataset.py +++ b/tests/unit/data/hf_datasets/test_preference_dataset.py @@ -26,43 +26,43 @@ def mock_preference_data(): """Create temporary preference dataset files with sample data.""" preference_data = [ { - "context": [ - {"role": "user", "content": "What is 2+2?"} - ], + "context": [{"role": "user", "content": "What is 2+2?"}], "completions": [ { "rank": 1, "completion": [ {"role": "assistant", "content": "The answer is 4."} - ] + ], }, { "rank": 2, - "completion": [ - {"role": "assistant", "content": "I don't know."} - ] - } - ] + "completion": [{"role": "assistant", "content": "I don't know."}], + }, + ], }, { - "context": [ - {"role": "user", "content": "What is the capital of France?"} - ], + "context": [{"role": "user", "content": "What is the capital of France?"}], "completions": [ { "rank": 1, "completion": [ - {"role": "assistant", "content": "The capital of France is Paris."} - ] + { + "role": "assistant", + "content": "The capital of France is Paris.", + } + ], }, { "rank": 2, "completion": [ - {"role": "assistant", "content": "The capital of France is London."} - ] + { + "role": "assistant", + "content": "The capital of France is London.", + } + ], }, - ] - } + ], + }, ] with tempfile.NamedTemporaryFile( @@ -70,7 +70,7 @@ def mock_preference_data(): ) as preference_file: json.dump(preference_data, preference_file) preference_path = preference_file.name - + try: yield preference_path finally: diff 
--git a/tests/unit/data/test_datasets.py b/tests/unit/data/test_datasets.py index 9050815480..6cdd8203b5 100755 --- a/tests/unit/data/test_datasets.py +++ b/tests/unit/data/test_datasets.py @@ -23,6 +23,10 @@ def test_preference_collate_fn(): """Test that preference_collate_fn correctly processes preference data.""" + # Create mock tokenizer + mock_tokenizer = MagicMock() + mock_tokenizer.pad_token_id = 0 + # Create test data with varying sequence lengths data_batch = [ DatumSpec( @@ -90,31 +94,58 @@ def test_preference_collate_fn(): ] # Call preference_collate_fn - train_data = preference_collate_fn(data_batch) + train_data = preference_collate_fn( + data_batch, + mock_tokenizer, + make_sequence_length_divisible_by=16, + add_loss_mask=True, + ) - # Verify the output structure matches the actual format - assert "message_log" in train_data - assert "length" in train_data - assert "loss_multiplier" in train_data - assert "task_name" in train_data - assert "idx" in train_data - assert "batch_max_length" in train_data + # Verify the output structure + assert isinstance(train_data, BatchedDataDict) + assert "input_ids" in train_data + assert "input_lengths" in train_data + assert "token_mask" in train_data + assert "sample_mask" in train_data # Verify batch size is doubled (chosen + rejected for each example) - assert len(train_data["message_log"]) == 4 # 2 examples * 2 (chosen + rejected) + assert train_data["input_ids"].shape[0] == 4 # 2 examples * 2 (chosen + rejected) + + # Verify input_ids shape and padding + max_length = 16 # max of all sequence lengths, padded to be divisible by 16 + assert train_data["input_ids"].shape == (4, max_length) - # Verify length tensor + # Verify input_lengths expected_lengths = [7, 5, 6, 7] # chosen1, rejected1, chosen2, rejected2 - assert torch.equal(train_data["length"], torch.tensor(expected_lengths)) + assert torch.equal(train_data["input_lengths"], torch.tensor(expected_lengths)) - # Verify loss_multiplier tensor - 
expected_loss_multiplier = [1.0, 1.0, 0.0, 0.0] # loss_multiplier repeated for chosen/rejected - assert torch.equal(train_data["loss_multiplier"], torch.tensor(expected_loss_multiplier)) + # Verify token_mask + assert train_data["token_mask"].shape == (4, max_length) + # First example chosen (length 7) + assert torch.all(train_data["token_mask"][0][0:3] == 0) + assert torch.all(train_data["token_mask"][0][3:7] == 1) + # First example rejected (length 5) + assert torch.all(train_data["token_mask"][1][0:3] == 0) + assert torch.all(train_data["token_mask"][1][3:5] == 1) + assert torch.all(train_data["token_mask"][1][5:] == 0) - # Verify idx list - expected_idx = [0, 0, 1, 1] # idx repeated for chosen/rejected - assert train_data["idx"] == expected_idx + # Verify sample_mask + expected_sample_mask = [ + 1.0, + 1.0, + 0.0, + 0.0, + ] # loss_multiplier repeated for chosen/rejected + assert torch.equal(train_data["sample_mask"], torch.tensor(expected_sample_mask)) - # Verify batch_max_length tensor - expected_batch_max_length = [7, 7, 7, 7] # max length for each sequence - assert torch.equal(train_data["batch_max_length"], torch.tensor(expected_batch_max_length)) + # Verify message content is preserved + # First example chosen + assert torch.equal(train_data["input_ids"][0][0:3], torch.tensor([1, 2, 3])) # user + assert torch.equal( + train_data["input_ids"][0][3:7], torch.tensor([4, 5, 6, 7]) + ) # assistant + # First example rejected + assert torch.equal(train_data["input_ids"][1][0:3], torch.tensor([1, 2, 3])) # user + assert torch.equal( + train_data["input_ids"][1][3:5], torch.tensor([8, 9]) + ) # assistant From 8ad7565554ca55aeadd9945466fc15bbc7da17d1 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 12:39:59 -0700 Subject: [PATCH 34/47] nit docs + lint Signed-off-by: Julien Veron Vialard --- examples/configs/dpo.yaml | 33 +++++++++++++++++---------------- examples/configs/rm.yaml | 33 +++++++++++++++++---------------- 
nemo_rl/algorithms/sft.py | 2 +- 3 files changed, 35 insertions(+), 33 deletions(-) diff --git a/examples/configs/dpo.yaml b/examples/configs/dpo.yaml index dbc0d04a22..343978cfe0 100755 --- a/examples/configs/dpo.yaml +++ b/examples/configs/dpo.yaml @@ -22,17 +22,6 @@ dpo: checkpointing: enabled: true checkpoint_dir: "results/dpo" - - # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. For example: - # checkpointing: - # metric_name: "val-_loss" - # ... - # data: - # dataset_name: PreferenceDataset - # train_data_path: - # val_data_paths: - # : - # ... metric_name: "val_loss" higher_is_better: false keep_top_k: 3 @@ -163,13 +152,25 @@ policy: data: max_input_seq_length: ${policy.max_total_sequence_length} - dataset_name: HelpSteer3 - # You can use the following to configure a custom preference dataset for training and validation - # dataset_name: PreferenceDataset - # train_data_path: - # val_data_path: shuffle: true + dataset_name: HelpSteer3 + # You can use a custom preference dataset for training and validation. For example: + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_path: + # + # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. For example: + # checkpointing: + # metric_name: "val-_loss" + # ... + # data: + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_paths: + # : + # ... + logger: log_dir: "logs" # Base directory for all logs wandb_enabled: false # Make sure you do a ``wandb login [Your API key]'' before running diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index 587ec6da88..bfb7748a22 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -15,17 +15,6 @@ rm: checkpointing: enabled: true checkpoint_dir: "results/rm" - - # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. 
For example: - # checkpointing: - # metric_name: "val-_loss" - # ... - # data: - # dataset_name: PreferenceDataset - # train_data_path: - # val_data_paths: - # : - # ... metric_name: "val_loss" higher_is_better: false keep_top_k: 3 @@ -134,13 +123,25 @@ policy: data: max_input_seq_length: ${policy.max_total_sequence_length} - dataset_name: HelpSteer3 - # You can use the following to configure a custom preference dataset for training and validation - # dataset_name: PreferenceDataset - # train_data_path: - # val_data_path: shuffle: true + dataset_name: HelpSteer3 + # You can use a custom preference dataset for training and validation. For example: + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_path: + # + # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. For example: + # checkpointing: + # metric_name: "val-_loss" + # ... + # data: + # dataset_name: PreferenceDataset + # train_data_path: + # val_data_paths: + # : + # ... 
+ logger: log_dir: "logs" # Base directory for all logs wandb_enabled: true # Make sure you do a ``wandb login [Your API key]'' before running diff --git a/nemo_rl/algorithms/sft.py b/nemo_rl/algorithms/sft.py index adc33d02c4..6de9ac81f1 100644 --- a/nemo_rl/algorithms/sft.py +++ b/nemo_rl/algorithms/sft.py @@ -568,4 +568,4 @@ def sft_train( return current_epoch += 1 - current_step = 0 # Reset step counter for new epoch \ No newline at end of file + current_step = 0 # Reset step counter for new epoch From 9efd72a6e79e3d606ab3c462744aaab4a312808a Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 16:13:41 -0700 Subject: [PATCH 35/47] nit code and docs Signed-off-by: Julien Veron Vialard --- docs/guides/dpo.md | 8 ++++---- docs/guides/rm.md | 8 ++++---- examples/run_dpo.py | 23 ++++++++++------------- examples/run_rm.py | 19 +++++++++++++------ nemo_rl/algorithms/dpo.py | 28 +++++++++++++--------------- nemo_rl/algorithms/rm.py | 34 +++++++++++++++++++--------------- 6 files changed, 63 insertions(+), 57 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index 3f8554bb3e..70f25ce364 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -34,7 +34,7 @@ uv run examples/run_dpo.py \ Each DPO dataset class is expected to have the following attributes: 1. `formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like -``` +```jsonc { "context": list of dicts, # The prompt message (including previous turns, if any) "completions": list of dicts, # The list of completions @@ -47,7 +47,7 @@ Each DPO dataset class is expected to have the following attributes: 2. `task_spec`: The `TaskDataSpec` for this dataset. This should specify the name you choose for this dataset. DPO training supports only two completions (where the lowest rank is preferred and the highest one is rejected), with each completion being a single response. 
For example: -``` +```json { "context": [ { @@ -89,14 +89,14 @@ DPO training supports only two completions (where the lowest rank is preferred a NeMo RL provides a DPO-compatible implementation of the [HelpSteer3](https://github.com/NVIDIA-NeMo/RL/blob/main/nemo_rl/data/hf_datasets/helpsteer3.py) dataset as an example. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows to use such a custom preference dataset: -``` +```yaml data: dataset_name: PreferenceDataset train_data_path: val_data_path: ``` with support for multiple validation sets achieved with: -``` +```yaml data: dataset_name: PreferenceDataset train_data_path: diff --git a/docs/guides/rm.md b/docs/guides/rm.md index d001dabaf7..74f6eb9e08 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -23,7 +23,7 @@ The default YAML config shares the same base template as the SFT config but incl Each RM dataset class is expected to have the following attributes: 1. `formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like -``` +```jsonc { "context": list of dicts, # The prompt message (including previous turns, if any) "completions": list of dicts, # The list of completions @@ -36,7 +36,7 @@ Each RM dataset class is expected to have the following attributes: 2. `task_spec`: The `TaskDataSpec` for this dataset. This should specify the name you choose for this dataset. Currently, RM training supports only two completions (where the lowest rank is preferred and the highest one is rejected), with each completion being a single response. 
For example: -``` +```json { "context": [ { @@ -78,14 +78,14 @@ Currently, RM training supports only two completions (where the lowest rank is p NeMo RL provides a RM-compatible implementation of the [HelpSteer3](https://github.com/NVIDIA-NeMo/RL/blob/main/nemo_rl/data/hf_datasets/helpsteer3.py) dataset as an example. This dataset is downloaded from Hugging Face and preprocessed on-the-fly, so there's no need to provide a path to any datasets on disk. We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_dataset.py) class that is compatible with JSONL-formatted preference datasets. You can modify your config as follows to use such a custom preference dataset: -``` +```yaml data: dataset_name: PreferenceDataset train_data_path: val_data_path: ``` with support for multiple validation sets achieved with: -``` +```yaml data: dataset_name: PreferenceDataset train_data_path: diff --git a/examples/run_dpo.py b/examples/run_dpo.py index 6a69e1a0a6..74a3131b4c 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -166,23 +166,14 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): data = hf_datasets.PreferenceDataset(data_path, split="train") train_dataset = data.formatted_ds["train"] val_dataset = None - print( - f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." - ) elif data_cls == "HelpSteer3": data = hf_datasets.HelpSteer3Dataset() train_dataset = data.formatted_ds["train"] val_dataset = data.formatted_ds["validation"] - print( - f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." - ) - elif data_config["dataset_name"] == "Tulu3Preference": + elif data_cls == "Tulu3Preference": data = hf_datasets.Tulu3PreferenceDataset() train_dataset = data.formatted_ds["train"] val_dataset = None - print( - f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." 
- ) elif data_cls == "DPODataset": data = hf_datasets.DPODataset( train_data_path=data_config["train_data_path"], @@ -190,12 +181,18 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): ) train_dataset = data.formatted_ds["train"] val_dataset = data.formatted_ds["validation"] - print( - f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." - ) else: raise ValueError(f"Unknown dataset class: {data_cls}") + if train_dataset: + print( + f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." + ) + if val_dataset: + print( + f" āœ“ Validation dataset loaded with {len(data.formatted_ds['validation'])} samples." + ) + dpo_task_spec = data.task_spec tokenizer = get_tokenizer(policy_config["tokenizer"]) diff --git a/examples/run_rm.py b/examples/run_rm.py index 1f0c7d00c5..a4994ab464 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -124,19 +124,26 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): data = hf_datasets.PreferenceDataset(data_path, split="train") train_dataset = data.formatted_ds["train"] val_dataset = None - print( - f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." - ) elif data_cls == "HelpSteer3": data = hf_datasets.HelpSteer3Dataset() train_dataset = data.formatted_ds["train"] val_dataset = data.formatted_ds["validation"] - print( - f" āœ“ Training and validation datasets loaded with {len(data.formatted_ds['train'])} and {len(data.formatted_ds['validation'])} samples, respectively." - ) + elif data_cls == "Tulu3Preference": + data = hf_datasets.Tulu3PreferenceDataset() + train_dataset = data.formatted_ds["train"] + val_dataset = None else: raise ValueError(f"Unknown dataset class: {data_cls}") + if train_dataset: + print( + f" āœ“ Training dataset loaded with {len(data.formatted_ds['train'])} samples." 
+ ) + if val_dataset: + print( + f" āœ“ Validation dataset loaded with {len(data.formatted_ds['validation'])} samples." + ) + rm_task_spec = data.task_spec train_dataset = AllTaskProcessedDataset( diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 9028c3cd52..540d863f47 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -285,23 +285,22 @@ def validate( logger: Logger, ): val_metrics, validation_timings = {}, {} - for k, v in val_dataloader.items(): + for val_dataset_name, v in val_dataloader.items(): k_val_metrics, k_validation_timings = validate_one_dataset( - policy, - v, - tokenizer, - loss_fn, - step, - master_config, - val_batches, - val_batch_size, - val_mbs, - k, + policy=policy, + val_dataloader=v, + loss_fn=loss_fn, + step=step, + master_config=master_config, + val_batches=val_batches, + val_batch_size=val_batch_size, + val_mbs=val_mbs, + dataset_name=val_dataset_name, ) - if k == "validation": + if val_dataset_name == "validation": prefix = "val" else: - prefix = f"val-{k}" + prefix = f"val-{val_dataset_name}" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") @@ -318,7 +317,6 @@ def validate( def validate_one_dataset( policy: PolicyInterface, val_dataloader: StatefulDataLoader, - tokenizer, loss_fn, step: int, master_config: MasterConfig, @@ -335,7 +333,7 @@ def validate_one_dataset( timer = Timer() with timer.time("total_val_time"): - print(f"ā–¶ Starting validation at step {step}...") + print(f"ā–¶ Starting validation at step {step} for `{dataset_name}` set..") val_metrics = defaultdict(lambda: 0.0) num_valid_batches = 0 diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 33c874c10e..692cf0f121 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -248,23 +248,22 @@ def validate( logger: Logger, ): val_metrics, validation_timings = {}, {} - for k, v in val_dataloader.items(): + for 
val_dataset_name, v in val_dataloader.items(): k_val_metrics, k_validation_timings = validate_one_dataset( - policy, - v, - tokenizer, - loss_fn, - step, - master_config, - val_batches, - val_batch_size, - val_mbs, - k, + policy=policy, + val_dataloader=v, + loss_fn=loss_fn, + step=step, + master_config=master_config, + val_batches=val_batches, + val_batch_size=val_batch_size, + val_mbs=val_mbs, + dataset_name=val_dataset_name, ) - if k == "validation": + if val_dataset_name == "validation": prefix = "val" else: - prefix = f"val-{k}" + prefix = f"val-{val_dataset_name}" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") @@ -281,7 +280,6 @@ def validate( def validate_one_dataset( policy: PolicyInterface, val_dataloader: StatefulDataLoader, - tokenizer, loss_fn, step: int, master_config: MasterConfig, @@ -298,7 +296,7 @@ def validate_one_dataset( timer = Timer() with timer.time("total_val_time"): - print(f"ā–¶ Starting validation at step {step}...") + print(f"ā–¶ Starting validation at step {step} for `{dataset_name}` set..") # Show a progress indicator for validation # val_total = len(val_dataloader) @@ -355,6 +353,12 @@ def validate_one_dataset( == len(dict_val_metrics["rewards_chosen_mean"]) == len(dict_val_metrics["rewards_rejected_mean"]) == len(dict_val_metrics["num_valid_samples"]) + ), ( + f"len(dict_val_metrics['val_loss']) == {len(dict_val_metrics['val_loss'])}\n" + f"len(dict_val_metrics['accuracy']) == {len(dict_val_metrics['accuracy'])}\n" + f"len(dict_val_metrics['rewards_chosen_mean']) == {len(dict_val_metrics['rewards_chosen_mean'])}\n" + f"len(dict_val_metrics['rewards_rejected_mean']) == {len(dict_val_metrics['rewards_rejected_mean'])}\n" + f"len(dict_val_metrics['num_valid_samples']) == {len(dict_val_metrics['num_valid_samples'])}" ) sum_num_valid_samples = sum(dict_val_metrics["num_valid_samples"]) From 8129c2348b00b49f7fa000ec1ecf97e75b9a62f4 Mon Sep 17 00:00:00 2001 
From: Julien Veron Vialard Date: Wed, 27 Aug 2025 16:35:33 -0700 Subject: [PATCH 36/47] better jsonc Signed-off-by: Julien Veron Vialard --- docs/guides/dpo.md | 17 +++++++++++------ docs/guides/rm.md | 17 +++++++++++------ 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index 70f25ce364..5648d2c878 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -36,12 +36,17 @@ Each DPO dataset class is expected to have the following attributes: 1. `formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like ```jsonc { - "context": list of dicts, # The prompt message (including previous turns, if any) - "completions": list of dicts, # The list of completions - { - "rank": int, # The rank of the completion (lower rank is preferred) - "completion": list of dicts, # The completion message(s) - } + "context": [], // list of dicts - The prompt message (including previous turns, if any) + "completions": [ // list of dicts — The list of completions + { + "rank": 0, // int — The rank of the completion (lower rank is preferred) + "completion": [] // list of dicts — The completion message(s) + }, + { + "rank": 1, // int — The rank of the completion (lower rank is preferred) + "completion": [] // list of dicts — The completion message(s) + } + ] } ``` 2. `task_spec`: The `TaskDataSpec` for this dataset. This should specify the name you choose for this dataset. diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 74f6eb9e08..b217833882 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -25,12 +25,17 @@ Each RM dataset class is expected to have the following attributes: 1. 
`formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like ```jsonc { - "context": list of dicts, # The prompt message (including previous turns, if any) - "completions": list of dicts, # The list of completions - { - "rank": int, # The rank of the completion (lower rank is preferred) - "completion": list of dicts, # The completion message(s) - } + "context": [], // list of dicts - The prompt message (including previous turns, if any) + "completions": [ // list of dicts — The list of completions + { + "rank": 0, // int — The rank of the completion (lower rank is preferred) + "completion": [] // list of dicts — The completion message(s) + }, + { + "rank": 1, // int — The rank of the completion (lower rank is preferred) + "completion": [] // list of dicts — The completion message(s) + } + ] } ``` 2. `task_spec`: The `TaskDataSpec` for this dataset. This should specify the name you choose for this dataset. From c4e3bda8b41b075695216438c70e97ec8f98a400 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 20:05:08 -0700 Subject: [PATCH 37/47] adding overall val time Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/dpo.py | 6 +++++- nemo_rl/algorithms/rm.py | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 540d863f47..0ffcd437f1 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -311,6 +311,10 @@ def validate( "total_val_time" ] + total_validation_time = sum(validation_timings.values()) + logger.log_metrics({"total_val_time": total_validation_time}, step, prefix=f"timing/validation") + validation_timings["total_val_time"] = total_validation_time + return val_metrics, validation_timings @@ -325,7 +329,7 @@ def validate_one_dataset( val_mbs: int, dataset_name: str, ): - """Run validation on the validation dataset.""" + """Run validation on one validation dataset.""" if val_dataloader is None: print(" 
āš ļø No validation dataloader provided, skipping validation") return diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 692cf0f121..114d718c8f 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -274,6 +274,10 @@ def validate( "total_val_time" ] + total_validation_time = sum(validation_timings.values()) + logger.log_metrics({"total_val_time": total_validation_time}, step, prefix=f"timing/validation") + validation_timings["total_val_time"] = total_validation_time + return val_metrics, validation_timings From 578441f1c6323c4a04470a5cb77e78665c245855 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 20:06:42 -0700 Subject: [PATCH 38/47] aggregate metrics at the batch level first Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/rm.py | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 114d718c8f..b0cf9de894 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -306,10 +306,7 @@ def validate_one_dataset( # val_total = len(val_dataloader) dict_val_metrics = defaultdict(list) - num_valid_batches = 0 - - policy.prepare_for_training() for batch_idx, val_batch in enumerate(val_dataloader): ## just run model fwd val_results = policy.train( @@ -336,14 +333,11 @@ def validate_one_dataset( "accuracy", "rewards_chosen_mean", "rewards_rejected_mean", + "num_valid_samples", ]: dict_val_metrics[k if k != "loss" else "val_loss"] += [ - value * sum_num_valid_samples - for value in val_results["all_mb_metrics"][k] + sum(val_results["all_mb_metrics"][k]) ] - dict_val_metrics["num_valid_samples"] += val_results["all_mb_metrics"][ - "num_valid_samples" - ] num_valid_batches += 1 @@ -351,19 +345,6 @@ def validate_one_dataset( break if num_valid_batches > 0: - assert ( - len(dict_val_metrics["val_loss"]) - == len(dict_val_metrics["accuracy"]) - == len(dict_val_metrics["rewards_chosen_mean"]) - == 
len(dict_val_metrics["rewards_rejected_mean"]) - == len(dict_val_metrics["num_valid_samples"]) - ), ( - f"len(dict_val_metrics['val_loss']) == {len(dict_val_metrics['val_loss'])}\n" - f"len(dict_val_metrics['accuracy']) == {len(dict_val_metrics['accuracy'])}\n" - f"len(dict_val_metrics['rewards_chosen_mean']) == {len(dict_val_metrics['rewards_chosen_mean'])}\n" - f"len(dict_val_metrics['rewards_rejected_mean']) == {len(dict_val_metrics['rewards_rejected_mean'])}\n" - f"len(dict_val_metrics['num_valid_samples']) == {len(dict_val_metrics['num_valid_samples'])}" - ) sum_num_valid_samples = sum(dict_val_metrics["num_valid_samples"]) val_metrics = RMValMetrics( From 52a20bc29a2909143118838330bb91562a154741 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 20:44:22 -0700 Subject: [PATCH 39/47] lint Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/dpo.py | 4 +++- nemo_rl/algorithms/rm.py | 5 +++-- nemo_rl/models/policy/dtensor_policy_worker.py | 2 ++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 0ffcd437f1..9eba2c7d6a 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -312,7 +312,9 @@ def validate( ] total_validation_time = sum(validation_timings.values()) - logger.log_metrics({"total_val_time": total_validation_time}, step, prefix=f"timing/validation") + logger.log_metrics( + {"total_val_time": total_validation_time}, step, prefix="timing/validation" + ) validation_timings["total_val_time"] = total_validation_time return val_metrics, validation_timings diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index b0cf9de894..a2694d21fd 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -275,7 +275,9 @@ def validate( ] total_validation_time = sum(validation_timings.values()) - logger.log_metrics({"total_val_time": total_validation_time}, step, prefix=f"timing/validation") + logger.log_metrics( + 
{"total_val_time": total_validation_time}, step, prefix="timing/validation" + ) validation_timings["total_val_time"] = total_validation_time return val_metrics, validation_timings @@ -345,7 +347,6 @@ def validate_one_dataset( break if num_valid_batches > 0: - sum_num_valid_samples = sum(dict_val_metrics["num_valid_samples"]) val_metrics = RMValMetrics( num_valid_samples=sum_num_valid_samples, diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index 7b2f0de271..a4b8b1ceba 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -586,6 +586,8 @@ def train( torch.distributed.all_reduce(to_reduce, group=self.dp_mesh.get_group()) global_valid_seqs, global_valid_toks = to_reduce[0], to_reduce[1] + print("global_valid_seqs", global_valid_seqs) + if ( hasattr(loss_fn, "loss_type") and loss_fn.loss_type == LossType.TOKEN_LEVEL From f97ee6d11dd93df040ac981ac0ad5b85bc3b64c9 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Wed, 27 Aug 2025 20:51:11 -0700 Subject: [PATCH 40/47] nit Signed-off-by: Julien Veron Vialard --- nemo_rl/models/policy/dtensor_policy_worker.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nemo_rl/models/policy/dtensor_policy_worker.py b/nemo_rl/models/policy/dtensor_policy_worker.py index a4b8b1ceba..7b2f0de271 100644 --- a/nemo_rl/models/policy/dtensor_policy_worker.py +++ b/nemo_rl/models/policy/dtensor_policy_worker.py @@ -586,8 +586,6 @@ def train( torch.distributed.all_reduce(to_reduce, group=self.dp_mesh.get_group()) global_valid_seqs, global_valid_toks = to_reduce[0], to_reduce[1] - print("global_valid_seqs", global_valid_seqs) - if ( hasattr(loss_fn, "loss_type") and loss_fn.loss_type == LossType.TOKEN_LEVEL From c4b4e6a9fb18e5152961d3dea2ddafbe22a3d200 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 28 Aug 2025 14:08:08 -0700 Subject: [PATCH 41/47] fix tulu3 Signed-off-by: Julien Veron Vialard --- 
examples/run_dpo.py | 4 +++- examples/run_rm.py | 4 +++- nemo_rl/data/hf_datasets/tulu3.py | 20 ++++++++++++++++---- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/examples/run_dpo.py b/examples/run_dpo.py index 74a3131b4c..719c8b7f3e 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -182,7 +182,9 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): train_dataset = data.formatted_ds["train"] val_dataset = data.formatted_ds["validation"] else: - raise ValueError(f"Unknown dataset class: {data_cls}") + raise ValueError( + f"Unknown dataset class: {data_cls}. Supported datasets are: PreferenceDataset, HelpSteer3, Tulu3Preference, and DPODataset (deprecated)." + ) if train_dataset: print( diff --git a/examples/run_rm.py b/examples/run_rm.py index a4994ab464..1aab8718f4 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -133,7 +133,9 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): train_dataset = data.formatted_ds["train"] val_dataset = None else: - raise ValueError(f"Unknown dataset class: {data_cls}") + raise ValueError( + f"Unknown dataset class: {data_cls}. Supported datasets are: PreferenceDataset, HelpSteer3, and Tulu3Preference." 
+ ) if train_dataset: print( diff --git a/nemo_rl/data/hf_datasets/tulu3.py b/nemo_rl/data/hf_datasets/tulu3.py index ab3fa62623..1d686cb507 100644 --- a/nemo_rl/data/hf_datasets/tulu3.py +++ b/nemo_rl/data/hf_datasets/tulu3.py @@ -20,7 +20,11 @@ from nemo_rl.data.interfaces import TaskDataSpec -def format_tulu3_preference(data: dict[str, Any]) -> dict[str, str | dict[str, str]]: +def format_tulu3_preference( + data: dict[str, Any], +) -> dict[ + str, list[dict[str, int | list[dict[str, str | Any]]]] | list[dict[str, str]] +]: chosen_conversation = data["chosen"] rejected_conversation = data["rejected"] @@ -46,9 +50,17 @@ def format_tulu3_preference(data: dict[str, Any]) -> dict[str, str | dict[str, s rejected_response = rejected_conversation[-1]["content"] return { - "prompt": context, - "chosen_response": chosen_response, - "rejected_response": rejected_response, + "context": context, + "completions": [ + { + "rank": 0, + "completion": [{"role": "assistant", "content": chosen_response}], + }, + { + "rank": 1, + "completion": [{"role": "assistant", "content": rejected_response}], + }, + ], } From bd16f9b8e85cc557a3b262787693912331a393ed Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 28 Aug 2025 14:48:02 -0700 Subject: [PATCH 42/47] adding tulu3 unit test Signed-off-by: Julien Veron Vialard --- nemo_rl/data/hf_datasets/tulu3.py | 4 +- tests/unit/data/hf_datasets/test_tulu3.py | 77 +++++++++++++++++++++++ 2 files changed, 79 insertions(+), 2 deletions(-) create mode 100644 tests/unit/data/hf_datasets/test_tulu3.py diff --git a/nemo_rl/data/hf_datasets/tulu3.py b/nemo_rl/data/hf_datasets/tulu3.py index 1d686cb507..266daea186 100644 --- a/nemo_rl/data/hf_datasets/tulu3.py +++ b/nemo_rl/data/hf_datasets/tulu3.py @@ -20,7 +20,7 @@ from nemo_rl.data.interfaces import TaskDataSpec -def format_tulu3_preference( +def to_preference_data_format( data: dict[str, Any], ) -> dict[ str, list[dict[str, int | list[dict[str, str | Any]]]] | list[dict[str, str]] @@ 
-72,7 +72,7 @@ def __init__(self) -> None: path="allenai/llama-3.1-tulu-3-8b-preference-mixture", trust_remote_code=True, ) - self.formatted_ds = ds.map(format_tulu3_preference) + self.formatted_ds = ds.map(to_preference_data_format) self.task_spec = TaskDataSpec( task_name="Tulu3Preference", diff --git a/tests/unit/data/hf_datasets/test_tulu3.py b/tests/unit/data/hf_datasets/test_tulu3.py new file mode 100644 index 0000000000..6cc1560ae1 --- /dev/null +++ b/tests/unit/data/hf_datasets/test_tulu3.py @@ -0,0 +1,77 @@ +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import pytest + +from nemo_rl.data.hf_datasets.tulu3 import ( + Tulu3PreferenceDataset, + to_preference_data_format, +) + + +@pytest.fixture(scope="module") +def tulu3_dataset(): + try: + dataset = Tulu3PreferenceDataset() + yield dataset + except Exception as e: + print(f"Error during loading Tulu3PreferenceDataset: {e}") + yield + + +def test_to_preference_data_format(): + """Test the `to_preference_data_format()` function with different preference values.""" + data = { + "prompt": "What is 2+2?", + "chosen": [{"content": "What is 2+2?", "role": "user"}, {"role": "assistant", "content": "The answer is 4."}], + "rejected": [{"content": "What is 2+2?", "role": "user"}, {"role": "assistant", "content": "I don't know."}], + } + result = to_preference_data_format(data) + assert result["context"] == [{"content": "What is 2+2?", "role": "user"}] + assert result["completions"] == [ + { + "rank": 0, + "completion": [{"role": "assistant", "content": "The answer is 4."}], + }, + {"rank": 1, "completion": [{"role": "assistant", "content": "I don't know."}]}, + ] + + +def test_tulu3_dataset_initialization(tulu3_dataset): + """Test that Tulu3PreferenceDataset initializes correctly.""" + + dataset = tulu3_dataset + if dataset is None: + pytest.skip("dataset download is flaky") + + # Verify dataset initialization + assert dataset.task_spec.task_name == "Tulu3Preference" + + +def test_tulu3_dataset_data_format(tulu3_dataset): + """Test that Tulu3PreferenceDataset correctly formats the data.""" + + dataset = tulu3_dataset + if dataset is None: + pytest.skip("dataset download is flaky") + + assert isinstance(dataset.formatted_ds, dict) + assert "train" in dataset.formatted_ds + + # Verify data format + sample = dataset.formatted_ds["train"][0] + assert "prompt" in sample + assert "chosen" in sample + assert "rejected" in sample From 5f6cc52124a21693e0a7e7041842e3cbee0d57e5 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 28 Aug 2025 15:16:16 -0700 Subject: [PATCH 
43/47] nit Signed-off-by: Julien Veron Vialard --- examples/run_dpo.py | 5 ++++- examples/run_rm.py | 5 ++++- nemo_rl/algorithms/dpo.py | 16 +++++++++------- nemo_rl/algorithms/rm.py | 16 +++++++++------- tests/unit/data/hf_datasets/test_tulu3.py | 10 ++++++++-- 5 files changed, 34 insertions(+), 18 deletions(-) diff --git a/examples/run_dpo.py b/examples/run_dpo.py index 719c8b7f3e..b82f11e188 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -115,7 +115,10 @@ def dpo_preprocessor( chosen_completion = datum_dict["completions"][1] rejected_completion = datum_dict["completions"][0] else: - raise NotImplementedError("Ties are not supported yet.") + raise NotImplementedError( + "Ties are not supported yet. You can use the following command to filter out ties: `cat | jq 'select(.completions[0].rank != .completions[1].rank)'`." + ) + messages_chosen = datum_dict["context"] + chosen_completion["completion"] messages_rejected = datum_dict["context"] + rejected_completion["completion"] diff --git a/examples/run_rm.py b/examples/run_rm.py index 1aab8718f4..6880f73078 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -67,7 +67,10 @@ def rm_preprocessor( chosen_completion = datum_dict["completions"][1] rejected_completion = datum_dict["completions"][0] else: - raise NotImplementedError("Ties are not supported yet.") + raise NotImplementedError( + "Ties are not supported yet. You can use the following command to filter out ties: `cat | jq 'select(.completions[0].rank != .completions[1].rank)'`." 
+ ) + messages_chosen = datum_dict["context"] + chosen_completion["completion"] messages_rejected = datum_dict["context"] + rejected_completion["completion"] diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 9eba2c7d6a..2871e8e1af 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -307,15 +307,17 @@ def validate( val_metrics[prefix + "_loss"] = k_val_metrics["val_loss"] val_metrics[prefix + "_accuracy"] = k_val_metrics["accuracy"] - validation_timings[prefix + "_total_val_time"] = k_validation_timings[ - "total_val_time" + validation_timings[prefix + "_total_validation_time"] = k_validation_timings[ + "total_validation_time" ] total_validation_time = sum(validation_timings.values()) logger.log_metrics( - {"total_val_time": total_validation_time}, step, prefix="timing/validation" + {"total_validation_time": total_validation_time}, + step, + prefix="timing/validation", ) - validation_timings["total_val_time"] = total_validation_time + validation_timings["total_validation_time"] = total_validation_time return val_metrics, validation_timings @@ -338,7 +340,7 @@ def validate_one_dataset( timer = Timer() - with timer.time("total_val_time"): + with timer.time("total_validation_time"): print(f"ā–¶ Starting validation at step {step} for `{dataset_name}` set..") val_metrics = defaultdict(lambda: 0.0) @@ -382,7 +384,7 @@ def validate_one_dataset( # Get timing metrics timing_metrics = timer.get_timing_metrics(reduction_op="sum") - validation_time = timing_metrics.get("total_val_time", 0) + validation_time = timing_metrics.get("total_validation_time", 0) if len(val_metrics) == 0: warnings.warn( @@ -398,7 +400,7 @@ def validate_one_dataset( # Print timing information print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") - validation_time = timing_metrics.get("total_val_time", 0) + validation_time = timing_metrics.get("total_validation_time", 0) print(f" • Total validation time: {validation_time:.2f}s") # Make sure to reset 
the timer after validation diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index a2694d21fd..0b7551983c 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -270,15 +270,17 @@ def validate( val_metrics[prefix + "_loss"] = k_val_metrics["val_loss"] val_metrics[prefix + "_accuracy"] = k_val_metrics["accuracy"] - validation_timings[prefix + "_total_val_time"] = k_validation_timings[ - "total_val_time" + validation_timings[prefix + "_total_validation_time"] = k_validation_timings[ + "total_validation_time" ] total_validation_time = sum(validation_timings.values()) logger.log_metrics( - {"total_val_time": total_validation_time}, step, prefix="timing/validation" + {"total_validation_time": total_validation_time}, + step, + prefix="timing/validation", ) - validation_timings["total_val_time"] = total_validation_time + validation_timings["total_validation_time"] = total_validation_time return val_metrics, validation_timings @@ -301,7 +303,7 @@ def validate_one_dataset( timer = Timer() - with timer.time("total_val_time"): + with timer.time("total_validation_time"): print(f"ā–¶ Starting validation at step {step} for `{dataset_name}` set..") # Show a progress indicator for validation @@ -387,7 +389,7 @@ def validate_one_dataset( # Get timing metrics timing_metrics = timer.get_timing_metrics(reduction_op="sum") - validation_time = timing_metrics.get("total_val_time", 0) + validation_time = timing_metrics.get("total_validation_time", 0) if num_valid_batches > 0: # Print summary of validation results @@ -406,7 +408,7 @@ def validate_one_dataset( # Print timing information print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") - validation_time = timing_metrics.get("total_val_time", 0) + validation_time = timing_metrics.get("total_validation_time", 0) print(f" • Total validation time: {validation_time:.2f}s") # Make sure to reset the timer after validation diff --git a/tests/unit/data/hf_datasets/test_tulu3.py 
b/tests/unit/data/hf_datasets/test_tulu3.py index 6cc1560ae1..d5ccf2d254 100644 --- a/tests/unit/data/hf_datasets/test_tulu3.py +++ b/tests/unit/data/hf_datasets/test_tulu3.py @@ -35,8 +35,14 @@ def test_to_preference_data_format(): """Test the `to_preference_data_format()` function with different preference values.""" data = { "prompt": "What is 2+2?", - "chosen": [{"content": "What is 2+2?", "role": "user"}, {"role": "assistant", "content": "The answer is 4."}], - "rejected": [{"content": "What is 2+2?", "role": "user"}, {"role": "assistant", "content": "I don't know."}], + "chosen": [ + {"content": "What is 2+2?", "role": "user"}, + {"role": "assistant", "content": "The answer is 4."}, + ], + "rejected": [ + {"content": "What is 2+2?", "role": "user"}, + {"role": "assistant", "content": "I don't know."}, + ], } result = to_preference_data_format(data) assert result["context"] == [{"content": "What is 2+2?", "role": "user"}] From 0dc7a6fa12b64e40150d97f344533d72ae5bd7e6 Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 28 Aug 2025 17:23:55 -0700 Subject: [PATCH 44/47] validation metrics Signed-off-by: Julien Veron Vialard --- docs/guides/dpo.md | 9 ++-- docs/guides/rm.md | 9 ++-- examples/configs/dpo.yaml | 14 ++---- examples/configs/rm.yaml | 14 ++---- examples/run_dpo.py | 47 ++++++++----------- examples/run_rm.py | 45 +++++++----------- nemo_rl/algorithms/dpo.py | 28 +++++------ nemo_rl/algorithms/rm.py | 99 +++++++++++++-------------------------- nemo_rl/data/__init__.py | 1 - 9 files changed, 102 insertions(+), 164 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index 5648d2c878..7d5167d7e2 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -98,7 +98,8 @@ We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_ data: dataset_name: PreferenceDataset train_data_path: - val_data_path: + val_data_paths: + : ``` with support for multiple validation sets achieved with: ```yaml @@ -109,9 +110,9 @@ 
data: : : ``` -If using multiple validation sets, please note: -- If you are using a logger, the prefix used for each validation set will be `val-`. -- If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `val-_loss`. +Please note: +- If you are using a logger, the prefix used for each validation set will be `validation-`. +- If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `validation-_loss`. The older [DPODataset](../../nemo_rl/data/hf_datasets/dpo.py) class is deprecated. This class is also compatible with JSONL-formatted preference datsets. It assumes train and validation datasets have been split and processed into the expected format offline. The JSONL files should consist of examples with `prompt`, `chosen_response`, and `rejected_response` keys. diff --git a/docs/guides/rm.md b/docs/guides/rm.md index b217833882..55221e4b1e 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -87,7 +87,8 @@ We also provide a [PreferenceDataset](../../nemo_rl/data/hf_datasets/preference_ data: dataset_name: PreferenceDataset train_data_path: - val_data_path: + val_data_paths: + : ``` with support for multiple validation sets achieved with: ```yaml @@ -98,6 +99,6 @@ data: : : ``` -If using multiple validation sets, please note: -- If you are using a logger, the prefix used for each validation set will be `val-`. -- If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `val-_loss`. \ No newline at end of file +Please note: +- If you are using a logger, the prefix used for each validation set will be `validation-`. 
+- If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `validation-_loss`. \ No newline at end of file diff --git a/examples/configs/dpo.yaml b/examples/configs/dpo.yaml index 343978cfe0..7de72fcc3f 100755 --- a/examples/configs/dpo.yaml +++ b/examples/configs/dpo.yaml @@ -155,21 +155,17 @@ data: shuffle: true dataset_name: HelpSteer3 - # You can use a custom preference dataset for training and validation. For example: - # dataset_name: PreferenceDataset - # train_data_path: - # val_data_path: - # - # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. For example: - # checkpointing: - # metric_name: "val-_loss" - # ... + # You can use custom preference datasets for training and validation. For example: # data: # dataset_name: PreferenceDataset # train_data_path: # val_data_paths: # : # ... + # If you are doing checkpointing, `metric_name` should reflect the metric and validation set to be tracked. For example: + # checkpointing: + # metric_name: "val-_loss" + # ... logger: log_dir: "logs" # Base directory for all logs diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index bfb7748a22..f0818fc163 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -126,21 +126,17 @@ data: shuffle: true dataset_name: HelpSteer3 - # You can use a custom preference dataset for training and validation. For example: - # dataset_name: PreferenceDataset - # train_data_path: - # val_data_path: - # - # If you are using multiple validation sets, `metric_name` should reflect the metric and validation set to be tracked. For example: - # checkpointing: - # metric_name: "val-_loss" - # ... + # You can use custom preference datasets for training and validation. For example: # data: # dataset_name: PreferenceDataset # train_data_path: # val_data_paths: # : # ... 
+ # If you are doing checkpointing, `metric_name` should reflect the metric and validation set to be tracked. For example: + # checkpointing: + # metric_name: "val-_loss" + # ... logger: log_dir: "logs" # Base directory for all logs diff --git a/examples/run_dpo.py b/examples/run_dpo.py index b82f11e188..c4b0a77ffd 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -209,35 +209,14 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): max_seq_length=data_config["max_input_seq_length"], ) - val_dataset = ( - { - "validation": AllTaskProcessedDataset( - val_dataset, - tokenizer, - dpo_task_spec, - dpo_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) - } - if val_dataset - else {} - ) - if data_cls == "PreferenceDataset": - if data_config.get("val_data_path"): - assert data_config.get("val_data_paths") is None, ( - "val_data_path and val_data_paths cannot be used together" - ) - val_data_paths = {"validation": data_config.get("val_data_path")} - - elif data_config.get("val_data_paths"): - assert isinstance(data_config["val_data_paths"], dict), ( - f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" - ) - val_data_paths = data_config.get("val_data_paths") + val_dataset = {} - else: - raise ValueError("Either val_data_path or val_data_paths must be provided") + assert "val_data_paths" in data_config, "val_data_paths must be provided" + assert isinstance(data_config["val_data_paths"], dict), ( + f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + ) + val_data_paths = data_config.get("val_data_paths") for val_dataset_name, val_dataset_path in val_data_paths.items(): assert val_dataset_name not in val_dataset @@ -254,6 +233,20 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): dpo_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) + else: + val_dataset = ( + { + "default": AllTaskProcessedDataset( + val_dataset, + tokenizer, + 
dpo_task_spec, + dpo_preprocessor, + max_seq_length=data_config["max_input_seq_length"], + ) + } + if val_dataset + else {} + ) return train_dataset, val_dataset, tokenizer, dpo_task_spec diff --git a/examples/run_rm.py b/examples/run_rm.py index 6880f73078..176a25521d 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -159,35 +159,14 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): max_seq_length=data_config["max_input_seq_length"], ) - val_dataset = ( - { - "validation": AllTaskProcessedDataset( - val_dataset, - tokenizer, - rm_task_spec, - rm_preprocessor, - max_seq_length=data_config["max_input_seq_length"], - ) - } - if val_dataset - else {} - ) - if data_cls == "PreferenceDataset": - if data_config.get("val_data_path"): - assert data_config.get("val_data_paths") is None, ( - "val_data_path and val_data_paths cannot be used together" - ) - val_data_paths = {"validation": data_config.get("val_data_path")} - - elif data_config.get("val_data_paths"): - assert isinstance(data_config["val_data_paths"], dict), ( - f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" - ) - val_data_paths = data_config.get("val_data_paths") + val_dataset = {} - else: - raise ValueError("Either val_data_path or val_data_paths must be provided") + assert "val_data_paths" in data_config, "val_data_paths must be provided" + assert isinstance(data_config["val_data_paths"], dict), ( + f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + ) + val_data_paths = data_config.get("val_data_paths") for val_dataset_name, val_dataset_path in val_data_paths.items(): assert val_dataset_name not in val_dataset @@ -204,6 +183,18 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): rm_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) + else: + val_dataset = ( + { + "default": AllTaskProcessedDataset( + val_dataset, + tokenizer, + rm_task_spec, + rm_preprocessor, + 
max_seq_length=data_config["max_input_seq_length"], + ) + } if val_dataset else {} + ) return train_dataset, val_dataset, rm_task_spec diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 2871e8e1af..c2d3b66d2b 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -43,7 +43,6 @@ class DPOSaveState(TypedDict): epoch: int # Track current epoch step: int # Track step within current epoch total_steps: int # Track total number of steps across all epochs - val_loss: NotRequired[float] # Optional field - may not be present during training consumed_samples: int @@ -170,9 +169,6 @@ def setup( ) train_dataloader.load_state_dict(dataloader_state_dict) - if not isinstance(val_dataset, dict): - val_dataset = {"validation": val_dataset} - val_dataloader = { k: StatefulDataLoader( v, @@ -297,27 +293,25 @@ def validate( val_mbs=val_mbs, dataset_name=val_dataset_name, ) - if val_dataset_name == "validation": - prefix = "val" - else: - prefix = f"val-{val_dataset_name}" + prefix = f"validation-{val_dataset_name}" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") - val_metrics[prefix + "_loss"] = k_val_metrics["val_loss"] - val_metrics[prefix + "_accuracy"] = k_val_metrics["accuracy"] + val_metrics[f"{prefix}_loss"] = k_val_metrics["loss"] + val_metrics[f"{prefix}_accuracy"] = k_val_metrics["accuracy"] validation_timings[prefix + "_total_validation_time"] = k_validation_timings[ "total_validation_time" ] - total_validation_time = sum(validation_timings.values()) - logger.log_metrics( - {"total_validation_time": total_validation_time}, - step, - prefix="timing/validation", - ) - validation_timings["total_validation_time"] = total_validation_time + if len(validation_timings) > 0: + total_validation_time = sum(validation_timings.values()) + logger.log_metrics( + {"total_validation_time": total_validation_time}, + step, + prefix="timing/validation", + ) + 
validation_timings["total_validation_time"] = total_validation_time return val_metrics, validation_timings diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 0b7551983c..2e419ee11b 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -47,7 +47,6 @@ class RMSaveState(TypedDict): epoch: int # Track current epoch step: int # Track step within current epoch total_steps: int # Track total number of steps across all epochs - val_loss: float consumed_samples: int @@ -81,7 +80,7 @@ class MasterConfig(TypedDict): class RMValMetrics(TypedDict): - val_loss: float + loss: float accuracy: float rewards_chosen_mean: float rewards_rejected_mean: float @@ -160,9 +159,6 @@ def setup( ) train_dataloader.load_state_dict(dataloader_state_dict) - if not isinstance(val_dataset, dict): - val_dataset = {"validation": val_dataset} - val_dataloader = { k: StatefulDataLoader( v, @@ -260,27 +256,26 @@ def validate( val_mbs=val_mbs, dataset_name=val_dataset_name, ) - if val_dataset_name == "validation": - prefix = "val" - else: - prefix = f"val-{val_dataset_name}" + prefix = f"validation-{val_dataset_name}" logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") - val_metrics[prefix + "_loss"] = k_val_metrics["val_loss"] - val_metrics[prefix + "_accuracy"] = k_val_metrics["accuracy"] + for metric_name in RMValMetrics.__annotations__.keys(): + if metric_name != "num_valid_samples": + val_metrics[f"{prefix}_{metric_name}"] = k_val_metrics[metric_name] validation_timings[prefix + "_total_validation_time"] = k_validation_timings[ "total_validation_time" ] - total_validation_time = sum(validation_timings.values()) - logger.log_metrics( - {"total_validation_time": total_validation_time}, - step, - prefix="timing/validation", - ) - validation_timings["total_validation_time"] = total_validation_time + if len(validation_timings) > 0: + total_validation_time = sum(validation_timings.values()) 
+ logger.log_metrics( + {"total_validation_time": total_validation_time}, + step, + prefix="timing/validation", + ) + validation_timings["total_validation_time"] = total_validation_time return val_metrics, validation_timings @@ -329,18 +324,9 @@ def validate_one_dataset( " This is likely because there were no valid samples." ) else: - sum_num_valid_samples = sum( - val_results["all_mb_metrics"]["num_valid_samples"] - ) - for k in [ - "loss", - "accuracy", - "rewards_chosen_mean", - "rewards_rejected_mean", - "num_valid_samples", - ]: - dict_val_metrics[k if k != "loss" else "val_loss"] += [ - sum(val_results["all_mb_metrics"][k]) + for metric_name in RMValMetrics.__annotations__.keys(): + dict_val_metrics[metric_name] += [ + sum(val_results["all_mb_metrics"][metric_name]) ] num_valid_batches += 1 @@ -353,22 +339,17 @@ def validate_one_dataset( val_metrics = RMValMetrics( num_valid_samples=sum_num_valid_samples, **{ - k: sum( + metric_name: sum( [ value * weight for value, weight in zip( - dict_val_metrics[k], + dict_val_metrics[metric_name], dict_val_metrics["num_valid_samples"], ) ] ) / sum_num_valid_samples - for k in [ - "val_loss", - "accuracy", - "rewards_chosen_mean", - "rewards_rejected_mean", - ] + for metric_name in RMValMetrics.__annotations__.keys() if metric_name != "num_valid_samples" }, ) else: @@ -376,13 +357,7 @@ def validate_one_dataset( "No validation metrics were collected." " This is likely because there were no valid samples in the validation set." 
) - val_metrics = RMValMetrics( - val_loss=0.0, - accuracy=0.0, - rewards_chosen_mean=0.0, - rewards_rejected_mean=0.0, - num_valid_samples=0.0, - ) + val_metrics = RMValMetrics(**{metric_name: 0.0 for metric_name in RMValMetrics.__annotations__.keys()}) # Calculate validation metrics policy.prepare_for_training() @@ -394,17 +369,13 @@ def validate_one_dataset( if num_valid_batches > 0: # Print summary of validation results print(f"\nšŸ“Š Validation Results for `{dataset_name}` set:") - print(f" • Validation loss: {val_metrics['val_loss']:.4f}") - print(f" • Validation accuracy: {val_metrics['accuracy']:.4f}") - print( - f" • Validation rewards chosen mean: {val_metrics['rewards_chosen_mean']:.4f}" - ) - print( - f" • Validation rewards rejected mean: {val_metrics['rewards_rejected_mean']:.4f}" - ) - print( - f" • Validation num valid samples: {val_metrics['num_valid_samples']:.0f}" - ) + for metric_name in RMValMetrics.__annotations__.keys(): + if metric_name != "num_valid_samples": + print(f" • Validation {metric_name}: {val_metrics[metric_name]:.4f}") + else: + print( + f" • Validation num valid samples: {val_metrics['num_valid_samples']:.0f}" + ) # Print timing information print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") @@ -533,7 +504,7 @@ def rm_train( for key in list(rm_save_state): if ( key.startswith("val") - and (key.endswith("_loss") or key.endswith("_accuracy")) + and any([key.endswith(f"_{metric_name}") for metric_name in RMValMetrics.__annotations__.keys() if metric_name != "num_valid_samples"]) and (val_metrics is None or key not in val_metrics) ): del rm_save_state[key] @@ -588,15 +559,11 @@ def rm_train( timing_metrics = timer.get_timing_metrics(reduction_op="sum") print("\nšŸ“Š Training Results:") - print(f" • Loss: {float(metrics['loss']):.4f}") - print(f" • Accuracy: {float(metrics['accuracy']):.4f}") - print( - f" • Rewards chosen mean: {float(metrics['rewards_chosen_mean']):.4f}" - ) - print( - f" • Rewards rejected mean: 
{float(metrics['rewards_rejected_mean']):.4f}" - ) - print(f" • Num valid samples: {float(metrics['num_valid_samples']):.0f}") + for metric_name in RMValMetrics.__annotations__.keys(): + if metric_name != "num_valid_samples": + print(f" • {metric_name}: {float(metrics[metric_name]):.4f}") + else: + print(f" • num valid samples: {float(metrics[metric_name]):.0f}") print("\nā±ļø Timing:") # Display total time first, separately diff --git a/nemo_rl/data/__init__.py b/nemo_rl/data/__init__.py index 13cbce6f9c..e15526e736 100644 --- a/nemo_rl/data/__init__.py +++ b/nemo_rl/data/__init__.py @@ -32,7 +32,6 @@ class DataConfig(TypedDict): seed: NotRequired[int] download_dir: NotRequired[str] train_data_path: NotRequired[str] - val_data_path: NotRequired[str] val_data_paths: NotRequired[dict[str, str]] From 1137407cdcb6f2b9583338e66b752e4d82aa4a2f Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Thu, 28 Aug 2025 18:28:16 -0700 Subject: [PATCH 45/47] nit code and docs Signed-off-by: Julien Veron Vialard --- docs/guides/dpo.md | 2 +- docs/guides/rm.md | 2 +- examples/configs/dpo.yaml | 2 +- examples/configs/rm.yaml | 2 +- examples/run_dpo.py | 15 +++++++++++---- examples/run_rm.py | 21 ++++++++++++++------- nemo_rl/algorithms/dpo.py | 3 ++- nemo_rl/algorithms/rm.py | 18 +++++++++++++++--- 8 files changed, 46 insertions(+), 19 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index 7d5167d7e2..bcd34ea077 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -111,7 +111,7 @@ data: : ``` Please note: -- If you are using a logger, the prefix used for each validation set will be `validation-`. +- If you are using a logger, the prefix used for each validation set will be `validation-`. The total validation time, summed across all validation sets, is reported under `timing/validation/total_validation_time`. 
- If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `validation-_loss`. The older [DPODataset](../../nemo_rl/data/hf_datasets/dpo.py) class is deprecated. This class is also compatible with JSONL-formatted preference datsets. It assumes train and validation datasets have been split and processed into the expected format offline. The JSONL files should consist of examples with `prompt`, `chosen_response`, and `rejected_response` keys. diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 55221e4b1e..456e540cca 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -100,5 +100,5 @@ data: : ``` Please note: -- If you are using a logger, the prefix used for each validation set will be `validation-`. +- If you are using a logger, the prefix used for each validation set will be `validation-`. The total validation time, summed across all validation sets, is reported under `timing/validation/total_validation_time`. - If you are doing checkpointing, the `metric_name` value in your `checkpointing` config should reflect the metric and validation set to be tracked. For example, `validation-_loss`. \ No newline at end of file diff --git a/examples/configs/dpo.yaml b/examples/configs/dpo.yaml index 7de72fcc3f..4a438e127e 100755 --- a/examples/configs/dpo.yaml +++ b/examples/configs/dpo.yaml @@ -164,7 +164,7 @@ data: # ... # If you are doing checkpointing, `metric_name` should reflect the metric and validation set to be tracked. For example: # checkpointing: - # metric_name: "val-_loss" + # metric_name: "validation-_loss" # ... logger: diff --git a/examples/configs/rm.yaml b/examples/configs/rm.yaml index f0818fc163..744538d5ed 100644 --- a/examples/configs/rm.yaml +++ b/examples/configs/rm.yaml @@ -135,7 +135,7 @@ data: # ... # If you are doing checkpointing, `metric_name` should reflect the metric and validation set to be tracked. 
For example: # checkpointing: - # metric_name: "val-_loss" + # metric_name: "validation-_loss" # ... logger: diff --git a/examples/run_dpo.py b/examples/run_dpo.py index c4b0a77ffd..b9b31cfcf6 100644 --- a/examples/run_dpo.py +++ b/examples/run_dpo.py @@ -106,7 +106,9 @@ def dpo_preprocessor( ``` """ - assert len(datum_dict["completions"]) == 2 + assert len(datum_dict["completions"]) == 2, ( + "DPO training supports only two completions" + ) # Lower rank is preferred if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: chosen_completion = datum_dict["completions"][0] @@ -212,11 +214,16 @@ def setup_data(data_config: DataConfig, policy_config: PolicyConfig): if data_cls == "PreferenceDataset": val_dataset = {} - assert "val_data_paths" in data_config, "val_data_paths must be provided" + assert "val_data_path" not in data_config, ( + "`val_data_path` cannot be provided for PreferenceDataset. You should use `val_data_paths` instead." + ) + assert "val_data_paths" in data_config, ( + "`val_data_paths` must be provided for PreferenceDataset" + ) assert isinstance(data_config["val_data_paths"], dict), ( - f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}. val_data_paths must be a dictionary." 
) - val_data_paths = data_config.get("val_data_paths") + val_data_paths = data_config["val_data_paths"] for val_dataset_name, val_dataset_path in val_data_paths.items(): assert val_dataset_name not in val_dataset diff --git a/examples/run_rm.py b/examples/run_rm.py index 176a25521d..0adf84490d 100644 --- a/examples/run_rm.py +++ b/examples/run_rm.py @@ -56,9 +56,9 @@ def rm_preprocessor( idx: int, ) -> DatumSpec: """Process a datum dictionary for RM training.""" - assert ( - len(datum_dict["completions"]) == 2 - ) # Currently only supporting 2 completions + assert len(datum_dict["completions"]) == 2, ( + "RM training supports only two completions" + ) # Lower rank is preferred if datum_dict["completions"][0]["rank"] < datum_dict["completions"][1]["rank"]: chosen_completion = datum_dict["completions"][0] @@ -162,11 +162,16 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): if data_cls == "PreferenceDataset": val_dataset = {} - assert "val_data_paths" in data_config, "val_data_paths must be provided" + assert "val_data_path" not in data_config, ( + "`val_data_path` cannot be provided for PreferenceDataset. You should use `val_data_paths` instead." + ) + assert "val_data_paths" in data_config, ( + "`val_data_paths` must be provided for PreferenceDataset" + ) assert isinstance(data_config["val_data_paths"], dict), ( - f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}" + f"Invalid type for val_data_paths: {type(data_config['val_data_paths'])}. val_data_paths must be a dictionary." 
) - val_data_paths = data_config.get("val_data_paths") + val_data_paths = data_config["val_data_paths"] for val_dataset_name, val_dataset_path in val_data_paths.items(): assert val_dataset_name not in val_dataset @@ -193,7 +198,9 @@ def setup_data(tokenizer: AutoTokenizer, data_config: DataConfig): rm_preprocessor, max_seq_length=data_config["max_input_seq_length"], ) - } if val_dataset else {} + } + if val_dataset + else {} ) return train_dataset, val_dataset, rm_task_spec diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index c2d3b66d2b..924873ec3d 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -16,7 +16,7 @@ from collections import defaultdict from functools import partial from pathlib import Path -from typing import NotRequired, Optional, TypedDict, cast +from typing import Optional, TypedDict, cast import numpy as np import torch @@ -592,6 +592,7 @@ def dpo_train( print("\nšŸ“Š Training Results:") print(f" • Loss: {float(metrics['loss']):.4f}") + print(f" • Accuracy: {float(metrics['accuracy']):.4f}") if "total_flops" in train_results: total_tflops = ( train_results["total_flops"] diff --git a/nemo_rl/algorithms/rm.py b/nemo_rl/algorithms/rm.py index 2e419ee11b..b1aa9f01be 100644 --- a/nemo_rl/algorithms/rm.py +++ b/nemo_rl/algorithms/rm.py @@ -349,7 +349,8 @@ def validate_one_dataset( ] ) / sum_num_valid_samples - for metric_name in RMValMetrics.__annotations__.keys() if metric_name != "num_valid_samples" + for metric_name in RMValMetrics.__annotations__.keys() + if metric_name != "num_valid_samples" }, ) else: @@ -357,7 +358,12 @@ def validate_one_dataset( "No validation metrics were collected." " This is likely because there were no valid samples in the validation set." 
) - val_metrics = RMValMetrics(**{metric_name: 0.0 for metric_name in RMValMetrics.__annotations__.keys()}) + val_metrics = RMValMetrics( + **{ + metric_name: 0.0 + for metric_name in RMValMetrics.__annotations__.keys() + } + ) # Calculate validation metrics policy.prepare_for_training() @@ -504,7 +510,13 @@ def rm_train( for key in list(rm_save_state): if ( key.startswith("val") - and any([key.endswith(f"_{metric_name}") for metric_name in RMValMetrics.__annotations__.keys() if metric_name != "num_valid_samples"]) + and any( + [ + key.endswith(f"_{metric_name}") + for metric_name in RMValMetrics.__annotations__.keys() + if metric_name != "num_valid_samples" + ] + ) and (val_metrics is None or key not in val_metrics) ): del rm_save_state[key] From 30571c24135ab9ecbd91035140a283d3eee7666c Mon Sep 17 00:00:00 2001 From: Julien Veron Vialard Date: Fri, 29 Aug 2025 12:40:45 -0700 Subject: [PATCH 46/47] adding DPOValMetrics Signed-off-by: Julien Veron Vialard --- nemo_rl/algorithms/dpo.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/nemo_rl/algorithms/dpo.py b/nemo_rl/algorithms/dpo.py index 924873ec3d..579099c530 100644 --- a/nemo_rl/algorithms/dpo.py +++ b/nemo_rl/algorithms/dpo.py @@ -85,6 +85,11 @@ class MasterConfig(TypedDict): checkpointing: CheckpointingConfig +class DPOValMetrics(TypedDict): + loss: float + accuracy: float + + # ======================================================= # Setup & Initialization # ======================================================= @@ -298,8 +303,8 @@ def validate( logger.log_metrics(k_val_metrics, step, prefix=prefix) logger.log_metrics(k_validation_timings, step, prefix=f"timing/{prefix}") - val_metrics[f"{prefix}_loss"] = k_val_metrics["loss"] - val_metrics[f"{prefix}_accuracy"] = k_val_metrics["accuracy"] + for metric_name in DPOValMetrics.__annotations__.keys(): + val_metrics[f"{prefix}_{metric_name}"] = k_val_metrics[metric_name] validation_timings[prefix + 
"_total_validation_time"] = k_validation_timings[ "total_validation_time" ] @@ -389,8 +394,8 @@ def validate_one_dataset( else: # Print summary of validation results print(f"\nšŸ“Š Validation Results for `{dataset_name}` set:") - print(f" • Validation loss: {float(val_metrics['loss']):.4f}") - print(f" • Validation accuracy: {float(val_metrics['accuracy']):.4f}") + for metric_name in DPOValMetrics.__annotations__.keys(): + print(f" • Validation {metric_name}: {val_metrics[metric_name]:.4f}") # Print timing information print(f"\n ā±ļø Validation Timing for `{dataset_name}` set:") @@ -537,7 +542,13 @@ def dpo_train( for key in list(dpo_save_state): if ( key.startswith("val") - and (key.endswith("_loss") or key.endswith("_accuracy")) + and any( + [ + key.endswith(f"_{metric_name}") + for metric_name in DPOValMetrics.__annotations__.keys() + if metric_name != "num_valid_samples" + ] + ) and (val_metrics is None or key not in val_metrics) ): del dpo_save_state[key] @@ -591,8 +602,8 @@ def dpo_train( timing_metrics = timer.get_timing_metrics(reduction_op="sum") print("\nšŸ“Š Training Results:") - print(f" • Loss: {float(metrics['loss']):.4f}") - print(f" • Accuracy: {float(metrics['accuracy']):.4f}") + for metric_name in DPOValMetrics.__annotations__.keys(): + print(f" • {metric_name}: {float(metrics[metric_name]):.4f}") if "total_flops" in train_results: total_tflops = ( train_results["total_flops"] From e58d9eef40371a3ad8055e01d5ffb8552e12c433 Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Sat, 30 Aug 2025 17:18:49 +0000 Subject: [PATCH 47/47] revert jsonc to json since sphinx didn't like Signed-off-by: Terry Kong --- docs/guides/dpo.md | 2 +- docs/guides/rm.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/dpo.md b/docs/guides/dpo.md index bcd34ea077..ce68546993 100644 --- a/docs/guides/dpo.md +++ b/docs/guides/dpo.md @@ -34,7 +34,7 @@ uv run examples/run_dpo.py \ Each DPO dataset class is expected to have the following attributes: 
1. `formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like -```jsonc +```json { "context": [], // list of dicts - The prompt message (including previous turns, if any) "completions": [ // list of dicts — The list of completions diff --git a/docs/guides/rm.md b/docs/guides/rm.md index 456e540cca..f1843cd92c 100644 --- a/docs/guides/rm.md +++ b/docs/guides/rm.md @@ -23,7 +23,7 @@ The default YAML config shares the same base template as the SFT config but incl Each RM dataset class is expected to have the following attributes: 1. `formatted_ds`: The dictionary of formatted datasets, where each dataset should be formatted like -```jsonc +```json { "context": [], // list of dicts - The prompt message (including previous turns, if any) "completions": [ // list of dicts — The list of completions